@@ -15,13 +15,9 @@
 
 /* Notes made in the collector */
 #define HIT_PENDING		0x01	/* A front op was still pending */
-#define SOME_EMPTY		0x02	/* One of more streams are empty */
-#define ALL_EMPTY		0x04	/* All streams are empty */
-#define MAYBE_DISCONTIG		0x08	/* A front op may be discontiguous (rounded to PAGE_SIZE) */
-#define NEED_REASSESS		0x10	/* Need to loop round and reassess */
-#define REASSESS_DISCONTIG	0x20	/* Reassess discontiguity if contiguity advances */
-#define MADE_PROGRESS		0x40	/* Made progress cleaning up a stream or the folio set */
-#define BUFFERED		0x80	/* The pagecache needs cleaning up */
-#define NEED_RETRY		0x100	/* A front op requests retrying */
-#define SAW_FAILURE		0x200	/* One stream or hit a permanent failure */
+#define NEED_REASSESS		0x02	/* Need to loop round and reassess */
+#define MADE_PROGRESS		0x04	/* Made progress cleaning up a stream or the folio set */
+#define BUFFERED		0x08	/* The pagecache needs cleaning up */
+#define NEED_RETRY		0x10	/* A front op requests retrying */
+#define SAW_FAILURE		0x20	/* One stream or hit a permanent failure */
 
 /*
  * Successful completion of write of a folio to the server and/or cache. Note
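The hunk above trims the collector's note bits down to the five that survive the removal of the contiguity machinery. As a minimal userspace sketch (not kernel code) of how such a bitmask accumulates and is tested per pass, the collect_once() helper and its arguments below are illustrative inventions; only the flag names and values come from the diff:

#include <stdio.h>

#define HIT_PENDING	0x01
#define NEED_REASSESS	0x02
#define MADE_PROGRESS	0x04
#define BUFFERED	0x08
#define NEED_RETRY	0x10
#define SAW_FAILURE	0x20

/* OR in one bit per event observed during a collection pass. */
static unsigned int collect_once(int buffered, int front_in_progress,
				 int cleaned_folio)
{
	unsigned int notes = buffered ? BUFFERED : 0;

	if (front_in_progress)
		notes |= HIT_PENDING;	/* stop at an in-progress front op */
	if (cleaned_folio)
		notes |= MADE_PROGRESS;	/* something was unlocked or freed */
	return notes;
}

int main(void)
{
	unsigned int notes = collect_once(1, 0, 1);

	if (notes & MADE_PROGRESS)
		printf("progress made, notes=%#x\n", notes);
	return 0;
}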
@@ -85,10 +81,10 @@ int netfs_folio_written_back(struct folio *folio)
  * Unlock any folios we've finished with.
  */
 static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
-					   unsigned long long collected_to,
 					   unsigned int *notes)
 {
 	struct folio_queue *folioq = wreq->buffer;
+	unsigned long long collected_to = wreq->collected_to;
 	unsigned int slot = wreq->buffer_head_slot;
 
 	if (slot >= folioq_nr_slots(folioq)) {
@@ -117,12 +113,6 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
 
 		trace_netfs_collect_folio(wreq, folio, fend, collected_to);
 
-		if (fpos + fsize > wreq->contiguity) {
-			trace_netfs_collect_contig(wreq, fpos + fsize,
-						   netfs_contig_trace_unlock);
-			wreq->contiguity = fpos + fsize;
-		}
-
 		/* Unlock any folio we've transferred all of. */
 		if (collected_to < fend)
 			break;
@@ -380,7 +370,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 {
 	struct netfs_io_subrequest *front, *remove;
 	struct netfs_io_stream *stream;
-	unsigned long long collected_to;
+	unsigned long long collected_to, issued_to;
 	unsigned int notes;
 	int s;
 
@@ -389,28 +379,21 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 	trace_netfs_rreq(wreq, netfs_rreq_trace_collect);
 
 reassess_streams:
+	issued_to = atomic64_read(&wreq->issued_to);
 	smp_rmb();
 	collected_to = ULLONG_MAX;
-	if (wreq->origin == NETFS_WRITEBACK)
-		notes = ALL_EMPTY | BUFFERED | MAYBE_DISCONTIG;
-	else if (wreq->origin == NETFS_WRITETHROUGH)
-		notes = ALL_EMPTY | BUFFERED;
+	if (wreq->origin == NETFS_WRITEBACK ||
+	    wreq->origin == NETFS_WRITETHROUGH)
+		notes = BUFFERED;
 	else
-		notes = ALL_EMPTY;
+		notes = 0;
 
 	/* Remove completed subrequests from the front of the streams and
 	 * advance the completion point on each stream. We stop when we hit
 	 * something that's in progress. The issuer thread may be adding stuff
 	 * to the tail whilst we're doing this.
-	 *
-	 * We must not, however, merge in discontiguities that span whole
-	 * folios that aren't under writeback. This is made more complicated
-	 * by the folios in the gap being of unpredictable sizes - if they even
-	 * exist - but we don't want to look them up.
	 */
 	for (s = 0; s < NR_IO_STREAMS; s++) {
-		loff_t rstart, rend;
-
 		stream = &wreq->io_streams[s];
 		/* Read active flag before list pointers */
 		if (!smp_load_acquire(&stream->active))
@@ -422,26 +405,10 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 		//_debug("sreq [%x] %llx %zx/%zx",
 		//       front->debug_index, front->start, front->transferred, front->len);
 
-		/* Stall if there may be a discontinuity. */
-		rstart = round_down(front->start, PAGE_SIZE);
-		if (rstart > wreq->contiguity) {
-			if (wreq->contiguity > stream->collected_to) {
-				trace_netfs_collect_gap(wreq, stream,
-							wreq->contiguity, 'D');
-				stream->collected_to = wreq->contiguity;
-			}
-			notes |= REASSESS_DISCONTIG;
-			break;
+		if (stream->collected_to < front->start) {
+			trace_netfs_collect_gap(wreq, stream, issued_to, 'F');
+			stream->collected_to = front->start;
 		}
-		rend = round_up(front->start + front->len, PAGE_SIZE);
-		if (rend > wreq->contiguity) {
-			trace_netfs_collect_contig(wreq, rend,
-						   netfs_contig_trace_collect);
-			wreq->contiguity = rend;
-			if (notes & REASSESS_DISCONTIG)
-				notes |= NEED_REASSESS;
-		}
-		notes &= ~MAYBE_DISCONTIG;
 
 		/* Stall if the front is still undergoing I/O. */
 		if (test_bit(NETFS_SREQ_IN_PROGRESS, &front->flags)) {
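The hunk above replaces the PAGE_SIZE-rounded contiguity stall with a plain per-stream jump: any gap in front of the next unfinished subrequest is skipped by advancing the stream's collection point to front->start. A userspace sketch of that jump follows; struct subreq, struct stream and advance_over_gap() are simplified stand-ins I have invented, not the kernel types:

#include <stdio.h>

struct subreq {
	unsigned long long start;	/* file position of this op */
	unsigned long long len;
};

struct stream {
	unsigned long long collected_to;
	struct subreq *front;		/* oldest incomplete op, or NULL */
};

/* Jump the collection point over any gap before the front op. */
static void advance_over_gap(struct stream *s)
{
	if (s->front && s->collected_to < s->front->start)
		s->collected_to = s->front->start;	/* the 'F' gap jump */
}

int main(void)
{
	struct subreq front = { .start = 8192, .len = 4096 };
	struct stream s = { .collected_to = 4096, .front = &front };

	advance_over_gap(&s);
	printf("collected_to=%llu\n", s.collected_to);	/* prints 8192 */
	return 0;
}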
@@ -483,26 +450,20 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 			front = list_first_entry_or_null(&stream->subrequests,
 							 struct netfs_io_subrequest, rreq_link);
 			stream->front = front;
-			if (!front) {
-				unsigned long long jump_to = atomic64_read(&wreq->issued_to);
-
-				if (stream->collected_to < jump_to) {
-					trace_netfs_collect_gap(wreq, stream, jump_to, 'A');
-					stream->collected_to = jump_to;
-				}
-			}
-
 			spin_unlock_bh(&wreq->lock);
 			netfs_put_subrequest(remove, false,
 					     notes & SAW_FAILURE ?
 					     netfs_sreq_trace_put_cancel :
 					     netfs_sreq_trace_put_done);
 		}
 
-		if (front)
-			notes &= ~ALL_EMPTY;
-		else
-			notes |= SOME_EMPTY;
+		/* If we have an empty stream, we need to jump it forward
+		 * otherwise the collection point will never advance.
+		 */
+		if (!front && issued_to > stream->collected_to) {
+			trace_netfs_collect_gap(wreq, stream, issued_to, 'E');
+			stream->collected_to = issued_to;
+		}
 
 		if (stream->collected_to < collected_to)
 			collected_to = stream->collected_to;
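Two things happen at the end of each pass in the new code: an empty stream is pulled up to the issue point so it cannot hold the request back, then the request-wide collection point is taken as the minimum over all active streams. The sketch below shows that combination under invented, simplified types; collect_point() and the two-stream demo values are assumptions, not kernel API:

#include <stdio.h>
#include <limits.h>

#define NR_IO_STREAMS	2

struct stream {
	int active;
	int empty;			/* no front subrequest queued */
	unsigned long long collected_to;
};

/* Advance empty streams to issued_to, then take the minimum. */
static unsigned long long collect_point(struct stream *streams,
					unsigned long long issued_to)
{
	unsigned long long collected_to = ULLONG_MAX;
	int s;

	for (s = 0; s < NR_IO_STREAMS; s++) {
		struct stream *st = &streams[s];

		if (!st->active)
			continue;
		if (st->empty && issued_to > st->collected_to)
			st->collected_to = issued_to;	/* the 'E' gap jump */
		if (st->collected_to < collected_to)
			collected_to = st->collected_to;
	}
	return collected_to;
}

int main(void)
{
	struct stream streams[NR_IO_STREAMS] = {
		{ .active = 1, .empty = 1, .collected_to = 0 },
		{ .active = 1, .empty = 0, .collected_to = 16384 },
	};

	/* Stream 0 jumps to 65536; the minimum is stream 1's 16384. */
	printf("collect to %llu\n", collect_point(streams, 65536));
	return 0;
}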
@@ -511,36 +472,6 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 	if (collected_to != ULLONG_MAX && collected_to > wreq->collected_to)
 		wreq->collected_to = collected_to;
 
-	/* If we have an empty stream, we need to jump it forward over any gap
-	 * otherwise the collection point will never advance.
-	 *
-	 * Note that the issuer always adds to the stream with the lowest
-	 * so-far submitted start, so if we see two consecutive subreqs in one
-	 * stream with nothing between then in another stream, then the second
-	 * stream has a gap that can be jumped.
-	 */
-	if (notes & SOME_EMPTY) {
-		unsigned long long jump_to = wreq->start + READ_ONCE(wreq->submitted);
-
-		for (s = 0; s < NR_IO_STREAMS; s++) {
-			stream = &wreq->io_streams[s];
-			if (stream->active &&
-			    stream->front &&
-			    stream->front->start < jump_to)
-				jump_to = stream->front->start;
-		}
-
-		for (s = 0; s < NR_IO_STREAMS; s++) {
-			stream = &wreq->io_streams[s];
-			if (stream->active &&
-			    !stream->front &&
-			    stream->collected_to < jump_to) {
-				trace_netfs_collect_gap(wreq, stream, jump_to, 'B');
-				stream->collected_to = jump_to;
-			}
-		}
-	}
-
 	for (s = 0; s < NR_IO_STREAMS; s++) {
 		stream = &wreq->io_streams[s];
 		if (stream->active)
@@ -551,43 +482,14 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 
 	/* Unlock any folios that we have now finished with. */
 	if (notes & BUFFERED) {
-		unsigned long long clean_to = min(wreq->collected_to, wreq->contiguity);
-
-		if (wreq->cleaned_to < clean_to)
-			netfs_writeback_unlock_folios(wreq, clean_to, &notes);
+		if (wreq->cleaned_to < wreq->collected_to)
+			netfs_writeback_unlock_folios(wreq, &notes);
 	} else {
 		wreq->cleaned_to = wreq->collected_to;
 	}
 
 	// TODO: Discard encryption buffers
 
-	/* If all streams are discontiguous with the last folio we cleared, we
-	 * may need to skip a set of folios.
-	 */
-	if ((notes & (MAYBE_DISCONTIG | ALL_EMPTY)) == MAYBE_DISCONTIG) {
-		unsigned long long jump_to = ULLONG_MAX;
-
-		for (s = 0; s < NR_IO_STREAMS; s++) {
-			stream = &wreq->io_streams[s];
-			if (stream->active && stream->front &&
-			    stream->front->start < jump_to)
-				jump_to = stream->front->start;
-		}
-
-		trace_netfs_collect_contig(wreq, jump_to, netfs_contig_trace_jump);
-		wreq->contiguity = jump_to;
-		wreq->cleaned_to = jump_to;
-		wreq->collected_to = jump_to;
-		for (s = 0; s < NR_IO_STREAMS; s++) {
-			stream = &wreq->io_streams[s];
-			if (stream->collected_to < jump_to)
-				stream->collected_to = jump_to;
-		}
-		//cond_resched();
-		notes |= MADE_PROGRESS;
-		goto reassess_streams;
-	}
-
 	if (notes & NEED_RETRY)
 		goto need_retry;
 	if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
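With wreq->contiguity gone, the unlock bound in the hunk above collapses from min(collected_to, contiguity) to just collected_to. A tiny sketch of that simplified predicate, using an invented struct request as a toy stand-in for netfs_io_request:

#include <stdio.h>

struct request {
	unsigned long long collected_to;
	unsigned long long cleaned_to;
};

/* Previously bounded by min(collected_to, contiguity); now just collected_to. */
static int need_unlock(const struct request *wreq)
{
	return wreq->cleaned_to < wreq->collected_to;
}

int main(void)
{
	struct request wreq = { .collected_to = 32768, .cleaned_to = 16384 };

	printf("unlock folios? %s\n", need_unlock(&wreq) ? "yes" : "no");
	return 0;
}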