@@ -2643,16 +2643,49 @@ static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 					    &hctx->cpuhp_dead);
 }
 
+/*
+ * Before freeing hw queue, clearing the flush request reference in
+ * tags->rqs[] for avoiding potential UAF.
+ */
+static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
+		unsigned int queue_depth, struct request *flush_rq)
+{
+	int i;
+	unsigned long flags;
+
+	/* The hw queue may not be mapped yet */
+	if (!tags)
+		return;
+
+	WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
+
+	for (i = 0; i < queue_depth; i++)
+		cmpxchg(&tags->rqs[i], flush_rq, NULL);
+
+	/*
+	 * Wait until all pending iteration is done.
+	 *
+	 * Request reference is cleared and it is guaranteed to be observed
+	 * after the ->lock is released.
+	 */
+	spin_lock_irqsave(&tags->lock, flags);
+	spin_unlock_irqrestore(&tags->lock, flags);
+}
+
 /* hctx->ctxs will be freed in queue's release handler */
 static void blk_mq_exit_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
+	struct request *flush_rq = hctx->fq->flush_rq;
+
 	if (blk_mq_hw_queue_mapped(hctx))
 		blk_mq_tag_idle(hctx);
 
+	blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
+			set->queue_depth, flush_rq);
 	if (set->ops->exit_request)
-		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
+		set->ops->exit_request(set, flush_rq, hctx_idx);
 
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
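
Note on the empty lock/unlock pair above: it only works as a barrier because the tag iterators resolve tags->rqs[] entries and take their request reference under the same tags->lock. Below is a simplified sketch of that reader side, modelled loosely on blk_mq_find_and_get_req() in blk-mq-tag.c; the name and exact shape here are an approximation for illustration, not the verbatim upstream helper.

/*
 * Sketch: how a tag iterator turns a tag bit into a request.
 * Both the rqs[] load and the refcount check happen under tags->lock.
 */
static struct request *find_and_get_req_sketch(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	/* a stale flush_rq already has ref == 0, so it is rejected here */
	if (!rq || !refcount_inc_not_zero(&rq->ref))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

With flush_rq->ref already at zero and the stale rqs[] slots cleared via cmpxchg(), a concurrent iterator either no longer sees the pointer or fails the refcount check; the lock/unlock pair in blk_mq_clear_flush_rq_mapping() then simply waits out any iterator that had already taken tags->lock before the slots were cleared, so the flush queue can be freed safely afterwards.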