block: Fix use-after-free issue while accessing ioscheduler lock

The below crash is observed when the lock member of bfq_data is
accessed after it has been freed (use-after-free).

context#1			context#2
				process_one_work()
kthread()			blk_mq_run_work_fn()
worker_thread()			 ->__blk_mq_run_hw_queue()
process_one_work()		  ->blk_mq_sched_dispatch_requests()
__blk_release_queue()		    ->blk_mq_do_dispatch_sched()
->__elevator_exit()
  ->blk_mq_exit_sched()
    ->exit_sched()
      ->kfree()
				       ->bfq_dispatch_request()
				         ->spin_unlock_irq(&bfqd->lock)

This happens because a kblockd delayed work item may still get
scheduled around blk_release_queue() and then access the already
freed members of bfq_data.
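
The faulty pattern reduces to the minimal sketch below: a delayed work
item takes a lock embedded in an object, while the teardown path frees
that object without cancelling the work first. This is only an
illustration of the pattern, not the actual block-layer code; all
demo_* names are hypothetical.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_data {
	spinlock_t lock;		/* stands in for bfqd->lock */
	struct delayed_work run_work;	/* stands in for hctx->run_work */
};

static struct demo_data *demo;

static void demo_work_fn(struct work_struct *work)
{
	struct demo_data *d = container_of(to_delayed_work(work),
					   struct demo_data, run_work);

	/* context#2: use-after-free once teardown has already run */
	spin_lock_irq(&d->lock);
	/* ... dispatch work under the lock ... */
	spin_unlock_irq(&d->lock);
}

static int __init demo_init(void)
{
	demo = kzalloc(sizeof(*demo), GFP_KERNEL);
	if (!demo)
		return -ENOMEM;
	spin_lock_init(&demo->lock);
	INIT_DELAYED_WORK(&demo->run_work, demo_work_fn);
	schedule_delayed_work(&demo->run_work, HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	/* context#1: BUG - the work may still be pending or running */
	kfree(demo);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");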

240.212359:   <2> Unable to handle kernel paging request at
virtual address ffffffee2e33ad70
...
240.212637:   <2> Workqueue: kblockd blk_mq_run_work_fn
240.212649:   <2> pstate: 00c00085 (nzcv daIf +PAN +UAO)
240.212666:   <2> pc : queued_spin_lock_slowpath+0x10c/0x2e0
240.212677:   <2> lr : queued_spin_lock_slowpath+0x84/0x2e0
...
Call trace:
240.212865:   <2>  queued_spin_lock_slowpath+0x10c/0x2e0
240.212876:   <2>  do_raw_spin_lock+0xf0/0xf4
240.212890:   <2>  _raw_spin_lock_irq+0x74/0x94
240.212906:   <2>  bfq_dispatch_request+0x4c/0xd60
240.212918:   <2>  blk_mq_do_dispatch_sched+0xe0/0x1f0
240.212927:   <2>  blk_mq_sched_dispatch_requests+0x130/0x194
240.212940:   <2>  __blk_mq_run_hw_queue+0x100/0x158
240.212950:   <2>  blk_mq_run_work_fn+0x1c/0x28
240.212963:   <2>  process_one_work+0x280/0x460
240.212973:   <2>  worker_thread+0x27c/0x4dc
240.212986:   <2>  kthread+0x160/0x170

Fix this by cancelling the delayed work, if any, before the elevator
exits.
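
With the fix, the teardown of the hypothetical demo_data object from
the sketch above would look like this. cancel_delayed_work_sync()
removes a pending work item and, if its callback is already running,
waits for it to finish:

static void __exit demo_exit(void)
{
	/*
	 * After cancel_delayed_work_sync() returns, demo_work_fn() can
	 * no longer be pending or running, so freeing the object is
	 * safe.
	 */
	cancel_delayed_work_sync(&demo->run_work);
	kfree(demo);
}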

Change-Id: If015576983e328aa726bac90a7a8de824d6532f0
Signed-off-by: Pradeep P V K <ppvk@codeaurora.org>
Pradeep P V K 2020-09-14 12:57:53 +05:30
parent 653a9c02c2
commit 166bbfd00d
2 changed files with 6 additions and 3 deletions

block/blk-mq-sysfs.c

@@ -36,7 +36,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
 	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
 						  kobj);
 
-	cancel_delayed_work_sync(&hctx->run_work);
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
 		cleanup_srcu_struct(hctx->srcu);

block/blk-sysfs.c

@@ -891,9 +891,13 @@ static void __blk_release_queue(struct work_struct *work)
 	blk_free_queue_stats(q->stats);
 
-	if (queue_is_mq(q))
+	if (queue_is_mq(q)) {
+		struct blk_mq_hw_ctx *hctx;
+		int i;
+
 		cancel_delayed_work_sync(&q->requeue_work);
+		queue_for_each_hw_ctx(q, hctx, i)
+			cancel_delayed_work_sync(&hctx->run_work);
+	}
 
 	blk_exit_queue(q);
 	blk_queue_free_zone_bitmaps(q);
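
With this change, every hctx->run_work is cancelled in
__blk_release_queue() before blk_exit_queue() runs __elevator_exit()
and frees the bfq_data, closing the window shown in the race diagram
above. The previous cancellation point in blk_mq_hw_sysfs_release()
only ran when the hctx kobject was released, which in this release
path happens only after blk_exit_queue() has already torn down the
elevator.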