From 585cb1a5daef44b5530020726291b1855cd3fa6b Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Tue, 11 May 2021 23:22:35 +0800
Subject: [PATCH] BACKPORT: blk-mq: clear stale request in tags->rqs[] before
 freeing one request pool

refcount_inc_not_zero() in bt_tags_iter() may still read a freed request.

Fix the issue with the following approach:

1) hold a per-tags spinlock when reading ->rqs[tag] and calling
refcount_inc_not_zero() in bt_tags_iter()

2) clear stale requests referred to via ->rqs[tag] before freeing the
request pool; the per-tags spinlock is held while clearing the stale
->rqs[tag] entries

After the stale requests are cleared, bt_tags_iter() can no longer
observe a freed request, and the clearing also waits for pending request
references to be dropped.

The idea of clearing ->rqs[] is borrowed from John Garry's previous
patch and a recent patch from David Jeffery.

Tested-by: John Garry
Reviewed-by: David Jeffery
Reviewed-by: Bart Van Assche
Signed-off-by: Ming Lei

Bug: 197804811
Change-Id: If49478d7b05d3f5b0a26966ddf9ae764cf2fb6b0
[Upstream: cherry picked from commit bd63141d585bef14f4caf111f6d0e27fe2300ec6]
[Todd: refactored to avoid breaking KMI]
Signed-off-by: Pradeep P V K
Signed-off-by: Todd Kjos
Git-commit: bb96e7f45dc6ac1d6ec12190f1f286e3014fb068
Git-repo: https://android.googlesource.com/kernel/common/
Signed-off-by: Pradeep P V K
---
 block/blk-mq-tag.c | 17 +++++++++++++----
 block/blk-mq-tag.h | 15 +++++++++++++++
 block/blk-mq.c     | 48 +++++++++++++++++++++++++++++++++++++++++++++-----
 3 files changed, 71 insertions(+), 9 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index a70b88587236..e5d8949b922f 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -215,10 +215,16 @@ struct bt_iter_data {
 static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
 		unsigned int bitnr)
 {
-	struct request *rq = tags->rqs[bitnr];
+	struct request *rq;
+	unsigned long flags;
+	struct ext_blk_mq_tags *etags;
 
+	etags = container_of(tags, struct ext_blk_mq_tags, tags);
+	spin_lock_irqsave(&etags->lock, flags);
+	rq = tags->rqs[bitnr];
 	if (!rq || !refcount_inc_not_zero(&rq->ref))
-		return NULL;
+		rq = NULL;
+	spin_unlock_irqrestore(&etags->lock, flags);
 	return rq;
 }
 
@@ -486,18 +492,21 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
 		int node, int alloc_policy)
 {
 	struct blk_mq_tags *tags;
+	struct ext_blk_mq_tags *etags;
 
 	if (total_tags > BLK_MQ_TAG_MAX) {
 		pr_err("blk-mq: tag depth too large\n");
 		return NULL;
 	}
 
-	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
-	if (!tags)
+	etags = kzalloc_node(sizeof(*etags), GFP_KERNEL, node);
+	if (!etags)
 		return NULL;
 
+	tags = &etags->tags;
 	tags->nr_tags = total_tags;
 	tags->nr_reserved_tags = reserved_tags;
+	spin_lock_init(&etags->lock);
 
 	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
 }
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 61deab0b5a5a..df345118cb53 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -21,6 +21,21 @@ struct blk_mq_tags {
 	struct list_head page_list;
 };
 
+/*
+ * Extended tag address space map. This was needed
+ * to add a spinlock to blk_mq_tags in a KMI-compliant
+ * way (no changes could be made to struct blk_mq_tags).
+ */
+struct ext_blk_mq_tags {
+	struct blk_mq_tags tags;
+
+	/*
+	 * used to clear request references in ->rqs[] before freeing
+	 * the request pool
+	 */
+	spinlock_t lock;
+};
+
 extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
 					unsigned int reserved_tags,
 					int node, int alloc_policy);
 extern void blk_mq_free_tags(struct blk_mq_tags *tags);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a637d70ec8a8..4266c17065cd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1975,6 +1975,47 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 	}
 }
 
+static size_t order_to_size(unsigned int order)
+{
+	return (size_t)PAGE_SIZE << order;
+}
+
+/* called before freeing the request pool in @tags */
+static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
+		struct blk_mq_tags *tags, unsigned int hctx_idx)
+{
+	struct blk_mq_tags *drv_tags = set->tags[hctx_idx];
+	struct ext_blk_mq_tags *drv_etags;
+	struct page *page;
+	unsigned long flags;
+
+	list_for_each_entry(page, &tags->page_list, lru) {
+		unsigned long start = (unsigned long)page_address(page);
+		unsigned long end = start + order_to_size(page->private);
+		int i;
+
+		for (i = 0; i < set->queue_depth; i++) {
+			struct request *rq = drv_tags->rqs[i];
+			unsigned long rq_addr = (unsigned long)rq;
+
+			if (rq_addr >= start && rq_addr < end) {
+				WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
+				cmpxchg(&drv_tags->rqs[i], rq, NULL);
+			}
+		}
+	}
+
+	/*
+	 * Wait until all pending iterations are done.
+	 *
+	 * Request references are cleared and guaranteed to be observed
+	 * after the ->lock is released.
+	 */
+	drv_etags = container_of(drv_tags, struct ext_blk_mq_tags, tags);
+	spin_lock_irqsave(&drv_etags->lock, flags);
+	spin_unlock_irqrestore(&drv_etags->lock, flags);
+}
+
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
@@ -2100,6 +2141,8 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		}
 	}
 
+	blk_mq_clear_rq_mapping(set, tags, hctx_idx);
+
 	while (!list_empty(&tags->page_list)) {
 		page = list_first_entry(&tags->page_list, struct page, lru);
 		list_del_init(&page->lru);
@@ -2159,11 +2202,6 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 	return tags;
 }
 
-static size_t order_to_size(unsigned int order)
-{
-	return (size_t)PAGE_SIZE << order;
-}
-
 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 		unsigned int hctx_idx, int node)
 {
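
For readers unfamiliar with the pattern, below is a minimal userspace
sketch of the scheme this patch implements; it is an illustration, not
kernel code. A reader looks up a slot and takes a reference under a
lock, as blk_mq_find_and_get_req() does above; the freeing side NULLs
every slot pointing into the pool being freed and then takes/releases
the same lock so any reader still inside the critical section drains
first, as blk_mq_clear_rq_mapping() does. All names (mock_tags,
mock_rq, find_and_get, clear_mapping) are hypothetical stand-ins;
pthread_mutex_t stands in for the per-tags spinlock, and C11 atomics
stand in for cmpxchg() and refcount_inc_not_zero().

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_TAGS 4

struct mock_rq {
	atomic_int ref;			/* stands in for rq->ref */
	int tag;
};

struct mock_tags {
	pthread_mutex_t lock;		/* stands in for ext_blk_mq_tags.lock */
	struct mock_rq *_Atomic rqs[NR_TAGS];
};

/* Reader: lock, load rqs[tag], then "inc ref only if it is not zero". */
static struct mock_rq *find_and_get(struct mock_tags *tags, int tag)
{
	struct mock_rq *rq;

	pthread_mutex_lock(&tags->lock);
	rq = atomic_load(&tags->rqs[tag]);
	if (rq) {
		int old = atomic_load(&rq->ref);

		while (old != 0 &&
		       !atomic_compare_exchange_weak(&rq->ref, &old, old + 1))
			;	/* retry; 'old' was refreshed on failure */
		if (old == 0)
			rq = NULL;	/* request already completed */
	}
	pthread_mutex_unlock(&tags->lock);
	return rq;
}

/* Free side: clear stale slots, then drain readers with a lock/unlock pair. */
static void clear_mapping(struct mock_tags *tags,
			  struct mock_rq *pool, int pool_len)
{
	for (int i = 0; i < NR_TAGS; i++) {
		struct mock_rq *rq = atomic_load(&tags->rqs[i]);

		/* only clear slots that point into the pool being freed */
		if ((uintptr_t)rq >= (uintptr_t)pool &&
		    (uintptr_t)rq < (uintptr_t)(pool + pool_len))
			atomic_compare_exchange_strong(&tags->rqs[i], &rq,
						       (struct mock_rq *)NULL);
	}
	/*
	 * Once we own the lock, no reader can still sit between
	 * "load rqs[tag]" and "take a reference" with a stale pointer.
	 */
	pthread_mutex_lock(&tags->lock);
	pthread_mutex_unlock(&tags->lock);
}

int main(void)
{
	struct mock_tags tags = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct mock_rq *pool = calloc(NR_TAGS, sizeof(*pool));

	for (int i = 0; i < NR_TAGS; i++) {
		pool[i].tag = i;
		atomic_store(&pool[i].ref, 1);		/* "in flight" */
		atomic_store(&tags.rqs[i], &pool[i]);
	}

	printf("before clear: %s\n",
	       find_and_get(&tags, 0) ? "got ref" : "NULL");

	/* every request has completed before the pool is torn down */
	for (int i = 0; i < NR_TAGS; i++)
		atomic_store(&pool[i].ref, 0);
	clear_mapping(&tags, pool, NR_TAGS);

	printf("after clear:  %s\n",
	       find_and_get(&tags, 0) ? "got ref" : "NULL");
	free(pool);
	return 0;
}

Build with e.g. "cc -std=c11 -pthread demo.c". The bare lock/unlock
pair in clear_mapping() mirrors the trick the patch uses: cheaper than
holding the lock across the whole clearing loop, yet any iterator that
read a stale pointer is guaranteed to be done with it by the time the
function returns.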