/*
 * This function is often used by drivers to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied; BLK_STS_RESOURCE
 * is usually returned from .queue_rq() in that case.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);
	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);
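
/*
 * Illustrative sketch (not part of blk-mq.c): a hypothetical driver pairing
 * blk_mq_stop_hw_queue() with BLK_STS_RESOURCE when a made-up resource check
 * fails, and restarting dispatch from its completion path.  All foo_* names
 * are assumptions used only for illustration.
 */
#if 0	/* example only, never compiled */
static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct foo_device *foo = hctx->queue->queuedata;

	if (!foo_device_has_free_slot(foo)) {
		/* Pause dispatch until a completion frees a slot. */
		blk_mq_stop_hw_queue(hctx);
		return BLK_STS_RESOURCE;
	}

	foo_device_submit(foo, bd->rq);
	return BLK_STS_OK;
}

static void foo_complete_rq(struct foo_device *foo, struct request *rq)
{
	blk_mq_end_request(rq, BLK_STS_OK);
	/* A slot is free again; restart any stopped hardware queues. */
	blk_mq_start_stopped_hw_queues(foo->queue, true);
}
#endif
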
/*
 * This function is often used by drivers to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied; BLK_STS_RESOURCE
 * is usually returned from .queue_rq() in that case.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queues() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (!blk_mq_hctx_stopped(hctx))
		return;

	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
	/*
	 * If we are stopped, don't run the queue.
	 */
	if (blk_mq_hctx_stopped(hctx))
		return;

	__blk_mq_run_hw_queue(hctx);
}

static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	enum hctx_type type = hctx->type;

	lockdep_assert_held(&ctx->lock);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_lists[type]);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
}

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}

/**
 * blk_mq_request_bypass_insert - Insert a request at the dispatch list.
 * @rq: Pointer to request to be inserted.
 * @at_head: true if the request should be inserted at the head of the list.
 * @run_queue: If we should run the hardware queue after inserting the request.
 *
 * Should only be used carefully, when the caller knows we want to
 * bypass a potential IO scheduler on the target device.
 */
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	spin_lock(&hctx->lock);
	if (at_head)
		list_add(&rq->queuelist, &hctx->dispatch);
	else
		list_add_tail(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, false);
}

void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list)
{
	struct request *rq;
	enum hctx_type type = hctx->type;

	/*
	 * Preemption doesn't flush the plug list, so it's possible that
	 * ctx->cpu is offline now.
	 */
	list_for_each_entry(rq, list, queuelist) {
		BUG_ON(rq->mq_ctx != ctx);
		trace_block_rq_insert(rq);
	}

	spin_lock(&ctx->lock);
	list_splice_tail_init(list, &ctx->rq_lists[type]);
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);
}

static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
			      bool from_schedule)
{
	if (hctx->queue->mq_ops->commit_rqs) {
		trace_block_unplug(hctx->queue, *queued, !from_schedule);
		hctx->queue->mq_ops->commit_rqs(hctx);
	}
	*queued = 0;
}
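
/*
 * Illustrative sketch (not part of blk-mq.c): how a hypothetical driver can
 * combine bd->last with a .commit_rqs() callback so a whole batch of queued
 * requests is kicked with a single doorbell write.  All foo_* names are
 * assumptions used only for illustration.
 */
#if 0	/* example only, never compiled */
static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct foo_hw_queue *fq = hctx->driver_data;

	foo_stage_request(fq, bd->rq);	/* stage request in the HW ring */
	if (bd->last)			/* end of the batch */
		foo_ring_doorbell(fq);
	return BLK_STS_OK;
}

/* Called by the core (e.g. blk_mq_commit_rqs()) when a batch ends early. */
static void foo_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	foo_ring_doorbell(hctx->driver_data);
}

static const struct blk_mq_ops foo_mq_ops = {
	.queue_rq	= foo_queue_rq,
	.commit_rqs	= foo_commit_rqs,
};
#endif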

static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_hw_ctx *hctx = NULL;
	struct request *rq;
	int queued = 0;
	int errors = 0;

	while ((rq = rq_list_pop(&plug->mq_list))) {
		bool last = rq_list_empty(plug->mq_list);
		blk_status_t ret;

		if (hctx != rq->mq_hctx) {
			if (hctx)
				blk_mq_commit_rqs(hctx, &queued, from_schedule);
			hctx = rq->mq_hctx;
		}

		ret = blk_mq_request_issue_directly(rq, last);
		switch (ret) {
		case BLK_STS_OK:
			queued++;
			break;
		case BLK_STS_RESOURCE:
		case BLK_STS_DEV_RESOURCE:
			blk_mq_request_bypass_insert(rq, false, last);
			blk_mq_commit_rqs(hctx, &queued, from_schedule);
			return;
		default:
			blk_mq_end_request(rq, ret);
			errors++;
			break;
		}
	}

	/*
	 * If we didn't flush the entire list, we could have told the driver
	 * there was more coming, but that turned out to be a lie.
	 */
	if (errors)
		blk_mq_commit_rqs(hctx, &queued, from_schedule);
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_hw_ctx *this_hctx;
	struct blk_mq_ctx *this_ctx;
	unsigned int depth;
	LIST_HEAD(list);

	if (rq_list_empty(plug->mq_list))
		return;
	plug->rq_count = 0;

	if (!plug->multiple_queues && !plug->has_elevator) {
		blk_mq_plug_issue_direct(plug, from_schedule);
		if (rq_list_empty(plug->mq_list))
			return;
	}

	this_hctx = NULL;
	this_ctx = NULL;
	depth = 0;
	do {
		struct request *rq;

		rq = rq_list_pop(&plug->mq_list);

		if (!this_hctx) {
			this_hctx = rq->mq_hctx;
			this_ctx = rq->mq_ctx;
		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
			trace_block_unplug(this_hctx->queue, depth,
						!from_schedule);
			blk_mq_sched_insert_requests(this_hctx, this_ctx,
						&list, from_schedule);
			depth = 0;
			this_hctx = rq->mq_hctx;
			this_ctx = rq->mq_ctx;
		}

		list_add(&rq->queuelist, &list);
		depth++;
	} while (!rq_list_empty(plug->mq_list));

	if (!list_empty(&list)) {
		trace_block_unplug(this_hctx->queue, depth, !from_schedule);
		blk_mq_sched_insert_requests(this_hctx, this_ctx, &list,
						from_schedule);
	}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	int err;

	if (bio->bi_opf & REQ_RAHEAD)
		rq->cmd_flags |= REQ_FAILFAST_MASK;

	rq->__sector = bio->bi_iter.bi_sector;
	rq->write_hint = bio->bi_write_hint;
	blk_rq_bio_prep(rq, bio, nr_segs);

	/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
	err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
	WARN_ON_ONCE(err);
	blk_account_io_start(rq);
}

static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
					    struct request *rq, bool last)
{
	struct request_queue *q = rq->q;
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.last = last,
	};
	blk_status_t ret;

	/*
	 * For OK queue, we are done. For error, caller may kill it.
	 * Any other error (busy), just add it to our list as we
	 * previously would have done.
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	switch (ret) {
	case BLK_STS_OK:
		blk_mq_update_dispatch_busy(hctx, false);
		break;
	case BLK_STS_RESOURCE:
	case BLK_STS_DEV_RESOURCE:
		blk_mq_update_dispatch_busy(hctx, true);
		__blk_mq_requeue_request(rq);
		break;
	default:
		blk_mq_update_dispatch_busy(hctx, false);
		break;
	}

	return ret;
}

static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
						struct request *rq,
						bool bypass_insert, bool last)
{
	struct request_queue *q = rq->q;
	int budget_token;
	bool run_queue = true;

	/*
	 * RCU or SRCU read lock is needed before checking quiesced flag.
	 *
	 * When queue is stopped or quiesced, ignore 'bypass_insert' from
	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to the caller,
	 * so that the driver does not try to dispatch again.
	 */
	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
		run_queue = false;
		bypass_insert = false;
		goto insert;
	}
	if ((rq->rq_flags & RQF_ELV) && !bypass_insert)
		goto insert;

	budget_token = blk_mq_get_dispatch_budget(q);
	if (budget_token < 0)
		goto insert;

	blk_mq_set_rq_budget_token(rq, budget_token);

	if (!blk_mq_get_driver_tag(rq)) {
		blk_mq_put_dispatch_budget(q, budget_token);
		goto insert;
	}

	return __blk_mq_issue_directly(hctx, rq, last);
insert:
	if (bypass_insert)
		return BLK_STS_RESOURCE;

	blk_mq_sched_insert_request(rq, false, run_queue, false);

	return BLK_STS_OK;
}

/**
 * blk_mq_try_issue_directly - Try to send a request directly to device driver.
 * @hctx: Pointer of the associated hardware queue.
 * @rq: Pointer to request to be sent.
 *
 * If the device has enough resources to accept a new request now, send the
 * request directly to the device driver. Else, insert at the hctx->dispatch
 * queue, so we can try to send it again in the future. Requests inserted at
 * this queue have higher priority.
 */
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
		struct request *rq)
{
	blk_status_t ret;
	int srcu_idx;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);

	ret = __blk_mq_try_issue_directly(hctx, rq, false, true);
	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
		blk_mq_request_bypass_insert(rq, false, true);
	else if (ret != BLK_STS_OK)
		blk_mq_end_request(rq, ret);

	hctx_unlock(hctx, srcu_idx);
}

blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
{
	blk_status_t ret;
	int srcu_idx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	hctx_lock(hctx, &srcu_idx);
	ret = __blk_mq_try_issue_directly(hctx, rq, true, last);
	hctx_unlock(hctx, srcu_idx);

	return ret;
}

void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
		struct list_head *list)
{
	int queued = 0;
	int errors = 0;

	while (!list_empty(list)) {
		blk_status_t ret;
		struct request *rq = list_first_entry(list, struct request,
				queuelist);

		list_del_init(&rq->queuelist);
		ret = blk_mq_request_issue_directly(rq, list_empty(list));
		if (ret != BLK_STS_OK) {
			if (ret == BLK_STS_RESOURCE ||
					ret == BLK_STS_DEV_RESOURCE) {
				blk_mq_request_bypass_insert(rq, false,
							list_empty(list));
				break;
			}
			blk_mq_end_request(rq, ret);
			errors++;
		} else
			queued++;
	}

	/*
	 * If we didn't flush the entire list, we could have told
	 * the driver there was more coming, but that turned out to
	 * be a lie.
	 */
	if ((!list_empty(list) || errors) &&
	     hctx->queue->mq_ops->commit_rqs && queued)
		hctx->queue->mq_ops->commit_rqs(hctx);
}

static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{
	if (!plug->multiple_queues) {
		struct request *nxt = rq_list_peek(&plug->mq_list);
		if (nxt && nxt->q != rq->q)
			plug->multiple_queues = true;
	}
	if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
		plug->has_elevator = true;
	rq->rq_next = NULL;
	rq_list_add(&plug->mq_list, rq);
	plug->rq_count++;
}

/*
 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
 * queues. This is important for md arrays to benefit from merging
 * requests.
 */
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{
	if (plug->multiple_queues)
		return BLK_MAX_REQUEST_COUNT * 2;
	return BLK_MAX_REQUEST_COUNT;
}

/**
 * blk_mq_submit_bio - Create and send a request to block device.
 * @bio: Bio pointer.
 *
 * Builds up a request structure from @bio and sends it to the device. The
 * request may not be queued directly to hardware if:
 * * This request can be merged with another one
 * * We want to place request at plug queue for possible future merging
 * * There is an IO scheduler active at this queue
 *
 * It will not queue the request if there is an error with the bio, or at the
 * request creation.
 */
void blk_mq_submit_bio(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	const int is_sync = op_is_sync(bio->bi_opf);
	const int is_flush_fua = op_is_flush(bio->bi_opf);
	struct blk_plug *plug;
	bool same_queue_rq = false;
	unsigned int nr_segs = 1;
	struct request *rq;
	blk_status_t ret;

	if (blk_may_split(q, bio))
		__blk_queue_split(q, &bio, &nr_segs);
	if (!is_flush_fua && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
		goto queue_exit;

	if (blk_mq_sched_bio_merge(q, bio, nr_segs))
		goto queue_exit;

	plug = blk_mq_plug(q, bio);
	if (plug && plug->cached_rq) {
		rq = rq_list_pop(&plug->cached_rq);
		INIT_LIST_HEAD(&rq->queuelist);
	} else {
		struct blk_mq_alloc_data data = {
			.q		= q,
			.nr_tags	= 1,
			.cmd_flags	= bio->bi_opf,
		};

		if (plug) {
			data.nr_tags = plug->nr_ios;
			plug->nr_ios = 1;
			data.cached_rq = &plug->cached_rq;
		}
		rq = __blk_mq_alloc_requests(&data);
		if (unlikely(!rq)) {
			rq_qos_cleanup(q, bio);
			if (bio->bi_opf & REQ_NOWAIT)
				bio_wouldblock_error(bio);
			goto queue_exit;
		}
	}

	rq_qos_track(q, rq, bio);

	blk_mq_bio_to_request(rq, bio, nr_segs);

	ret = blk_crypto_init_request(rq);
	if (ret != BLK_STS_OK) {
		bio->bi_status = ret;
		bio_endio(bio);
		blk_mq_free_request(rq);
		return;
	}

	if (is_flush_fua && blk_insert_flush(rq))
		return;

	if (plug && (q->nr_hw_queues == 1 ||
	    blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
	    q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
		/*
		 * Use plugging if we have a ->commit_rqs() hook as well, as
		 * we know the driver uses bd->last in a smart fashion.
		 *
		 * Use normal plugging if this disk is a slow HDD, as sequential
		 * IO may benefit a lot from plug merging.
		 */
		unsigned int request_count = plug->rq_count;
		struct request *last = NULL;

		if (!request_count) {
			trace_block_plug(q);
		} else if (!blk_queue_nomerges(q)) {
			last = rq_list_peek(&plug->mq_list);
			if (blk_rq_bytes(last) < BLK_PLUG_FLUSH_SIZE)
				last = NULL;
		}
		if (request_count >= blk_plug_max_rq_count(plug) || last) {
			blk_flush_plug_list(plug, false);
			trace_block_plug(q);
		}

		blk_add_rq_to_plug(plug, rq);
	} else if (rq->rq_flags & RQF_ELV) {
		/* Insert the request at the IO scheduler queue */
		blk_mq_sched_insert_request(rq, false, true, true);
	} else if (plug && !blk_queue_nomerges(q)) {
		struct request *next_rq = NULL;

		/*
		 * We do limited plugging. If the bio can be merged, do that.
		 * Otherwise the existing request in the plug list will be
		 * issued. So the plug list will have one request at most.
		 * The plug list might get flushed before this. If that happens,
		 * the plug list is empty, and same_queue_rq is invalid.
		 */
		if (same_queue_rq) {
			next_rq = rq_list_pop(&plug->mq_list);
			plug->rq_count--;
		}
		blk_add_rq_to_plug(plug, rq);
		trace_block_plug(q);

		if (next_rq) {
			trace_block_unplug(q, 1, true);
			blk_mq_try_issue_directly(next_rq->mq_hctx, next_rq);
		}
	} else if ((q->nr_hw_queues > 1 && is_sync) ||
		   !rq->mq_hctx->dispatch_busy) {
		/*
		 * There is no scheduler and we can try to send directly
		 * to the hardware.
		 */
		blk_mq_try_issue_directly(rq->mq_hctx, rq);
	} else {
		/* Default case. */
		blk_mq_sched_insert_request(rq, false, true, true);
	}

	return;
queue_exit:
	blk_queue_exit(q);
}
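
/*
 * Illustrative sketch (not part of blk-mq.c): roughly how an in-kernel user
 * builds and submits a bio that ends up in blk_mq_submit_bio() for a blk-mq
 * based queue.  All foo_* names are made up, and bio_alloc()'s signature is
 * the one used around this kernel version.
 */
#if 0	/* example only, never compiled */
static void foo_read_one_page(struct block_device *bdev, struct page *page,
			      sector_t sector, bio_end_io_t *done)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = done;			/* completion callback */
	bio_add_page(bio, page, PAGE_SIZE, 0);

	submit_bio(bio);			/* reaches blk_mq_submit_bio() */
}
#endif
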
static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

/* called before freeing request pool in @tags */
static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
				    struct blk_mq_tags *tags)
{
	struct page *page;
	unsigned long flags;

	/* There is no need to clear the driver tags' own mapping */
	if (drv_tags == tags)
		return;

	list_for_each_entry(page, &tags->page_list, lru) {
		unsigned long start = (unsigned long)page_address(page);
		unsigned long end = start + order_to_size(page->private);
		int i;

		for (i = 0; i < drv_tags->nr_tags; i++) {
			struct request *rq = drv_tags->rqs[i];
			unsigned long rq_addr = (unsigned long)rq;

			if (rq_addr >= start && rq_addr < end) {
				WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
				cmpxchg(&drv_tags->rqs[i], rq, NULL);
			}
		}
	}

	/*
	 * Wait until all pending iteration is done.
	 *
	 * Request reference is cleared and it is guaranteed to be observed
	 * after the ->lock is released.
	 */
	spin_lock_irqsave(&drv_tags->lock, flags);
	spin_unlock_irqrestore(&drv_tags->lock, flags);
}

void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx)
{
	struct blk_mq_tags *drv_tags;
	struct page *page;

	if (blk_mq_is_shared_tags(set->flags))
		drv_tags = set->shared_tags;
	else
		drv_tags = set->tags[hctx_idx];
	if (tags->static_rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			struct request *rq = tags->static_rqs[i];

			if (!rq)
				continue;
			set->ops->exit_request(set, rq, hctx_idx);
			tags->static_rqs[i] = NULL;
		}
	}

	blk_mq_clear_rq_mapping(drv_tags, tags);

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		/*
		 * Remove kmemleak object previously allocated in
		 * blk_mq_alloc_rqs().
		 */
		kmemleak_free(page_address(page));
		__free_pages(page, page->private);
	}
}

void blk_mq_free_rq_map(struct blk_mq_tags *tags)
{
	kfree(tags->static_rqs);
	tags->static_rqs = NULL;
	blk_mq_free_tags(tags);
}

static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					       unsigned int hctx_idx,
					       unsigned int nr_tags,
					       unsigned int reserved_tags)
{
	struct blk_mq_tags *tags;
	int node;

	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
	if (!tags)
		return NULL;

	tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
				 node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
					node);
	if (!tags->static_rqs) {
		kfree(tags->rqs);
		blk_mq_free_tags(tags);
		return NULL;
	}

	return tags;
}

static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			       unsigned int hctx_idx, int node)
{
	int ret;

	if (set->ops->init_request) {
		ret = set->ops->init_request(set, rq, hctx_idx, node);
		if (ret)
			return ret;
	}

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	return 0;
}

static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
			    struct blk_mq_tags *tags,
			    unsigned int hctx_idx, unsigned int depth)
{
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;
	int node;

	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	INIT_LIST_HEAD(&tags->page_list);

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
	left = rq_size * depth;

	for (i = 0; i < depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (this_order && left < order_to_size(this_order - 1))
			this_order--;

		do {
			page = alloc_pages_node(node,
				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
				this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		/*
		 * Allow kmemleak to scan these pages as they contain pointers
		 * to additional allocations like via ops->init_request().
		 */
		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			struct request *rq = p;

			tags->static_rqs[i] = rq;
			if (blk_mq_init_request(set, rq, hctx_idx, node)) {
				tags->static_rqs[i] = NULL;
				goto fail;
			}

			p += rq_size;
			i++;
		}
	}
	return 0;

fail:
	blk_mq_free_rqs(set, tags, hctx_idx);
	return -ENOMEM;
}

struct rq_iter_data {
	struct blk_mq_hw_ctx *hctx;
	bool has_rq;
};

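/*
 * Tag iterator callback for blk_mq_all_tag_iter(): keep iterating (return
 * true) until a request on the target hctx is found, then record it and
 * stop the iteration (return false).
 */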
static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
{
	struct rq_iter_data *iter_data = data;

	if (rq->mq_hctx != iter_data->hctx)
		return true;
	iter_data->has_rq = true;
	return false;
}

static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->sched_tags ?
			hctx->sched_tags : hctx->tags;
	struct rq_iter_data data = {
		.hctx	= hctx,
	};

	blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
	return data.has_rq;
}

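/*
 * Return true only if @cpu is the last CPU in hctx->cpumask that is still
 * online: the first online CPU in the mask must be @cpu, and no further
 * online CPU may follow it.
 */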
static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
		struct blk_mq_hw_ctx *hctx)
{
	if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu)
		return false;
	if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
		return false;
	return true;
}

static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
			struct blk_mq_hw_ctx, cpuhp_online);

	if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
	    !blk_mq_last_cpu_in_hctx(cpu, hctx))
		return 0;

	/*
	 * Prevent new request from being allocated on the current hctx.
	 *
	 * The smp_mb__after_atomic() pairs with the implied barrier in
	 * test_and_set_bit_lock() in sbitmap_get().  It ensures the inactive
	 * flag is seen once we return from the tag allocator.
	 */
	set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
	smp_mb__after_atomic();

	/*
	 * Try to grab a reference to the queue and wait for any outstanding
	 * requests.  If we could not grab a reference the queue has been
	 * frozen and there are no requests.
	 */
	if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
		while (blk_mq_hctx_has_requests(hctx))
			msleep(5);
		percpu_ref_put(&hctx->queue->q_usage_counter);
	}

	return 0;
}

static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
			struct blk_mq_hw_ctx, cpuhp_online);

	if (cpumask_test_cpu(cpu, hctx->cpumask))
		clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
	return 0;
}

/*
 * 'cpu' is going away. Splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
 */
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);
	enum hctx_type type;
	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
	if (!cpumask_test_cpu(cpu, hctx->cpumask))
		return 0;

	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
	type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		list_splice_init(&ctx->rq_lists[type], &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return 0;

	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	return 0;
}

static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_STACKING))
		cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
						    &hctx->cpuhp_online);
	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
					    &hctx->cpuhp_dead);
}

/*
 * Before freeing the hw queue, clear the flush request reference in
 * tags->rqs[] to avoid a potential use-after-free.
 */
static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
		unsigned int queue_depth, struct request *flush_rq)
{
	int i;
	unsigned long flags;

	/* The hw queue may not be mapped yet */
	if (!tags)
		return;

	WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);

	for (i = 0; i < queue_depth; i++)
		cmpxchg(&tags->rqs[i], flush_rq, NULL);

	/*
	 * Wait until all pending iteration is done.
	 *
	 * Request reference is cleared and it is guaranteed to be observed
	 * after the ->lock is released.
	 */
	spin_lock_irqsave(&tags->lock, flags);
	spin_unlock_irqrestore(&tags->lock, flags);
}

/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct request *flush_rq = hctx->fq->flush_rq;