Lines Matching +full:input +full:-depth (block/blk-mq.h)
1 /* SPDX-License-Identifier: GPL-2.0 */
5 #include <linux/blk-mq.h>
6 #include "blk-stat.h"
16 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
34 BLK_MQ_NO_TAG = -1U,
36 BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1,
64 unsigned int hctx_idx, unsigned int depth);
70 * CPU -> queue mappings
75 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
84 return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]); in blk_mq_map_queue_type()
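A minimal usage sketch (illustrative only, not part of the header; example_dump_map() is a hypothetical helper, and HCTX_TYPE_DEFAULT, for_each_possible_cpu() and pr_info() come from core kernel headers): walk every possible CPU and report which hardware queue its default read/write submissions are routed to, using blk_mq_map_queue_type() exactly as in line 84 above.

static void example_dump_map(struct request_queue *q)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		/* same lookup as line 84: tag_set cpu map -> hctx_table */
		struct blk_mq_hw_ctx *hctx =
			blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, cpu);

		pr_info("cpu %u -> hctx %u\n", cpu, hctx->queue_num);
	}
}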
102 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
111 return ctx->hctxs[blk_mq_get_hctx_type(opf)]; in blk_mq_map_queue()
134 return per_cpu_ptr(q->queue_ctx, cpu); in __blk_mq_get_ctx()
138 * This assumes per-cpu software queueing queues. They could be per-node
139 * as well, for instance. For now this is hardcoded as-is. Note that we don't care about preemption, since we know the ctx's are persistent. This does mean that we can't rely on ctx always matching the currently running CPU.
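Putting the two lookups together: a sketch that resolves the hardware queue a write submitted on the current CPU would land on. It assumes blk_mq_map_queue() takes (opf, ctx), as the body at line 111 suggests; example_pick_hctx() is a hypothetical helper, and raw_smp_processor_id() and REQ_OP_WRITE are core kernel symbols not shown in the matches.

static struct blk_mq_hw_ctx *example_pick_hctx(struct request_queue *q)
{
	/* software queue of the CPU we are currently running on (line 134) */
	struct blk_mq_ctx *ctx = __blk_mq_get_ctx(q, raw_smp_processor_id());

	/* hardware queue serving plain writes from that ctx (line 111) */
	return blk_mq_map_queue(REQ_OP_WRITE, ctx);
}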
149 /* input parameter */
160 /* input & output parameter */
176 struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
191 return &bt->ws[0]; in bt_wait_ptr()
192 return sbq_wait_ptr(bt, &hctx->wait_index); in bt_wait_ptr()
200 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_tag_busy()
206 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_tag_idle()
213 return tag < tags->nr_reserved_tags; in blk_mq_tag_is_reserved()
223 if (data->rq_flags & RQF_SCHED_TAGS) in blk_mq_tags_from_data()
224 return data->hctx->sched_tags; in blk_mq_tags_from_data()
225 return data->hctx->tags; in blk_mq_tags_from_data()
231 if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state))) in blk_mq_hctx_stopped()
243 return test_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_hctx_stopped()
248 return hctx->nr_ctx && hctx->tags; in blk_mq_hw_queue_mapped()
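The two predicates above are usually consumed together when walking the hardware queues. A hedged sketch (example_kick_queues() is hypothetical; queue_for_each_hw_ctx() and blk_mq_run_hw_queue() come from the public <linux/blk-mq.h>, not from this header):

static void example_kick_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i) {
		/* skip queues that have no ctx/tags or have been stopped */
		if (!blk_mq_hw_queue_mapped(hctx) || blk_mq_hctx_stopped(hctx))
			continue;
		blk_mq_run_hw_queue(hctx, true);
	}
}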
259 if (q->mq_ops->put_budget) in blk_mq_put_dispatch_budget()
260 q->mq_ops->put_budget(q, budget_token); in blk_mq_put_dispatch_budget()
265 if (q->mq_ops->get_budget) in blk_mq_get_dispatch_budget()
266 return q->mq_ops->get_budget(q); in blk_mq_get_dispatch_budget()
275 if (rq->q->mq_ops->set_rq_budget_token) in blk_mq_set_rq_budget_token()
276 rq->q->mq_ops->set_rq_budget_token(rq, token); in blk_mq_set_rq_budget_token()
281 if (rq->q->mq_ops->get_rq_budget_token) in blk_mq_get_rq_budget_token()
282 return rq->q->mq_ops->get_rq_budget_token(rq); in blk_mq_get_rq_budget_token()
283 return -1; in blk_mq_get_rq_budget_token()
289 if (blk_mq_is_shared_tags(hctx->flags)) in __blk_mq_add_active_requests()
290 atomic_add(val, &hctx->queue->nr_active_requests_shared_tags); in __blk_mq_add_active_requests()
292 atomic_add(val, &hctx->nr_active); in __blk_mq_add_active_requests()
303 if (blk_mq_is_shared_tags(hctx->flags)) in __blk_mq_sub_active_requests()
304 atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags); in __blk_mq_sub_active_requests()
306 atomic_sub(val, &hctx->nr_active); in __blk_mq_sub_active_requests()
317 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_add_active_requests()
323 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_inc_active_requests()
330 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_sub_active_requests()
336 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_dec_active_requests()
342 if (blk_mq_is_shared_tags(hctx->flags)) in __blk_mq_active_requests()
343 return atomic_read(&hctx->queue->nr_active_requests_shared_tags); in __blk_mq_active_requests()
344 return atomic_read(&hctx->nr_active); in __blk_mq_active_requests()
350 blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag); in __blk_mq_put_driver_tag()
351 rq->tag = BLK_MQ_NO_TAG; in __blk_mq_put_driver_tag()
356 if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG) in blk_mq_put_driver_tag()
359 __blk_mq_put_driver_tag(rq->mq_hctx, rq); in blk_mq_put_driver_tag()
366 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq)) in blk_mq_get_driver_tag()
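The budget helpers above and the driver-tag helpers here pair up in the dispatch path: take a budget token, attach it to the request, then grab a driver tag, and give the budget back if the tag cannot be obtained. A simplified sketch of that ordering (example_prep_dispatch() is hypothetical, loosely modelled on the mainline dispatch code):

static bool example_prep_dispatch(struct request *rq)
{
	struct request_queue *q = rq->q;
	int budget_token;

	budget_token = blk_mq_get_dispatch_budget(q);
	if (budget_token < 0)
		return false;			/* driver refused: no budget */

	blk_mq_set_rq_budget_token(rq, budget_token);

	if (!blk_mq_get_driver_tag(rq)) {
		/* undo on failure so the budget is not leaked */
		blk_mq_put_dispatch_budget(q, budget_token);
		return false;
	}
	return true;
}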
377 qmap->mq_map[cpu] = 0; in blk_mq_clear_mq_map()
384 struct request *rq = list_entry_rq(list->next); in blk_mq_free_requests()
386 list_del_init(&rq->queuelist); in blk_mq_free_requests()
393 * For shared tag users, we track the number of currently active users and attempt to provide a fair share of the tag depth for each of them.
398 unsigned int depth, users; in hctx_may_queue()
400 if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) in hctx_may_queue()
406 if (bt->sb.depth == 1) in hctx_may_queue()
409 if (blk_mq_is_shared_tags(hctx->flags)) { in hctx_may_queue()
410 struct request_queue *q = hctx->queue; in hctx_may_queue()
412 if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) in hctx_may_queue()
415 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in hctx_may_queue()
419 users = READ_ONCE(hctx->tags->active_queues); in hctx_may_queue()
426 depth = max((bt->sb.depth + users - 1) / users, 4U); in hctx_may_queue()
427 return __blk_mq_active_requests(hctx) < depth; in hctx_may_queue()
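A worked example of the fair-share limit computed at line 426 (the numbers are illustrative): with a shared tag depth of 64 and 5 active queues, each queue may keep up to max(DIV_ROUND_UP(64, 5), 4) = 13 requests in flight; with 32 active queues the ceiling division gives 2, so the floor of 4 applies instead. The same computation as a standalone helper (example_fair_share() is hypothetical; DIV_ROUND_UP() and max() are the standard kernel macros):

static unsigned int example_fair_share(unsigned int sb_depth, unsigned int users)
{
	/* same rounding as line 426: ceil(depth / users), never below 4 */
	return max(DIV_ROUND_UP(sb_depth, users), 4U);
}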
433 if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) { \
434 struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
438 srcu_idx = srcu_read_lock(__tag_set->srcu); \
440 srcu_read_unlock(__tag_set->srcu, srcu_idx); \
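The backslash-continued lines above are fragments of the header's dispatch_ops wrapper: when the driver set BLK_MQ_F_BLOCKING (its ->queue_rq() may sleep), the dispatch body runs under the tag set's SRCU instead of plain RCU. A usage sketch, under the assumption that the surrounding macro is blk_mq_run_dispatch_ops() as in mainline, with blk_mq_sched_dispatch_requests() taken from blk-mq-sched.h (example_run_dispatch() is hypothetical):

static void example_run_dispatch(struct blk_mq_hw_ctx *hctx)
{
	/*
	 * Expands to srcu_read_lock()/srcu_read_unlock() around the body for
	 * BLK_MQ_F_BLOCKING tag sets, rcu_read_lock()/rcu_read_unlock()
	 * otherwise.
	 */
	blk_mq_run_dispatch_ops(hctx->queue,
			blk_mq_sched_dispatch_requests(hctx));
}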
453 return (q->limits.features & BLK_FEAT_POLL) && in blk_mq_can_poll()
454 q->tag_set->map[HCTX_TYPE_POLL].nr_queues; in blk_mq_can_poll()
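blk_mq_can_poll() is the gate for completion polling: the queue must advertise BLK_FEAT_POLL and actually own hardware queues of type HCTX_TYPE_POLL. A small sketch of how a caller might use it when building the operation flags for a bio it intends to poll (example_write_opf() is hypothetical; REQ_POLLED and blk_opf_t come from the core block headers):

static blk_opf_t example_write_opf(struct request_queue *q)
{
	/* only ask for polled completion when the queue can honour it */
	return REQ_OP_WRITE | (blk_mq_can_poll(q) ? REQ_POLLED : 0);
}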