From 851a1f35420206f7b631d5d12b135e5a7c84b912 Mon Sep 17 00:00:00 2001
From: Max Gurtovoy
Date: Mon, 20 Dec 2021 20:42:49 +0200
Subject: [PATCH 2/2] nvme-rdma: add support for mq_ops->queue_rqs()

Implement the queue_rqs() entry point so that a list of requests plugged
by the block layer is prepared in one pass and submitted with a single
ib_post_send() call per queue: the send (and, where an MR is used, memory
registration) work requests of consecutive requests that target the same
hardware context are chained together before the doorbell is rung.
Requests that fail the readiness or preparation check are split off onto
a remainder list that is handed back to the block layer for individual
requeue.

Signed-off-by: Max Gurtovoy
---
 drivers/nvme/host/rdma.c | 83 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 83 insertions(+)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2d608cb48392..765bb57f0a55 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2121,6 +2121,88 @@ static blk_status_t nvme_rdma_prep_rq(struct nvme_rdma_queue *queue,
 	return ret;
 }
 
+static bool nvme_rdma_prep_rq_batch(struct nvme_rdma_queue *queue,
+		struct request *rq)
+{
+	bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
+
+	if (unlikely(!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)))
+		return false;
+
+	/* point the tag lookup table at this request for completion */
+	rq->mq_hctx->tags->rqs[rq->tag] = rq;
+	return nvme_rdma_prep_rq(queue, rq, rq->q->queuedata) == BLK_STS_OK;
+}
+
+static void nvme_rdma_submit_cmds(struct nvme_rdma_queue *queue,
+		struct request **rqlist)
+{
+	struct request *first_rq = rq_list_peek(rqlist);
+	struct nvme_rdma_request *nreq = blk_mq_rq_to_pdu(first_rq);
+	struct ib_send_wr *first, *last = NULL;
+	int ret;
+
+	if (nreq->mr)
+		first = &nreq->reg_wr.wr;
+	else
+		first = &nreq->send_wr;
+
+	/* chain each request's WRs behind the previous request's send WR */
+	while (!rq_list_empty(*rqlist)) {
+		struct request *rq = rq_list_pop(rqlist);
+		struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+		struct ib_send_wr *tmp;
+
+		tmp = last;
+		last = &req->send_wr;
+		if (tmp) {
+			if (req->mr)
+				tmp->next = &req->reg_wr.wr;
+			else
+				tmp->next = &req->send_wr;
+		}
+	}
+
+	ret = nvme_rdma_post_send(queue, first);
+	WARN_ON_ONCE(ret);
+}
+
+static void nvme_rdma_queue_rqs(struct request **rqlist)
+{
+	struct request *req = rq_list_peek(rqlist), *prev = NULL;
+	struct request *requeue_list = NULL;
+
+	do {
+		struct nvme_rdma_queue *queue = req->mq_hctx->driver_data;
+		/* read ->rq_next now, rq_list_add() below overwrites it */
+		struct request *next = rq_list_next(req);
+
+		if (!nvme_rdma_prep_rq_batch(queue, req)) {
+			/* detach 'req' and add to remainder list */
+			if (prev)
+				prev->rq_next = next;
+			else
+				*rqlist = next;
+			rq_list_add(&requeue_list, req);
+		} else {
+			prev = req;
+		}
+
+		req = next;
+		if (!req || (prev && req->mq_hctx != prev->mq_hctx)) {
+			/* detach rest of list, and submit prepped requests */
+			if (prev) {
+				prev->rq_next = NULL;
+				nvme_rdma_submit_cmds(queue, rqlist);
+				prev = NULL;
+			}
+			*rqlist = req;
+		}
+	} while (req);
+
+	*rqlist = requeue_list;
+}
+
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
@@ -2258,6 +2340,7 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 
 static const struct blk_mq_ops nvme_rdma_mq_ops = {
 	.queue_rq	= nvme_rdma_queue_rq,
+	.queue_rqs	= nvme_rdma_queue_rqs,
 	.complete	= nvme_rdma_complete_rq,
 	.init_request	= nvme_rdma_init_request,
 	.exit_request	= nvme_rdma_exit_request,
-- 
2.18.1
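
Note on nvme_rdma_submit_cmds(): the batching win comes from linking every
request's work requests into one chain and ringing the doorbell once via
nvme_rdma_post_send(). This appears to rely on the prep path (patch 1/2)
having set reg_wr.wr.next to the request's own send_wr and send_wr.next to
NULL. The stand-alone C model below is illustrative only (simplified
stand-in types, not the kernel or the verbs API) and shows how the chain is
stitched together before a single post:

	/* user-space model of the WR chaining in nvme_rdma_submit_cmds() */
	#include <stdio.h>
	#include <stddef.h>

	struct send_wr {
		struct send_wr *next;
		int id;
	};

	struct rdma_req {
		int has_mr;             /* needs a registration WR first? */
		struct send_wr reg_wr;  /* models req->reg_wr.wr */
		struct send_wr send_wr; /* models req->send_wr */
		struct rdma_req *rq_next;
	};

	/* models prep: reg_wr.next = &send_wr, send_wr.next = NULL */
	static void prep(struct rdma_req *r, int id, int has_mr)
	{
		r->has_mr = has_mr;
		r->reg_wr = (struct send_wr){ .next = &r->send_wr, .id = id * 10 };
		r->send_wr = (struct send_wr){ .next = NULL, .id = id };
	}

	/* models ib_post_send(qp, first, NULL): walk the chain once */
	static void post_send(struct send_wr *first)
	{
		for (struct send_wr *wr = first; wr; wr = wr->next)
			printf("posting wr %d\n", wr->id);
	}

	static void submit_cmds(struct rdma_req *list)
	{
		struct send_wr *first = list->has_mr ? &list->reg_wr : &list->send_wr;
		struct send_wr *last = NULL;

		for (struct rdma_req *r = list; r; r = r->rq_next) {
			if (last)	/* splice this request behind the previous one */
				last->next = r->has_mr ? &r->reg_wr : &r->send_wr;
			last = &r->send_wr;
		}
		post_send(first);	/* one doorbell for the whole batch */
	}

	int main(void)
	{
		struct rdma_req r[3];

		prep(&r[0], 1, 1); prep(&r[1], 2, 0); prep(&r[2], 3, 1);
		r[0].rq_next = &r[1]; r[1].rq_next = &r[2]; r[2].rq_next = NULL;
		submit_cmds(&r[0]);
		return 0;
	}

Compiled with any C99 compiler, it prints the WRs in submission order
(10 1 2 30 3 for the sample list), i.e. a registration WR is always walked
immediately before its own request's send WR.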
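
Note on nvme_rdma_queue_rqs(): the splitting loop walks the plugged list
once, cuts it at every hardware-context boundary, submits each cut as one
batch, and moves requests that fail nvme_rdma_prep_rq_batch() onto a
remainder list that the block layer requeues individually. Because
rq_list_add() rewrites ->rq_next, the successor must be read before a
request is moved. The user-space model below (a hypothetical struct rq
with an integer standing in for req->mq_hctx, not kernel code) exercises
the same splitting logic end to end:

	/* user-space model of the list splitting in nvme_rdma_queue_rqs() */
	#include <stdio.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct rq {
		int tag;
		int hctx;	/* models req->mq_hctx */
		bool ready;	/* models nvme_rdma_prep_rq_batch() outcome */
		struct rq *next;
	};

	static void submit_batch(struct rq *head)
	{
		printf("batch on hctx %d:", head->hctx);
		for (struct rq *r = head; r; r = r->next)
			printf(" %d", r->tag);
		printf("\n");
	}

	static void queue_rqs(struct rq **list)
	{
		struct rq *req = *list, *prev = NULL, *requeue = NULL;

		do {
			struct rq *next = req->next;	/* read before relinking */

			if (!req->ready) {
				/* detach 'req' and push it on the requeue list */
				if (prev)
					prev->next = next;
				else
					*list = next;
				req->next = requeue;
				requeue = req;
			} else {
				prev = req;
			}

			req = next;
			if (!req || (prev && req->hctx != prev->hctx)) {
				/* end of a batch: terminate and submit it */
				if (prev) {
					prev->next = NULL;
					submit_batch(*list);
					prev = NULL;
				}
				*list = req;
			}
		} while (req);

		*list = requeue;	/* caller requeues these individually */
	}

	int main(void)
	{
		struct rq r[5] = {
			{ .tag = 0, .hctx = 0, .ready = true  },
			{ .tag = 1, .hctx = 0, .ready = false },
			{ .tag = 2, .hctx = 0, .ready = true  },
			{ .tag = 3, .hctx = 1, .ready = true  },
			{ .tag = 4, .hctx = 1, .ready = true  },
		};
		struct rq *list = &r[0];

		for (int i = 0; i < 4; i++)
			r[i].next = &r[i + 1];
		queue_rqs(&list);

		for (struct rq *q = list; q; q = q->next)
			printf("requeued: %d\n", q->tag);
		return 0;
	}

Running it submits {0, 2} as one batch on hctx 0 and {3, 4} on hctx 1,
while request 1 lands on the requeue list, matching what the kernel loop
is expected to do for that input.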