author     Linus Torvalds <torvalds@linux-foundation.org>  2019-03-16 12:36:39 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-03-16 12:36:39 -0700
commit     11efae3506d882a8782bc89493a32e467defd6b9 (patch)
tree       4671e72e14bb5f03e484ade6526ccfd1a40f75b8 /drivers/nvme/host/tcp.c
parent     465c209db83e2cdaeb4a52f4e107a9fc636704db (diff)
parent     f6d85f04e29859dd3ea65395c05925da352dae89 (diff)
Merge tag 'for-5.1/block-post-20190315' of git://git.kernel.dk/linux-block
Pull more block layer changes from Jens Axboe:
 "This is a collection of both stragglers, and fixes that came in after
  I finalized the initial pull. This contains:

   - An MD pull request from Song, with a few minor fixes
   - Set of NVMe patches via Christoph
   - Pull request from Konrad, with a few fixes for xen/blkback
   - pblk fix IO calculation fix (Javier)
   - Segment calculation fix for pass-through (Ming)
   - Fallthrough annotation for blkcg (Mathieu)"

* tag 'for-5.1/block-post-20190315' of git://git.kernel.dk/linux-block: (25 commits)
  blkcg: annotate implicit fall through
  nvme-tcp: support C2HData with SUCCESS flag
  nvmet: ignore EOPNOTSUPP for discard
  nvme: add proper write zeroes setup for the multipath device
  nvme: add proper discard setup for the multipath device
  nvme: remove nvme_ns_config_oncs
  nvme: disable Write Zeroes for qemu controllers
  nvmet-fc: bring Disconnect into compliance with FC-NVME spec
  nvmet-fc: fix issues with targetport assoc_list list walking
  nvme-fc: reject reconnect if io queue count is reduced to zero
  nvme-fc: fix numa_node when dev is null
  nvme-fc: use nr_phys_segments to determine existence of sgl
  nvme-loop: init nvmet_ctrl fatal_err_work when allocate
  nvme: update comment to make the code easier to read
  nvme: put ns_head ref if namespace fails allocation
  nvme-trace: fix cdw10 buffer overrun
  nvme: don't warn on block content change effects
  nvme: add get-feature to admin cmds tracer
  md: Fix failed allocation of md_register_thread
  It's wrong to add len to sector_nr in raid10 reshape twice
  ...
Diffstat (limited to 'drivers/nvme/host/tcp.c')
-rw-r--r--  drivers/nvme/host/tcp.c | 32 ++++++++++++++++++++++++++++----
1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 208ee518af65..e7e08889865e 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -463,6 +463,15 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
queue->data_remaining = le32_to_cpu(pdu->data_length);
+ if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
+ unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d tag %#x SUCCESS set but not last PDU\n",
+ nvme_tcp_queue_id(queue), rq->tag);
+ nvme_tcp_error_recovery(&queue->ctrl->ctrl);
+ return -EPROTO;
+ }
+
return 0;
}
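
The check added above enforces an NVMe/TCP protocol rule: a C2HData PDU may only carry the SUCCESS flag if it is also the last data PDU for the command, because SUCCESS means no response capsule will follow. Below is a minimal standalone sketch of that flag validation; the flag bit positions mirror my reading of include/linux/nvme-tcp.h and should be treated as illustrative assumptions, not authoritative values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values (assumption, mirroring include/linux/nvme-tcp.h). */
#define NVME_TCP_F_DATA_LAST    (1 << 2)
#define NVME_TCP_F_DATA_SUCCESS (1 << 3)

/*
 * SUCCESS on a C2HData PDU means "the data is complete and the command
 * succeeded; no response capsule follows".  That only makes sense on the
 * final data PDU, so SUCCESS without LAST is a protocol violation; the
 * driver answers it with error recovery and -EPROTO, as in the hunk above.
 */
static bool c2h_success_flags_valid(uint8_t flags)
{
        return !((flags & NVME_TCP_F_DATA_SUCCESS) &&
                 !(flags & NVME_TCP_F_DATA_LAST));
}

int main(void)
{
        printf("LAST|SUCCESS valid: %d\n",
               c2h_success_flags_valid(NVME_TCP_F_DATA_LAST | NVME_TCP_F_DATA_SUCCESS));
        printf("SUCCESS only valid: %d\n",
               c2h_success_flags_valid(NVME_TCP_F_DATA_SUCCESS));
        return 0;
}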
@@ -618,6 +627,14 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
return ret;
}
+static inline void nvme_tcp_end_request(struct request *rq, __le16 status)
+{
+ union nvme_result res = {};
+
+ nvme_end_request(rq, cpu_to_le16(status << 1), res);
+}
+
+
static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
unsigned int *offset, size_t *len)
{
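
The new nvme_tcp_end_request() helper completes a request without ever seeing a response capsule, so it has to synthesize the 16-bit NVMe completion status word itself. In that word, bit 0 is the phase tag and the status field sits in bits 15:1, which is why the helper shifts the status code left by one before converting it to little endian. A small sketch of that encoding follows; the status code values are taken from the NVMe specification, and the helper name is mine, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Generic command status codes from the NVMe specification. */
#define NVME_SC_SUCCESS         0x0
#define NVME_SC_DATA_XFER_ERROR 0x4

/*
 * The CQE status halfword keeps the phase tag in bit 0 and the status
 * field (status code, status code type, ...) in bits 15:1.  Shifting the
 * code left by one places it where the core expects it, mirroring the
 * cpu_to_le16(status << 1) in the hunk above.
 */
static uint16_t cqe_status_word(uint16_t status_code, unsigned int phase)
{
        return (uint16_t)(status_code << 1) | (phase & 1);
}

int main(void)
{
        printf("success,  phase 1: 0x%04x\n", cqe_status_word(NVME_SC_SUCCESS, 1));
        printf("xfer err, phase 0: 0x%04x\n", cqe_status_word(NVME_SC_DATA_XFER_ERROR, 0));
        return 0;
}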
@@ -685,6 +702,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
} else {
+ if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS)
+ nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
nvme_tcp_init_recv_ctx(queue);
}
}
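
The branch above is the first of two places the request can now complete early: once the C2HData payload has been consumed, the driver either moves on to the data digest (when digests are enabled for the queue) or, if the PDU carried SUCCESS, ends the request on the spot because no response capsule will arrive. A compact sketch of that decision, with hypothetical names and an assumed flag value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NVME_TCP_F_DATA_SUCCESS (1 << 3)        /* illustrative value */

enum c2h_next_step {
        C2H_EXPECT_MORE,        /* wait for further PDUs / the response capsule */
        C2H_VERIFY_DDGST,       /* receive and check the data digest first */
        C2H_COMPLETE_NOW,       /* SUCCESS and no digest: end the request here */
};

/* Called once the payload of a C2HData PDU has been fully received. */
static enum c2h_next_step c2h_data_consumed(uint8_t flags, bool ddgst_enabled)
{
        if (ddgst_enabled)
                return C2H_VERIFY_DDGST;        /* completion deferred to the digest path */
        if (flags & NVME_TCP_F_DATA_SUCCESS)
                return C2H_COMPLETE_NOW;
        return C2H_EXPECT_MORE;
}

int main(void)
{
        printf("%d %d %d\n",
               c2h_data_consumed(NVME_TCP_F_DATA_SUCCESS, true),
               c2h_data_consumed(NVME_TCP_F_DATA_SUCCESS, false),
               c2h_data_consumed(0, false));
        return 0;
}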
@@ -695,6 +714,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
struct sk_buff *skb, unsigned int *offset, size_t *len)
{
+ struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
char *ddgst = (char *)&queue->recv_ddgst;
size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
@@ -718,6 +738,13 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
return -EIO;
}
+ if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
+ struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
+ pdu->command_id);
+
+ nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+ }
+
nvme_tcp_init_recv_ctx(queue);
return 0;
}
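
When data digests are in use, the same SUCCESS-driven completion is simply deferred until the digest has been received and verified, which is what the hunk above adds to nvme_tcp_recv_ddgst(). The digest itself is a CRC32C over the data; the driver computes it through the kernel crypto API, but a bit-by-bit reference version is short enough to sketch here purely for illustration.

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bit-by-bit CRC32C (Castagnoli), the digest NVMe/TCP uses for HDGST/DDGST.
 * This is a reference sketch, not the driver's implementation. */
static uint32_t crc32c(const void *buf, size_t len)
{
        const uint8_t *p = buf;
        uint32_t crc = 0xFFFFFFFFu;

        while (len--) {
                crc ^= *p++;
                for (int k = 0; k < 8; k++)
                        crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
        }
        return ~crc;
}

int main(void)
{
        /* Well-known check value: CRC32C("123456789") == 0xE3069283 */
        printf("0x%08" PRIX32 "\n", crc32c("123456789", 9));
        return 0;
}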
@@ -815,10 +842,7 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
- union nvme_result res = {};
-
- nvme_end_request(blk_mq_rq_from_pdu(req),
- cpu_to_le16(NVME_SC_DATA_XFER_ERROR), res);
+ nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_DATA_XFER_ERROR);
}
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)