nvmet-tcp: fix a segmentation fault during io parsing error
When a command that carries inline data fails parsing, the command
response would free the command and its iov before the inline data had
been cleared from the socket buffer, and the receive flow would then
touch freed memory, causing a segmentation fault. Fix this by delaying
the command response until the receive flow is completed.

Fixes: 872d26a ("nvmet-tcp: add NVMe over TCP target driver")
Signed-off-by: Elad Grupi <[email protected]>
Signed-off-by: Hou Pu <[email protected]>
Reviewed-by: Sagi Grimberg <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
Elad Grupi authored and Christoph Hellwig committed Apr 15, 2021
commit bdaf132 · 1 parent f8ee34a
Showing 1 changed file with 31 additions and 8 deletions.
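
Before reading the diff, it may help to see the rule the patch adds in
isolation. The following is a standalone userspace model, not the kernel
code: the struct fields, helper name, and values are invented stand-ins
for the kernel expressions noted in the comments.

	/*
	 * Simplified model of the deferral rule added by this commit:
	 * a response for the command currently being parsed must wait
	 * if the write command still has inline data pending on the wire.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	enum rcv_state { RECV_PDU, RECV_DATA };

	struct cmd {
		bool is_write;       /* stands in for nvme_is_write(cmd->req.cmd) */
		unsigned sgl_len;    /* stands in for le32_to_cpu(sgl->length) */
		unsigned inline_max; /* stands in for cmd->req.port->inline_data_size */
	};

	/* Mirrors the new check in nvmet_tcp_queue_response(): defer the
	 * response while inline data may still sit in the socket buffer. */
	static bool must_defer_response(enum rcv_state state, const struct cmd *c)
	{
		return state == RECV_PDU &&
		       c->sgl_len && c->sgl_len < c->inline_max &&
		       c->is_write;
	}

	int main(void)
	{
		struct cmd failed_write = { .is_write = true, .sgl_len = 512,
					    .inline_max = 4096 };

		/* Parsing just failed and the state is still RECV_PDU: the old
		 * code would complete (and free) the command here; the fix
		 * defers instead. */
		printf("defer while parsing: %d\n",
		       must_defer_response(RECV_PDU, &failed_write));

		/* Once the receive path has moved past the PDU, the response
		 * may proceed. */
		printf("defer after drain:   %d\n",
		       must_defer_response(RECV_DATA, &failed_write));
		return 0;
	}
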
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -537,11 +537,36 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
 	struct nvmet_tcp_cmd *cmd =
 		container_of(req, struct nvmet_tcp_cmd, req);
 	struct nvmet_tcp_queue *queue = cmd->queue;
+	struct nvme_sgl_desc *sgl;
+	u32 len;
+
+	if (unlikely(cmd == queue->cmd)) {
+		sgl = &cmd->req.cmd->common.dptr.sgl;
+		len = le32_to_cpu(sgl->length);
+
+		/*
+		 * Wait for inline data before processing the response.
+		 * Avoid using helpers, this might happen before
+		 * nvmet_req_init is completed.
+		 */
+		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
+		    len && len < cmd->req.port->inline_data_size &&
+		    nvme_is_write(cmd->req.cmd))
+			return;
+	}
 
 	llist_add(&cmd->lentry, &queue->resp_list);
 	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
 }
 
+static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
+{
+	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
+		nvmet_tcp_queue_response(&cmd->req);
+	else
+		cmd->req.execute(&cmd->req);
+}
+
 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
 {
 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
@@ -973,7 +998,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
 			le32_to_cpu(req->cmd->common.dptr.sgl.length));
 
 		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
-		return -EAGAIN;
+		return 0;
 	}
 
 	ret = nvmet_tcp_map_data(queue->cmd);
@@ -1116,10 +1141,8 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
 	}
 	nvmet_tcp_unmap_pdu_iovec(cmd);
 
-	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
-	    cmd->rbytes_done == cmd->req.transfer_len) {
-		cmd->req.execute(&cmd->req);
-	}
+	if (cmd->rbytes_done == cmd->req.transfer_len)
+		nvmet_tcp_execute_request(cmd);
 
 	nvmet_prepare_receive_pdu(queue);
 	return 0;
@@ -1156,9 +1179,9 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
 		goto out;
 	}
 
-	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
-	    cmd->rbytes_done == cmd->req.transfer_len)
-		cmd->req.execute(&cmd->req);
+	if (cmd->rbytes_done == cmd->req.transfer_len)
+		nvmet_tcp_execute_request(cmd);
+
 	ret = 0;
 out:
 	nvmet_prepare_receive_pdu(queue);
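
Taken together: when nvmet_req_init() fails for a write command with
inline data, nvmet_tcp_done_recv_pdu() now returns 0 instead of -EAGAIN
so the receive path keeps going and drains the inline payload already on
the socket. Any response queued in the meantime is deferred by the new
check in nvmet_tcp_queue_response(). Once rbytes_done reaches
transfer_len, the new nvmet_tcp_execute_request() helper routes the
NVMET_TCP_F_INIT_FAILED command back to nvmet_tcp_queue_response(),
which at that point queues the response normally, so the command and its
iov are freed only after the socket data has been consumed.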
