From ce9812e1bff6788f38e92e9bb0a81708e1995505 Mon Sep 17 00:00:00 2001
From: Maurizio Lombardi
Date: Wed, 7 Jul 2021 15:12:42 +0200
Subject: [PATCH] nvmet-tcp: fix a race condition when resetting the controller

When the controller is reset, nvmet_tcp_release_queue_work() can race
against io_work and against commands that are still being completed:
io_work may be requeued while the queue is being torn down, and the
iovec and scatterlist of a command can be freed twice, once in the
send path and once via nvmet_tcp_uninit_data_in_cmds().

Close the race by:

- queueing io_work from nvmet_tcp_queue_response() only while
  sk_user_data is still set, checked under sk_callback_lock, so that
  no new work can be scheduled once the socket callbacks have been
  restored;

- using cancel_work_sync() instead of flush_work() in the release
  path, and draining the responses queued afterwards with the new
  nvmet_destroy_resp_list() helper;

- centralizing the freeing of the iovec and scatterlist in
  nvmet_tcp_free_iov(), which clears the pointers (and clearing
  nr_mapped in nvmet_tcp_unmap_pdu_iovec()), so that both functions
  are safe to call more than once.

This is an experimental fix.
---
 drivers/nvme/target/tcp.c | 56 ++++++++++++++++++++++++++++++---------
 1 file changed, 44 insertions(+), 12 deletions(-)

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 39db59a90000..27c2c079ed7a 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -154,6 +154,8 @@ static struct workqueue_struct *nvmet_tcp_wq;
 static const struct nvmet_fabrics_ops nvmet_tcp_ops;
 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
+static void nvmet_tcp_free_iov(struct nvmet_tcp_cmd *cmd);
+static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd);
 
 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
 		struct nvmet_tcp_cmd *cmd)
@@ -211,6 +213,9 @@ nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
 
 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
 {
+	nvmet_tcp_unmap_pdu_iovec(cmd);
+	nvmet_tcp_free_iov(cmd);
+
 	if (unlikely(cmd == &cmd->queue->connect))
 		return;
 
@@ -285,6 +290,14 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
 	return 0;
 }
 
+static void nvmet_tcp_free_iov(struct nvmet_tcp_cmd *cmd)
+{
+	kfree(cmd->iov);
+	sgl_free(cmd->req.sg);
+	cmd->iov = NULL;
+	cmd->req.sg = NULL;
+}
+
 static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 {
 	struct scatterlist *sg;
@@ -294,6 +307,8 @@ static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 
 	for (i = 0; i < cmd->nr_mapped; i++)
 		kunmap(sg_page(&sg[i]));
+
+	cmd->nr_mapped = 0;
 }
 
 static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
@@ -375,7 +390,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
 
 	return 0;
 err:
-	sgl_free(cmd->req.sg);
+	nvmet_tcp_free_iov(cmd);
 	return NVME_SC_INTERNAL;
 }
 
@@ -494,6 +509,24 @@ static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
 	}
 }
 
+static void nvmet_destroy_resp_list(struct nvmet_tcp_queue *queue)
+{
+	int i;
+	struct nvmet_tcp_cmd *cmd;
+
+	nvmet_tcp_process_resp_list(queue);
+
+	for (i = 0; i < queue->send_list_len; ++i) {
+		cmd = list_first_entry_or_null(&queue->resp_send_list,
+				struct nvmet_tcp_cmd, entry);
+
+		list_del_init(&cmd->entry);
+		nvmet_tcp_put_cmd(cmd);
+	}
+
+	queue->send_list_len = 0;
+}
+
 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
 {
 	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
@@ -525,9 +558,13 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
 	struct nvmet_tcp_cmd *cmd =
 		container_of(req, struct nvmet_tcp_cmd, req);
 	struct nvmet_tcp_queue *queue = cmd->queue;
+	struct socket *sock = queue->sock;
 
+	read_lock_bh(&sock->sk->sk_callback_lock);
 	llist_add(&cmd->lentry, &queue->resp_list);
-	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
+	if (likely(sock->sk->sk_user_data))
+		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
+	read_unlock_bh(&sock->sk->sk_callback_lock);
 }
 
 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
@@ -595,10 +632,8 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 		}
 	}
 
-	if (queue->nvme_sq.sqhd_disabled) {
-		kfree(cmd->iov);
-		sgl_free(cmd->req.sg);
-	}
+	if (queue->nvme_sq.sqhd_disabled)
+		nvmet_tcp_free_iov(cmd);
 
 	return 1;
 
@@ -627,8 +662,6 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
 	if (left)
 		return -EAGAIN;
 
-	kfree(cmd->iov);
-	sgl_free(cmd->req.sg);
 	cmd->queue->snd_cmd = NULL;
 	nvmet_tcp_put_cmd(cmd);
 	return 1;
@@ -1353,8 +1386,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
 {
 	nvmet_req_uninit(&cmd->req);
 	nvmet_tcp_unmap_pdu_iovec(cmd);
-	kfree(cmd->iov);
-	sgl_free(cmd->req.sg);
+	nvmet_tcp_free_iov(cmd);
 }
 
 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
@@ -1383,11 +1415,11 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
 	mutex_unlock(&nvmet_tcp_queue_mutex);
 
 	nvmet_tcp_restore_socket_callbacks(queue);
-	flush_work(&queue->io_work);
+	cancel_work_sync(&queue->io_work);
 
 	nvmet_tcp_uninit_data_in_cmds(queue);
 	nvmet_sq_destroy(&queue->nvme_sq);
-	cancel_work_sync(&queue->io_work);
+	nvmet_destroy_resp_list(queue);
 	sock_release(queue->sock);
 	nvmet_tcp_free_cmds(queue);
 	if (queue->hdr_digest || queue->data_digest)
-- 
2.27.0
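
For illustration, the gating pattern this patch adds around
nvmet_tcp_queue_response() and the release path can be modelled in
userspace. This is only a sketch under stated assumptions, not kernel
code: pthread_rwlock_t stands in for sk_callback_lock, the live flag
for sk_user_data, the queued_work counter for queue_work_on(), and
queue_response() / release_queue() are hypothetical names.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t callback_lock = PTHREAD_RWLOCK_INITIALIZER;
static int live = 1;        /* models sk_user_data != NULL */
static int queued_work;     /* models pending io_work requeues */

/* Models nvmet_tcp_queue_response(): only schedule work while the
 * queue is live, checked under the read side of the lock. */
static bool queue_response(void)
{
	bool queued = false;

	pthread_rwlock_rdlock(&callback_lock);
	if (live) {
		queued_work++;          /* stand-in for queue_work_on() */
		queued = true;
	}
	pthread_rwlock_unlock(&callback_lock);
	return queued;
}

/* Models the release path: clear the flag under the write lock, then
 * drain; no producer can add work after this returns. */
static void release_queue(void)
{
	pthread_rwlock_wrlock(&callback_lock);
	live = 0;                       /* restore_socket_callbacks() */
	pthread_rwlock_unlock(&callback_lock);

	queued_work = 0;                /* cancel_work_sync() + drain */
}

int main(void)
{
	printf("queued before release: %d\n", queue_response()); /* 1 */
	release_queue();
	printf("queued after release: %d\n", queue_response());  /* 0 */
	return 0;
}

Because release_queue() clears the flag under the write lock before
draining, a concurrent producer either queues its work before the
drain (and the drain picks it up) or observes the cleared flag and
queues nothing. That is the reason cancel_work_sync() followed by
nvmet_destroy_resp_list() cannot miss a response queued late in the
teardown, and why nvmet_tcp_free_iov() NULLs the pointers it frees:
a command reached from both the send path and nvmet_tcp_put_cmd()
is then freed exactly once.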