From 507c186052457c15c55f41c88780e187d6d1f9ae Mon Sep 17 00:00:00 2001
From: Cyber Knight
Date: Sun, 26 Sep 2021 01:46:49 +0800
Subject: [PATCH] drivers/nvme: Import OnePlus Changes

Signed-off-by: Cyber Knight
---
 drivers/nvme/host/fc.c     | 11 ++++-------
 drivers/nvme/host/rdma.c   |  1 +
 drivers/nvme/target/core.c |  9 +--------
 drivers/nvme/target/fc.c   |  4 ++--
 4 files changed, 8 insertions(+), 17 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 7e3f3055a677..058d542647dd 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1492,7 +1492,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
 	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
 		dev_err(ctrl->dev,
 			"FCP Op failed - cmdiu dma mapping failed.\n");
-		ret = -EFAULT;
+		ret = EFAULT;
 		goto out_on_error;
 	}
 
@@ -1502,7 +1502,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
 	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
 		dev_err(ctrl->dev,
 			"FCP Op failed - rspiu dma mapping failed.\n");
-		ret = -EFAULT;
+		ret = EFAULT;
 	}
 
 	atomic_set(&op->state, FCPOP_STATE_IDLE);
@@ -1566,7 +1566,6 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
 	struct nvme_fc_fcp_op *aen_op;
 	int i;
 
-	cancel_work_sync(&ctrl->ctrl.async_event_work);
 	aen_op = ctrl->aen_ops;
 	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
 		if (!aen_op->fcp_req.private)
@@ -3012,14 +3011,12 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
 	spin_lock_irqsave(&nvme_fc_lock, flags);
 	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
 		if (lport->localport.node_name != laddr.nn ||
-		    lport->localport.port_name != laddr.pn ||
-		    lport->localport.port_state != FC_OBJSTATE_ONLINE)
+		    lport->localport.port_name != laddr.pn)
 			continue;
 
 		list_for_each_entry(rport, &lport->endp_list, endp_list) {
 			if (rport->remoteport.node_name != raddr.nn ||
-			    rport->remoteport.port_name != raddr.pn ||
-			    rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+			    rport->remoteport.port_name != raddr.pn)
 				continue;
 
 			/* if fail to get reference fall through. Will error */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 564e457f1345..c91bfd839cab 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1545,6 +1545,7 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
 		complete(&queue->cm_done);
 		return 0;
 	case RDMA_CM_EVENT_REJECTED:
+		nvme_rdma_destroy_queue_ib(queue);
 		cm_error = nvme_rdma_conn_rejected(queue, ev);
 		break;
 	case RDMA_CM_EVENT_ROUTE_ERROR:
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 7d6d30a2d771..09a39f4aaf82 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -208,9 +208,6 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
 
 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
 {
-	if (unlikely(ctrl->kato == 0))
-		return;
-
 	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
 		ctrl->cntlid, ctrl->kato);
 
@@ -220,9 +217,6 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
 
 static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
 {
-	if (unlikely(ctrl->kato == 0))
-		return;
-
 	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
 
 	cancel_delayed_work_sync(&ctrl->ka_work);
@@ -611,8 +605,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
 	 * in case a host died before it enabled the controller. Hence, simply
 	 * reset the keep alive timer when the controller is enabled.
 	 */
-	if (ctrl->kato)
-		mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index df1c6dee255b..b7a5d1065378 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1994,9 +1994,9 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
 			return;
 		if (fcpreq->fcp_error ||
 		    fcpreq->transferred_length != fcpreq->transfer_length) {
-			spin_lock_irqsave(&fod->flock, flags);
+			spin_lock(&fod->flock);
 			fod->abort = true;
-			spin_unlock_irqrestore(&fod->flock, flags);
+			spin_unlock(&fod->flock);
 
 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
 			return;