Commit 8832cf92 authored by Sagi Grimberg, committed by Christoph Hellwig

nvmet: use a private workqueue instead of the system workqueue

Any attempt to flush a kernel-global workqueue has the possibility of
deadlock, so we should simply stop using them. Instead, introduce
nvmet_wq, the generic nvmet workqueue for work elements that don't
explicitly require a dedicated workqueue (as evidenced by the mere fact
that they have been using system_wq).

The changes were made with the following replacements:

 - s/schedule_work(/queue_work(nvmet_wq, /g
 - s/schedule_delayed_work(/queue_delayed_work(nvmet_wq, /g
 - s/flush_scheduled_work()/flush_workqueue(nvmet_wq)/g

Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent bc360b0b
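
For readers unfamiliar with the pattern, the sketch below condenses what the
diff does: allocate a module-private workqueue at init, queue work on it
instead of on system_wq, and flush/destroy it at teardown, where the flush
waits only on the module's own work items rather than on everything sharing
the global queue. Only nvmet_wq, WQ_MEM_RECLAIM, and the workqueue API calls
come from the commit itself; the example_* names are illustrative.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* stands in for nvmet_wq */

static void example_fn(struct work_struct *work)
{
	/* work element that previously ran on system_wq */
}
static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
	/* WQ_MEM_RECLAIM guarantees forward progress under memory
	 * pressure, appropriate for work items on an I/O path. */
	example_wq = alloc_workqueue("example-wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);	/* was: schedule_work() */
	return 0;
}

static void __exit example_exit(void)
{
	/* Waits only for work queued on example_wq; flush_scheduled_work()
	 * would wait on every system_wq user and can deadlock. */
	flush_workqueue(example_wq);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");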
drivers/nvme/target/admin-cmd.c +1 −1
@@ -988,7 +988,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
 	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
 	mutex_unlock(&ctrl->lock);
 
-	schedule_work(&ctrl->async_event_work);
+	queue_work(nvmet_wq, &ctrl->async_event_work);
 }
 
 void nvmet_execute_keep_alive(struct nvmet_req *req)
drivers/nvme/target/configfs.c +1 −1
@@ -1593,7 +1593,7 @@ static void nvmet_port_release(struct config_item *item)
 	struct nvmet_port *port = to_nvmet_port(item);
 
 	/* Let inflight controllers teardown complete */
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 	list_del(&port->global_entry);
 
 	kfree(port->ana_state);
drivers/nvme/target/core.c +18 −6
@@ -20,6 +20,9 @@ struct workqueue_struct *zbd_wq;
 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
 static DEFINE_IDA(cntlid_ida);
 
+struct workqueue_struct *nvmet_wq;
+EXPORT_SYMBOL_GPL(nvmet_wq);
+
 /*
  * This read/write semaphore is used to synchronize access to configuration
  * information on a target system that will result in discovery log page
@@ -205,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 	list_add_tail(&aen->entry, &ctrl->async_events);
 	mutex_unlock(&ctrl->lock);
 
-	schedule_work(&ctrl->async_event_work);
+	queue_work(nvmet_wq, &ctrl->async_event_work);
 }
 
 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -385,7 +388,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
 	if (reset_tbkas) {
 		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
 			ctrl->cntlid);
-		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+		queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
 		return;
 	}
 
@@ -403,7 +406,7 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
 	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
 		ctrl->cntlid, ctrl->kato);
 
-	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+	queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -1478,7 +1481,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
 	mutex_lock(&ctrl->lock);
 	if (!(ctrl->csts & NVME_CSTS_CFS)) {
 		ctrl->csts |= NVME_CSTS_CFS;
-		schedule_work(&ctrl->fatal_err_work);
+		queue_work(nvmet_wq, &ctrl->fatal_err_work);
 	}
 	mutex_unlock(&ctrl->lock);
 }
@@ -1620,9 +1623,15 @@ static int __init nvmet_init(void)
 		goto out_free_zbd_work_queue;
 	}
 
+	nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+	if (!nvmet_wq) {
+		error = -ENOMEM;
+		goto out_free_buffered_work_queue;
+	}
+
 	error = nvmet_init_discovery();
 	if (error)
-		goto out_free_work_queue;
+		goto out_free_nvmet_work_queue;
 
 	error = nvmet_init_configfs();
 	if (error)
@@ -1631,7 +1640,9 @@ static int __init nvmet_init(void)
 
 out_exit_discovery:
 	nvmet_exit_discovery();
-out_free_work_queue:
+out_free_nvmet_work_queue:
+	destroy_workqueue(nvmet_wq);
+out_free_buffered_work_queue:
 	destroy_workqueue(buffered_io_wq);
 out_free_zbd_work_queue:
 	destroy_workqueue(zbd_wq);
@@ -1643,6 +1654,7 @@ static void __exit nvmet_exit(void)
 	nvmet_exit_configfs();
 	nvmet_exit_discovery();
 	ida_destroy(&cntlid_ida);
+	destroy_workqueue(nvmet_wq);
 	destroy_workqueue(buffered_io_wq);
 	destroy_workqueue(zbd_wq);
 
drivers/nvme/target/fc.c +4 −4
@@ -1491,7 +1491,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
 	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (!nvmet_fc_tgt_a_get(assoc))
 			continue;
-		if (!schedule_work(&assoc->del_work))
+		if (!queue_work(nvmet_wq, &assoc->del_work))
 			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
 	}
@@ -1546,7 +1546,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
 			continue;
 		assoc->hostport->invalid = 1;
 		noassoc = false;
-		if (!schedule_work(&assoc->del_work))
+		if (!queue_work(nvmet_wq, &assoc->del_work))
 			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
 	}
@@ -1592,7 +1592,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 		nvmet_fc_tgtport_put(tgtport);
 
 		if (found_ctrl) {
-			if (!schedule_work(&assoc->del_work))
+			if (!queue_work(nvmet_wq, &assoc->del_work))
 				/* already deleting - release local reference */
 				nvmet_fc_tgt_a_put(assoc);
 			return;
@@ -2060,7 +2060,7 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
 	iod->rqstdatalen = lsreqbuf_len;
 	iod->hosthandle = hosthandle;
 
-	schedule_work(&iod->work);
+	queue_work(nvmet_wq, &iod->work);
 
 	return 0;
 }
drivers/nvme/target/fcloop.c +8 −8
@@ -360,7 +360,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
 		spin_lock(&rport->lock);
 		list_add_tail(&rport->ls_list, &tls_req->ls_list);
 		spin_unlock(&rport->lock);
-		schedule_work(&rport->ls_work);
+		queue_work(nvmet_wq, &rport->ls_work);
 		return ret;
 	}
 
@@ -393,7 +393,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
 		spin_lock(&rport->lock);
 		list_add_tail(&rport->ls_list, &tls_req->ls_list);
 		spin_unlock(&rport->lock);
-		schedule_work(&rport->ls_work);
+		queue_work(nvmet_wq, &rport->ls_work);
 	}
 
 	return 0;
@@ -448,7 +448,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
 		spin_lock(&tport->lock);
 		list_add_tail(&tport->ls_list, &tls_req->ls_list);
 		spin_unlock(&tport->lock);
-		schedule_work(&tport->ls_work);
+		queue_work(nvmet_wq, &tport->ls_work);
 		return ret;
 	}
 
@@ -480,7 +480,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
 		spin_lock(&tport->lock);
 		list_add_tail(&tport->ls_list, &tls_req->ls_list);
 		spin_unlock(&tport->lock);
-		schedule_work(&tport->ls_work);
+		queue_work(nvmet_wq, &tport->ls_work);
 	}
 
 	return 0;
@@ -520,7 +520,7 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
 	tgt_rscn->tport = tgtport->private;
 	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
 
-	schedule_work(&tgt_rscn->work);
+	queue_work(nvmet_wq, &tgt_rscn->work);
 }
 
 static void
@@ -739,7 +739,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
 	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
 	kref_init(&tfcp_req->ref);
 
-	schedule_work(&tfcp_req->fcp_rcv_work);
+	queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
 
 	return 0;
 }
@@ -921,7 +921,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
 {
 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
 
-	schedule_work(&tfcp_req->tio_done_work);
+	queue_work(nvmet_wq, &tfcp_req->tio_done_work);
 }
 
 static void
@@ -976,7 +976,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 
 	if (abortio)
 		/* leave the reference while the work item is scheduled */
-		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+		WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
 	else  {
 		/*
 		 * as the io has already had the done callback made,