/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}
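
/*
 * Target completion path: the nvmet core calls this once a request has
 * completed, and the completion queue entry is handed straight back to
 * the host side of the loopback controller.
 */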
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	nvmet_req_execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

	/* fail with DNR on admin cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_DONE;
}
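
/*
 * Host-side ->queue_rq handler: the blk-mq request is set up as an
 * nvmet request on the matching target queue and executed from a
 * workqueue, so the "transport" is just a local function call chain.
 */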
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	schedule_work(&iod->work);
	return BLK_STS_OK;
}
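
/*
 * Async event (AER) commands do not use a struct request; they are
 * issued through the dedicated async_event_iod and matched by command
 * id in nvme_loop_queue_response().
 */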
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	nvme_req(req)->ctrl = &ctrl->ctrl;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
		return;
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
	ctrl->ctrl.queue_count = 1;
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}
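
/*
 * Admin queue bring-up: allocate the admin tag set and request queue,
 * issue the fabrics Connect, then read CAP, enable the controller and
 * run the identify phase.
 */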
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}
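
/*
 * Tear down I/O and admin queues; any outstanding requests are
 * cancelled via blk_mq_tagset_busy_iter() before the queues go away.
 */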
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}
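
/*
 * Controller reset: shut everything down, move to CONNECTING and
 * rebuild the admin and I/O queues; on failure the controller is
 * removed entirely.
 */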
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}
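
/*
 * Create a loopback controller: a host-side nvme_ctrl bound to a
 * matching nvmet port, with an admin queue and optional I/O queues.
 */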
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	nvme_get_ctrl(&ctrl->ctrl);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);

	/*
	 * Ensure any ctrls that are in the process of being
	 * deleted are in fact deleted before we return
	 * and free the port. This is to prevent active
	 * ctrls from using a port after it's freed.
	 */
	flush_workqueue(nvme_delete_wq);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */