GNU Linux-libre 4.14.290-gnu1
drivers/nvme/target/loop.c
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
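
/*
 * Usage sketch (not part of the original file): with a target subsystem
 * configured through nvmet configfs and bound to the loop port, a host
 * connects with nvme-cli, e.g.:
 *
 *	nvme connect -t loop -n <subsysnqn>
 *
 * The "loop" fabrics transport registered below then calls
 * nvme_loop_create_ctrl().
 */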
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS          256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS       1
#define NVME_LOOP_AQ_BLKMQ_DEPTH        \
        (NVME_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

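/*
 * The admin tag beyond the blk-mq depth (command_id == NVME_LOOP_AQ_BLKMQ_DEPTH)
 * is reserved for the AEN command; nvme_loop_queue_response() relies on this
 * convention to tell AEN completions apart from regular requests.
 *
 * struct nvme_loop_iod is the per-request state allocated by blk-mq behind
 * each request (the PDU).  first_sgl is the inline head of a chained
 * scatterlist; both tag sets size cmd_size to leave room for SG_CHUNK_SIZE
 * entries after the iod.
 */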
struct nvme_loop_iod {
        struct nvme_request     nvme_req;
        struct nvme_command     cmd;
        struct nvme_completion  rsp;
        struct nvmet_req        req;
        struct nvme_loop_queue  *queue;
        struct work_struct      work;
        struct sg_table         sg_table;
        struct scatterlist      first_sgl[];
};

struct nvme_loop_ctrl {
        struct nvme_loop_queue  *queues;

        struct blk_mq_tag_set   admin_tag_set;

        struct list_head        list;
        struct blk_mq_tag_set   tag_set;
        struct nvme_loop_iod    async_event_iod;
        struct nvme_ctrl        ctrl;

        struct nvmet_ctrl       *target_ctrl;
        struct work_struct      delete_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
        NVME_LOOP_Q_LIVE        = 0,
};

struct nvme_loop_queue {
        struct nvmet_cq         nvme_cq;
        struct nvmet_sq         nvme_sq;
        struct nvme_loop_ctrl   *ctrl;
        unsigned long           flags;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
        return queue - queue->ctrl->queues;
}

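/*
 * Final completion from the block layer: undo the command setup and free the
 * chained SG table allocated in nvme_loop_queue_rq() before completing.
 */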
static void nvme_loop_complete_rq(struct request *req)
{
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

        nvme_cleanup_cmd(req);
        sg_free_table_chained(&iod->sg_table, true);
        nvme_complete_rq(req);
}

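/*
 * Queue 0 is the admin queue and uses the admin tag set; I/O queue i maps to
 * entry i - 1 of the I/O tag set (see nvme_loop_init_request()).
 */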
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
        u32 queue_idx = nvme_loop_queue_idx(queue);

        if (queue_idx == 0)
                return queue->ctrl->admin_tag_set.tags[queue_idx];
        return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static void nvme_loop_queue_response(struct nvmet_req *req)
{
        struct nvme_loop_queue *queue =
                container_of(req->sq, struct nvme_loop_queue, nvme_sq);
        struct nvme_completion *cqe = req->rsp;

        /*
         * AEN requests are special as they don't time out and can
         * survive any kind of queue freeze and often don't respond to
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
        if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
                        cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
                nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                                &cqe->result);
        } else {
                struct request *rq;

                rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
                if (!rq) {
                        dev_err(queue->ctrl->ctrl.device,
                                "tag 0x%x on queue %d not found\n",
                                cqe->command_id, nvme_loop_queue_idx(queue));
                        return;
                }

                nvme_end_request(rq, cqe->status, cqe->result);
        }
}

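/*
 * Runs from the work item scheduled in nvme_loop_queue_rq(): hand the request
 * to the nvmet core, which executes it and eventually completes it through
 * nvme_loop_queue_response().
 */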
static void nvme_loop_execute_work(struct work_struct *work)
{
        struct nvme_loop_iod *iod =
                container_of(work, struct nvme_loop_iod, work);

        iod->req.execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

        /* queue error recovery */
        nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

        /* fail with DNR on admin cmd timeout */
        nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

        return BLK_EH_HANDLED;
}

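/*
 * Until the queue is marked live, only fabrics connect commands may pass;
 * nvmf_check_init_req() rejects or requeues anything else based on the
 * controller state.
 */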
static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
                struct request *rq)
{
        if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
                return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
        return BLK_STS_OK;
}

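/*
 * The loopback fast path: translate the block request into an NVMe command,
 * point the nvmet_req at the host's own scatterlist (the "transfer" is shared
 * memory, nothing is copied), and defer execution to a work item so the
 * target code runs in process context.
 */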
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
{
        struct nvme_ns *ns = hctx->queue->queuedata;
        struct nvme_loop_queue *queue = hctx->driver_data;
        struct request *req = bd->rq;
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
        blk_status_t ret;

        ret = nvme_loop_is_ready(queue, req);
        if (unlikely(ret))
                return ret;

        ret = nvme_setup_cmd(ns, req, &iod->cmd);
        if (ret)
                return ret;

        blk_mq_start_request(req);
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
        iod->req.port = nvmet_loop_port;
        if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
                        &queue->nvme_sq, &nvme_loop_ops))
                return BLK_STS_OK;

        if (blk_rq_bytes(req)) {
                iod->sg_table.sgl = iod->first_sgl;
                if (sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
                                iod->sg_table.sgl))
                        return BLK_STS_RESOURCE;

                iod->req.sg = iod->sg_table.sgl;
                iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
        }

        schedule_work(&iod->work);
        return BLK_STS_OK;
}

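/*
 * AEN commands have no struct request, so the command is built by hand with
 * the reserved command_id and submitted straight to the target queue pair.
 */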
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
        struct nvme_loop_queue *queue = &ctrl->queues[0];
        struct nvme_loop_iod *iod = &ctrl->async_event_iod;

        memset(&iod->cmd, 0, sizeof(iod->cmd));
        iod->cmd.common.opcode = nvme_admin_async_event;
        iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

        if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
                        &nvme_loop_ops)) {
                dev_err(ctrl->ctrl.device, "failed async event work\n");
                return;
        }

        schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
                struct nvme_loop_iod *iod, unsigned int queue_idx)
{
        iod->req.cmd = &iod->cmd;
        iod->req.rsp = &iod->rsp;
        iod->queue = &ctrl->queues[queue_idx];
        INIT_WORK(&iod->work, nvme_loop_execute_work);
        return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
                struct request *req, unsigned int hctx_idx,
                unsigned int numa_node)
{
        struct nvme_loop_ctrl *ctrl = set->driver_data;

        return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
                        (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_loop_ctrl *ctrl = data;
        struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

        BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

        hctx->driver_data = queue;
        return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_loop_ctrl *ctrl = data;
        struct nvme_loop_queue *queue = &ctrl->queues[0];

        BUG_ON(hctx_idx != 0);

        hctx->driver_data = queue;
        return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
        .queue_rq       = nvme_loop_queue_rq,
        .complete       = nvme_loop_complete_rq,
        .init_request   = nvme_loop_init_request,
        .init_hctx      = nvme_loop_init_hctx,
        .timeout        = nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
        .queue_rq       = nvme_loop_queue_rq,
        .complete       = nvme_loop_complete_rq,
        .init_request   = nvme_loop_init_request,
        .init_hctx      = nvme_loop_init_admin_hctx,
        .timeout        = nvme_loop_timeout,
};

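/*
 * Teardown mirrors nvme_loop_configure_admin_queue(); the Q_LIVE test also
 * guards against destroying the admin queue twice.
 */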
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
        if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
                return;
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

        if (list_empty(&ctrl->list))
                goto free_ctrl;

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_del(&ctrl->list);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        if (nctrl->tagset) {
                blk_cleanup_queue(ctrl->ctrl.connect_q);
                blk_mq_free_tag_set(&ctrl->tag_set);
        }
        kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
free_ctrl:
        kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int i;

        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
                clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
        }
        ctrl->ctrl.queue_count = 1;
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        unsigned int nr_io_queues;
        int ret, i;

        nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
        ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
        if (ret || !nr_io_queues)
                return ret;

        dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

        for (i = 1; i <= nr_io_queues; i++) {
                ctrl->queues[i].ctrl = ctrl;
                ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
                if (ret)
                        goto out_destroy_queues;

                ctrl->ctrl.queue_count++;
        }

        return 0;

out_destroy_queues:
        nvme_loop_destroy_io_queues(ctrl);
        return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int i, ret;

        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        return ret;
                set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
        }

        return 0;
}

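/*
 * Admin queue bring-up: initialize the target-side SQ, allocate the tag set
 * and admin request queue, issue the fabrics connect, read CAP to size the
 * queue, then enable and identify the controller.  Errors unwind in reverse
 * order.
 */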
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
        int error;

        memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
        ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
        ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
        ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
        ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
        ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                SG_CHUNK_SIZE * sizeof(struct scatterlist);
        ctrl->admin_tag_set.driver_data = ctrl;
        ctrl->admin_tag_set.nr_hw_queues = 1;
        ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

        ctrl->queues[0].ctrl = ctrl;
        error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
        if (error)
                return error;
        ctrl->ctrl.queue_count = 1;

        error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
        if (error)
                goto out_free_sq;
        ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

        ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
        if (IS_ERR(ctrl->ctrl.admin_q)) {
                error = PTR_ERR(ctrl->ctrl.admin_q);
                goto out_free_tagset;
        }

        error = nvmf_connect_admin_queue(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

        error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
        if (error) {
                dev_err(ctrl->ctrl.device,
                        "prop_get NVME_REG_CAP failed\n");
                goto out_cleanup_queue;
        }

        ctrl->ctrl.sqsize =
                min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
        if (error)
                goto out_cleanup_queue;

        ctrl->ctrl.max_hw_sectors =
                (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

        error = nvme_init_identify(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        return 0;

out_cleanup_queue:
        clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        return error;
}

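/*
 * Quiesce and cancel anything still in flight on both the I/O and admin
 * queues, then tear the queues down.  The controller is only shut down
 * cleanly if it is still live.
 */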
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
                nvme_loop_destroy_io_queues(ctrl);
        }

        if (ctrl->ctrl.state == NVME_CTRL_LIVE)
                nvme_shutdown_ctrl(&ctrl->ctrl);

        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
        nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
        struct nvme_loop_ctrl *ctrl = container_of(work,
                                struct nvme_loop_ctrl, delete_work);

        nvme_stop_ctrl(&ctrl->ctrl);
        nvme_remove_namespaces(&ctrl->ctrl);
        nvme_loop_shutdown_ctrl(ctrl);
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
                return -EBUSY;

        if (!queue_work(nvme_wq, &ctrl->delete_work))
                return -EBUSY;

        return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
        int ret;

        ret = __nvme_loop_del_ctrl(ctrl);
        if (ret)
                return ret;

        flush_work(&ctrl->delete_work);

        return 0;
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl;

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
                if (ctrl->ctrl.cntlid == nctrl->cntlid)
                        __nvme_loop_del_ctrl(ctrl);
        }
        mutex_unlock(&nvme_loop_ctrl_mutex);
}

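/*
 * Controller reset: tear everything down and rebuild the admin and I/O
 * queues.  If any step fails the controller is removed rather than retried.
 */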
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
        struct nvme_loop_ctrl *ctrl =
                container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
        bool changed;
        int ret;

        nvme_stop_ctrl(&ctrl->ctrl);
        nvme_loop_shutdown_ctrl(ctrl);

        ret = nvme_loop_configure_admin_queue(ctrl);
        if (ret)
                goto out_disable;

        ret = nvme_loop_init_io_queues(ctrl);
        if (ret)
                goto out_destroy_admin;

        ret = nvme_loop_connect_io_queues(ctrl);
        if (ret)
                goto out_destroy_io;

        blk_mq_update_nr_hw_queues(&ctrl->tag_set,
                        ctrl->ctrl.queue_count - 1);

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);

        nvme_start_ctrl(&ctrl->ctrl);

        return;

out_destroy_io:
        nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
        nvme_loop_destroy_admin_queue(ctrl);
out_disable:
        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
        .name                   = "loop",
        .module                 = THIS_MODULE,
        .flags                  = NVME_F_FABRICS,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
        .free_ctrl              = nvme_loop_free_ctrl,
        .submit_async_event     = nvme_loop_submit_async_event,
        .delete_ctrl            = nvme_loop_del_ctrl,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int ret;

        ret = nvme_loop_init_io_queues(ctrl);
        if (ret)
                return ret;

        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_loop_mq_ops;
        ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                SG_CHUNK_SIZE * sizeof(struct scatterlist);
        ctrl->tag_set.driver_data = ctrl;
        ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
        ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
        ctrl->ctrl.tagset = &ctrl->tag_set;

        ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
        if (ret)
                goto out_destroy_queues;

        ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
        if (IS_ERR(ctrl->ctrl.connect_q)) {
                ret = PTR_ERR(ctrl->ctrl.connect_q);
                goto out_free_tagset;
        }

        ret = nvme_loop_connect_io_queues(ctrl);
        if (ret)
                goto out_cleanup_connect_q;

        return 0;

out_cleanup_connect_q:
        blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
        blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
        nvme_loop_destroy_io_queues(ctrl);
        return ret;
}

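/*
 * Entry point from the fabrics code: called through nvme_loop_transport when
 * a host connects with transport type "loop".
 */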
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
                struct nvmf_ctrl_options *opts)
{
        struct nvme_loop_ctrl *ctrl;
        bool changed;
        int ret;

        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                return ERR_PTR(-ENOMEM);
        ctrl->ctrl.opts = opts;
        INIT_LIST_HEAD(&ctrl->list);

        INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
        INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
                                0 /* no quirks, we're perfect! */);
        if (ret)
                goto out_put_ctrl;

        ret = -ENOMEM;

        ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;

        ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
                        GFP_KERNEL);
        if (!ctrl->queues)
                goto out_uninit_ctrl;

        ret = nvme_loop_configure_admin_queue(ctrl);
        if (ret)
                goto out_free_queues;

        if (opts->queue_size > ctrl->ctrl.maxcmd) {
                /* warn if maxcmd is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl maxcmd %u, clamping down\n",
                        opts->queue_size, ctrl->ctrl.maxcmd);
                opts->queue_size = ctrl->ctrl.maxcmd;
        }

        if (opts->nr_io_queues) {
                ret = nvme_loop_create_io_queues(ctrl);
                if (ret)
                        goto out_remove_admin_queue;
        }

        nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

        dev_info(ctrl->ctrl.device,
                 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

        kref_get(&ctrl->ctrl.kref);

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        nvme_start_ctrl(&ctrl->ctrl);

        return &ctrl->ctrl;

out_remove_admin_queue:
        nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
        kfree(ctrl->queues);
out_uninit_ctrl:
        nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
        nvme_put_ctrl(&ctrl->ctrl);
        if (ret > 0)
                ret = -EIO;
        return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
        /*
         * XXX: disallow adding more than one port so
         * there are no connection rejections when a
         * subsystem is assigned to a port for which
         * loop doesn't have a pointer.
         * This scenario would be possible if we allowed
         * more than one port to be added and a subsystem
         * was assigned to a port other than nvmet_loop_port.
         */

        if (nvmet_loop_port)
                return -EPERM;

        nvmet_loop_port = port;
        return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
        if (port == nvmet_loop_port)
                nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
        .owner          = THIS_MODULE,
        .type           = NVMF_TRTYPE_LOOP,
        .add_port       = nvme_loop_add_port,
        .remove_port    = nvme_loop_remove_port,
        .queue_response = nvme_loop_queue_response,
        .delete_ctrl    = nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
        .name           = "loop",
        .create_ctrl    = nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
        int ret;

        ret = nvmet_register_transport(&nvme_loop_ops);
        if (ret)
                return ret;

        ret = nvmf_register_transport(&nvme_loop_transport);
        if (ret)
                nvmet_unregister_transport(&nvme_loop_ops);

        return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
        struct nvme_loop_ctrl *ctrl, *next;

        nvmf_unregister_transport(&nvme_loop_transport);
        nvmet_unregister_transport(&nvme_loop_ops);

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
                __nvme_loop_del_ctrl(ctrl);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        flush_workqueue(nvme_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */