GNU Linux-libre 4.14.290-gnu1
[releases.git] / drivers / nvme / target / fcloop.c
1 /*
2  * Copyright (c) 2016 Avago Technologies.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful.
9  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10  * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12  * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13  * See the GNU General Public License for more details, a copy of which
14  * can be found in the file COPYING included with this package
15  */
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 #include <linux/module.h>
18 #include <linux/parser.h>
19 #include <uapi/scsi/fc/fc_fs.h>
20
21 #include "../host/nvme.h"
22 #include "../target/nvmet.h"
23 #include <linux/nvme-fc-driver.h>
24 #include <linux/nvme-fc.h>
25
26
/*
 * Bit flags recording which options appeared in a configuration
 * string handed to the create/delete attribute handlers.
 */
enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,	/* worldwide node name */
	NVMF_OPT_WWPN		= 1 << 1,	/* worldwide port name */
	NVMF_OPT_ROLES		= 1 << 2,	/* FC port roles */
	NVMF_OPT_FCADDR		= 1 << 3,	/* FC address / port id */
	NVMF_OPT_LPWWNN		= 1 << 4,	/* local port's node name */
	NVMF_OPT_LPWWPN		= 1 << 5,	/* local port's port name */
};
36
/* parsed form of a create/delete option string; mask holds NVMF_OPT_* bits */
struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;		/* local port to bind a remote port to */
	u64			lpwwpn;
};
46
/* token table for match_token(); token values double as mask bits */
static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};
56
57 static int
58 fcloop_parse_options(struct fcloop_ctrl_options *opts,
59                 const char *buf)
60 {
61         substring_t args[MAX_OPT_ARGS];
62         char *options, *o, *p;
63         int token, ret = 0;
64         u64 token64;
65
66         options = o = kstrdup(buf, GFP_KERNEL);
67         if (!options)
68                 return -ENOMEM;
69
70         while ((p = strsep(&o, ",\n")) != NULL) {
71                 if (!*p)
72                         continue;
73
74                 token = match_token(p, opt_tokens, args);
75                 opts->mask |= token;
76                 switch (token) {
77                 case NVMF_OPT_WWNN:
78                         if (match_u64(args, &token64)) {
79                                 ret = -EINVAL;
80                                 goto out_free_options;
81                         }
82                         opts->wwnn = token64;
83                         break;
84                 case NVMF_OPT_WWPN:
85                         if (match_u64(args, &token64)) {
86                                 ret = -EINVAL;
87                                 goto out_free_options;
88                         }
89                         opts->wwpn = token64;
90                         break;
91                 case NVMF_OPT_ROLES:
92                         if (match_int(args, &token)) {
93                                 ret = -EINVAL;
94                                 goto out_free_options;
95                         }
96                         opts->roles = token;
97                         break;
98                 case NVMF_OPT_FCADDR:
99                         if (match_hex(args, &token)) {
100                                 ret = -EINVAL;
101                                 goto out_free_options;
102                         }
103                         opts->fcaddr = token;
104                         break;
105                 case NVMF_OPT_LPWWNN:
106                         if (match_u64(args, &token64)) {
107                                 ret = -EINVAL;
108                                 goto out_free_options;
109                         }
110                         opts->lpwwnn = token64;
111                         break;
112                 case NVMF_OPT_LPWWPN:
113                         if (match_u64(args, &token64)) {
114                                 ret = -EINVAL;
115                                 goto out_free_options;
116                         }
117                         opts->lpwwpn = token64;
118                         break;
119                 default:
120                         pr_warn("unknown parameter or missing value '%s'\n", p);
121                         ret = -EINVAL;
122                         goto out_free_options;
123                 }
124         }
125
126 out_free_options:
127         kfree(options);
128         return ret;
129 }
130
131
132 static int
133 fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
134                 const char *buf)
135 {
136         substring_t args[MAX_OPT_ARGS];
137         char *options, *o, *p;
138         int token, ret = 0;
139         u64 token64;
140
141         *nname = -1;
142         *pname = -1;
143
144         options = o = kstrdup(buf, GFP_KERNEL);
145         if (!options)
146                 return -ENOMEM;
147
148         while ((p = strsep(&o, ",\n")) != NULL) {
149                 if (!*p)
150                         continue;
151
152                 token = match_token(p, opt_tokens, args);
153                 switch (token) {
154                 case NVMF_OPT_WWNN:
155                         if (match_u64(args, &token64)) {
156                                 ret = -EINVAL;
157                                 goto out_free_options;
158                         }
159                         *nname = token64;
160                         break;
161                 case NVMF_OPT_WWPN:
162                         if (match_u64(args, &token64)) {
163                                 ret = -EINVAL;
164                                 goto out_free_options;
165                         }
166                         *pname = token64;
167                         break;
168                 default:
169                         pr_warn("unknown parameter or missing value '%s'\n", p);
170                         ret = -EINVAL;
171                         goto out_free_options;
172                 }
173         }
174
175 out_free_options:
176         kfree(options);
177
178         if (!ret) {
179                 if (*nname == -1)
180                         return -EINVAL;
181                 if (*pname == -1)
182                         return -EINVAL;
183         }
184
185         return ret;
186 }
187
188
/* option sets required when creating each kind of port */
#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)
195
196
/* fcloop_lock protects both port lists below */
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);
200
/* fcloop's per-local-port state, tracked on fcloop_lports */
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;	/* completed by localport_delete callback */
};
206
/* lives in the nvme_fc local port private area; points back to our lport */
struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};
210
/* remote-port half of a loopback pairing (initiator's view) */
struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;	/* NULL if no target attached */
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};
217
/* target-port half of a loopback pairing (target's view) */
struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;		/* NULL if no remote attached */
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};
224
/*
 * A node/port name pairing that can host both the remote-port and
 * target-port halves of the loopback link; lives on fcloop_nports and
 * is refcounted by the two halves.
 */
struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};
236
/* per-LS-request state; lives in the nvmefc_ls_req private area */
struct fcloop_lsreq {
	struct fcloop_tport		*tport;	/* NULL if rejected before a target */
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;	/* deferred initiator completion */
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};
244
/* target-side tracking of one FCP I/O; kzalloc'd per request */
struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;	/* NULLed on initiator abort */
	spinlock_t			reqlock;	/* guards fcpreq/active/aborted */
	u16				status;
	bool				active;		/* an op is in flight */
	bool				aborted;	/* target-side abort seen */
	struct work_struct		work;		/* deferred completion + kfree */
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};
255
/* initiator-side per-I/O state; lives in the nvmefc_fcp_req private area */
struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;	/* NULLed once aborted */
	struct work_struct		iniwork;	/* aborted-I/O completion */
};
261
/* map a target-side LS request back to its containing fcloop_lsreq */
static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}
267
/* map a target-side FCP request back to its containing fcloop_fcpreq */
static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}
273
274
/* queue create: no per-queue resources for loopback; hand back the localport */
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}
283
/* queue delete: no-op, nothing was allocated in fcloop_create_queue */
static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}
289
290
/*
 * Transmit of LS RSP done (e.g. buffers all set). call back up
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	/*
	 * tport == NULL means the LS was refused before reaching a target
	 * (see fcloop_ls_req) and must still be completed; otherwise only
	 * complete if the remote port is still registered.
	 */
	if (!tport || tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}
306
/*
 * Initiator LS request entry point: pass the request buffer straight to
 * the loopback target via nvmet_fc_rcv_ls_req(). If no target port is
 * attached, complete the LS with -ECONNREFUSED from the work item
 * (deferring keeps the completion out of the caller's context).
 */
static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		/* no target attached: fail the LS asynchronously */
		tls_req->status = -ECONNREFUSED;
		tls_req->tport = NULL;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}
333
334 static int
335 fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
336                         struct nvmefc_tgt_ls_req *tgt_lsreq)
337 {
338         struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
339         struct nvmefc_ls_req *lsreq = tls_req->lsreq;
340
341         memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
342                 ((lsreq->rsplen < tgt_lsreq->rsplen) ?
343                                 lsreq->rsplen : tgt_lsreq->rsplen));
344         tgt_lsreq->done(tgt_lsreq);
345
346         schedule_work(&tls_req->work);
347
348         return 0;
349 }
350
/*
 * FCP IO operation done by initiator abort.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
{
	struct fcloop_ini_fcpreq *inireq =
		container_of(work, struct fcloop_ini_fcpreq, iniwork);

	/* fcpreq->status was set to -ECANCELED by fcloop_fcp_abort() */
	inireq->fcpreq->done(inireq->fcpreq);
}
363
/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, work);
	struct fcloop_tport *tport = tfcp_req->tport;
	struct nvmefc_fcp_req *fcpreq;

	/* take ownership of the initiator request under the lock; it may
	 * already be NULL if the initiator aborted the I/O.
	 */
	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	/* only complete upward if the remote port is still registered */
	if (tport->remoteport && fcpreq) {
		fcpreq->status = tfcp_req->status;
		fcpreq->done(fcpreq);
	}

	/* tfcp_req was allocated in fcloop_fcp_req(); its life ends here */
	kfree(tfcp_req);
}
388
389
/*
 * Initiator FCP I/O entry point: allocate the target-side tracking
 * structure, wire initiator and target views together, and hand the
 * command to the loopback target via nvmet_fc_rcv_fcp_req().
 *
 * Returns 0 on success, -ECONNREFUSED if no target is attached,
 * -ENOMEM on allocation failure, or the nvmet_fc_rcv_fcp_req() error.
 */
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	int ret = 0;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);

	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
				 fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret) {
		/*
		 * The target never took ownership of the request, so no
		 * fcp_req_release callback will come: free the tracking
		 * structure here (it would otherwise leak) and clear the
		 * initiator's back-pointer so a later fcloop_fcp_abort()
		 * cannot dereference freed memory.
		 */
		inireq->tfcp_req = NULL;
		kfree(tfcp_req);
	}

	return ret;
}
421
/*
 * Emulate the wire data transfer by copying between the target-side
 * scatterlist (@data_sg) and the initiator-side scatterlist (@io_sg).
 * @op selects direction: NVMET_FCOP_WRITEDATA copies io -> data, any
 * other op copies data -> io. @offset is the starting byte offset into
 * io_sg; @length is the number of bytes to move.
 *
 * NOTE(review): assumes both scatterlists cover offset+length bytes;
 * sg_next() results are not NULL-checked.
 */
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	/* advance the io scatterlist cursor past 'offset' bytes */
	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	/* copy in chunks bounded by both segments and the remaining length */
	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		/* step each side to its next segment only if more remains */
		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}
475
/*
 * Target transport op callback: emulate the data-movement and response
 * phases of an FCP exchange against the initiator-side request, then
 * signal op completion back to the target transport via done().
 */
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	/* snapshot state and mark the op in flight atomically */
	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		/* FALLTHRU */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			/* copy the smaller of the two response lengths;
			 * truncation is reported as -E2BIG */
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}
568
/*
 * Target-side abort of an FCP I/O: flag the request so any op posted
 * after the abort completes with -ECANCELED (see fcloop_fcp_op).
 */
static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	/* NOTE(review): status write is outside the lock; readers see it
	 * only after the work item runs, but confirm ordering is intended.
	 */
	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}
592
/*
 * Target is finished with the request: defer the initiator completion
 * and the kfree of tfcp_req to fcloop_tgt_fcprqst_done_work().
 */
static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->work);
}
601
/* LS abort: no-op, fcloop keeps no cancellable per-LS state */
static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}
608
/*
 * Initiator-side abort of an FCP I/O: sever the initiator/target
 * pairing, notify the target (if attached) and complete the request
 * upward with -ECANCELED from the iniwork item.
 *
 * NOTE(review): inireq->tfcp_req is read before taking the lock; later
 * kernels rework this path to close races with req_release — confirm
 * whether that matters for this tree.
 */
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;

	if (!tfcp_req)
		/* abort has already been called */
		goto finish;

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	inireq->tfcp_req = NULL;
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	if (rport->targetport)
		nvmet_fc_rcv_fcp_abort(rport->targetport,
					&tfcp_req->tgt_fcp_req);

finish:
	/* post the aborted io completion */
	fcpreq->status = -ECANCELED;
	schedule_work(&inireq->iniwork);
}
638
/* kref release: unlink the nport from fcloop_nports and free it */
static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}
652
/* drop a reference; frees the nport when the last one goes */
static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}
658
/* take a reference unless the count already hit zero; non-zero on success */
static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}
664
/* nvme-fc callback: local port fully unregistered */
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}
674
/* nvme-fc callback: remote port unregistered; drop its nport reference */
static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}
682
/* nvmet-fc callback: target port unregistered; drop its nport reference */
static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}
690
#define FCLOOP_HW_QUEUES		4
#define FCLOOP_SGL_SEGS			256
#define FCLOOP_DMABOUND_4G		0xFFFFFFFF

/* initiator-side (host) LLDD template registered with nvme-fc */
static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};
714
/* target-side LLDD template registered with nvmet-fc */
static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= NVMET_FCTGTFEAT_CMD_IN_ISR |
				  NVMET_FCTGTFEAT_OPDONE_IN_ISR,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};
731
/*
 * "add local port" attribute store: parse "wwnn=...,wwpn=..." (plus
 * optional roles/fcaddr), register an nvme_fc local port and track it
 * on fcloop_lports. Returns @count on success, negative errno on error.
 */
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}
791
792
/* remove the lport from fcloop_lports; caller must hold fcloop_lock */
static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}
798
/*
 * Unregister the localport, block until fcloop_localport_delete()
 * signals unreg_done, then free the lport. Returns the unregister
 * call's status.
 */
static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}
814
815
/*
 * "delete local port" attribute store: look up the lport by the
 * wwnn/wwpn in @buf, unlink it under the lock, then unregister and
 * wait outside the lock. Returns @count on success.
 */
static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}
848
/*
 * Find or create the fcloop_nport named by the wwnn/wwpn in @buf.
 * @remoteport selects the required option set: a remote port (RPORT_OPTS)
 * must also name an existing local port via lpwwnn/lpwwpn.
 *
 * If an nport with the same names exists and the requested side
 * (rport or tport) is still free, a reference is taken and the existing
 * nport returned with its role/fcaddr refreshed; otherwise a new nport
 * is added to fcloop_nports. Returns NULL on any failure.
 */
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	/* the names must not collide with a local port; also find the
	 * local port a remote port would attach to (lpwwnn/lpwwpn) */
	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	/* reuse an existing nport if its requested side is still free */
	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}
942
943 static ssize_t
944 fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
945                 const char *buf, size_t count)
946 {
947         struct nvme_fc_remote_port *remoteport;
948         struct fcloop_nport *nport;
949         struct fcloop_rport *rport;
950         struct nvme_fc_port_info pinfo;
951         int ret;
952
953         nport = fcloop_alloc_nport(buf, count, true);
954         if (!nport)
955                 return -EIO;
956
957         memset(&pinfo, 0, sizeof(pinfo));
958         pinfo.node_name = nport->node_name;
959         pinfo.port_name = nport->port_name;
960         pinfo.port_role = nport->port_role;
961         pinfo.port_id = nport->port_id;
962
963         ret = nvme_fc_register_remoteport(nport->lport->localport,
964                                                 &pinfo, &remoteport);
965         if (ret || !remoteport) {
966                 fcloop_nport_put(nport);
967                 return ret;
968         }
969
970         /* success */
971         rport = remoteport->private;
972         rport->remoteport = remoteport;
973         rport->targetport = (nport->tport) ?  nport->tport->targetport : NULL;
974         if (nport->tport) {
975                 nport->tport->remoteport = remoteport;
976                 nport->tport->lport = nport->lport;
977         }
978         rport->nport = nport;
979         rport->lport = nport->lport;
980         nport->rport = rport;
981
982         return count;
983 }
984
985
986 static struct fcloop_rport *
987 __unlink_remote_port(struct fcloop_nport *nport)
988 {
989         struct fcloop_rport *rport = nport->rport;
990
991         if (rport && nport->tport)
992                 nport->tport->remoteport = NULL;
993         nport->rport = NULL;
994
995         return rport;
996 }
997
998 static int
999 __remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
1000 {
1001         if (!rport)
1002                 return -EALREADY;
1003
1004         return nvme_fc_unregister_remoteport(rport->remoteport);
1005 }
1006
1007 static ssize_t
1008 fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
1009                 const char *buf, size_t count)
1010 {
1011         struct fcloop_nport *nport = NULL, *tmpport;
1012         static struct fcloop_rport *rport;
1013         u64 nodename, portname;
1014         unsigned long flags;
1015         int ret;
1016
1017         ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1018         if (ret)
1019                 return ret;
1020
1021         spin_lock_irqsave(&fcloop_lock, flags);
1022
1023         list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
1024                 if (tmpport->node_name == nodename &&
1025                     tmpport->port_name == portname && tmpport->rport) {
1026                         nport = tmpport;
1027                         rport = __unlink_remote_port(nport);
1028                         break;
1029                 }
1030         }
1031
1032         spin_unlock_irqrestore(&fcloop_lock, flags);
1033
1034         if (!nport)
1035                 return -ENOENT;
1036
1037         ret = __remoteport_unreg(nport, rport);
1038
1039         return ret ? ret : count;
1040 }
1041
1042 static ssize_t
1043 fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
1044                 const char *buf, size_t count)
1045 {
1046         struct nvmet_fc_target_port *targetport;
1047         struct fcloop_nport *nport;
1048         struct fcloop_tport *tport;
1049         struct nvmet_fc_port_info tinfo;
1050         int ret;
1051
1052         nport = fcloop_alloc_nport(buf, count, false);
1053         if (!nport)
1054                 return -EIO;
1055
1056         tinfo.node_name = nport->node_name;
1057         tinfo.port_name = nport->port_name;
1058         tinfo.port_id = nport->port_id;
1059
1060         ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
1061                                                 &targetport);
1062         if (ret) {
1063                 fcloop_nport_put(nport);
1064                 return ret;
1065         }
1066
1067         /* success */
1068         tport = targetport->private;
1069         tport->targetport = targetport;
1070         tport->remoteport = (nport->rport) ?  nport->rport->remoteport : NULL;
1071         if (nport->rport)
1072                 nport->rport->targetport = targetport;
1073         tport->nport = nport;
1074         tport->lport = nport->lport;
1075         nport->tport = tport;
1076
1077         return count;
1078 }
1079
1080
1081 static struct fcloop_tport *
1082 __unlink_target_port(struct fcloop_nport *nport)
1083 {
1084         struct fcloop_tport *tport = nport->tport;
1085
1086         if (tport && nport->rport)
1087                 nport->rport->targetport = NULL;
1088         nport->tport = NULL;
1089
1090         return tport;
1091 }
1092
1093 static int
1094 __targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
1095 {
1096         if (!tport)
1097                 return -EALREADY;
1098
1099         return nvmet_fc_unregister_targetport(tport->targetport);
1100 }
1101
1102 static ssize_t
1103 fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
1104                 const char *buf, size_t count)
1105 {
1106         struct fcloop_nport *nport = NULL, *tmpport;
1107         struct fcloop_tport *tport;
1108         u64 nodename, portname;
1109         unsigned long flags;
1110         int ret;
1111
1112         ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1113         if (ret)
1114                 return ret;
1115
1116         spin_lock_irqsave(&fcloop_lock, flags);
1117
1118         list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
1119                 if (tmpport->node_name == nodename &&
1120                     tmpport->port_name == portname && tmpport->tport) {
1121                         nport = tmpport;
1122                         tport = __unlink_target_port(nport);
1123                         break;
1124                 }
1125         }
1126
1127         spin_unlock_irqrestore(&fcloop_lock, flags);
1128
1129         if (!nport)
1130                 return -ENOENT;
1131
1132         ret = __targetport_unreg(nport, tport);
1133
1134         return ret ? ret : count;
1135 }
1136
1137
1138 static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
1139 static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
1140 static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
1141 static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
1142 static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
1143 static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
1144
1145 static struct attribute *fcloop_dev_attrs[] = {
1146         &dev_attr_add_local_port.attr,
1147         &dev_attr_del_local_port.attr,
1148         &dev_attr_add_remote_port.attr,
1149         &dev_attr_del_remote_port.attr,
1150         &dev_attr_add_target_port.attr,
1151         &dev_attr_del_target_port.attr,
1152         NULL
1153 };
1154
1155 static struct attribute_group fclopp_dev_attrs_group = {
1156         .attrs          = fcloop_dev_attrs,
1157 };
1158
1159 static const struct attribute_group *fcloop_dev_attr_groups[] = {
1160         &fclopp_dev_attrs_group,
1161         NULL,
1162 };
1163
/* sysfs class and the single "ctl" device created in fcloop_init() */
static struct class *fcloop_class;
static struct device *fcloop_device;
1166
1167
1168 static int __init fcloop_init(void)
1169 {
1170         int ret;
1171
1172         fcloop_class = class_create(THIS_MODULE, "fcloop");
1173         if (IS_ERR(fcloop_class)) {
1174                 pr_err("couldn't register class fcloop\n");
1175                 ret = PTR_ERR(fcloop_class);
1176                 return ret;
1177         }
1178
1179         fcloop_device = device_create_with_groups(
1180                                 fcloop_class, NULL, MKDEV(0, 0), NULL,
1181                                 fcloop_dev_attr_groups, "ctl");
1182         if (IS_ERR(fcloop_device)) {
1183                 pr_err("couldn't create ctl device!\n");
1184                 ret = PTR_ERR(fcloop_device);
1185                 goto out_destroy_class;
1186         }
1187
1188         get_device(fcloop_device);
1189
1190         return 0;
1191
1192 out_destroy_class:
1193         class_destroy(fcloop_class);
1194         return ret;
1195 }
1196
/*
 * Module exit: unwind every port created through the sysfs interface,
 * then remove the ctl device and class.
 *
 * Each loop re-reads the head of its list after reacquiring the lock,
 * because fcloop_lock is dropped around the unregister calls (which go
 * into the transport's unregister paths and presumably may block —
 * NOTE(review): assumed, confirm against the nvme-fc transport).
 */
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	/* first pass: tear down every nport (target and remote side) */
	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		/* detach both sides while still holding the lock */
		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		/* -EALREADY from either helper means that side was absent */
		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	/* second pass: tear down every local port */
	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	/* drop the reference taken in fcloop_init() */
	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}
1254
/* module entry/exit hooks */
module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");