1 /*******************************************************************************
2  * Vhost kernel TCM fabric driver for virtio SCSI initiators
3  *
4  * (C) Copyright 2010-2013 Datera, Inc.
5  * (C) Copyright 2010-2012 IBM Corp.
6  *
7  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8  *
9  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  ****************************************************************************/
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/vmalloc.h>
39 #include <linux/miscdevice.h>
40 #include <asm/unaligned.h>
41 #include <scsi/scsi_common.h>
42 #include <scsi/scsi_proto.h>
43 #include <target/target_core_base.h>
44 #include <target/target_core_fabric.h>
45 #include <linux/vhost.h>
46 #include <linux/virtio_scsi.h>
47 #include <linux/llist.h>
48 #include <linux/bitmap.h>
49 #include <linux/percpu_ida.h>
50
51 #include "vhost.h"
52
53 #define VHOST_SCSI_VERSION  "v0.1"
54 #define VHOST_SCSI_NAMELEN 256
55 #define VHOST_SCSI_MAX_CDB_SIZE 32
56 #define VHOST_SCSI_DEFAULT_TAGS 256
57 #define VHOST_SCSI_PREALLOC_SGLS 2048
58 #define VHOST_SCSI_PREALLOC_UPAGES 2048
59 #define VHOST_SCSI_PREALLOC_PROT_SGLS 512
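/*
 * These PREALLOC_* limits bound the per-command scatterlist and page
 * arrays that are preallocated with each session tag and reused by
 * vhost_scsi_get_tag(); requests that would need more entries are
 * rejected in vhost_scsi_calc_sgls()/vhost_scsi_map_to_sgl().
 */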
60
61 /* Max number of requests before requeueing the job.
62  * Using this limit prevents one virtqueue from starving others with
63  * requests.
64  */
65 #define VHOST_SCSI_WEIGHT 256
66
67 struct vhost_scsi_inflight {
68         /* Wait for the flush operation to finish */
69         struct completion comp;
70         /* Refcount for the inflight reqs */
71         struct kref kref;
72 };
73
74 struct vhost_scsi_cmd {
75         /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
76         int tvc_vq_desc;
77         /* virtio-scsi initiator task attribute */
78         int tvc_task_attr;
79         /* virtio-scsi response incoming iovecs */
80         int tvc_in_iovs;
81         /* virtio-scsi initiator data direction */
82         enum dma_data_direction tvc_data_direction;
83         /* Expected data transfer length from virtio-scsi header */
84         u32 tvc_exp_data_len;
85         /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
86         u64 tvc_tag;
87         /* The number of scatterlists associated with this cmd */
88         u32 tvc_sgl_count;
89         u32 tvc_prot_sgl_count;
90         /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
91         u32 tvc_lun;
92         /* Pointer to the SGL formatted memory from virtio-scsi */
93         struct scatterlist *tvc_sgl;
94         struct scatterlist *tvc_prot_sgl;
95         struct page **tvc_upages;
96         /* Copy of the response header iovec taken from the guest descriptor */
97         struct iovec tvc_resp_iov;
98         /* Pointer to vhost_scsi for our device */
99         struct vhost_scsi *tvc_vhost;
100         /* Pointer to vhost_virtqueue for the cmd */
101         struct vhost_virtqueue *tvc_vq;
102         /* Pointer to vhost nexus memory */
103         struct vhost_scsi_nexus *tvc_nexus;
104         /* The TCM I/O descriptor that is accessed via container_of() */
105         struct se_cmd tvc_se_cmd;
106         /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
107         struct work_struct work;
108         /* Copy of the incoming SCSI command descriptor block (CDB) */
109         unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
110         /* Sense buffer that will be mapped into outgoing status */
111         unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
112         /* Completed commands list, serviced from vhost worker thread */
113         struct llist_node tvc_completion_list;
114         /* Used to track inflight cmd */
115         struct vhost_scsi_inflight *inflight;
116 };
117
118 struct vhost_scsi_nexus {
119         /* Pointer to TCM session for I_T Nexus */
120         struct se_session *tvn_se_sess;
121 };
122
123 struct vhost_scsi_tpg {
124         /* Vhost port target portal group tag for TCM */
125         u16 tport_tpgt;
126         /* Used to track number of TPG Port/Lun Links w.r.t. explicit I_T Nexus shutdown */
127         int tv_tpg_port_count;
128         /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
129         int tv_tpg_vhost_count;
130         /* Used for enabling T10-PI with legacy devices */
131         int tv_fabric_prot_type;
132         /* list for vhost_scsi_list */
133         struct list_head tv_tpg_list;
134         /* Used to protect access to tpg_nexus */
135         struct mutex tv_tpg_mutex;
136         /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
137         struct vhost_scsi_nexus *tpg_nexus;
138         /* Pointer back to vhost_scsi_tport */
139         struct vhost_scsi_tport *tport;
140         /* Returned by vhost_scsi_make_tpg() */
141         struct se_portal_group se_tpg;
142         /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
143         struct vhost_scsi *vhost_scsi;
144 };
145
146 struct vhost_scsi_tport {
147         /* SCSI protocol the tport is providing */
148         u8 tport_proto_id;
149         /* Binary World Wide unique Port Name for Vhost Target port */
150         u64 tport_wwpn;
151         /* ASCII formatted WWPN for Vhost Target port */
152         char tport_name[VHOST_SCSI_NAMELEN];
153         /* Returned by vhost_scsi_make_tport() */
154         struct se_wwn tport_wwn;
155 };
156
157 struct vhost_scsi_evt {
158         /* event to be sent to guest */
159         struct virtio_scsi_event event;
160         /* event list, serviced from vhost worker thread */
161         struct llist_node list;
162 };
163
164 enum {
165         VHOST_SCSI_VQ_CTL = 0,
166         VHOST_SCSI_VQ_EVT = 1,
167         VHOST_SCSI_VQ_IO = 2,
168 };
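/*
 * Virtqueue layout: index 0 is the control queue, index 1 the event queue,
 * and indexes 2..VHOST_SCSI_MAX_VQ-1 are request (I/O) queues; see
 * vhost_scsi_open() for how the kick handlers are wired up.
 */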
169
170 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
171 enum {
172         VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
173                                                (1ULL << VIRTIO_SCSI_F_T10_PI)
174 };
175
176 #define VHOST_SCSI_MAX_TARGET   256
177 #define VHOST_SCSI_MAX_VQ       128
178 #define VHOST_SCSI_MAX_EVENT    128
179
180 struct vhost_scsi_virtqueue {
181         struct vhost_virtqueue vq;
182         /*
183          * Reference counting for inflight reqs, used for the flush operation. At
184          * any time one of the two counters tracks newly submitted commands, while
185          * we wait for the other one to drop to 0.
186          */
187         struct vhost_scsi_inflight inflights[2];
188         /*
189          * Indicate current inflight in use, protected by vq->mutex.
190          * Writers must also take dev mutex and flush under it.
191          */
192         int inflight_idx;
193 };
194
195 struct vhost_scsi {
196         /* Protected by vhost_scsi->dev.mutex */
197         struct vhost_scsi_tpg **vs_tpg;
198         char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
199
200         struct vhost_dev dev;
201         struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
202
203         struct vhost_work vs_completion_work; /* cmd completion work item */
204         struct llist_head vs_completion_list; /* cmd completion queue */
205
206         struct vhost_work vs_event_work; /* evt injection work item */
207         struct llist_head vs_event_list; /* evt injection queue */
208
209         bool vs_events_missed; /* any missed events, protected by vq->mutex */
210         int vs_events_nr; /* num of pending events, protected by vq->mutex */
211 };
212
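/*
 * Request lifecycle: a guest kick runs vhost_scsi_handle_kick() ->
 * vhost_scsi_handle_vq(), which parses the virtio-scsi request, maps guest
 * buffers into scatterlists and queues vhost_scsi_submission_work() on the
 * workqueue below.  That work submits the command to the TCM backend; on
 * completion TCM calls vhost_scsi_queue_data_in()/_status(), which feed
 * vs_completion_list, and vhost_scsi_complete_cmd_work() copies the
 * virtio_scsi_cmd_resp back to the guest and signals the virtqueue.
 */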
213 static struct workqueue_struct *vhost_scsi_workqueue;
214
215 /* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
216 static DEFINE_MUTEX(vhost_scsi_mutex);
217 static LIST_HEAD(vhost_scsi_list);
218
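/*
 * Number of pages spanned by a guest buffer, counting partial head/tail
 * pages: e.g. with 4K pages, iov_base == 0x1ff0 and iov_len == 0x20 touches
 * the pages at 0x1000 and 0x2000, so 2 is returned.
 */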
219 static int iov_num_pages(void __user *iov_base, size_t iov_len)
220 {
221         return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
222                ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
223 }
224
225 static void vhost_scsi_done_inflight(struct kref *kref)
226 {
227         struct vhost_scsi_inflight *inflight;
228
229         inflight = container_of(kref, struct vhost_scsi_inflight, kref);
230         complete(&inflight->comp);
231 }
232
233 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
234                                     struct vhost_scsi_inflight *old_inflight[])
235 {
236         struct vhost_scsi_inflight *new_inflight;
237         struct vhost_virtqueue *vq;
238         int idx, i;
239
240         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
241                 vq = &vs->vqs[i].vq;
242
243                 mutex_lock(&vq->mutex);
244
245                 /* store old inflight */
246                 idx = vs->vqs[i].inflight_idx;
247                 if (old_inflight)
248                         old_inflight[i] = &vs->vqs[i].inflights[idx];
249
250                 /* set up new inflight */
251                 vs->vqs[i].inflight_idx = idx ^ 1;
252                 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
253                 kref_init(&new_inflight->kref);
254                 init_completion(&new_inflight->comp);
255
256                 mutex_unlock(&vq->mutex);
257         }
258 }
259
260 static struct vhost_scsi_inflight *
261 vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
262 {
263         struct vhost_scsi_inflight *inflight;
264         struct vhost_scsi_virtqueue *svq;
265
266         svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
267         inflight = &svq->inflights[svq->inflight_idx];
268         kref_get(&inflight->kref);
269
270         return inflight;
271 }
272
273 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
274 {
275         kref_put(&inflight->kref, vhost_scsi_done_inflight);
276 }
277
278 static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
279 {
280         return 1;
281 }
282
283 static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
284 {
285         return 0;
286 }
287
288 static char *vhost_scsi_get_fabric_name(void)
289 {
290         return "vhost";
291 }
292
293 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
294 {
295         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
296                                 struct vhost_scsi_tpg, se_tpg);
297         struct vhost_scsi_tport *tport = tpg->tport;
298
299         return &tport->tport_name[0];
300 }
301
302 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
303 {
304         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
305                                 struct vhost_scsi_tpg, se_tpg);
306         return tpg->tport_tpgt;
307 }
308
309 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
310 {
311         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
312                                 struct vhost_scsi_tpg, se_tpg);
313
314         return tpg->tv_fabric_prot_type;
315 }
316
317 static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
318 {
319         return 1;
320 }
321
322 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
323 {
324         struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
325                                 struct vhost_scsi_cmd, tvc_se_cmd);
326         struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
327         int i;
328
329         if (tv_cmd->tvc_sgl_count) {
330                 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
331                         put_page(sg_page(&tv_cmd->tvc_sgl[i]));
332         }
333         if (tv_cmd->tvc_prot_sgl_count) {
334                 for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
335                         put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
336         }
337
338         vhost_scsi_put_inflight(tv_cmd->inflight);
339         percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
340 }
341
342 static int vhost_scsi_shutdown_session(struct se_session *se_sess)
343 {
344         return 0;
345 }
346
347 static void vhost_scsi_close_session(struct se_session *se_sess)
348 {
349         return;
350 }
351
352 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
353 {
354         return 0;
355 }
356
357 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
358 {
359         /* Go ahead and process the write immediately */
360         target_execute_cmd(se_cmd);
361         return 0;
362 }
363
364 static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
365 {
366         return 0;
367 }
368
369 static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
370 {
371         return;
372 }
373
374 static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
375 {
376         return 0;
377 }
378
379 static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
380 {
381         struct vhost_scsi *vs = cmd->tvc_vhost;
382
383         llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
384
385         vhost_work_queue(&vs->dev, &vs->vs_completion_work);
386 }
387
388 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
389 {
390         struct vhost_scsi_cmd *cmd = container_of(se_cmd,
391                                 struct vhost_scsi_cmd, tvc_se_cmd);
392         vhost_scsi_complete_cmd(cmd);
393         return 0;
394 }
395
396 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
397 {
398         struct vhost_scsi_cmd *cmd = container_of(se_cmd,
399                                 struct vhost_scsi_cmd, tvc_se_cmd);
400         vhost_scsi_complete_cmd(cmd);
401         return 0;
402 }
403
404 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
405 {
406         return;
407 }
408
409 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
410 {
411         return;
412 }
413
414 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
415 {
416         vs->vs_events_nr--;
417         kfree(evt);
418 }
419
420 static struct vhost_scsi_evt *
421 vhost_scsi_allocate_evt(struct vhost_scsi *vs,
422                        u32 event, u32 reason)
423 {
424         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
425         struct vhost_scsi_evt *evt;
426
427         if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
428                 vs->vs_events_missed = true;
429                 return NULL;
430         }
431
432         evt = kzalloc(sizeof(*evt), GFP_KERNEL);
433         if (!evt) {
434                 vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
435                 vs->vs_events_missed = true;
436                 return NULL;
437         }
438
439         evt->event.event = cpu_to_vhost32(vq, event);
440         evt->event.reason = cpu_to_vhost32(vq, reason);
441         vs->vs_events_nr++;
442
443         return evt;
444 }
445
446 static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
447 {
448         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
449
450         /* TODO locking against target/backend threads? */
451         transport_generic_free_cmd(se_cmd, 0);
452
453 }
454
455 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
456 {
457         return target_put_sess_cmd(se_cmd);
458 }
459
460 static void
461 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
462 {
463         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
464         struct virtio_scsi_event *event = &evt->event;
465         struct virtio_scsi_event __user *eventp;
466         unsigned out, in;
467         int head, ret;
468
469         if (!vq->private_data) {
470                 vs->vs_events_missed = true;
471                 return;
472         }
473
474 again:
475         vhost_disable_notify(&vs->dev, vq);
476         head = vhost_get_vq_desc(vq, vq->iov,
477                         ARRAY_SIZE(vq->iov), &out, &in,
478                         NULL, NULL);
479         if (head < 0) {
480                 vs->vs_events_missed = true;
481                 return;
482         }
483         if (head == vq->num) {
484                 if (vhost_enable_notify(&vs->dev, vq))
485                         goto again;
486                 vs->vs_events_missed = true;
487                 return;
488         }
489
490         if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
491                 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
492                                 vq->iov[out].iov_len);
493                 vs->vs_events_missed = true;
494                 return;
495         }
496
497         if (vs->vs_events_missed) {
498                 event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
499                 vs->vs_events_missed = false;
500         }
501
502         eventp = vq->iov[out].iov_base;
503         ret = __copy_to_user(eventp, event, sizeof(*event));
504         if (!ret)
505                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
506         else
507                 vq_err(vq, "Faulted on vhost_scsi_send_event\n");
508 }
509
510 static void vhost_scsi_evt_work(struct vhost_work *work)
511 {
512         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
513                                         vs_event_work);
514         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
515         struct vhost_scsi_evt *evt;
516         struct llist_node *llnode;
517
518         mutex_lock(&vq->mutex);
519         llnode = llist_del_all(&vs->vs_event_list);
520         while (llnode) {
521                 evt = llist_entry(llnode, struct vhost_scsi_evt, list);
522                 llnode = llist_next(llnode);
523                 vhost_scsi_do_evt_work(vs, evt);
524                 vhost_scsi_free_evt(vs, evt);
525         }
526         mutex_unlock(&vq->mutex);
527 }
528
529 /* Fill in status and signal that we are done processing this command
530  *
531  * This is scheduled in the vhost work queue so we are called with the owner
532  * process mm and can access the vring.
533  */
534 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
535 {
536         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
537                                         vs_completion_work);
538         DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
539         struct virtio_scsi_cmd_resp v_rsp;
540         struct vhost_scsi_cmd *cmd;
541         struct llist_node *llnode;
542         struct se_cmd *se_cmd;
543         struct iov_iter iov_iter;
544         int ret, vq;
545
546         bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
547         llnode = llist_del_all(&vs->vs_completion_list);
548         while (llnode) {
549                 cmd = llist_entry(llnode, struct vhost_scsi_cmd,
550                                      tvc_completion_list);
551                 llnode = llist_next(llnode);
552                 se_cmd = &cmd->tvc_se_cmd;
553
554                 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
555                         cmd, se_cmd->residual_count, se_cmd->scsi_status);
556
557                 memset(&v_rsp, 0, sizeof(v_rsp));
558                 v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
559                 /* TODO is status_qualifier field needed? */
560                 v_rsp.status = se_cmd->scsi_status;
561                 v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
562                                                  se_cmd->scsi_sense_length);
563                 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
564                        se_cmd->scsi_sense_length);
565
566                 iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
567                               cmd->tvc_in_iovs, sizeof(v_rsp));
568                 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
569                 if (likely(ret == sizeof(v_rsp))) {
570                         struct vhost_scsi_virtqueue *q;
571                         vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
572                         q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
573                         vq = q - vs->vqs;
574                         __set_bit(vq, signal);
575                 } else
576                         pr_err("Faulted on virtio_scsi_cmd_resp\n");
577
578                 vhost_scsi_free_cmd(cmd);
579         }
580
581         vq = -1;
582         while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
583                 < VHOST_SCSI_MAX_VQ)
584                 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
585 }
586
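/*
 * Allocate a command slot from the session tag pool.  The preallocated
 * tvc_sgl/tvc_prot_sgl/tvc_upages arrays belonging to the slot are saved
 * across the memset() below and reattached, so only the per-request fields
 * start out zeroed.
 */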
587 static struct vhost_scsi_cmd *
588 vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
589                    unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
590                    u32 exp_data_len, int data_direction)
591 {
592         struct vhost_scsi_cmd *cmd;
593         struct vhost_scsi_nexus *tv_nexus;
594         struct se_session *se_sess;
595         struct scatterlist *sg, *prot_sg;
596         struct page **pages;
597         int tag;
598
599         tv_nexus = tpg->tpg_nexus;
600         if (!tv_nexus) {
601                 pr_err("Unable to locate active struct vhost_scsi_nexus\n");
602                 return ERR_PTR(-EIO);
603         }
604         se_sess = tv_nexus->tvn_se_sess;
605
606         tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
607         if (tag < 0) {
608                 pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
609                 return ERR_PTR(-ENOMEM);
610         }
611
612         cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
613         sg = cmd->tvc_sgl;
614         prot_sg = cmd->tvc_prot_sgl;
615         pages = cmd->tvc_upages;
616         memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
617
618         cmd->tvc_sgl = sg;
619         cmd->tvc_prot_sgl = prot_sg;
620         cmd->tvc_upages = pages;
621         cmd->tvc_se_cmd.map_tag = tag;
622         cmd->tvc_tag = scsi_tag;
623         cmd->tvc_lun = lun;
624         cmd->tvc_task_attr = task_attr;
625         cmd->tvc_exp_data_len = exp_data_len;
626         cmd->tvc_data_direction = data_direction;
627         cmd->tvc_nexus = tv_nexus;
628         cmd->inflight = vhost_scsi_get_inflight(vq);
629
630         memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
631
632         return cmd;
633 }
634
635 /*
636  * Map a user memory range into a scatterlist
637  *
638  * Returns the number of scatterlist entries used or -errno on error.
639  */
640 static int
641 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
642                       void __user *ptr,
643                       size_t len,
644                       struct scatterlist *sgl,
645                       bool write)
646 {
647         unsigned int npages = 0, offset, nbytes;
648         unsigned int pages_nr = iov_num_pages(ptr, len);
649         struct scatterlist *sg = sgl;
650         struct page **pages = cmd->tvc_upages;
651         int ret, i;
652
653         if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
654                 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
655                        " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
656                         pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
657                 return -ENOBUFS;
658         }
659
660         ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
661         /* No pages were pinned */
662         if (ret < 0)
663                 goto out;
664         /* Fewer pages pinned than requested */
665         if (ret != pages_nr) {
666                 for (i = 0; i < ret; i++)
667                         put_page(pages[i]);
668                 ret = -EFAULT;
669                 goto out;
670         }
671
672         while (len > 0) {
673                 offset = (uintptr_t)ptr & ~PAGE_MASK;
674                 nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
675                 sg_set_page(sg, pages[npages], nbytes, offset);
676                 ptr += nbytes;
677                 len -= nbytes;
678                 sg++;
679                 npages++;
680         }
681
682 out:
683         return ret;
684 }
685
686 static int
687 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
688 {
689         int sgl_count = 0;
690
691         if (!iter || !iter->iov) {
692                 pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
693                        " present\n", __func__, bytes);
694                 return -EINVAL;
695         }
696
697         sgl_count = iov_iter_npages(iter, 0xffff);
698         if (sgl_count > max_sgls) {
699                 pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
700                        " max_sgls: %d\n", __func__, sgl_count, max_sgls);
701                 return -EINVAL;
702         }
703         return sgl_count;
704 }
705
706 static int
707 vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
708                       struct iov_iter *iter,
709                       struct scatterlist *sg, int sg_count)
710 {
711         size_t off = iter->iov_offset;
712         struct scatterlist *p = sg;
713         int i, ret;
714
715         for (i = 0; i < iter->nr_segs; i++) {
716                 void __user *base = iter->iov[i].iov_base + off;
717                 size_t len = iter->iov[i].iov_len - off;
718
719                 ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
720                 if (ret < 0) {
721                         while (p < sg) {
722                                 struct page *page = sg_page(p++);
723                                 if (page)
724                                         put_page(page);
725                         }
726                         return ret;
727                 }
728                 sg += ret;
729                 off = 0;
730         }
731         return 0;
732 }
733
734 static int
735 vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
736                  size_t prot_bytes, struct iov_iter *prot_iter,
737                  size_t data_bytes, struct iov_iter *data_iter)
738 {
739         int sgl_count, ret;
740         bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
741
742         if (prot_bytes) {
743                 sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
744                                                  VHOST_SCSI_PREALLOC_PROT_SGLS);
745                 if (sgl_count < 0)
746                         return sgl_count;
747
748                 sg_init_table(cmd->tvc_prot_sgl, sgl_count);
749                 cmd->tvc_prot_sgl_count = sgl_count;
750                 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
751                          cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
752
753                 ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
754                                             cmd->tvc_prot_sgl,
755                                             cmd->tvc_prot_sgl_count);
756                 if (ret < 0) {
757                         cmd->tvc_prot_sgl_count = 0;
758                         return ret;
759                 }
760         }
761         sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
762                                          VHOST_SCSI_PREALLOC_SGLS);
763         if (sgl_count < 0)
764                 return sgl_count;
765
766         sg_init_table(cmd->tvc_sgl, sgl_count);
767         cmd->tvc_sgl_count = sgl_count;
768         pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
769                   cmd->tvc_sgl, cmd->tvc_sgl_count);
770
771         ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
772                                     cmd->tvc_sgl, cmd->tvc_sgl_count);
773         if (ret < 0) {
774                 cmd->tvc_sgl_count = 0;
775                 return ret;
776         }
777         return 0;
778 }
779
780 static int vhost_scsi_to_tcm_attr(int attr)
781 {
782         switch (attr) {
783         case VIRTIO_SCSI_S_SIMPLE:
784                 return TCM_SIMPLE_TAG;
785         case VIRTIO_SCSI_S_ORDERED:
786                 return TCM_ORDERED_TAG;
787         case VIRTIO_SCSI_S_HEAD:
788                 return TCM_HEAD_TAG;
789         case VIRTIO_SCSI_S_ACA:
790                 return TCM_ACA_TAG;
791         default:
792                 break;
793         }
794         return TCM_SIMPLE_TAG;
795 }
796
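/*
 * Runs in process context on vhost_scsi_workqueue; hands the fully built
 * command to the TCM core via target_submit_cmd_map_sgls() and reports a
 * check condition back to the guest if the submission fails.
 */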
797 static void vhost_scsi_submission_work(struct work_struct *work)
798 {
799         struct vhost_scsi_cmd *cmd =
800                 container_of(work, struct vhost_scsi_cmd, work);
801         struct vhost_scsi_nexus *tv_nexus;
802         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
803         struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
804         int rc;
805
806         /* FIXME: BIDI operation */
807         if (cmd->tvc_sgl_count) {
808                 sg_ptr = cmd->tvc_sgl;
809
810                 if (cmd->tvc_prot_sgl_count)
811                         sg_prot_ptr = cmd->tvc_prot_sgl;
812                 else
813                         se_cmd->prot_pto = true;
814         } else {
815                 sg_ptr = NULL;
816         }
817         tv_nexus = cmd->tvc_nexus;
818
819         se_cmd->tag = 0;
820         rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
821                         cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
822                         cmd->tvc_lun, cmd->tvc_exp_data_len,
823                         vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
824                         cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
825                         sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
826                         cmd->tvc_prot_sgl_count);
827         if (rc < 0) {
828                 transport_send_check_condition_and_sense(se_cmd,
829                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
830                 transport_generic_free_cmd(se_cmd, 0);
831         }
832 }
833
834 static void
835 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
836                            struct vhost_virtqueue *vq,
837                            int head, unsigned out)
838 {
839         struct virtio_scsi_cmd_resp __user *resp;
840         struct virtio_scsi_cmd_resp rsp;
841         int ret;
842
843         memset(&rsp, 0, sizeof(rsp));
844         rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
845         resp = vq->iov[out].iov_base;
846         ret = __copy_to_user(resp, &rsp, sizeof(rsp));
847         if (!ret)
848                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
849         else
850                 pr_err("Faulted on virtio_scsi_cmd_resp\n");
851 }
852
853 static void
854 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
855 {
856         struct vhost_scsi_tpg **vs_tpg, *tpg;
857         struct virtio_scsi_cmd_req v_req;
858         struct virtio_scsi_cmd_req_pi v_req_pi;
859         struct vhost_scsi_cmd *cmd;
860         struct iov_iter out_iter, in_iter, prot_iter, data_iter;
861         u64 tag;
862         u32 exp_data_len, data_direction;
863         unsigned out, in;
864         int head, ret, prot_bytes, c = 0;
865         size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
866         size_t out_size, in_size;
867         u16 lun;
868         u8 *target, *lunp, task_attr;
869         bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
870         void *req, *cdb;
871
872         mutex_lock(&vq->mutex);
873         /*
874          * We can handle the vq only after the endpoint is set up by calling the
875          * VHOST_SCSI_SET_ENDPOINT ioctl.
876          */
877         vs_tpg = vq->private_data;
878         if (!vs_tpg)
879                 goto out;
880
881         vhost_disable_notify(&vs->dev, vq);
882
883         do {
884                 head = vhost_get_vq_desc(vq, vq->iov,
885                                          ARRAY_SIZE(vq->iov), &out, &in,
886                                          NULL, NULL);
887                 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
888                          head, out, in);
889                 /* On error, stop handling until the next kick. */
890                 if (unlikely(head < 0))
891                         break;
892                 /* Nothing new?  Wait for eventfd to tell us they refilled. */
893                 if (head == vq->num) {
894                         if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
895                                 vhost_disable_notify(&vs->dev, vq);
896                                 continue;
897                         }
898                         break;
899                 }
900                 /*
901                  * Check for a sane response buffer so we can report early
902                  * errors back to the guest.
903                  */
904                 if (unlikely(vq->iov[out].iov_len < rsp_size)) {
905                         vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
906                                 " size, got %zu bytes\n", vq->iov[out].iov_len);
907                         break;
908                 }
909                 /*
910                  * Set up pointers and values based upon the different virtio-scsi
911                  * request header used when T10_PI is enabled in the KVM guest.
912                  */
913                 if (t10_pi) {
914                         req = &v_req_pi;
915                         req_size = sizeof(v_req_pi);
916                         lunp = &v_req_pi.lun[0];
917                         target = &v_req_pi.lun[1];
918                 } else {
919                         req = &v_req;
920                         req_size = sizeof(v_req);
921                         lunp = &v_req.lun[0];
922                         target = &v_req.lun[1];
923                 }
924                 /*
925                  * FIXME: Not correct for BIDI operation
926                  */
927                 out_size = iov_length(vq->iov, out);
928                 in_size = iov_length(&vq->iov[out], in);
929
930                 /*
931                  * Copy over the virtio-scsi request header, which for an
932                  * ANY_LAYOUT-enabled guest may span multiple iovecs, or a
933                  * single iovec may contain both the header + outgoing
934                  * WRITE payloads.
935                  *
936                  * copy_from_iter() will advance out_iter, so that it will
937                  * point at the start of the outgoing WRITE payload, if
938                  * DMA_TO_DEVICE is set.
939                  */
940                 iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
941
942                 ret = copy_from_iter(req, req_size, &out_iter);
943                 if (unlikely(ret != req_size)) {
944                         vq_err(vq, "Faulted on copy_from_iter\n");
945                         vhost_scsi_send_bad_target(vs, vq, head, out);
946                         continue;
947                 }
948                 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
949                 if (unlikely(*lunp != 1)) {
950                         vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
951                         vhost_scsi_send_bad_target(vs, vq, head, out);
952                         continue;
953                 }
954
955                 tpg = ACCESS_ONCE(vs_tpg[*target]);
956                 if (unlikely(!tpg)) {
957                         /* Target does not exist, fail the request */
958                         vhost_scsi_send_bad_target(vs, vq, head, out);
959                         continue;
960                 }
961                 /*
962                  * Determine data_direction by calculating the total outgoing
963                  * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
964                  * response headers respectively.
965                  *
966                  * For DMA_TO_DEVICE this is out_iter, which is already pointing
967                  * to the right place.
968                  *
969                  * For DMA_FROM_DEVICE, the iovec will be just past the end
970                  * of the virtio-scsi response header in either the same
971                  * or immediately following iovec.
972                  *
973                  * Any associated T10_PI bytes for the outgoing / incoming
974                  * payloads are included in calculation of exp_data_len here.
975                  */
976                 prot_bytes = 0;
977
978                 if (out_size > req_size) {
979                         data_direction = DMA_TO_DEVICE;
980                         exp_data_len = out_size - req_size;
981                         data_iter = out_iter;
982                 } else if (in_size > rsp_size) {
983                         data_direction = DMA_FROM_DEVICE;
984                         exp_data_len = in_size - rsp_size;
985
986                         iov_iter_init(&in_iter, READ, &vq->iov[out], in,
987                                       rsp_size + exp_data_len);
988                         iov_iter_advance(&in_iter, rsp_size);
989                         data_iter = in_iter;
990                 } else {
991                         data_direction = DMA_NONE;
992                         exp_data_len = 0;
993                 }
994                 /*
995                  * If T10_PI header + payload is present, setup prot_iter values
996                  * and recalculate data_iter for vhost_scsi_mapal() mapping to
997                  * host scatterlists via get_user_pages_fast().
998                  */
999                 if (t10_pi) {
1000                         if (v_req_pi.pi_bytesout) {
1001                                 if (data_direction != DMA_TO_DEVICE) {
1002                                         vq_err(vq, "Received non zero pi_bytesout,"
1003                                                 " but wrong data_direction\n");
1004                                         vhost_scsi_send_bad_target(vs, vq, head, out);
1005                                         continue;
1006                                 }
1007                                 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1008                         } else if (v_req_pi.pi_bytesin) {
1009                                 if (data_direction != DMA_FROM_DEVICE) {
1010                                         vq_err(vq, "Received non zero pi_bytesin,"
1011                                                 " but wrong data_direction\n");
1012                                         vhost_scsi_send_bad_target(vs, vq, head, out);
1013                                         continue;
1014                                 }
1015                                 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1016                         }
1017                         /*
1018                          * Set prot_iter to data_iter and truncate it to
1019                          * prot_bytes, and advance data_iter past any
1020                          * preceding prot_bytes that may be present.
1021                          *
1022                          * Also fix up the exp_data_len to reflect only the
1023                          * actual data payload length.
1024                          */
1025                         if (prot_bytes) {
1026                                 exp_data_len -= prot_bytes;
1027                                 prot_iter = data_iter;
1028                                 iov_iter_truncate(&prot_iter, prot_bytes);
1029                                 iov_iter_advance(&data_iter, prot_bytes);
1030                         }
1031                         tag = vhost64_to_cpu(vq, v_req_pi.tag);
1032                         task_attr = v_req_pi.task_attr;
1033                         cdb = &v_req_pi.cdb[0];
1034                         lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
1035                 } else {
1036                         tag = vhost64_to_cpu(vq, v_req.tag);
1037                         task_attr = v_req.task_attr;
1038                         cdb = &v_req.cdb[0];
1039                         lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1040                 }
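                /*
                 * Note on the lun decode above: virtio-scsi uses an 8-byte
                 * LUN field where byte 0 must be 1, byte 1 selects the
                 * target and bytes 2-3 carry the flat-format LUN; the
                 * 0x3FFF mask strips the two address-method bits.
                 */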
1041                 /*
1042                  * Check that the received CDB size does not exceed our
1043                  * hardcoded max for vhost-scsi, then get a pre-allocated
1044                  * cmd descriptor for the new virtio-scsi tag.
1045                  *
1046                  * TODO what if cdb was too small for varlen cdb header?
1047                  */
1048                 if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1049                         vq_err(vq, "Received SCSI CDB with command_size: %d that"
1050                                 " exceeds VHOST_SCSI_MAX_CDB_SIZE: %d\n",
1051                                 scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1052                         vhost_scsi_send_bad_target(vs, vq, head, out);
1053                         continue;
1054                 }
1055                 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1056                                          exp_data_len + prot_bytes,
1057                                          data_direction);
1058                 if (IS_ERR(cmd)) {
1059                         vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1060                                PTR_ERR(cmd));
1061                         vhost_scsi_send_bad_target(vs, vq, head, out);
1062                         continue;
1063                 }
1064                 cmd->tvc_vhost = vs;
1065                 cmd->tvc_vq = vq;
1066                 cmd->tvc_resp_iov = vq->iov[out];
1067                 cmd->tvc_in_iovs = in;
1068
1069                 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1070                          cmd->tvc_cdb[0], cmd->tvc_lun);
1071                 pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1072                          " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1073
1074                 if (data_direction != DMA_NONE) {
1075                         ret = vhost_scsi_mapal(cmd,
1076                                                prot_bytes, &prot_iter,
1077                                                exp_data_len, &data_iter);
1078                         if (unlikely(ret)) {
1079                                 vq_err(vq, "Failed to map iov to sgl\n");
1080                                 vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
1081                                 vhost_scsi_send_bad_target(vs, vq, head, out);
1082                                 continue;
1083                         }
1084                 }
1085                 /*
1086                  * Save the descriptor from vhost_get_vq_desc() to be used to
1087                  * complete the virtio-scsi request in TCM callback context via
1088                  * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1089                  */
1090                 cmd->tvc_vq_desc = head;
1091                 /*
1092                  * Dispatch cmd descriptor for cmwq execution in process
1093                  * context provided by vhost_scsi_workqueue.  This also ensures
1094                  * cmd is executed on the same kworker CPU as this vhost
1095                  * thread to gain positive L2 cache locality effects.
1096                  */
1097                 INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1098                 queue_work(vhost_scsi_workqueue, &cmd->work);
1099         } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1100 out:
1101         mutex_unlock(&vq->mutex);
1102 }
1103
1104 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1105 {
1106         pr_debug("%s: The handling func for control queue.\n", __func__);
1107 }
1108
1109 static void
1110 vhost_scsi_send_evt(struct vhost_scsi *vs,
1111                    struct vhost_scsi_tpg *tpg,
1112                    struct se_lun *lun,
1113                    u32 event,
1114                    u32 reason)
1115 {
1116         struct vhost_scsi_evt *evt;
1117
1118         evt = vhost_scsi_allocate_evt(vs, event, reason);
1119         if (!evt)
1120                 return;
1121
1122         if (tpg && lun) {
1123                 /* TODO: share lun setup code with virtio-scsi.ko */
1124                 /*
1125                  * Note: evt->event is zeroed when we allocate it and
1126                  * lun[4-7] need to be zero according to virtio-scsi spec.
1127                  */
1128                 evt->event.lun[0] = 0x01;
1129                 evt->event.lun[1] = tpg->tport_tpgt;
1130                 if (lun->unpacked_lun >= 256)
1131                         evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1132                 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1133         }
1134
1135         llist_add(&evt->list, &vs->vs_event_list);
1136         vhost_work_queue(&vs->dev, &vs->vs_event_work);
1137 }
1138
1139 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1140 {
1141         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1142                                                 poll.work);
1143         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1144
1145         mutex_lock(&vq->mutex);
1146         if (!vq->private_data)
1147                 goto out;
1148
1149         if (vs->vs_events_missed)
1150                 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1151 out:
1152         mutex_unlock(&vq->mutex);
1153 }
1154
1155 static void vhost_scsi_handle_kick(struct vhost_work *work)
1156 {
1157         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1158                                                 poll.work);
1159         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1160
1161         vhost_scsi_handle_vq(vs, vq);
1162 }
1163
1164 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1165 {
1166         vhost_poll_flush(&vs->vqs[index].vq.poll);
1167 }
1168
1169 /* Callers must hold dev mutex */
1170 static void vhost_scsi_flush(struct vhost_scsi *vs)
1171 {
1172         struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1173         int i;
1174
1175         /* Init new inflight and remember the old inflight */
1176         vhost_scsi_init_inflight(vs, old_inflight);
1177
1178         /*
1179          * The inflight->kref was initialized to 1. We decrement it here to
1180          * indicate the start of the flush operation so that it will reach 0
1181          * when all the reqs are finished.
1182          */
1183         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1184                 kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1185
1186         /* Flush both the vhost poll and vhost work */
1187         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1188                 vhost_scsi_flush_vq(vs, i);
1189         vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1190         vhost_work_flush(&vs->dev, &vs->vs_event_work);
1191
1192         /* Wait for all reqs issued before the flush to be finished */
1193         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1194                 wait_for_completion(&old_inflight[i]->comp);
1195 }
1196
1197 /*
1198  * Called from vhost_scsi_ioctl() context to walk the list of available
1199  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1200  *
1201  *  The lock nesting rule is:
1202  *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1203  */
1204 static int
1205 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1206                         struct vhost_scsi_target *t)
1207 {
1208         struct se_portal_group *se_tpg;
1209         struct vhost_scsi_tport *tv_tport;
1210         struct vhost_scsi_tpg *tpg;
1211         struct vhost_scsi_tpg **vs_tpg;
1212         struct vhost_virtqueue *vq;
1213         int index, ret, i, len;
1214         bool match = false;
1215
1216         mutex_lock(&vhost_scsi_mutex);
1217         mutex_lock(&vs->dev.mutex);
1218
1219         /* Verify that ring has been set up correctly. */
1220         for (index = 0; index < vs->dev.nvqs; ++index) {
1222                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1223                         ret = -EFAULT;
1224                         goto out;
1225                 }
1226         }
1227
1228         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1229         vs_tpg = kzalloc(len, GFP_KERNEL);
1230         if (!vs_tpg) {
1231                 ret = -ENOMEM;
1232                 goto out;
1233         }
1234         if (vs->vs_tpg)
1235                 memcpy(vs_tpg, vs->vs_tpg, len);
1236
1237         list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1238                 mutex_lock(&tpg->tv_tpg_mutex);
1239                 if (!tpg->tpg_nexus) {
1240                         mutex_unlock(&tpg->tv_tpg_mutex);
1241                         continue;
1242                 }
1243                 if (tpg->tv_tpg_vhost_count != 0) {
1244                         mutex_unlock(&tpg->tv_tpg_mutex);
1245                         continue;
1246                 }
1247                 tv_tport = tpg->tport;
1248
1249                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1250                         if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1251                                 kfree(vs_tpg);
1252                                 mutex_unlock(&tpg->tv_tpg_mutex);
1253                                 ret = -EEXIST;
1254                                 goto out;
1255                         }
1256                         /*
1257                          * In order to ensure individual vhost-scsi configfs
1258                          * groups cannot be removed while in use by vhost ioctl,
1259                          * go ahead and take an explicit se_tpg->tpg_group.cg_item
1260                          * dependency now.
1261                          */
1262                         se_tpg = &tpg->se_tpg;
1263                         ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1264                         if (ret) {
1265                                 pr_warn("configfs_depend_item() failed: %d\n", ret);
1266                                 kfree(vs_tpg);
1267                                 mutex_unlock(&tpg->tv_tpg_mutex);
1268                                 goto out;
1269                         }
1270                         tpg->tv_tpg_vhost_count++;
1271                         tpg->vhost_scsi = vs;
1272                         vs_tpg[tpg->tport_tpgt] = tpg;
1273                         smp_mb__after_atomic();
1274                         match = true;
1275                 }
1276                 mutex_unlock(&tpg->tv_tpg_mutex);
1277         }
1278
1279         if (match) {
1280                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1281                        sizeof(vs->vs_vhost_wwpn));
1282                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1283                         vq = &vs->vqs[i].vq;
1284                         mutex_lock(&vq->mutex);
1285                         vq->private_data = vs_tpg;
1286                         vhost_init_used(vq);
1287                         mutex_unlock(&vq->mutex);
1288                 }
1289                 ret = 0;
1290         } else {
1291                 ret = -EEXIST;
1292         }
1293
1294         /*
1295          * Act as synchronize_rcu to make sure access to
1296          * old vs->vs_tpg is finished.
1297          */
1298         vhost_scsi_flush(vs);
1299         kfree(vs->vs_tpg);
1300         vs->vs_tpg = vs_tpg;
1301
1302 out:
1303         mutex_unlock(&vs->dev.mutex);
1304         mutex_unlock(&vhost_scsi_mutex);
1305         return ret;
1306 }
1307
1308 static int
1309 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1310                           struct vhost_scsi_target *t)
1311 {
1312         struct se_portal_group *se_tpg;
1313         struct vhost_scsi_tport *tv_tport;
1314         struct vhost_scsi_tpg *tpg;
1315         struct vhost_virtqueue *vq;
1316         bool match = false;
1317         int index, ret, i;
1318         u8 target;
1319
1320         mutex_lock(&vhost_scsi_mutex);
1321         mutex_lock(&vs->dev.mutex);
1322         /* Verify that ring has been set up correctly. */
1323         for (index = 0; index < vs->dev.nvqs; ++index) {
1324                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1325                         ret = -EFAULT;
1326                         goto err_dev;
1327                 }
1328         }
1329
1330         if (!vs->vs_tpg) {
1331                 ret = 0;
1332                 goto err_dev;
1333         }
1334
1335         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1336                 target = i;
1337                 tpg = vs->vs_tpg[target];
1338                 if (!tpg)
1339                         continue;
1340
1341                 mutex_lock(&tpg->tv_tpg_mutex);
1342                 tv_tport = tpg->tport;
1343                 if (!tv_tport) {
1344                         ret = -ENODEV;
1345                         goto err_tpg;
1346                 }
1347
1348                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1349                         pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1350                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1351                                 tv_tport->tport_name, tpg->tport_tpgt,
1352                                 t->vhost_wwpn, t->vhost_tpgt);
1353                         ret = -EINVAL;
1354                         goto err_tpg;
1355                 }
1356                 tpg->tv_tpg_vhost_count--;
1357                 tpg->vhost_scsi = NULL;
1358                 vs->vs_tpg[target] = NULL;
1359                 match = true;
1360                 mutex_unlock(&tpg->tv_tpg_mutex);
1361                 /*
1362                  * Release se_tpg->tpg_group.cg_item configfs dependency now
1363                  * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1364                  */
1365                 se_tpg = &tpg->se_tpg;
1366                 target_undepend_item(&se_tpg->tpg_group.cg_item);
1367         }
1368         if (match) {
1369                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1370                         vq = &vs->vqs[i].vq;
1371                         mutex_lock(&vq->mutex);
1372                         vq->private_data = NULL;
1373                         mutex_unlock(&vq->mutex);
1374                 }
1375         }
1376         /*
1377          * Act as synchronize_rcu to make sure access to
1378          * old vs->vs_tpg is finished.
1379          */
1380         vhost_scsi_flush(vs);
1381         kfree(vs->vs_tpg);
1382         vs->vs_tpg = NULL;
1383         WARN_ON(vs->vs_events_nr);
1384         mutex_unlock(&vs->dev.mutex);
1385         mutex_unlock(&vhost_scsi_mutex);
1386         return 0;
1387
1388 err_tpg:
1389         mutex_unlock(&tpg->tv_tpg_mutex);
1390 err_dev:
1391         mutex_unlock(&vs->dev.mutex);
1392         mutex_unlock(&vhost_scsi_mutex);
1393         return ret;
1394 }
1395
1396 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1397 {
1398         struct vhost_virtqueue *vq;
1399         int i;
1400
1401         if (features & ~VHOST_SCSI_FEATURES)
1402                 return -EOPNOTSUPP;
1403
1404         mutex_lock(&vs->dev.mutex);
1405         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1406             !vhost_log_access_ok(&vs->dev)) {
1407                 mutex_unlock(&vs->dev.mutex);
1408                 return -EFAULT;
1409         }
1410
1411         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1412                 vq = &vs->vqs[i].vq;
1413                 mutex_lock(&vq->mutex);
1414                 vq->acked_features = features;
1415                 mutex_unlock(&vq->mutex);
1416         }
1417         mutex_unlock(&vs->dev.mutex);
1418         return 0;
1419 }
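/*
 * A minimal userspace sketch of the feature handshake served above, for
 * illustration only: it assumes an already-opened /dev/vhost-scsi fd with
 * VHOST_SET_OWNER done, and omits all error handling.  Any bit outside
 * VHOST_SCSI_FEATURES is rejected with -EOPNOTSUPP.
 *
 *	__u64 features;
 *
 *	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
 *	// e.g. drop an optional bit the application does not support:
 *	features &= ~(1ULL << VIRTIO_SCSI_F_T10_PI);
 *	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
 */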
1420
1421 static int vhost_scsi_open(struct inode *inode, struct file *f)
1422 {
1423         struct vhost_scsi *vs;
1424         struct vhost_virtqueue **vqs;
1425         int r = -ENOMEM, i;
1426
1427         vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1428         if (!vs) {
1429                 vs = vzalloc(sizeof(*vs));
1430                 if (!vs)
1431                         goto err_vs;
1432         }
1433
1434         vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
1435         if (!vqs)
1436                 goto err_vqs;
1437
1438         vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1439         vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1440
1441         vs->vs_events_nr = 0;
1442         vs->vs_events_missed = false;
1443
1444         vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1445         vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1446         vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1447         vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1448         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1449                 vqs[i] = &vs->vqs[i].vq;
1450                 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1451         }
1452         vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ,
1453                        VHOST_SCSI_WEIGHT, 0);
1454
1455         vhost_scsi_init_inflight(vs, NULL);
1456
1457         f->private_data = vs;
1458         return 0;
1459
1460 err_vqs:
1461         kvfree(vs);
1462 err_vs:
1463         return r;
1464 }
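/*
 * The open path tries a physically contiguous kzalloc() first (quietly,
 * via __GFP_NOWARN) and falls back to vzalloc() when the allocation is
 * too large; kvfree() in the error path and in vhost_scsi_release()
 * handles either variant.  The vhost_scsi instance then rides on
 * f->private_data for the remaining file operations.
 */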
1465
1466 static int vhost_scsi_release(struct inode *inode, struct file *f)
1467 {
1468         struct vhost_scsi *vs = f->private_data;
1469         struct vhost_scsi_target t;
1470
1471         mutex_lock(&vs->dev.mutex);
1472         memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1473         mutex_unlock(&vs->dev.mutex);
1474         vhost_scsi_clear_endpoint(vs, &t);
1475         vhost_dev_stop(&vs->dev);
1476         vhost_dev_cleanup(&vs->dev, false);
1477         /* Jobs can re-queue themselves in the evt kick handler, so do an extra flush. */
1478         vhost_scsi_flush(vs);
1479         kfree(vs->dev.vqs);
1480         kvfree(vs);
1481         return 0;
1482 }
1483
1484 static long
1485 vhost_scsi_ioctl(struct file *f,
1486                  unsigned int ioctl,
1487                  unsigned long arg)
1488 {
1489         struct vhost_scsi *vs = f->private_data;
1490         struct vhost_scsi_target backend;
1491         void __user *argp = (void __user *)arg;
1492         u64 __user *featurep = argp;
1493         u32 __user *eventsp = argp;
1494         u32 events_missed;
1495         u64 features;
1496         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1497         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1498
1499         switch (ioctl) {
1500         case VHOST_SCSI_SET_ENDPOINT:
1501                 if (copy_from_user(&backend, argp, sizeof backend))
1502                         return -EFAULT;
1503                 if (backend.reserved != 0)
1504                         return -EOPNOTSUPP;
1505
1506                 return vhost_scsi_set_endpoint(vs, &backend);
1507         case VHOST_SCSI_CLEAR_ENDPOINT:
1508                 if (copy_from_user(&backend, argp, sizeof backend))
1509                         return -EFAULT;
1510                 if (backend.reserved != 0)
1511                         return -EOPNOTSUPP;
1512
1513                 return vhost_scsi_clear_endpoint(vs, &backend);
1514         case VHOST_SCSI_GET_ABI_VERSION:
1515                 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1516                         return -EFAULT;
1517                 return 0;
1518         case VHOST_SCSI_SET_EVENTS_MISSED:
1519                 if (get_user(events_missed, eventsp))
1520                         return -EFAULT;
1521                 mutex_lock(&vq->mutex);
1522                 vs->vs_events_missed = events_missed;
1523                 mutex_unlock(&vq->mutex);
1524                 return 0;
1525         case VHOST_SCSI_GET_EVENTS_MISSED:
1526                 mutex_lock(&vq->mutex);
1527                 events_missed = vs->vs_events_missed;
1528                 mutex_unlock(&vq->mutex);
1529                 if (put_user(events_missed, eventsp))
1530                         return -EFAULT;
1531                 return 0;
1532         case VHOST_GET_FEATURES:
1533                 features = VHOST_SCSI_FEATURES;
1534                 if (copy_to_user(featurep, &features, sizeof features))
1535                         return -EFAULT;
1536                 return 0;
1537         case VHOST_SET_FEATURES:
1538                 if (copy_from_user(&features, featurep, sizeof features))
1539                         return -EFAULT;
1540                 return vhost_scsi_set_features(vs, features);
1541         default:
1542                 mutex_lock(&vs->dev.mutex);
1543                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1544                 /* TODO: flush backend after dev ioctl. */
1545                 if (r == -ENOIOCTLCMD)
1546                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1547                 mutex_unlock(&vs->dev.mutex);
1548                 return r;
1549         }
1550 }
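/*
 * For orientation, a hedged userspace sketch of the ioctl sequence this
 * handler serves.  The WWPN below is a made-up example, the uapi struct
 * vhost_scsi_target layout (abi_version/vhost_wwpn/vhost_tpgt/reserved)
 * is assumed, and vring/memory-table setup plus error handling are
 * omitted.
 *
 *	int fd = open("/dev/vhost-scsi", O_RDWR);
 *	struct vhost_scsi_target tgt = { 0 };
 *
 *	ioctl(fd, VHOST_SET_OWNER);
 *	ioctl(fd, VHOST_SCSI_GET_ABI_VERSION, &tgt.abi_version);
 *	snprintf(tgt.vhost_wwpn, sizeof(tgt.vhost_wwpn), "naa.600140512345678a");
 *	tgt.vhost_tpgt = 1;
 *	ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &tgt);
 *	// ... run the guest, then on teardown:
 *	ioctl(fd, VHOST_SCSI_CLEAR_ENDPOINT, &tgt);
 */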
1551
1552 #ifdef CONFIG_COMPAT
1553 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1554                                 unsigned long arg)
1555 {
1556         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1557 }
1558 #endif
1559
1560 static const struct file_operations vhost_scsi_fops = {
1561         .owner          = THIS_MODULE,
1562         .release        = vhost_scsi_release,
1563         .unlocked_ioctl = vhost_scsi_ioctl,
1564 #ifdef CONFIG_COMPAT
1565         .compat_ioctl   = vhost_scsi_compat_ioctl,
1566 #endif
1567         .open           = vhost_scsi_open,
1568         .llseek         = noop_llseek,
1569 };
1570
1571 static struct miscdevice vhost_scsi_misc = {
1572         MISC_DYNAMIC_MINOR,
1573         "vhost-scsi",
1574         &vhost_scsi_fops,
1575 };
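/* Registered with a dynamic minor; the device node appears as /dev/vhost-scsi. */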
1576
1577 static int __init vhost_scsi_register(void)
1578 {
1579         return misc_register(&vhost_scsi_misc);
1580 }
1581
1582 static void vhost_scsi_deregister(void)
1583 {
1584         misc_deregister(&vhost_scsi_misc);
1585 }
1586
1587 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1588 {
1589         switch (tport->tport_proto_id) {
1590         case SCSI_PROTOCOL_SAS:
1591                 return "SAS";
1592         case SCSI_PROTOCOL_FCP:
1593                 return "FCP";
1594         case SCSI_PROTOCOL_ISCSI:
1595                 return "iSCSI";
1596         default:
1597                 break;
1598         }
1599
1600         return "Unknown";
1601 }
1602
1603 static void
1604 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1605                   struct se_lun *lun, bool plug)
1606 {
1607
1608         struct vhost_scsi *vs = tpg->vhost_scsi;
1609         struct vhost_virtqueue *vq;
1610         u32 reason;
1611
1612         if (!vs)
1613                 return;
1614
1615         mutex_lock(&vs->dev.mutex);
1616
1617         if (plug)
1618                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1619         else
1620                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1621
1622         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1623         mutex_lock(&vq->mutex);
1624         if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1625                 vhost_scsi_send_evt(vs, tpg, lun,
1626                                    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1627         mutex_unlock(&vq->mutex);
1628         mutex_unlock(&vs->dev.mutex);
1629 }
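/*
 * LUN plug/unplug is reported to the guest as a
 * VIRTIO_SCSI_T_TRANSPORT_RESET event with a RESCAN or REMOVED reason,
 * but only if the guest negotiated VIRTIO_SCSI_F_HOTPLUG; otherwise the
 * notification is skipped and the guest has to rescan on its own.
 */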
1630
1631 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1632 {
1633         vhost_scsi_do_plug(tpg, lun, true);
1634 }
1635
1636 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1637 {
1638         vhost_scsi_do_plug(tpg, lun, false);
1639 }
1640
1641 static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1642                                struct se_lun *lun)
1643 {
1644         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1645                                 struct vhost_scsi_tpg, se_tpg);
1646
1647         mutex_lock(&vhost_scsi_mutex);
1648
1649         mutex_lock(&tpg->tv_tpg_mutex);
1650         tpg->tv_tpg_port_count++;
1651         mutex_unlock(&tpg->tv_tpg_mutex);
1652
1653         vhost_scsi_hotplug(tpg, lun);
1654
1655         mutex_unlock(&vhost_scsi_mutex);
1656
1657         return 0;
1658 }
1659
1660 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1661                                   struct se_lun *lun)
1662 {
1663         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1664                                 struct vhost_scsi_tpg, se_tpg);
1665
1666         mutex_lock(&vhost_scsi_mutex);
1667
1668         mutex_lock(&tpg->tv_tpg_mutex);
1669         tpg->tv_tpg_port_count--;
1670         mutex_unlock(&tpg->tv_tpg_mutex);
1671
1672         vhost_scsi_hotunplug(tpg, lun);
1673
1674         mutex_unlock(&vhost_scsi_mutex);
1675 }
1676
1677 static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
1678                                        struct se_session *se_sess)
1679 {
1680         struct vhost_scsi_cmd *tv_cmd;
1681         unsigned int i;
1682
1683         if (!se_sess->sess_cmd_map)
1684                 return;
1685
1686         for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1687                 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1688
1689                 kfree(tv_cmd->tvc_sgl);
1690                 kfree(tv_cmd->tvc_prot_sgl);
1691                 kfree(tv_cmd->tvc_upages);
1692         }
1693 }
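/*
 * Undo the per-tag preallocation done in vhost_scsi_make_nexus(): every
 * command slot in the session tag pool owns its own tvc_sgl,
 * tvc_prot_sgl and tvc_upages arrays.  This runs both on the nexus
 * setup error path and from vhost_scsi_drop_nexus().
 */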
1694
1695 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
1696                 struct config_item *item, const char *page, size_t count)
1697 {
1698         struct se_portal_group *se_tpg = attrib_to_tpg(item);
1699         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1700                                 struct vhost_scsi_tpg, se_tpg);
1701         unsigned long val;
1702         int ret = kstrtoul(page, 0, &val);
1703
1704         if (ret) {
1705                 pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
1706                 return ret;
1707         }
1708         if (val != 0 && val != 1 && val != 3) {
1709                 pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
1710                 return -EINVAL;
1711         }
1712         tpg->tv_fabric_prot_type = val;
1713
1714         return count;
1715 }
1716
1717 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
1718                 struct config_item *item, char *page)
1719 {
1720         struct se_portal_group *se_tpg = attrib_to_tpg(item);
1721         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1722                                 struct vhost_scsi_tpg, se_tpg);
1723
1724         return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
1725 }
1726
1727 CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
1728
1729 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
1730         &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
1731         NULL,
1732 };
1733
1734 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1735                                 const char *name)
1736 {
1737         struct se_portal_group *se_tpg;
1738         struct se_session *se_sess;
1739         struct vhost_scsi_nexus *tv_nexus;
1740         struct vhost_scsi_cmd *tv_cmd;
1741         unsigned int i;
1742
1743         mutex_lock(&tpg->tv_tpg_mutex);
1744         if (tpg->tpg_nexus) {
1745                 mutex_unlock(&tpg->tv_tpg_mutex);
1746                 pr_debug("tpg->tpg_nexus already exists\n");
1747                 return -EEXIST;
1748         }
1749         se_tpg = &tpg->se_tpg;
1750
1751         tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
1752         if (!tv_nexus) {
1753                 mutex_unlock(&tpg->tv_tpg_mutex);
1754                 pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1755                 return -ENOMEM;
1756         }
1757         /*
1758          *  Initialize the struct se_session pointer and setup tagpool
1759          *  for struct vhost_scsi_cmd descriptors
1760          */
1761         tv_nexus->tvn_se_sess = transport_init_session_tags(
1762                                         VHOST_SCSI_DEFAULT_TAGS,
1763                                         sizeof(struct vhost_scsi_cmd),
1764                                         TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
1765         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1766                 mutex_unlock(&tpg->tv_tpg_mutex);
1767                 kfree(tv_nexus);
1768                 return -ENOMEM;
1769         }
1770         se_sess = tv_nexus->tvn_se_sess;
1771         for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1772                 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1773
1774                 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1775                                         VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
1776                 if (!tv_cmd->tvc_sgl) {
1777                         mutex_unlock(&tpg->tv_tpg_mutex);
1778                         pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1779                         goto out;
1780                 }
1781
1782                 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1783                                         VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
1784                 if (!tv_cmd->tvc_upages) {
1785                         mutex_unlock(&tpg->tv_tpg_mutex);
1786                         pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1787                         goto out;
1788                 }
1789
1790                 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
1791                                         VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
1792                 if (!tv_cmd->tvc_prot_sgl) {
1793                         mutex_unlock(&tpg->tv_tpg_mutex);
1794                         pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1795                         goto out;
1796                 }
1797         }
1798         /*
1799          * Since we are running in 'demo mode', this call will generate a
1800          * struct se_node_acl for the vhost_scsi struct se_portal_group with
1801          * the SCSI Initiator port name of the passed configfs group 'name'.
1802          */
1803         tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1804                                 se_tpg, (unsigned char *)name);
1805         if (!tv_nexus->tvn_se_sess->se_node_acl) {
1806                 mutex_unlock(&tpg->tv_tpg_mutex);
1807                 pr_debug("core_tpg_check_initiator_node_acl() failed"
1808                                 " for %s\n", name);
1809                 goto out;
1810         }
1811         /*
1812          * Now register the TCM vhost virtual I_T Nexus as active.
1813          */
1814         transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1815                         tv_nexus->tvn_se_sess, tv_nexus);
1816         tpg->tpg_nexus = tv_nexus;
1817
1818         mutex_unlock(&tpg->tv_tpg_mutex);
1819         return 0;
1820
1821 out:
1822         vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
1823         transport_free_session(se_sess);
1824         kfree(tv_nexus);
1825         return -ENOMEM;
1826 }
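/*
 * A hedged sketch, in userspace C, of how this path is typically driven
 * through configfs.  The mount point and the WWN/nexus strings are
 * illustrative assumptions, not values taken from this driver, and
 * error handling is omitted.
 *
 *	int fd;
 *
 *	mkdir("/sys/kernel/config/target/vhost/naa.600140512345678a", 0755);
 *						// -> vhost_scsi_make_tport()
 *	mkdir("/sys/kernel/config/target/vhost/naa.600140512345678a/tpgt_1", 0755);
 *						// -> vhost_scsi_make_tpg()
 *	fd = open("/sys/kernel/config/target/vhost/naa.600140512345678a"
 *		  "/tpgt_1/nexus", O_WRONLY);
 *	write(fd, "naa.600140512345678a", 20);	// -> vhost_scsi_make_nexus()
 *	close(fd);
 *
 * Writing "NULL" to the same attribute drops the nexus again via
 * vhost_scsi_drop_nexus().
 */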
1827
1828 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1829 {
1830         struct se_session *se_sess;
1831         struct vhost_scsi_nexus *tv_nexus;
1832
1833         mutex_lock(&tpg->tv_tpg_mutex);
1834         tv_nexus = tpg->tpg_nexus;
1835         if (!tv_nexus) {
1836                 mutex_unlock(&tpg->tv_tpg_mutex);
1837                 return -ENODEV;
1838         }
1839
1840         se_sess = tv_nexus->tvn_se_sess;
1841         if (!se_sess) {
1842                 mutex_unlock(&tpg->tv_tpg_mutex);
1843                 return -ENODEV;
1844         }
1845
1846         if (tpg->tv_tpg_port_count != 0) {
1847                 mutex_unlock(&tpg->tv_tpg_mutex);
1848                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1849                         " active TPG port count: %d\n",
1850                         tpg->tv_tpg_port_count);
1851                 return -EBUSY;
1852         }
1853
1854         if (tpg->tv_tpg_vhost_count != 0) {
1855                 mutex_unlock(&tpg->tv_tpg_mutex);
1856                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1857                         " active TPG vhost count: %d\n",
1858                         tpg->tv_tpg_vhost_count);
1859                 return -EBUSY;
1860         }
1861
1862         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1863                 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
1864                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1865
1866         vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
1867         /*
1868          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1869          */
1870         transport_deregister_session(tv_nexus->tvn_se_sess);
1871         tpg->tpg_nexus = NULL;
1872         mutex_unlock(&tpg->tv_tpg_mutex);
1873
1874         kfree(tv_nexus);
1875         return 0;
1876 }
1877
1878 static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
1879 {
1880         struct se_portal_group *se_tpg = to_tpg(item);
1881         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1882                                 struct vhost_scsi_tpg, se_tpg);
1883         struct vhost_scsi_nexus *tv_nexus;
1884         ssize_t ret;
1885
1886         mutex_lock(&tpg->tv_tpg_mutex);
1887         tv_nexus = tpg->tpg_nexus;
1888         if (!tv_nexus) {
1889                 mutex_unlock(&tpg->tv_tpg_mutex);
1890                 return -ENODEV;
1891         }
1892         ret = snprintf(page, PAGE_SIZE, "%s\n",
1893                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1894         mutex_unlock(&tpg->tv_tpg_mutex);
1895
1896         return ret;
1897 }
1898
1899 static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
1900                 const char *page, size_t count)
1901 {
1902         struct se_portal_group *se_tpg = to_tpg(item);
1903         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1904                                 struct vhost_scsi_tpg, se_tpg);
1905         struct vhost_scsi_tport *tport_wwn = tpg->tport;
1906         unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
1907         int ret;
1908         /*
1909          * Shut down the active I_T nexus if 'NULL' is passed.
1910          */
1911         if (!strncmp(page, "NULL", 4)) {
1912                 ret = vhost_scsi_drop_nexus(tpg);
1913                 return (!ret) ? count : ret;
1914         }
1915         /*
1916          * Otherwise make sure the passed virtual Initiator port WWN matches
1917          * the fabric protocol_id set in vhost_scsi_make_tport(), and call
1918          * vhost_scsi_make_nexus().
1919          */
1920         if (strlen(page) >= VHOST_SCSI_NAMELEN) {
1921                 pr_err("Emulated NAA Sas Address: %s, exceeds"
1922                                 " max: %d\n", page, VHOST_SCSI_NAMELEN);
1923                 return -EINVAL;
1924         }
1925         snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
1926
1927         ptr = strstr(i_port, "naa.");
1928         if (ptr) {
1929                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1930                         pr_err("Passed SAS Initiator Port %s does not"
1931                                 " match target port protoid: %s\n", i_port,
1932                                 vhost_scsi_dump_proto_id(tport_wwn));
1933                         return -EINVAL;
1934                 }
1935                 port_ptr = &i_port[0];
1936                 goto check_newline;
1937         }
1938         ptr = strstr(i_port, "fc.");
1939         if (ptr) {
1940                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1941                         pr_err("Passed FCP Initiator Port %s does not"
1942                                 " match target port protoid: %s\n", i_port,
1943                                 vhost_scsi_dump_proto_id(tport_wwn));
1944                         return -EINVAL;
1945                 }
1946                 port_ptr = &i_port[3]; /* Skip over "fc." */
1947                 goto check_newline;
1948         }
1949         ptr = strstr(i_port, "iqn.");
1950         if (ptr) {
1951                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1952                         pr_err("Passed iSCSI Initiator Port %s does not"
1953                                 " match target port protoid: %s\n", i_port,
1954                                 vhost_scsi_dump_proto_id(tport_wwn));
1955                         return -EINVAL;
1956                 }
1957                 port_ptr = &i_port[0];
1958                 goto check_newline;
1959         }
1960         pr_err("Unable to locate prefix for emulated Initiator Port:"
1961                         " %s\n", i_port);
1962         return -EINVAL;
1963         /*
1964          * Clear any trailing newline for the NAA WWN
1965          */
1966 check_newline:
1967         if (i_port[strlen(i_port)-1] == '\n')
1968                 i_port[strlen(i_port)-1] = '\0';
1969
1970         ret = vhost_scsi_make_nexus(tpg, port_ptr);
1971         if (ret < 0)
1972                 return ret;
1973
1974         return count;
1975 }
1976
1977 CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
1978
1979 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
1980         &vhost_scsi_tpg_attr_nexus,
1981         NULL,
1982 };
1983
1984 static struct se_portal_group *
1985 vhost_scsi_make_tpg(struct se_wwn *wwn,
1986                    struct config_group *group,
1987                    const char *name)
1988 {
1989         struct vhost_scsi_tport *tport = container_of(wwn,
1990                         struct vhost_scsi_tport, tport_wwn);
1991
1992         struct vhost_scsi_tpg *tpg;
1993         u16 tpgt;
1994         int ret;
1995
1996         if (strstr(name, "tpgt_") != name)
1997                 return ERR_PTR(-EINVAL);
1998         if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
1999                 return ERR_PTR(-EINVAL);
2000
2001         tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
2002         if (!tpg) {
2003                 pr_err("Unable to allocate struct vhost_scsi_tpg");
2004                 return ERR_PTR(-ENOMEM);
2005         }
2006         mutex_init(&tpg->tv_tpg_mutex);
2007         INIT_LIST_HEAD(&tpg->tv_tpg_list);
2008         tpg->tport = tport;
2009         tpg->tport_tpgt = tpgt;
2010
2011         ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2012         if (ret < 0) {
2013                 kfree(tpg);
2014                 return NULL;
2015         }
2016         mutex_lock(&vhost_scsi_mutex);
2017         list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2018         mutex_unlock(&vhost_scsi_mutex);
2019
2020         return &tpg->se_tpg;
2021 }
2022
2023 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2024 {
2025         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2026                                 struct vhost_scsi_tpg, se_tpg);
2027
2028         mutex_lock(&vhost_scsi_mutex);
2029         list_del(&tpg->tv_tpg_list);
2030         mutex_unlock(&vhost_scsi_mutex);
2031         /*
2032          * Release the virtual I_T Nexus for this vhost TPG
2033          */
2034         vhost_scsi_drop_nexus(tpg);
2035         /*
2036          * Deregister the se_tpg from TCM.
2037          */
2038         core_tpg_deregister(se_tpg);
2039         kfree(tpg);
2040 }
2041
2042 static struct se_wwn *
2043 vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2044                      struct config_group *group,
2045                      const char *name)
2046 {
2047         struct vhost_scsi_tport *tport;
2048         char *ptr;
2049         u64 wwpn = 0;
2050         int off = 0;
2051
2052         /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2053                 return ERR_PTR(-EINVAL); */
2054
2055         tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
2056         if (!tport) {
2057                 pr_err("Unable to allocate struct vhost_scsi_tport");
2058                 return ERR_PTR(-ENOMEM);
2059         }
2060         tport->tport_wwpn = wwpn;
2061         /*
2062          * Determine the emulated Protocol Identifier and Target Port Name
2063          * based on the incoming configfs directory name.
2064          */
2065         ptr = strstr(name, "naa.");
2066         if (ptr) {
2067                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2068                 goto check_len;
2069         }
2070         ptr = strstr(name, "fc.");
2071         if (ptr) {
2072                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2073                 off = 3; /* Skip over "fc." */
2074                 goto check_len;
2075         }
2076         ptr = strstr(name, "iqn.");
2077         if (ptr) {
2078                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2079                 goto check_len;
2080         }
2081
2082         pr_err("Unable to locate prefix for emulated Target Port:"
2083                         " %s\n", name);
2084         kfree(tport);
2085         return ERR_PTR(-EINVAL);
2086
2087 check_len:
2088         if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2089                 pr_err("Emulated %s Address: %s, exceeds"
2090                         " max: %d\n", name, vhost_scsi_dump_proto_id(tport),
2091                         VHOST_SCSI_NAMELEN);
2092                 kfree(tport);
2093                 return ERR_PTR(-EINVAL);
2094         }
2095         snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2096
2097         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2098                 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2099
2100         return &tport->tport_wwn;
2101 }
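/*
 * The configfs directory name chosen by the administrator selects the
 * emulated transport: a "naa." prefix maps to SAS, "fc." to FCP (with
 * the prefix stripped from the stored tport_name), and "iqn." to iSCSI.
 * Any other prefix, or a name at or beyond VHOST_SCSI_NAMELEN, is
 * rejected with -EINVAL.
 */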
2102
2103 static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2104 {
2105         struct vhost_scsi_tport *tport = container_of(wwn,
2106                                 struct vhost_scsi_tport, tport_wwn);
2107
2108         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2109                 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2110                 tport->tport_name);
2111
2112         kfree(tport);
2113 }
2114
2115 static ssize_t
2116 vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2117 {
2118         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2119                 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2120                 utsname()->machine);
2121 }
2122
2123 CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2124
2125 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2126         &vhost_scsi_wwn_attr_version,
2127         NULL,
2128 };
2129
2130 static struct target_core_fabric_ops vhost_scsi_ops = {
2131         .module                         = THIS_MODULE,
2132         .name                           = "vhost",
2133         .get_fabric_name                = vhost_scsi_get_fabric_name,
2134         .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
2135         .tpg_get_tag                    = vhost_scsi_get_tpgt,
2136         .tpg_check_demo_mode            = vhost_scsi_check_true,
2137         .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
2138         .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2139         .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2140         .tpg_check_prot_fabric_only     = vhost_scsi_check_prot_fabric_only,
2141         .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
2142         .release_cmd                    = vhost_scsi_release_cmd,
2143         .check_stop_free                = vhost_scsi_check_stop_free,
2144         .shutdown_session               = vhost_scsi_shutdown_session,
2145         .close_session                  = vhost_scsi_close_session,
2146         .sess_get_index                 = vhost_scsi_sess_get_index,
2147         .sess_get_initiator_sid         = NULL,
2148         .write_pending                  = vhost_scsi_write_pending,
2149         .write_pending_status           = vhost_scsi_write_pending_status,
2150         .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
2151         .get_cmd_state                  = vhost_scsi_get_cmd_state,
2152         .queue_data_in                  = vhost_scsi_queue_data_in,
2153         .queue_status                   = vhost_scsi_queue_status,
2154         .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
2155         .aborted_task                   = vhost_scsi_aborted_task,
2156         /*
2157          * Setup callers for generic logic in target_core_fabric_configfs.c
2158          */
2159         .fabric_make_wwn                = vhost_scsi_make_tport,
2160         .fabric_drop_wwn                = vhost_scsi_drop_tport,
2161         .fabric_make_tpg                = vhost_scsi_make_tpg,
2162         .fabric_drop_tpg                = vhost_scsi_drop_tpg,
2163         .fabric_post_link               = vhost_scsi_port_link,
2164         .fabric_pre_unlink              = vhost_scsi_port_unlink,
2165
2166         .tfc_wwn_attrs                  = vhost_scsi_wwn_attrs,
2167         .tfc_tpg_base_attrs             = vhost_scsi_tpg_attrs,
2168         .tfc_tpg_attrib_attrs           = vhost_scsi_tpg_attrib_attrs,
2169 };
2170
2171 static int __init vhost_scsi_init(void)
2172 {
2173         int ret = -ENOMEM;
2174
2175         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2176                 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2177                 utsname()->machine);
2178
2179         /*
2180          * Use our own dedicated workqueue for submitting I/O into
2181          * target core to avoid contention within system_wq.
2182          */
2183         vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2184         if (!vhost_scsi_workqueue)
2185                 goto out;
2186
2187         ret = vhost_scsi_register();
2188         if (ret < 0)
2189                 goto out_destroy_workqueue;
2190
2191         ret = target_register_template(&vhost_scsi_ops);
2192         if (ret < 0)
2193                 goto out_vhost_scsi_deregister;
2194
2195         return 0;
2196
2197 out_vhost_scsi_deregister:
2198         vhost_scsi_deregister();
2199 out_destroy_workqueue:
2200         destroy_workqueue(vhost_scsi_workqueue);
2201 out:
2202         return ret;
2203 }
2204
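/*
 * Module teardown reverses the init order: unregister the fabric
 * template first so configfs can no longer reach the driver, then drop
 * the misc device, then destroy the private workqueue.
 */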
2205 static void vhost_scsi_exit(void)
2206 {
2207         target_unregister_template(&vhost_scsi_ops);
2208         vhost_scsi_deregister();
2209         destroy_workqueue(vhost_scsi_workqueue);
2210 }
2211
2212 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2213 MODULE_ALIAS("tcm_vhost");
2214 MODULE_LICENSE("GPL");
2215 module_init(vhost_scsi_init);
2216 module_exit(vhost_scsi_exit);