GNU Linux-libre 4.9.309-gnu1
1 /*******************************************************************************
2  * Filename:  target_core_transport.c
3  *
4  * This file contains the Generic Target Engine Core.
5  *
6  * (c) Copyright 2002-2013 Datera, Inc.
7  *
8  * Nicholas A. Bellinger <nab@kernel.org>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23  *
24  ******************************************************************************/
25
26 #include <linux/net.h>
27 #include <linux/delay.h>
28 #include <linux/string.h>
29 #include <linux/timer.h>
30 #include <linux/slab.h>
31 #include <linux/spinlock.h>
32 #include <linux/kthread.h>
33 #include <linux/in.h>
34 #include <linux/cdrom.h>
35 #include <linux/module.h>
36 #include <linux/ratelimit.h>
37 #include <linux/vmalloc.h>
38 #include <asm/unaligned.h>
39 #include <net/sock.h>
40 #include <net/tcp.h>
41 #include <scsi/scsi_proto.h>
42 #include <scsi/scsi_common.h>
43
44 #include <target/target_core_base.h>
45 #include <target/target_core_backend.h>
46 #include <target/target_core_fabric.h>
47
48 #include "target_core_internal.h"
49 #include "target_core_alua.h"
50 #include "target_core_pr.h"
51 #include "target_core_ua.h"
52
53 #define CREATE_TRACE_POINTS
54 #include <trace/events/target.h>
55
56 static struct workqueue_struct *target_completion_wq;
57 static struct kmem_cache *se_sess_cache;
58 struct kmem_cache *se_ua_cache;
59 struct kmem_cache *t10_pr_reg_cache;
60 struct kmem_cache *t10_alua_lu_gp_cache;
61 struct kmem_cache *t10_alua_lu_gp_mem_cache;
62 struct kmem_cache *t10_alua_tg_pt_gp_cache;
63 struct kmem_cache *t10_alua_lba_map_cache;
64 struct kmem_cache *t10_alua_lba_map_mem_cache;
65
66 static void transport_complete_task_attr(struct se_cmd *cmd);
67 static void transport_handle_queue_full(struct se_cmd *cmd,
68                 struct se_device *dev);
69 static int transport_put_cmd(struct se_cmd *cmd);
70 static void target_complete_ok_work(struct work_struct *work);
71
72 int init_se_kmem_caches(void)
73 {
74         se_sess_cache = kmem_cache_create("se_sess_cache",
75                         sizeof(struct se_session), __alignof__(struct se_session),
76                         0, NULL);
77         if (!se_sess_cache) {
78                 pr_err("kmem_cache_create() for struct se_session"
79                                 " failed\n");
80                 goto out;
81         }
82         se_ua_cache = kmem_cache_create("se_ua_cache",
83                         sizeof(struct se_ua), __alignof__(struct se_ua),
84                         0, NULL);
85         if (!se_ua_cache) {
86                 pr_err("kmem_cache_create() for struct se_ua failed\n");
87                 goto out_free_sess_cache;
88         }
89         t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
90                         sizeof(struct t10_pr_registration),
91                         __alignof__(struct t10_pr_registration), 0, NULL);
92         if (!t10_pr_reg_cache) {
93                 pr_err("kmem_cache_create() for struct t10_pr_registration"
94                                 " failed\n");
95                 goto out_free_ua_cache;
96         }
97         t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
98                         sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
99                         0, NULL);
100         if (!t10_alua_lu_gp_cache) {
101                 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
102                                 " failed\n");
103                 goto out_free_pr_reg_cache;
104         }
105         t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
106                         sizeof(struct t10_alua_lu_gp_member),
107                         __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
108         if (!t10_alua_lu_gp_mem_cache) {
109                 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
110                                 "cache failed\n");
111                 goto out_free_lu_gp_cache;
112         }
113         t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
114                         sizeof(struct t10_alua_tg_pt_gp),
115                         __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
116         if (!t10_alua_tg_pt_gp_cache) {
117                 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
118                                 "cache failed\n");
119                 goto out_free_lu_gp_mem_cache;
120         }
121         t10_alua_lba_map_cache = kmem_cache_create(
122                         "t10_alua_lba_map_cache",
123                         sizeof(struct t10_alua_lba_map),
124                         __alignof__(struct t10_alua_lba_map), 0, NULL);
125         if (!t10_alua_lba_map_cache) {
126                 pr_err("kmem_cache_create() for t10_alua_lba_map_"
127                                 "cache failed\n");
128                 goto out_free_tg_pt_gp_cache;
129         }
130         t10_alua_lba_map_mem_cache = kmem_cache_create(
131                         "t10_alua_lba_map_mem_cache",
132                         sizeof(struct t10_alua_lba_map_member),
133                         __alignof__(struct t10_alua_lba_map_member), 0, NULL);
134         if (!t10_alua_lba_map_mem_cache) {
135                 pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
136                                 "cache failed\n");
137                 goto out_free_lba_map_cache;
138         }
139
140         target_completion_wq = alloc_workqueue("target_completion",
141                                                WQ_MEM_RECLAIM, 0);
142         if (!target_completion_wq)
143                 goto out_free_lba_map_mem_cache;
144
145         return 0;
146
147 out_free_lba_map_mem_cache:
148         kmem_cache_destroy(t10_alua_lba_map_mem_cache);
149 out_free_lba_map_cache:
150         kmem_cache_destroy(t10_alua_lba_map_cache);
151 out_free_tg_pt_gp_cache:
152         kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
153 out_free_lu_gp_mem_cache:
154         kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
155 out_free_lu_gp_cache:
156         kmem_cache_destroy(t10_alua_lu_gp_cache);
157 out_free_pr_reg_cache:
158         kmem_cache_destroy(t10_pr_reg_cache);
159 out_free_ua_cache:
160         kmem_cache_destroy(se_ua_cache);
161 out_free_sess_cache:
162         kmem_cache_destroy(se_sess_cache);
163 out:
164         return -ENOMEM;
165 }
166
167 void release_se_kmem_caches(void)
168 {
169         destroy_workqueue(target_completion_wq);
170         kmem_cache_destroy(se_sess_cache);
171         kmem_cache_destroy(se_ua_cache);
172         kmem_cache_destroy(t10_pr_reg_cache);
173         kmem_cache_destroy(t10_alua_lu_gp_cache);
174         kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
175         kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
176         kmem_cache_destroy(t10_alua_lba_map_cache);
177         kmem_cache_destroy(t10_alua_lba_map_mem_cache);
178 }
179
180 /* This code ensures unique mib indexes are handed out. */
181 static DEFINE_SPINLOCK(scsi_mib_index_lock);
182 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
183
184 /*
185  * Allocate a new row index for the entry type specified
186  */
187 u32 scsi_get_new_index(scsi_index_t type)
188 {
189         u32 new_index;
190
191         BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
192
193         spin_lock(&scsi_mib_index_lock);
194         new_index = ++scsi_mib_index[type];
195         spin_unlock(&scsi_mib_index_lock);
196
197         return new_index;
198 }
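/*
 * Example (illustrative sketch, not part of the original file): callers pass
 * one of the scsi_index_t enumerators from target_core_base.h when assigning
 * a statistics index to a new object; a backend device setup path might do
 * something like (assuming the SCSI_DEVICE_INDEX enumerator):
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 */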
199
200 void transport_subsystem_check_init(void)
201 {
202         int ret;
203         static int sub_api_initialized;
204
205         if (sub_api_initialized)
206                 return;
207
208         ret = request_module("target_core_iblock");
209         if (ret != 0)
210                 pr_err("Unable to load target_core_iblock\n");
211
212         ret = request_module("target_core_file");
213         if (ret != 0)
214                 pr_err("Unable to load target_core_file\n");
215
216         ret = request_module("target_core_pscsi");
217         if (ret != 0)
218                 pr_err("Unable to load target_core_pscsi\n");
219
220         ret = request_module("target_core_user");
221         if (ret != 0)
222                 pr_err("Unable to load target_core_user\n");
223
224         sub_api_initialized = 1;
225 }
226
227 struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
228 {
229         struct se_session *se_sess;
230
231         se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
232         if (!se_sess) {
233                 pr_err("Unable to allocate struct se_session from"
234                                 " se_sess_cache\n");
235                 return ERR_PTR(-ENOMEM);
236         }
237         INIT_LIST_HEAD(&se_sess->sess_list);
238         INIT_LIST_HEAD(&se_sess->sess_acl_list);
239         INIT_LIST_HEAD(&se_sess->sess_cmd_list);
240         INIT_LIST_HEAD(&se_sess->sess_wait_list);
241         spin_lock_init(&se_sess->sess_cmd_lock);
242         se_sess->sup_prot_ops = sup_prot_ops;
243
244         return se_sess;
245 }
246 EXPORT_SYMBOL(transport_init_session);
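/*
 * Example (illustrative sketch; "my_tpg", "my_nacl" and "my_conn" are
 * hypothetical fabric-side objects): a fabric driver that does not
 * pre-allocate percpu-ida tags can bring up an I_T nexus with a bare
 * session allocation followed by registration:
 *
 *	struct se_session *sess;
 *
 *	sess = transport_init_session(TARGET_PROT_NORMAL);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 *
 *	transport_register_session(my_tpg, my_nacl, sess, my_conn);
 */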
247
248 int transport_alloc_session_tags(struct se_session *se_sess,
249                                  unsigned int tag_num, unsigned int tag_size)
250 {
251         int rc;
252
253         se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
254                                         GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
255         if (!se_sess->sess_cmd_map) {
256                 se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
257                 if (!se_sess->sess_cmd_map) {
258                         pr_err("Unable to allocate se_sess->sess_cmd_map\n");
259                         return -ENOMEM;
260                 }
261         }
262
263         rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
264         if (rc < 0) {
265                 pr_err("Unable to init se_sess->sess_tag_pool,"
266                         " tag_num: %u\n", tag_num);
267                 kvfree(se_sess->sess_cmd_map);
268                 se_sess->sess_cmd_map = NULL;
269                 return -ENOMEM;
270         }
271
272         return 0;
273 }
274 EXPORT_SYMBOL(transport_alloc_session_tags);
275
276 struct se_session *transport_init_session_tags(unsigned int tag_num,
277                                                unsigned int tag_size,
278                                                enum target_prot_op sup_prot_ops)
279 {
280         struct se_session *se_sess;
281         int rc;
282
283         if (tag_num != 0 && !tag_size) {
284                 pr_err("init_session_tags called with percpu-ida tag_num:"
285                        " %u, but zero tag_size\n", tag_num);
286                 return ERR_PTR(-EINVAL);
287         }
288         if (!tag_num && tag_size) {
289                 pr_err("init_session_tags called with percpu-ida tag_size:"
290                        " %u, but zero tag_num\n", tag_size);
291                 return ERR_PTR(-EINVAL);
292         }
293
294         se_sess = transport_init_session(sup_prot_ops);
295         if (IS_ERR(se_sess))
296                 return se_sess;
297
298         rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
299         if (rc < 0) {
300                 transport_free_session(se_sess);
301                 return ERR_PTR(-ENOMEM);
302         }
303
304         return se_sess;
305 }
306 EXPORT_SYMBOL(transport_init_session_tags);
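/*
 * Example (sketch; "struct my_fabric_cmd" is a hypothetical per-I/O
 * descriptor): fabrics using the percpu-ida tag pool size it by queue depth
 * and per-command payload, so every tag indexes one pre-allocated slot in
 * se_sess->sess_cmd_map:
 *
 *	struct se_session *sess;
 *
 *	sess = transport_init_session_tags(128, sizeof(struct my_fabric_cmd),
 *					   TARGET_PROT_NORMAL);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */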
307
308 /*
309  * Called with se_tpg->session_lock held (spin_lock_irqsave).
310  */
311 void __transport_register_session(
312         struct se_portal_group *se_tpg,
313         struct se_node_acl *se_nacl,
314         struct se_session *se_sess,
315         void *fabric_sess_ptr)
316 {
317         const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
318         unsigned char buf[PR_REG_ISID_LEN];
319         unsigned long flags;
320
321         se_sess->se_tpg = se_tpg;
322         se_sess->fabric_sess_ptr = fabric_sess_ptr;
323         /*
324          * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
325          *
326          * Only set for struct se_session's that will actually be moving I/O.
327          * e.g. *NOT* discovery sessions.
328          */
329         if (se_nacl) {
330                 /*
331                  *
332                  * Determine if fabric allows for T10-PI feature bits exposed to
333                  * initiators for device backends with !dev->dev_attrib.pi_prot_type.
334                  *
335                  * If so, then always save prot_type on a per se_node_acl node
336                  * basis and re-instate the previous sess_prot_type to avoid
337                  * disabling PI from below any previously initiator side
338                  * registered LUNs.
339                  */
340                 if (se_nacl->saved_prot_type)
341                         se_sess->sess_prot_type = se_nacl->saved_prot_type;
342                 else if (tfo->tpg_check_prot_fabric_only)
343                         se_sess->sess_prot_type = se_nacl->saved_prot_type =
344                                         tfo->tpg_check_prot_fabric_only(se_tpg);
345                 /*
346                  * If the fabric module supports an ISID based TransportID,
347                  * save this value in binary from the fabric I_T Nexus now.
348                  */
349                 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
350                         memset(&buf[0], 0, PR_REG_ISID_LEN);
351                         se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
352                                         &buf[0], PR_REG_ISID_LEN);
353                         se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
354                 }
355
356                 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
357                 /*
358                  * The se_nacl->nacl_sess pointer will be set to the
359                  * last active I_T Nexus for each struct se_node_acl.
360                  */
361                 se_nacl->nacl_sess = se_sess;
362
363                 list_add_tail(&se_sess->sess_acl_list,
364                               &se_nacl->acl_sess_list);
365                 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
366         }
367         list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
368
369         pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
370                 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
371 }
372 EXPORT_SYMBOL(__transport_register_session);
373
374 void transport_register_session(
375         struct se_portal_group *se_tpg,
376         struct se_node_acl *se_nacl,
377         struct se_session *se_sess,
378         void *fabric_sess_ptr)
379 {
380         unsigned long flags;
381
382         spin_lock_irqsave(&se_tpg->session_lock, flags);
383         __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
384         spin_unlock_irqrestore(&se_tpg->session_lock, flags);
385 }
386 EXPORT_SYMBOL(transport_register_session);
387
388 struct se_session *
389 target_alloc_session(struct se_portal_group *tpg,
390                      unsigned int tag_num, unsigned int tag_size,
391                      enum target_prot_op prot_op,
392                      const char *initiatorname, void *private,
393                      int (*callback)(struct se_portal_group *,
394                                      struct se_session *, void *))
395 {
396         struct se_session *sess;
397
398         /*
399          * If the fabric driver is using percpu-ida based pre-allocation
400          * of I/O descriptor tags, go ahead and perform that setup now.
401          */
402         if (tag_num != 0)
403                 sess = transport_init_session_tags(tag_num, tag_size, prot_op);
404         else
405                 sess = transport_init_session(prot_op);
406
407         if (IS_ERR(sess))
408                 return sess;
409
410         sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
411                                         (unsigned char *)initiatorname);
412         if (!sess->se_node_acl) {
413                 transport_free_session(sess);
414                 return ERR_PTR(-EACCES);
415         }
416         /*
417          * Go ahead and perform any remaining fabric setup that is
418          * required before transport_register_session().
419          */
420         if (callback != NULL) {
421                 int rc = callback(tpg, sess, private);
422                 if (rc) {
423                         transport_free_session(sess);
424                         return ERR_PTR(rc);
425                 }
426         }
427
428         transport_register_session(tpg, sess->se_node_acl, sess, private);
429         return sess;
430 }
431 EXPORT_SYMBOL(target_alloc_session);
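/*
 * Example (sketch; "my_fabric_session_cb", "struct my_conn", "struct
 * my_fabric_cmd" and "initiator_name" are hypothetical): target_alloc_session()
 * bundles session allocation, node ACL lookup and registration, invoking the
 * optional callback for any fabric setup that must happen before
 * transport_register_session():
 *
 *	static int my_fabric_session_cb(struct se_portal_group *tpg,
 *					struct se_session *sess, void *p)
 *	{
 *		struct my_conn *conn = p;
 *
 *		conn->sess = sess;
 *		return 0;
 *	}
 *
 *	sess = target_alloc_session(tpg, 128, sizeof(struct my_fabric_cmd),
 *				    TARGET_PROT_NORMAL, initiator_name,
 *				    conn, my_fabric_session_cb);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */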
432
433 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
434 {
435         struct se_session *se_sess;
436         ssize_t len = 0;
437
438         spin_lock_bh(&se_tpg->session_lock);
439         list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
440                 if (!se_sess->se_node_acl)
441                         continue;
442                 if (!se_sess->se_node_acl->dynamic_node_acl)
443                         continue;
444                 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
445                         break;
446
447                 len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
448                                 se_sess->se_node_acl->initiatorname);
449                 len += 1; /* Include NULL terminator */
450         }
451         spin_unlock_bh(&se_tpg->session_lock);
452
453         return len;
454 }
455 EXPORT_SYMBOL(target_show_dynamic_sessions);
456
457 static void target_complete_nacl(struct kref *kref)
458 {
459         struct se_node_acl *nacl = container_of(kref,
460                                 struct se_node_acl, acl_kref);
461         struct se_portal_group *se_tpg = nacl->se_tpg;
462
463         if (!nacl->dynamic_stop) {
464                 complete(&nacl->acl_free_comp);
465                 return;
466         }
467
468         mutex_lock(&se_tpg->acl_node_mutex);
469         list_del_init(&nacl->acl_list);
470         mutex_unlock(&se_tpg->acl_node_mutex);
471
472         core_tpg_wait_for_nacl_pr_ref(nacl);
473         core_free_device_list_for_node(nacl, se_tpg);
474         kfree(nacl);
475 }
476
477 void target_put_nacl(struct se_node_acl *nacl)
478 {
479         kref_put(&nacl->acl_kref, target_complete_nacl);
480 }
481 EXPORT_SYMBOL(target_put_nacl);
482
483 void transport_deregister_session_configfs(struct se_session *se_sess)
484 {
485         struct se_node_acl *se_nacl;
486         unsigned long flags;
487         /*
488          * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
489          */
490         se_nacl = se_sess->se_node_acl;
491         if (se_nacl) {
492                 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
493                 if (!list_empty(&se_sess->sess_acl_list))
494                         list_del_init(&se_sess->sess_acl_list);
495                 /*
496                  * If the session list is empty, then clear the pointer.
497                  * Otherwise, set the struct se_session pointer from the tail
498                  * element of the per struct se_node_acl active session list.
499                  */
500                 if (list_empty(&se_nacl->acl_sess_list))
501                         se_nacl->nacl_sess = NULL;
502                 else {
503                         se_nacl->nacl_sess = container_of(
504                                         se_nacl->acl_sess_list.prev,
505                                         struct se_session, sess_acl_list);
506                 }
507                 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
508         }
509 }
510 EXPORT_SYMBOL(transport_deregister_session_configfs);
511
512 void transport_free_session(struct se_session *se_sess)
513 {
514         struct se_node_acl *se_nacl = se_sess->se_node_acl;
515
516         /*
517          * Drop the se_node_acl->nacl_kref obtained from within
518          * core_tpg_get_initiator_node_acl().
519          */
520         if (se_nacl) {
521                 struct se_portal_group *se_tpg = se_nacl->se_tpg;
522                 const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
523                 unsigned long flags;
524
525                 se_sess->se_node_acl = NULL;
526
527                 /*
528                  * Also determine if we need to drop the extra ->acl_kref if
529                  * it had been previously dynamically generated, and
530                  * the endpoint is not caching dynamic ACLs.
531                  */
532                 mutex_lock(&se_tpg->acl_node_mutex);
533                 if (se_nacl->dynamic_node_acl &&
534                     !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
535                         spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
536                         if (list_empty(&se_nacl->acl_sess_list))
537                                 se_nacl->dynamic_stop = true;
538                         spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
539
540                         if (se_nacl->dynamic_stop)
541                                 list_del_init(&se_nacl->acl_list);
542                 }
543                 mutex_unlock(&se_tpg->acl_node_mutex);
544
545                 if (se_nacl->dynamic_stop)
546                         target_put_nacl(se_nacl);
547
548                 target_put_nacl(se_nacl);
549         }
550         if (se_sess->sess_cmd_map) {
551                 percpu_ida_destroy(&se_sess->sess_tag_pool);
552                 kvfree(se_sess->sess_cmd_map);
553         }
554         kmem_cache_free(se_sess_cache, se_sess);
555 }
556 EXPORT_SYMBOL(transport_free_session);
557
558 void transport_deregister_session(struct se_session *se_sess)
559 {
560         struct se_portal_group *se_tpg = se_sess->se_tpg;
561         unsigned long flags;
562
563         if (!se_tpg) {
564                 transport_free_session(se_sess);
565                 return;
566         }
567
568         spin_lock_irqsave(&se_tpg->session_lock, flags);
569         list_del(&se_sess->sess_list);
570         se_sess->se_tpg = NULL;
571         se_sess->fabric_sess_ptr = NULL;
572         spin_unlock_irqrestore(&se_tpg->session_lock, flags);
573
574         pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
575                 se_tpg->se_tpg_tfo->get_fabric_name());
576         /*
577          * If the last kref drops now for an explicit NodeACL, the sleeping
578          * ->acl_free_comp waiter (the configfs se_node_acl->acl_group removal
579          * context) is woken up from within transport_free_session() code.
580          *
581          * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
582          * to release all remaining generate_node_acl=1 created ACL resources.
583          */
584
585         transport_free_session(se_sess);
586 }
587 EXPORT_SYMBOL(transport_deregister_session);
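/*
 * Example (sketch): the usual fabric-side teardown order is to drop the
 * configfs-visible nacl_sess linkage first and then release the session
 * itself, e.g. from a connection close path:
 *
 *	transport_deregister_session_configfs(sess);
 *	transport_deregister_session(sess);
 */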
588
589 static void target_remove_from_state_list(struct se_cmd *cmd)
590 {
591         struct se_device *dev = cmd->se_dev;
592         unsigned long flags;
593
594         if (!dev)
595                 return;
596
597         if (cmd->transport_state & CMD_T_BUSY)
598                 return;
599
600         spin_lock_irqsave(&dev->execute_task_lock, flags);
601         if (cmd->state_active) {
602                 list_del(&cmd->state_list);
603                 cmd->state_active = false;
604         }
605         spin_unlock_irqrestore(&dev->execute_task_lock, flags);
606 }
607
608 static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
609                                     bool write_pending)
610 {
611         unsigned long flags;
612
613         if (remove_from_lists) {
614                 target_remove_from_state_list(cmd);
615
616                 /*
617                  * Clear struct se_cmd->se_lun before the handoff to FE.
618                  */
619                 cmd->se_lun = NULL;
620         }
621
622         spin_lock_irqsave(&cmd->t_state_lock, flags);
623         if (write_pending)
624                 cmd->t_state = TRANSPORT_WRITE_PENDING;
625
626         /*
627          * Determine if frontend context caller is requesting the stopping of
628          * this command for frontend exceptions.
629          */
630         if (cmd->transport_state & CMD_T_STOP) {
631                 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
632                         __func__, __LINE__, cmd->tag);
633
634                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
635
636                 complete_all(&cmd->t_transport_stop_comp);
637                 return 1;
638         }
639
640         cmd->transport_state &= ~CMD_T_ACTIVE;
641         if (remove_from_lists) {
642                 /*
643                  * Some fabric modules like tcm_loop can release their
644                  * internally allocated I/O reference and the struct se_cmd
645                  * now.
646                  *
647                  * Fabric modules are expected to return '1' here if the
648                  * se_cmd being passed is released at this point,
649                  * or zero if not being released.
650                  */
651                 if (cmd->se_tfo->check_stop_free != NULL) {
652                         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
653                         return cmd->se_tfo->check_stop_free(cmd);
654                 }
655         }
656
657         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
658         return 0;
659 }
660
661 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
662 {
663         return transport_cmd_check_stop(cmd, true, false);
664 }
665
666 static void transport_lun_remove_cmd(struct se_cmd *cmd)
667 {
668         struct se_lun *lun = cmd->se_lun;
669
670         if (!lun)
671                 return;
672
673         if (cmpxchg(&cmd->lun_ref_active, true, false))
674                 percpu_ref_put(&lun->lun_ref);
675 }
676
677 int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
678 {
679         bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
680         int ret = 0;
681
682         if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
683                 transport_lun_remove_cmd(cmd);
684         /*
685          * Allow the fabric driver to unmap any resources before
686          * releasing the descriptor via TFO->release_cmd()
687          */
688         if (remove)
689                 cmd->se_tfo->aborted_task(cmd);
690
691         if (transport_cmd_check_stop_to_fabric(cmd))
692                 return 1;
693         if (remove && ack_kref)
694                 ret = transport_put_cmd(cmd);
695
696         return ret;
697 }
698
699 static void target_complete_failure_work(struct work_struct *work)
700 {
701         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
702
703         transport_generic_request_failure(cmd,
704                         TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
705 }
706
707 /*
708  * Used when asking transport to copy Sense Data from the underlying
709  * Linux/SCSI struct scsi_cmnd
710  */
711 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
712 {
713         struct se_device *dev = cmd->se_dev;
714
715         WARN_ON(!cmd->se_lun);
716
717         if (!dev)
718                 return NULL;
719
720         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
721                 return NULL;
722
723         cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
724
725         pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
726                 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
727         return cmd->sense_buffer;
728 }
729
730 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
731 {
732         struct se_device *dev = cmd->se_dev;
733         int success = scsi_status == GOOD;
734         unsigned long flags;
735
736         cmd->scsi_status = scsi_status;
737
738
739         spin_lock_irqsave(&cmd->t_state_lock, flags);
740         cmd->transport_state &= ~CMD_T_BUSY;
741
742         if (dev && dev->transport->transport_complete) {
743                 dev->transport->transport_complete(cmd,
744                                 cmd->t_data_sg,
745                                 transport_get_sense_buffer(cmd));
746                 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
747                         success = 1;
748         }
749
750         /*
751          * Check for case where an explicit ABORT_TASK has been received
752          * and transport_wait_for_tasks() will be waiting for completion..
753          */
754         if (cmd->transport_state & CMD_T_ABORTED ||
755             cmd->transport_state & CMD_T_STOP) {
756                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
757                 /*
758                  * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
759                  * release se_device->caw_sem obtained by sbc_compare_and_write()
760                  * since target_complete_ok_work() or target_complete_failure_work()
761                  * won't be called to invoke the normal CAW completion callbacks.
762                  */
763                 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
764                         up(&dev->caw_sem);
765                 }
766                 complete_all(&cmd->t_transport_stop_comp);
767                 return;
768         } else if (!success) {
769                 INIT_WORK(&cmd->work, target_complete_failure_work);
770         } else {
771                 INIT_WORK(&cmd->work, target_complete_ok_work);
772         }
773
774         cmd->t_state = TRANSPORT_COMPLETE;
775         cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
776         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
777
778         if (cmd->se_cmd_flags & SCF_USE_CPUID)
779                 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
780         else
781                 queue_work(target_completion_wq, &cmd->work);
782 }
783 EXPORT_SYMBOL(target_complete_cmd);
784
785 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
786 {
787         if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
788                 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
789                         cmd->residual_count += cmd->data_length - length;
790                 } else {
791                         cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
792                         cmd->residual_count = cmd->data_length - length;
793                 }
794
795                 cmd->data_length = length;
796         }
797
798         target_complete_cmd(cmd, scsi_status);
799 }
800 EXPORT_SYMBOL(target_complete_cmd_with_length);
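/*
 * Example (sketch; "my_io_done" is a hypothetical backend completion
 * callback): backend drivers call these from their own completion context
 * and let the target_completion_wq work item finish the command.  The
 * _with_length variant also sets up residuals when less data than requested
 * was returned:
 *
 *	static void my_io_done(struct se_cmd *cmd, int err, int valid_len)
 *	{
 *		if (err)
 *			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
 *		else
 *			target_complete_cmd_with_length(cmd, SAM_STAT_GOOD,
 *							valid_len);
 *	}
 */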
801
802 static void target_add_to_state_list(struct se_cmd *cmd)
803 {
804         struct se_device *dev = cmd->se_dev;
805         unsigned long flags;
806
807         spin_lock_irqsave(&dev->execute_task_lock, flags);
808         if (!cmd->state_active) {
809                 list_add_tail(&cmd->state_list, &dev->state_list);
810                 cmd->state_active = true;
811         }
812         spin_unlock_irqrestore(&dev->execute_task_lock, flags);
813 }
814
815 /*
816  * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
817  */
818 static void transport_write_pending_qf(struct se_cmd *cmd);
819 static void transport_complete_qf(struct se_cmd *cmd);
820
821 void target_qf_do_work(struct work_struct *work)
822 {
823         struct se_device *dev = container_of(work, struct se_device,
824                                         qf_work_queue);
825         LIST_HEAD(qf_cmd_list);
826         struct se_cmd *cmd, *cmd_tmp;
827
828         spin_lock_irq(&dev->qf_cmd_lock);
829         list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
830         spin_unlock_irq(&dev->qf_cmd_lock);
831
832         list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
833                 list_del(&cmd->se_qf_node);
834                 atomic_dec_mb(&dev->dev_qf_count);
835
836                 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
837                         " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
838                         (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
839                         (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
840                         : "UNKNOWN");
841
842                 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
843                         transport_write_pending_qf(cmd);
844                 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
845                         transport_complete_qf(cmd);
846         }
847 }
848
849 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
850 {
851         switch (cmd->data_direction) {
852         case DMA_NONE:
853                 return "NONE";
854         case DMA_FROM_DEVICE:
855                 return "READ";
856         case DMA_TO_DEVICE:
857                 return "WRITE";
858         case DMA_BIDIRECTIONAL:
859                 return "BIDI";
860         default:
861                 break;
862         }
863
864         return "UNKNOWN";
865 }
866
867 void transport_dump_dev_state(
868         struct se_device *dev,
869         char *b,
870         int *bl)
871 {
872         *bl += sprintf(b + *bl, "Status: ");
873         if (dev->export_count)
874                 *bl += sprintf(b + *bl, "ACTIVATED");
875         else
876                 *bl += sprintf(b + *bl, "DEACTIVATED");
877
878         *bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
879         *bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
880                 dev->dev_attrib.block_size,
881                 dev->dev_attrib.hw_max_sectors);
882         *bl += sprintf(b + *bl, "        ");
883 }
884
885 void transport_dump_vpd_proto_id(
886         struct t10_vpd *vpd,
887         unsigned char *p_buf,
888         int p_buf_len)
889 {
890         unsigned char buf[VPD_TMP_BUF_SIZE];
891         int len;
892
893         memset(buf, 0, VPD_TMP_BUF_SIZE);
894         len = sprintf(buf, "T10 VPD Protocol Identifier: ");
895
896         switch (vpd->protocol_identifier) {
897         case 0x00:
898                 sprintf(buf+len, "Fibre Channel\n");
899                 break;
900         case 0x10:
901                 sprintf(buf+len, "Parallel SCSI\n");
902                 break;
903         case 0x20:
904                 sprintf(buf+len, "SSA\n");
905                 break;
906         case 0x30:
907                 sprintf(buf+len, "IEEE 1394\n");
908                 break;
909         case 0x40:
910                 sprintf(buf+len, "SCSI Remote Direct Memory Access"
911                                 " Protocol\n");
912                 break;
913         case 0x50:
914                 sprintf(buf+len, "Internet SCSI (iSCSI)\n");
915                 break;
916         case 0x60:
917                 sprintf(buf+len, "SAS Serial SCSI Protocol\n");
918                 break;
919         case 0x70:
920                 sprintf(buf+len, "Automation/Drive Interface Transport"
921                                 " Protocol\n");
922                 break;
923         case 0x80:
924                 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
925                 break;
926         default:
927                 sprintf(buf+len, "Unknown 0x%02x\n",
928                                 vpd->protocol_identifier);
929                 break;
930         }
931
932         if (p_buf)
933                 strncpy(p_buf, buf, p_buf_len);
934         else
935                 pr_debug("%s", buf);
936 }
937
938 void
939 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
940 {
941         /*
942          * Check if the Protocol Identifier Valid (PIV) bit is set..
943          *
944          * from spc3r23.pdf section 7.5.1
945          */
946          if (page_83[1] & 0x80) {
947                 vpd->protocol_identifier = (page_83[0] & 0xf0);
948                 vpd->protocol_identifier_set = 1;
949                 transport_dump_vpd_proto_id(vpd, NULL, 0);
950         }
951 }
952 EXPORT_SYMBOL(transport_set_vpd_proto_id);
953
954 int transport_dump_vpd_assoc(
955         struct t10_vpd *vpd,
956         unsigned char *p_buf,
957         int p_buf_len)
958 {
959         unsigned char buf[VPD_TMP_BUF_SIZE];
960         int ret = 0;
961         int len;
962
963         memset(buf, 0, VPD_TMP_BUF_SIZE);
964         len = sprintf(buf, "T10 VPD Identifier Association: ");
965
966         switch (vpd->association) {
967         case 0x00:
968                 sprintf(buf+len, "addressed logical unit\n");
969                 break;
970         case 0x10:
971                 sprintf(buf+len, "target port\n");
972                 break;
973         case 0x20:
974                 sprintf(buf+len, "SCSI target device\n");
975                 break;
976         default:
977                 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
978                 ret = -EINVAL;
979                 break;
980         }
981
982         if (p_buf)
983                 strncpy(p_buf, buf, p_buf_len);
984         else
985                 pr_debug("%s", buf);
986
987         return ret;
988 }
989
990 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
991 {
992         /*
993          * The VPD identification association..
994          *
995          * from spc3r23.pdf Section 7.6.3.1 Table 297
996          */
997         vpd->association = (page_83[1] & 0x30);
998         return transport_dump_vpd_assoc(vpd, NULL, 0);
999 }
1000 EXPORT_SYMBOL(transport_set_vpd_assoc);
1001
1002 int transport_dump_vpd_ident_type(
1003         struct t10_vpd *vpd,
1004         unsigned char *p_buf,
1005         int p_buf_len)
1006 {
1007         unsigned char buf[VPD_TMP_BUF_SIZE];
1008         int ret = 0;
1009         int len;
1010
1011         memset(buf, 0, VPD_TMP_BUF_SIZE);
1012         len = sprintf(buf, "T10 VPD Identifier Type: ");
1013
1014         switch (vpd->device_identifier_type) {
1015         case 0x00:
1016                 sprintf(buf+len, "Vendor specific\n");
1017                 break;
1018         case 0x01:
1019                 sprintf(buf+len, "T10 Vendor ID based\n");
1020                 break;
1021         case 0x02:
1022                 sprintf(buf+len, "EUI-64 based\n");
1023                 break;
1024         case 0x03:
1025                 sprintf(buf+len, "NAA\n");
1026                 break;
1027         case 0x04:
1028                 sprintf(buf+len, "Relative target port identifier\n");
1029                 break;
1030         case 0x08:
1031                 sprintf(buf+len, "SCSI name string\n");
1032                 break;
1033         default:
1034                 sprintf(buf+len, "Unsupported: 0x%02x\n",
1035                                 vpd->device_identifier_type);
1036                 ret = -EINVAL;
1037                 break;
1038         }
1039
1040         if (p_buf) {
1041                 if (p_buf_len < strlen(buf)+1)
1042                         return -EINVAL;
1043                 strncpy(p_buf, buf, p_buf_len);
1044         } else {
1045                 pr_debug("%s", buf);
1046         }
1047
1048         return ret;
1049 }
1050
1051 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1052 {
1053         /*
1054          * The VPD identifier type..
1055          *
1056          * from spc3r23.pdf Section 7.6.3.1 Table 298
1057          */
1058         vpd->device_identifier_type = (page_83[1] & 0x0f);
1059         return transport_dump_vpd_ident_type(vpd, NULL, 0);
1060 }
1061 EXPORT_SYMBOL(transport_set_vpd_ident_type);
1062
1063 int transport_dump_vpd_ident(
1064         struct t10_vpd *vpd,
1065         unsigned char *p_buf,
1066         int p_buf_len)
1067 {
1068         unsigned char buf[VPD_TMP_BUF_SIZE];
1069         int ret = 0;
1070
1071         memset(buf, 0, VPD_TMP_BUF_SIZE);
1072
1073         switch (vpd->device_identifier_code_set) {
1074         case 0x01: /* Binary */
1075                 snprintf(buf, sizeof(buf),
1076                         "T10 VPD Binary Device Identifier: %s\n",
1077                         &vpd->device_identifier[0]);
1078                 break;
1079         case 0x02: /* ASCII */
1080                 snprintf(buf, sizeof(buf),
1081                         "T10 VPD ASCII Device Identifier: %s\n",
1082                         &vpd->device_identifier[0]);
1083                 break;
1084         case 0x03: /* UTF-8 */
1085                 snprintf(buf, sizeof(buf),
1086                         "T10 VPD UTF-8 Device Identifier: %s\n",
1087                         &vpd->device_identifier[0]);
1088                 break;
1089         default:
1090                 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1091                         " 0x%02x", vpd->device_identifier_code_set);
1092                 ret = -EINVAL;
1093                 break;
1094         }
1095
1096         if (p_buf)
1097                 strncpy(p_buf, buf, p_buf_len);
1098         else
1099                 pr_debug("%s", buf);
1100
1101         return ret;
1102 }
1103
1104 int
1105 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1106 {
1107         static const char hex_str[] = "0123456789abcdef";
1108         int j = 0, i = 4; /* offset to start of the identifier */
1109
1110         /*
1111          * The VPD Code Set (encoding)
1112          *
1113          * from spc3r23.pdf Section 7.6.3.1 Table 296
1114          */
1115         vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1116         switch (vpd->device_identifier_code_set) {
1117         case 0x01: /* Binary */
1118                 vpd->device_identifier[j++] =
1119                                 hex_str[vpd->device_identifier_type];
1120                 while (i < (4 + page_83[3])) {
1121                         vpd->device_identifier[j++] =
1122                                 hex_str[(page_83[i] & 0xf0) >> 4];
1123                         vpd->device_identifier[j++] =
1124                                 hex_str[page_83[i] & 0x0f];
1125                         i++;
1126                 }
1127                 break;
1128         case 0x02: /* ASCII */
1129         case 0x03: /* UTF-8 */
1130                 while (i < (4 + page_83[3]))
1131                         vpd->device_identifier[j++] = page_83[i++];
1132                 break;
1133         default:
1134                 break;
1135         }
1136
1137         return transport_dump_vpd_ident(vpd, NULL, 0);
1138 }
1139 EXPORT_SYMBOL(transport_set_vpd_ident);
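/*
 * Example (sketch; "page_83" points at one designation descriptor from an
 * INQUIRY EVPD 0x83 response): the transport_set_vpd_*() helpers above
 * decode protocol identifier, association, designator type and the
 * identifier itself into a struct t10_vpd:
 *
 *	struct t10_vpd *vpd = kzalloc(sizeof(*vpd), GFP_KERNEL);
 *
 *	if (!vpd)
 *		return -ENOMEM;
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 */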
1140
1141 static sense_reason_t
1142 target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
1143                                unsigned int size)
1144 {
1145         u32 mtl;
1146
1147         if (!cmd->se_tfo->max_data_sg_nents)
1148                 return TCM_NO_SENSE;
1149         /*
1150          * Check if se_cmd->data_length exceeds the fabric enforced maximum
1151          * transfer length, i.e. max_data_sg_nents worth of single PAGE_SIZE
1152          * entry scatter-lists.  If so, set SCF_UNDERFLOW_BIT + residual_count
1153          * and reduce the original cmd->data_length to that maximum length.
1154          */
1155         mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
1156         if (cmd->data_length > mtl) {
1157                 /*
1158                  * If an existing CDB overflow is present, calculate new residual
1159                  * based on CDB size minus fabric maximum transfer length.
1160                  *
1161                  * If an existing CDB underflow is present, calculate new residual
1162                  * based on original cmd->data_length minus fabric maximum transfer
1163                  * length.
1164                  *
1165                  * Otherwise, set the underflow residual based on cmd->data_length
1166                  * minus fabric maximum transfer length.
1167                  */
1168                 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1169                         cmd->residual_count = (size - mtl);
1170                 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1171                         u32 orig_dl = size + cmd->residual_count;
1172                         cmd->residual_count = (orig_dl - mtl);
1173                 } else {
1174                         cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1175                         cmd->residual_count = (cmd->data_length - mtl);
1176                 }
1177                 cmd->data_length = mtl;
1178                 /*
1179                  * Reset sbc_check_prot() calculated protection payload
1180                  * length based upon the new smaller MTL.
1181                  */
1182                 if (cmd->prot_length) {
1183                         u32 sectors = (mtl / dev->dev_attrib.block_size);
1184                         cmd->prot_length = dev->prot_length * sectors;
1185                 }
1186         }
1187         return TCM_NO_SENSE;
1188 }
1189
1190 sense_reason_t
1191 target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1192 {
1193         struct se_device *dev = cmd->se_dev;
1194
1195         if (cmd->unknown_data_length) {
1196                 cmd->data_length = size;
1197         } else if (size != cmd->data_length) {
1198                 pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
1199                         " %u does not match SCSI CDB Length: %u for SAM Opcode:"
1200                         " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
1201                                 cmd->data_length, size, cmd->t_task_cdb[0]);
1202
1203                 if (cmd->data_direction == DMA_TO_DEVICE) {
1204                         if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
1205                                 pr_err_ratelimited("Rejecting underflow/overflow"
1206                                                    " for WRITE data CDB\n");
1207                                 return TCM_INVALID_CDB_FIELD;
1208                         }
1209                         /*
1210                          * Some fabric drivers like iscsi-target still expect to
1211                          * always reject overflow writes.  Reject this case until
1212                          * full fabric driver level support for overflow writes
1213                          * is introduced tree-wide.
1214                          */
1215                         if (size > cmd->data_length) {
1216                                 pr_err_ratelimited("Rejecting overflow for"
1217                                                    " WRITE control CDB\n");
1218                                 return TCM_INVALID_CDB_FIELD;
1219                         }
1220                 }
1221                 /*
1222                  * Reject READ_* or WRITE_* with overflow/underflow for
1223                  * type SCF_SCSI_DATA_CDB.
1224                  */
1225                 if (dev->dev_attrib.block_size != 512)  {
1226                         pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
1227                                 " CDB on non 512-byte sector setup subsystem"
1228                                 " plugin: %s\n", dev->transport->name);
1229                         /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
1230                         return TCM_INVALID_CDB_FIELD;
1231                 }
1232                 /*
1233                  * For the overflow case keep the existing fabric provided
1234                  * ->data_length.  Otherwise for the underflow case, reset
1235                  * ->data_length to the smaller SCSI expected data transfer
1236                  * length.
1237                  */
1238                 if (size > cmd->data_length) {
1239                         cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
1240                         cmd->residual_count = (size - cmd->data_length);
1241                 } else {
1242                         cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1243                         cmd->residual_count = (cmd->data_length - size);
1244                         cmd->data_length = size;
1245                 }
1246         }
1247
1248         return target_check_max_data_sg_nents(cmd, dev, size);
1249
1250 }
1251
1252 /*
1253  * Used by fabric modules containing a local struct se_cmd within their
1254  * fabric dependent per I/O descriptor.
1255  *
1256  * Preserves the value of @cmd->tag.
1257  */
1258 void transport_init_se_cmd(
1259         struct se_cmd *cmd,
1260         const struct target_core_fabric_ops *tfo,
1261         struct se_session *se_sess,
1262         u32 data_length,
1263         int data_direction,
1264         int task_attr,
1265         unsigned char *sense_buffer)
1266 {
1267         INIT_LIST_HEAD(&cmd->se_delayed_node);
1268         INIT_LIST_HEAD(&cmd->se_qf_node);
1269         INIT_LIST_HEAD(&cmd->se_cmd_list);
1270         INIT_LIST_HEAD(&cmd->state_list);
1271         init_completion(&cmd->t_transport_stop_comp);
1272         init_completion(&cmd->cmd_wait_comp);
1273         spin_lock_init(&cmd->t_state_lock);
1274         kref_init(&cmd->cmd_kref);
1275         cmd->transport_state = CMD_T_DEV_ACTIVE;
1276
1277         cmd->se_tfo = tfo;
1278         cmd->se_sess = se_sess;
1279         cmd->data_length = data_length;
1280         cmd->data_direction = data_direction;
1281         cmd->sam_task_attr = task_attr;
1282         cmd->sense_buffer = sense_buffer;
1283
1284         cmd->state_active = false;
1285 }
1286 EXPORT_SYMBOL(transport_init_se_cmd);
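/*
 * Example (sketch; "my_cmd" is a hypothetical fabric descriptor embedding a
 * struct se_cmd, and "my_fabric_ops" its target_core_fabric_ops instance):
 * fabrics driving command setup by hand initialize the descriptor before the
 * CDB parsing and submission paths below, and preserve their own tag:
 *
 *	transport_init_se_cmd(&my_cmd->se_cmd, &my_fabric_ops, sess,
 *			      data_length, DMA_FROM_DEVICE, TCM_SIMPLE_TAG,
 *			      my_cmd->sense_buf);
 *	my_cmd->se_cmd.tag = my_cmd->tag;
 */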
1287
1288 static sense_reason_t
1289 transport_check_alloc_task_attr(struct se_cmd *cmd)
1290 {
1291         struct se_device *dev = cmd->se_dev;
1292
1293         /*
1294          * Check if SAM Task Attribute emulation is enabled for this
1295          * struct se_device storage object
1296          */
1297         if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1298                 return 0;
1299
1300         if (cmd->sam_task_attr == TCM_ACA_TAG) {
1301                 pr_debug("SAM Task Attribute ACA"
1302                         " emulation is not supported\n");
1303                 return TCM_INVALID_CDB_FIELD;
1304         }
1305
1306         return 0;
1307 }
1308
1309 sense_reason_t
1310 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
1311 {
1312         struct se_device *dev = cmd->se_dev;
1313         sense_reason_t ret;
1314
1315         /*
1316          * Ensure that the received CDB is less than the max (252 + 8) bytes
1317          * for VARIABLE_LENGTH_CMD
1318          */
1319         if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1320                 pr_err("Received SCSI CDB with command_size: %d that"
1321                         " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1322                         scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1323                 return TCM_INVALID_CDB_FIELD;
1324         }
1325         /*
1326          * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1327          * allocate the additional extended CDB buffer now..  Otherwise
1328          * setup the pointer from __t_task_cdb to t_task_cdb.
1329          */
1330         if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1331                 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1332                                                 GFP_KERNEL);
1333                 if (!cmd->t_task_cdb) {
1334                         pr_err("Unable to allocate cmd->t_task_cdb"
1335                                 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1336                                 scsi_command_size(cdb),
1337                                 (unsigned long)sizeof(cmd->__t_task_cdb));
1338                         return TCM_OUT_OF_RESOURCES;
1339                 }
1340         } else
1341                 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1342         /*
1343          * Copy the original CDB into cmd->
1344          */
1345         memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1346
1347         trace_target_sequencer_start(cmd);
1348
1349         ret = dev->transport->parse_cdb(cmd);
1350         if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
1351                 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
1352                                     cmd->se_tfo->get_fabric_name(),
1353                                     cmd->se_sess->se_node_acl->initiatorname,
1354                                     cmd->t_task_cdb[0]);
1355         if (ret)
1356                 return ret;
1357
1358         ret = transport_check_alloc_task_attr(cmd);
1359         if (ret)
1360                 return ret;
1361
1362         cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1363         atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
1364         return 0;
1365 }
1366 EXPORT_SYMBOL(target_setup_cmd_from_cdb);
1367
1368 /*
1369  * Used by fabric module frontends to queue tasks directly.
1370  * May only be used from process context.
1371  */
1372 int transport_handle_cdb_direct(
1373         struct se_cmd *cmd)
1374 {
1375         sense_reason_t ret;
1376
1377         if (!cmd->se_lun) {
1378                 dump_stack();
1379                 pr_err("cmd->se_lun is NULL\n");
1380                 return -EINVAL;
1381         }
1382         if (in_interrupt()) {
1383                 dump_stack();
1384                 pr_err("transport_generic_handle_cdb cannot be called"
1385                                 " from interrupt context\n");
1386                 return -EINVAL;
1387         }
1388         /*
1389          * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
1390          * outstanding descriptors are handled correctly during shutdown via
1391          * transport_wait_for_tasks()
1392          *
1393          * Also, we don't take cmd->t_state_lock here as we only expect
1394          * this to be called for initial descriptor submission.
1395          */
1396         cmd->t_state = TRANSPORT_NEW_CMD;
1397         cmd->transport_state |= CMD_T_ACTIVE;
1398
1399         /*
1400          * transport_generic_new_cmd() is already handling QUEUE_FULL,
1401          * so follow TRANSPORT_NEW_CMD processing thread context usage
1402          * and call transport_generic_request_failure() if necessary..
1403          */
1404         ret = transport_generic_new_cmd(cmd);
1405         if (ret)
1406                 transport_generic_request_failure(cmd, ret);
1407         return 0;
1408 }
1409 EXPORT_SYMBOL(transport_handle_cdb_direct);
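/*
 * Example (sketch; "my_cmd" as above): after transport_init_se_cmd(), a LUN
 * lookup via transport_lookup_cmd_lun() and target_setup_cmd_from_cdb(), a
 * fabric running in process context can kick off execution directly:
 *
 *	sense_reason_t rc;
 *
 *	rc = target_setup_cmd_from_cdb(&my_cmd->se_cmd, cdb);
 *	if (rc) {
 *		transport_generic_request_failure(&my_cmd->se_cmd, rc);
 *		return 0;
 *	}
 *	return transport_handle_cdb_direct(&my_cmd->se_cmd);
 */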
1410
1411 sense_reason_t
1412 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1413                 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
1414 {
1415         if (!sgl || !sgl_count)
1416                 return 0;
1417
1418         /*
1419          * Reject SCSI data overflow with map_mem_to_cmd() as incoming
1420          * scatterlists already have been set to follow what the fabric
1421          * passes for the original expected data transfer length.
1422          */
1423         if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1424                 pr_warn("Rejecting SCSI DATA overflow for fabric using"
1425                         " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
1426                 return TCM_INVALID_CDB_FIELD;
1427         }
1428
1429         cmd->t_data_sg = sgl;
1430         cmd->t_data_nents = sgl_count;
1431         cmd->t_bidi_data_sg = sgl_bidi;
1432         cmd->t_bidi_data_nents = sgl_bidi_count;
1433
1434         cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1435         return 0;
1436 }
1437
1438 /*
1439  * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
1440  *                       se_cmd + use pre-allocated SGL memory.
1441  *
1442  * @se_cmd: command descriptor to submit
1443  * @se_sess: associated se_sess for endpoint
1444  * @cdb: pointer to SCSI CDB
1445  * @sense: pointer to SCSI sense buffer
1446  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1447  * @data_length: fabric expected data transfer length
1448  * @task_attr: SAM task attribute
1449  * @data_dir: DMA data direction
1450  * @flags: flags for command submission from target_sc_flags_tables
1451  * @sgl: struct scatterlist memory for unidirectional mapping
1452  * @sgl_count: scatterlist count for unidirectional mapping
1453  * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
1454  * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
1455  * @sgl_prot: struct scatterlist memory protection information
1456  * @sgl_prot_count: scatterlist count for protection information
1457  *
1458  * Task tags are supported if the caller has set @se_cmd->tag.
1459  *
1460  * Returns non-zero to signal active I/O shutdown failure.  All other
1461  * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1462  * but still return zero here.
1463  *
1464  * This may only be called from process context, and also currently
1465  * assumes internal allocation of fabric payload buffer by target-core.
1466  */
1467 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
1468                 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1469                 u32 data_length, int task_attr, int data_dir, int flags,
1470                 struct scatterlist *sgl, u32 sgl_count,
1471                 struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
1472                 struct scatterlist *sgl_prot, u32 sgl_prot_count)
1473 {
1474         struct se_portal_group *se_tpg;
1475         sense_reason_t rc;
1476         int ret;
1477
1478         se_tpg = se_sess->se_tpg;
1479         BUG_ON(!se_tpg);
1480         BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1481         BUG_ON(in_interrupt());
1482         /*
1483          * Initialize se_cmd for target operation.  From this point
1484          * exceptions are handled by sending exception status via
1485          * target_core_fabric_ops->queue_status() callback
1486          */
1487         transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1488                                 data_length, data_dir, task_attr, sense);
1489
1490         if (flags & TARGET_SCF_USE_CPUID)
1491                 se_cmd->se_cmd_flags |= SCF_USE_CPUID;
1492         else
1493                 se_cmd->cpuid = WORK_CPU_UNBOUND;
1494
1495         if (flags & TARGET_SCF_UNKNOWN_SIZE)
1496                 se_cmd->unknown_data_length = 1;
1497         /*
1498          * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1499          * se_sess->sess_cmd_list.  A second kref_get here is necessary
1500          * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1501          * kref_put() to happen during fabric packet acknowledgement.
1502          */
1503         ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1504         if (ret)
1505                 return ret;
1506         /*
1507          * Signal bidirectional data payloads to target-core
1508          */
1509         if (flags & TARGET_SCF_BIDI_OP)
1510                 se_cmd->se_cmd_flags |= SCF_BIDI;
1511         /*
1512          * Locate se_lun pointer and attach it to struct se_cmd
1513          */
1514         rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
1515         if (rc) {
1516                 transport_send_check_condition_and_sense(se_cmd, rc, 0);
1517                 target_put_sess_cmd(se_cmd);
1518                 return 0;
1519         }
1520
1521         rc = target_setup_cmd_from_cdb(se_cmd, cdb);
1522         if (rc != 0) {
1523                 transport_generic_request_failure(se_cmd, rc);
1524                 return 0;
1525         }
1526
1527         /*
1528          * Save pointers for SGLs containing protection information,
1529          * if present.
1530          */
1531         if (sgl_prot_count) {
1532                 se_cmd->t_prot_sg = sgl_prot;
1533                 se_cmd->t_prot_nents = sgl_prot_count;
1534                 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
1535         }
1536
1537         /*
1538          * When a non-zero sgl_count has been passed, perform SGL passthrough
1539          * mapping for pre-allocated fabric memory instead of having target
1540          * core perform an internal SGL allocation.
1541          */
1542         if (sgl_count != 0) {
1543                 BUG_ON(!sgl);
1544
1545                 /*
1546                  * A work-around for tcm_loop, as some userspace code via
1547                  * scsi-generic does not memset its associated read buffers,
1548                  * so go ahead and do that here for non-data CDBs.  Also
1549                  * note that this is currently guaranteed to be a single SGL
1550                  * for this case by target core in target_setup_cmd_from_cdb()
1551                  * -> transport_generic_cmd_sequencer().
1552                  */
1553                 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
1554                      se_cmd->data_direction == DMA_FROM_DEVICE) {
1555                         unsigned char *buf = NULL;
1556
1557                         if (sgl)
1558                                 buf = kmap(sg_page(sgl)) + sgl->offset;
1559
1560                         if (buf) {
1561                                 memset(buf, 0, sgl->length);
1562                                 kunmap(sg_page(sgl));
1563                         }
1564                 }
1565
1566                 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
1567                                 sgl_bidi, sgl_bidi_count);
1568                 if (rc != 0) {
1569                         transport_generic_request_failure(se_cmd, rc);
1570                         return 0;
1571                 }
1572         }
1573
1574         /*
1575          * Check if we need to delay processing because of ALUA
1576          * Active/NonOptimized primary access state.
1577          */
1578         core_alua_check_nonop_delay(se_cmd);
1579
1580         transport_handle_cdb_direct(se_cmd);
1581         return 0;
1582 }
1583 EXPORT_SYMBOL(target_submit_cmd_map_sgls);
1584
1585 /*
1586  * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1587  *
1588  * @se_cmd: command descriptor to submit
1589  * @se_sess: associated se_sess for endpoint
1590  * @cdb: pointer to SCSI CDB
1591  * @sense: pointer to SCSI sense buffer
1592  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1593  * @data_length: fabric expected data transfer length
1594  * @task_attr: SAM task attribute
1595  * @data_dir: DMA data direction
1596  * @flags: flags for command submission from target_sc_flags_tables
1597  *
1598  * Task tags are supported if the caller has set @se_cmd->tag.
1599  *
1600  * Returns non-zero to signal active I/O shutdown failure.  All other
1601  * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1602  * but still return zero here.
1603  *
1604  * This may only be called from process context, and also currently
1605  * assumes internal allocation of fabric payload buffer by target-core.
1606  *
1607  * It also assumes internal target-core SGL memory allocation.
1608  */
1609 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1610                 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1611                 u32 data_length, int task_attr, int data_dir, int flags)
1612 {
1613         return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
1614                         unpacked_lun, data_length, task_attr, data_dir,
1615                         flags, NULL, 0, NULL, 0, NULL, 0);
1616 }
1617 EXPORT_SYMBOL(target_submit_cmd);
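/*
 * Illustrative sketch (not part of this driver): how a hypothetical
 * fabric receive path might submit an I/O with target_submit_cmd().
 * The my_fabric_cmd type, its fields and my_fabric_free_cmd() are
 * placeholders for this example only; se_cmd->tag must be assigned
 * before submission.
 *
 *	static void my_fabric_submit(struct my_fabric_cmd *fc)
 *	{
 *		struct se_cmd *se_cmd = &fc->se_cmd;
 *
 *		se_cmd->tag = fc->wire_tag;
 *		if (target_submit_cmd(se_cmd, fc->se_sess, fc->cdb,
 *				      fc->sense_buf, fc->unpacked_lun,
 *				      fc->data_len, TCM_SIMPLE_TAG,
 *				      fc->dma_dir, TARGET_SCF_ACK_KREF) < 0)
 *			my_fabric_free_cmd(fc);
 *	}
 *
 * A non-zero return only signals active I/O shutdown; all other setup
 * failures are answered with CHECK_CONDITION and still return zero.
 */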
1618
1619 static void target_complete_tmr_failure(struct work_struct *work)
1620 {
1621         struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1622
1623         se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1624         se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1625
1626         transport_cmd_check_stop_to_fabric(se_cmd);
1627 }
1628
1629 /**
1630  * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1631  *                     for TMR CDBs
1632  *
1633  * @se_cmd: command descriptor to submit
1634  * @se_sess: associated se_sess for endpoint
1635  * @sense: pointer to SCSI sense buffer
1636  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1637  * @fabric_tmr_ptr: fabric context for TMR req
1638  * @tm_type: Type of TM request
1639  * @gfp: gfp type for caller
1640  * @tag: referenced task tag for TMR_ABORT_TASK
1641  * @flags: submit cmd flags
1642  *
1643  * Callable from all contexts.
1644  **/
1645
1646 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1647                 unsigned char *sense, u64 unpacked_lun,
1648                 void *fabric_tmr_ptr, unsigned char tm_type,
1649                 gfp_t gfp, u64 tag, int flags)
1650 {
1651         struct se_portal_group *se_tpg;
1652         int ret;
1653
1654         se_tpg = se_sess->se_tpg;
1655         BUG_ON(!se_tpg);
1656
1657         transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1658                               0, DMA_NONE, TCM_SIMPLE_TAG, sense);
1659         /*
1660          * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1661          * allocation failure.
1662          */
1663         ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1664         if (ret < 0)
1665                 return -ENOMEM;
1666
1667         if (tm_type == TMR_ABORT_TASK)
1668                 se_cmd->se_tmr_req->ref_task_tag = tag;
1669
1670         /* See target_submit_cmd for commentary */
1671         ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1672         if (ret) {
1673                 core_tmr_release_req(se_cmd->se_tmr_req);
1674                 return ret;
1675         }
1676
1677         ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
1678         if (ret) {
1679                 /*
1680                  * For callback during failure handling, push this work off
1681                  * to process context with TMR_LUN_DOES_NOT_EXIST status.
1682                  */
1683                 INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1684                 schedule_work(&se_cmd->work);
1685                 return 0;
1686         }
1687         transport_generic_handle_tmr(se_cmd);
1688         return 0;
1689 }
1690 EXPORT_SYMBOL(target_submit_tmr);
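/*
 * Illustrative sketch (placeholder names, not from this file): issuing
 * an ABORT_TASK TMR from a hypothetical fabric driver.
 *
 *	se_cmd->tag = tmr->wire_tag;
 *	ret = target_submit_tmr(se_cmd, se_sess, tmr->sense_buf,
 *				tmr->unpacked_lun, tmr, TMR_ABORT_TASK,
 *				GFP_KERNEL, tmr->ref_task_tag,
 *				TARGET_SCF_ACK_KREF);
 *	if (ret)
 *		... tear the descriptor down locally; target-core will
 *		    not queue a TM response in this case ...
 */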
1691
1692 /*
1693  * Handle SAM-esque emulation for generic transport request failures.
1694  */
1695 void transport_generic_request_failure(struct se_cmd *cmd,
1696                 sense_reason_t sense_reason)
1697 {
1698         int ret = 0, post_ret = 0;
1699
1700         pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
1701                 " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
1702         pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
1703                 cmd->se_tfo->get_cmd_state(cmd),
1704                 cmd->t_state, sense_reason);
1705         pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
1706                 (cmd->transport_state & CMD_T_ACTIVE) != 0,
1707                 (cmd->transport_state & CMD_T_STOP) != 0,
1708                 (cmd->transport_state & CMD_T_SENT) != 0);
1709
1710         /*
1711          * For SAM Task Attribute emulation for failed struct se_cmd
1712          */
1713         transport_complete_task_attr(cmd);
1714         /*
1715          * Handle special case for COMPARE_AND_WRITE failure, where the
1716          * callback is expected to drop the per device ->caw_sem.
1717          */
1718         if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
1719              cmd->transport_complete_callback)
1720                 cmd->transport_complete_callback(cmd, false, &post_ret);
1721
1722         switch (sense_reason) {
1723         case TCM_NON_EXISTENT_LUN:
1724         case TCM_UNSUPPORTED_SCSI_OPCODE:
1725         case TCM_INVALID_CDB_FIELD:
1726         case TCM_INVALID_PARAMETER_LIST:
1727         case TCM_PARAMETER_LIST_LENGTH_ERROR:
1728         case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1729         case TCM_UNKNOWN_MODE_PAGE:
1730         case TCM_WRITE_PROTECTED:
1731         case TCM_ADDRESS_OUT_OF_RANGE:
1732         case TCM_CHECK_CONDITION_ABORT_CMD:
1733         case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1734         case TCM_CHECK_CONDITION_NOT_READY:
1735         case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
1736         case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
1737         case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
1738         case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
1739         case TCM_TOO_MANY_TARGET_DESCS:
1740         case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
1741         case TCM_TOO_MANY_SEGMENT_DESCS:
1742         case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
1743                 break;
1744         case TCM_OUT_OF_RESOURCES:
1745                 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1746                 break;
1747         case TCM_RESERVATION_CONFLICT:
1748                 /*
1749                  * No SENSE Data payload for this case, set SCSI Status
1750                  * and queue the response to $FABRIC_MOD.
1751                  *
1752                  * Uses linux/include/scsi/scsi.h SAM status codes defs
1753                  */
1754                 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1755                 /*
1756                  * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1757                  * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1758                  * CONFLICT STATUS.
1759                  *
1760                  * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1761                  */
1762                 if (cmd->se_sess &&
1763                     cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
1764                         target_ua_allocate_lun(cmd->se_sess->se_node_acl,
1765                                                cmd->orig_fe_lun, 0x2C,
1766                                         ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1767                 }
1768                 trace_target_cmd_complete(cmd);
1769                 ret = cmd->se_tfo->queue_status(cmd);
1770                 if (ret == -EAGAIN || ret == -ENOMEM)
1771                         goto queue_full;
1772                 goto check_stop;
1773         default:
1774                 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1775                         cmd->t_task_cdb[0], sense_reason);
1776                 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1777                 break;
1778         }
1779
1780         ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1781         if (ret == -EAGAIN || ret == -ENOMEM)
1782                 goto queue_full;
1783
1784 check_stop:
1785         transport_lun_remove_cmd(cmd);
1786         transport_cmd_check_stop_to_fabric(cmd);
1787         return;
1788
1789 queue_full:
1790         cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1791         transport_handle_queue_full(cmd, cmd->se_dev);
1792 }
1793 EXPORT_SYMBOL(transport_generic_request_failure);
1794
1795 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
1796 {
1797         sense_reason_t ret;
1798
1799         if (!cmd->execute_cmd) {
1800                 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1801                 goto err;
1802         }
1803         if (do_checks) {
1804                 /*
1805                  * Check for an existing UNIT ATTENTION condition after
1806                  * target_handle_task_attr() has done SAM task attr
1807                  * checking, and may have already deferred execution
1808                  * out to target_do_delayed_work() context.
1809                  */
1810                 ret = target_scsi3_ua_check(cmd);
1811                 if (ret)
1812                         goto err;
1813
1814                 ret = target_alua_state_check(cmd);
1815                 if (ret)
1816                         goto err;
1817
1818                 ret = target_check_reservation(cmd);
1819                 if (ret) {
1820                         cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1821                         goto err;
1822                 }
1823         }
1824
1825         ret = cmd->execute_cmd(cmd);
1826         if (!ret)
1827                 return;
1828 err:
1829         spin_lock_irq(&cmd->t_state_lock);
1830         cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1831         spin_unlock_irq(&cmd->t_state_lock);
1832
1833         transport_generic_request_failure(cmd, ret);
1834 }
1835
1836 static int target_write_prot_action(struct se_cmd *cmd)
1837 {
1838         u32 sectors;
1839         /*
1840          * Perform WRITE_INSERT of PI using software emulation when backend
1841          * device has PI enabled, if the transport has not already generated
1842          * PI using hardware WRITE_INSERT offload.
1843          */
1844         switch (cmd->prot_op) {
1845         case TARGET_PROT_DOUT_INSERT:
1846                 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
1847                         sbc_dif_generate(cmd);
1848                 break;
1849         case TARGET_PROT_DOUT_STRIP:
1850                 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
1851                         break;
1852
1853                 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
1854                 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
1855                                              sectors, 0, cmd->t_prot_sg, 0);
1856                 if (unlikely(cmd->pi_err)) {
1857                         spin_lock_irq(&cmd->t_state_lock);
1858                         cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1859                         spin_unlock_irq(&cmd->t_state_lock);
1860                         transport_generic_request_failure(cmd, cmd->pi_err);
1861                         return -1;
1862                 }
1863                 break;
1864         default:
1865                 break;
1866         }
1867
1868         return 0;
1869 }
1870
1871 static bool target_handle_task_attr(struct se_cmd *cmd)
1872 {
1873         struct se_device *dev = cmd->se_dev;
1874
1875         if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1876                 return false;
1877
1878         cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
1879
1880         /*
1881          * Check for the existence of HEAD_OF_QUEUE, and if true allow the
1882          * passed struct se_cmd to be executed ahead of any delayed commands.
1883          */
1884         switch (cmd->sam_task_attr) {
1885         case TCM_HEAD_TAG:
1886                 atomic_inc_mb(&dev->non_ordered);
1887                 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
1888                          cmd->t_task_cdb[0]);
1889                 return false;
1890         case TCM_ORDERED_TAG:
1891                 atomic_inc_mb(&dev->delayed_cmd_count);
1892
1893                 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
1894                          cmd->t_task_cdb[0]);
1895                 break;
1896         default:
1897                 /*
1898                  * For SIMPLE and UNTAGGED Task Attribute commands
1899                  */
1900                 atomic_inc_mb(&dev->non_ordered);
1901
1902                 if (atomic_read(&dev->delayed_cmd_count) == 0)
1903                         return false;
1904                 break;
1905         }
1906
1907         if (cmd->sam_task_attr != TCM_ORDERED_TAG) {
1908                 atomic_inc_mb(&dev->delayed_cmd_count);
1909                 /*
1910                  * We will account for this when we dequeue from the delayed
1911                  * list.
1912                  */
1913                 atomic_dec_mb(&dev->non_ordered);
1914         }
1915
1916         spin_lock(&dev->delayed_cmd_lock);
1917         list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
1918         spin_unlock(&dev->delayed_cmd_lock);
1919
1920         pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
1921                 cmd->t_task_cdb[0], cmd->sam_task_attr);
1922         /*
1923          * We may have had no non-ordered cmds when this function started, or we
1924          * could have raced with the last simple/head cmd completing, so kick
1925          * the delayed handler here.
1926          */
1927         schedule_work(&dev->delayed_cmd_work);
1928         return true;
1929 }
1930
1931 static int __transport_check_aborted_status(struct se_cmd *, int);
1932
1933 void target_execute_cmd(struct se_cmd *cmd)
1934 {
1935         /*
1936          * Determine if frontend context caller is requesting the stopping of
1937          * this command for frontend exceptions.
1938          *
1939          * If the received CDB has already been aborted, stop processing it here.
1940          */
1941         spin_lock_irq(&cmd->t_state_lock);
1942         if (__transport_check_aborted_status(cmd, 1)) {
1943                 spin_unlock_irq(&cmd->t_state_lock);
1944                 return;
1945         }
1946         if (cmd->transport_state & CMD_T_STOP) {
1947                 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
1948                         __func__, __LINE__, cmd->tag);
1949
1950                 spin_unlock_irq(&cmd->t_state_lock);
1951                 complete_all(&cmd->t_transport_stop_comp);
1952                 return;
1953         }
1954
1955         cmd->t_state = TRANSPORT_PROCESSING;
1956         cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
1957         cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
1958         spin_unlock_irq(&cmd->t_state_lock);
1959
1960         if (target_write_prot_action(cmd))
1961                 return;
1962
1963         if (target_handle_task_attr(cmd)) {
1964                 spin_lock_irq(&cmd->t_state_lock);
1965                 cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
1966                 spin_unlock_irq(&cmd->t_state_lock);
1967                 return;
1968         }
1969
1970         __target_execute_cmd(cmd, true);
1971 }
1972 EXPORT_SYMBOL(target_execute_cmd);
1973
1974 /*
1975  * Process all commands up to the last received ORDERED task attribute which
1976  * requires another blocking boundary
1977  */
1978 void target_do_delayed_work(struct work_struct *work)
1979 {
1980         struct se_device *dev = container_of(work, struct se_device,
1981                                              delayed_cmd_work);
1982
1983         spin_lock(&dev->delayed_cmd_lock);
1984         while (!dev->ordered_sync_in_progress) {
1985                 struct se_cmd *cmd;
1986
1987                 if (list_empty(&dev->delayed_cmd_list))
1988                         break;
1989
1990                 cmd = list_entry(dev->delayed_cmd_list.next,
1991                                  struct se_cmd, se_delayed_node);
1992
1993                 if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
1994                         /*
1995                          * Check if we started with:
1996                          * [ordered] [simple] [ordered]
1997                          * and we are now at the last ordered so we have to wait
1998                          * for the simple cmd.
1999                          */
2000                         if (atomic_read(&dev->non_ordered) > 0)
2001                                 break;
2002
2003                         dev->ordered_sync_in_progress = true;
2004                 }
2005
2006                 list_del(&cmd->se_delayed_node);
2007                 atomic_dec_mb(&dev->delayed_cmd_count);
2008                 spin_unlock(&dev->delayed_cmd_lock);
2009
2010                 if (cmd->sam_task_attr != TCM_ORDERED_TAG)
2011                         atomic_inc_mb(&dev->non_ordered);
2012
2013                 cmd->transport_state |= CMD_T_SENT;
2014
2015                 __target_execute_cmd(cmd, true);
2016
2017                 spin_lock(&dev->delayed_cmd_lock);
2018         }
2019         spin_unlock(&dev->delayed_cmd_lock);
2020 }
2021
2022 /*
2023  * Called from I/O completion to determine which dormant/delayed
2024  * and ordered cmds need to have their tasks added to the execution queue.
2025  */
2026 static void transport_complete_task_attr(struct se_cmd *cmd)
2027 {
2028         struct se_device *dev = cmd->se_dev;
2029
2030         if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2031                 return;
2032
2033         if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
2034                 goto restart;
2035
2036         if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
2037                 atomic_dec_mb(&dev->non_ordered);
2038                 dev->dev_cur_ordered_id++;
2039                 pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
2040                          dev->dev_cur_ordered_id);
2041         } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
2042                 atomic_dec_mb(&dev->non_ordered);
2043                 dev->dev_cur_ordered_id++;
2044                 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
2045                          dev->dev_cur_ordered_id);
2046         } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
2047                 spin_lock(&dev->delayed_cmd_lock);
2048                 dev->ordered_sync_in_progress = false;
2049                 spin_unlock(&dev->delayed_cmd_lock);
2050
2051                 dev->dev_cur_ordered_id++;
2052                 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
2053                          dev->dev_cur_ordered_id);
2054         }
2055         cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
2056
2057 restart:
2058         if (atomic_read(&dev->delayed_cmd_count) > 0)
2059                 schedule_work(&dev->delayed_cmd_work);
2060 }
2061
2062 static void transport_complete_qf(struct se_cmd *cmd)
2063 {
2064         int ret = 0;
2065
2066         transport_complete_task_attr(cmd);
2067
2068         if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2069                 trace_target_cmd_complete(cmd);
2070                 ret = cmd->se_tfo->queue_status(cmd);
2071                 goto out;
2072         }
2073
2074         switch (cmd->data_direction) {
2075         case DMA_FROM_DEVICE:
2076                 if (cmd->scsi_status)
2077                         goto queue_status;
2078
2079                 trace_target_cmd_complete(cmd);
2080                 ret = cmd->se_tfo->queue_data_in(cmd);
2081                 break;
2082         case DMA_TO_DEVICE:
2083                 if (cmd->se_cmd_flags & SCF_BIDI) {
2084                         ret = cmd->se_tfo->queue_data_in(cmd);
2085                         break;
2086                 }
2087                 /* Fall through for DMA_TO_DEVICE */
2088         case DMA_NONE:
2089 queue_status:
2090                 trace_target_cmd_complete(cmd);
2091                 ret = cmd->se_tfo->queue_status(cmd);
2092                 break;
2093         default:
2094                 break;
2095         }
2096
2097 out:
2098         if (ret < 0) {
2099                 transport_handle_queue_full(cmd, cmd->se_dev);
2100                 return;
2101         }
2102         transport_lun_remove_cmd(cmd);
2103         transport_cmd_check_stop_to_fabric(cmd);
2104 }
2105
2106 static void transport_handle_queue_full(
2107         struct se_cmd *cmd,
2108         struct se_device *dev)
2109 {
2110         spin_lock_irq(&dev->qf_cmd_lock);
2111         list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
2112         atomic_inc_mb(&dev->dev_qf_count);
2113         spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
2114
2115         schedule_work(&cmd->se_dev->qf_work_queue);
2116 }
2117
2118 static bool target_read_prot_action(struct se_cmd *cmd)
2119 {
2120         switch (cmd->prot_op) {
2121         case TARGET_PROT_DIN_STRIP:
2122                 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
2123                         u32 sectors = cmd->data_length >>
2124                                   ilog2(cmd->se_dev->dev_attrib.block_size);
2125
2126                         cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2127                                                      sectors, 0, cmd->t_prot_sg,
2128                                                      0);
2129                         if (cmd->pi_err)
2130                                 return true;
2131                 }
2132                 break;
2133         case TARGET_PROT_DIN_INSERT:
2134                 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2135                         break;
2136
2137                 sbc_dif_generate(cmd);
2138                 break;
2139         default:
2140                 break;
2141         }
2142
2143         return false;
2144 }
2145
2146 static void target_complete_ok_work(struct work_struct *work)
2147 {
2148         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2149         int ret;
2150
2151         /*
2152          * Check if we need to move delayed/dormant tasks from cmds on the
2153          * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
2154          * Attribute.
2155          */
2156         transport_complete_task_attr(cmd);
2157
2158         /*
2159          * Check to schedule QUEUE_FULL work, or execute an existing
2160          * cmd->transport_qf_callback()
2161          */
2162         if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2163                 schedule_work(&cmd->se_dev->qf_work_queue);
2164
2165         /*
2166          * Check if we need to send a sense buffer from
2167          * the struct se_cmd in question.
2168          */
2169         if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2170                 WARN_ON(!cmd->scsi_status);
2171                 ret = transport_send_check_condition_and_sense(
2172                                         cmd, 0, 1);
2173                 if (ret == -EAGAIN || ret == -ENOMEM)
2174                         goto queue_full;
2175
2176                 transport_lun_remove_cmd(cmd);
2177                 transport_cmd_check_stop_to_fabric(cmd);
2178                 return;
2179         }
2180         /*
2181          * Check for a callback, used amongst other things by
2182          * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
2183          */
2184         if (cmd->transport_complete_callback) {
2185                 sense_reason_t rc;
2186                 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2187                 bool zero_dl = !(cmd->data_length);
2188                 int post_ret = 0;
2189
2190                 rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2191                 if (!rc && !post_ret) {
2192                         if (caw && zero_dl)
2193                                 goto queue_rsp;
2194
2195                         return;
2196                 } else if (rc) {
2197                         ret = transport_send_check_condition_and_sense(cmd,
2198                                                 rc, 0);
2199                         if (ret == -EAGAIN || ret == -ENOMEM)
2200                                 goto queue_full;
2201
2202                         transport_lun_remove_cmd(cmd);
2203                         transport_cmd_check_stop_to_fabric(cmd);
2204                         return;
2205                 }
2206         }
2207
2208 queue_rsp:
2209         switch (cmd->data_direction) {
2210         case DMA_FROM_DEVICE:
2211                 if (cmd->scsi_status)
2212                         goto queue_status;
2213
2214                 atomic_long_add(cmd->data_length,
2215                                 &cmd->se_lun->lun_stats.tx_data_octets);
2216                 /*
2217                  * Perform READ_STRIP of PI using software emulation when
2218                  * backend had PI enabled, if the transport will not be
2219                  * performing hardware READ_STRIP offload.
2220                  */
2221                 if (target_read_prot_action(cmd)) {
2222                         ret = transport_send_check_condition_and_sense(cmd,
2223                                                 cmd->pi_err, 0);
2224                         if (ret == -EAGAIN || ret == -ENOMEM)
2225                                 goto queue_full;
2226
2227                         transport_lun_remove_cmd(cmd);
2228                         transport_cmd_check_stop_to_fabric(cmd);
2229                         return;
2230                 }
2231
2232                 trace_target_cmd_complete(cmd);
2233                 ret = cmd->se_tfo->queue_data_in(cmd);
2234                 if (ret == -EAGAIN || ret == -ENOMEM)
2235                         goto queue_full;
2236                 break;
2237         case DMA_TO_DEVICE:
2238                 atomic_long_add(cmd->data_length,
2239                                 &cmd->se_lun->lun_stats.rx_data_octets);
2240                 /*
2241                  * Check if we need to send READ payload for BIDI-COMMAND
2242                  */
2243                 if (cmd->se_cmd_flags & SCF_BIDI) {
2244                         atomic_long_add(cmd->data_length,
2245                                         &cmd->se_lun->lun_stats.tx_data_octets);
2246                         ret = cmd->se_tfo->queue_data_in(cmd);
2247                         if (ret == -EAGAIN || ret == -ENOMEM)
2248                                 goto queue_full;
2249                         break;
2250                 }
2251                 /* Fall through for DMA_TO_DEVICE */
2252         case DMA_NONE:
2253 queue_status:
2254                 trace_target_cmd_complete(cmd);
2255                 ret = cmd->se_tfo->queue_status(cmd);
2256                 if (ret == -EAGAIN || ret == -ENOMEM)
2257                         goto queue_full;
2258                 break;
2259         default:
2260                 break;
2261         }
2262
2263         transport_lun_remove_cmd(cmd);
2264         transport_cmd_check_stop_to_fabric(cmd);
2265         return;
2266
2267 queue_full:
2268         pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2269                 " data_direction: %d\n", cmd, cmd->data_direction);
2270         cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
2271         transport_handle_queue_full(cmd, cmd->se_dev);
2272 }
2273
2274 void target_free_sgl(struct scatterlist *sgl, int nents)
2275 {
2276         struct scatterlist *sg;
2277         int count;
2278
2279         for_each_sg(sgl, sg, nents, count)
2280                 __free_page(sg_page(sg));
2281
2282         kfree(sgl);
2283 }
2284 EXPORT_SYMBOL(target_free_sgl);
2285
2286 static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2287 {
2288         /*
2289          * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
2290          * emulation, and free + reset pointers if necessary.
2291          */
2292         if (!cmd->t_data_sg_orig)
2293                 return;
2294
2295         kfree(cmd->t_data_sg);
2296         cmd->t_data_sg = cmd->t_data_sg_orig;
2297         cmd->t_data_sg_orig = NULL;
2298         cmd->t_data_nents = cmd->t_data_nents_orig;
2299         cmd->t_data_nents_orig = 0;
2300 }
2301
2302 static inline void transport_free_pages(struct se_cmd *cmd)
2303 {
2304         if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2305                 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
2306                 cmd->t_prot_sg = NULL;
2307                 cmd->t_prot_nents = 0;
2308         }
2309
2310         if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
2311                 /*
2312                  * Release special case READ buffer payload required for
2313                  * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
2314                  */
2315                 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
2316                         target_free_sgl(cmd->t_bidi_data_sg,
2317                                            cmd->t_bidi_data_nents);
2318                         cmd->t_bidi_data_sg = NULL;
2319                         cmd->t_bidi_data_nents = 0;
2320                 }
2321                 transport_reset_sgl_orig(cmd);
2322                 return;
2323         }
2324         transport_reset_sgl_orig(cmd);
2325
2326         target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
2327         cmd->t_data_sg = NULL;
2328         cmd->t_data_nents = 0;
2329
2330         target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
2331         cmd->t_bidi_data_sg = NULL;
2332         cmd->t_bidi_data_nents = 0;
2333 }
2334
2335 /**
2336  * transport_put_cmd - release a reference to a command
2337  * @cmd:       command to release
2338  *
2339  * This routine releases our reference to the command and frees it if possible.
2340  */
2341 static int transport_put_cmd(struct se_cmd *cmd)
2342 {
2343         BUG_ON(!cmd->se_tfo);
2344         /*
2345          * If this cmd has been setup with target_get_sess_cmd(), drop
2346          * the kref and call ->release_cmd() in kref callback.
2347          */
2348         return target_put_sess_cmd(cmd);
2349 }
2350
2351 void *transport_kmap_data_sg(struct se_cmd *cmd)
2352 {
2353         struct scatterlist *sg = cmd->t_data_sg;
2354         struct page **pages;
2355         int i;
2356
2357         /*
2358          * We need to take into account a possible offset here for fabrics like
2359          * tcm_loop that may be using a contig buffer from the SCSI midlayer for
2360          * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
2361          */
2362         if (!cmd->t_data_nents)
2363                 return NULL;
2364
2365         BUG_ON(!sg);
2366         if (cmd->t_data_nents == 1)
2367                 return kmap(sg_page(sg)) + sg->offset;
2368
2369         /* >1 page. use vmap */
2370         pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
2371         if (!pages)
2372                 return NULL;
2373
2374         /* convert sg[] to pages[] */
2375         for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2376                 pages[i] = sg_page(sg);
2377         }
2378
2379         cmd->t_data_vmap = vmap(pages, cmd->t_data_nents,  VM_MAP, PAGE_KERNEL);
2380         kfree(pages);
2381         if (!cmd->t_data_vmap)
2382                 return NULL;
2383
2384         return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2385 }
2386 EXPORT_SYMBOL(transport_kmap_data_sg);
2387
2388 void transport_kunmap_data_sg(struct se_cmd *cmd)
2389 {
2390         if (!cmd->t_data_nents) {
2391                 return;
2392         } else if (cmd->t_data_nents == 1) {
2393                 kunmap(sg_page(cmd->t_data_sg));
2394                 return;
2395         }
2396
2397         vunmap(cmd->t_data_vmap);
2398         cmd->t_data_vmap = NULL;
2399 }
2400 EXPORT_SYMBOL(transport_kunmap_data_sg);
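/*
 * Illustrative sketch: backend and emulation code brackets access to
 * the command data buffer with the kmap/kunmap pair above, e.g.
 *
 *	unsigned char *buf = transport_kmap_data_sg(cmd);
 *
 *	if (!buf)
 *		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 *	... read or fill at most cmd->data_length bytes at buf ...
 *	transport_kunmap_data_sg(cmd);
 *
 * The mapping may use vmap() and can sleep, so it is only valid from
 * process context and must always be paired with the kunmap call.
 */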
2401
2402 int
2403 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
2404                  bool zero_page, bool chainable)
2405 {
2406         struct scatterlist *sg;
2407         struct page *page;
2408         gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
2409         unsigned int nalloc, nent;
2410         int i = 0;
2411
2412         nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
2413         if (chainable)
2414                 nalloc++;
2415         sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
2416         if (!sg)
2417                 return -ENOMEM;
2418
2419         sg_init_table(sg, nalloc);
2420
2421         while (length) {
2422                 u32 page_len = min_t(u32, length, PAGE_SIZE);
2423                 page = alloc_page(GFP_KERNEL | zero_flag);
2424                 if (!page)
2425                         goto out;
2426
2427                 sg_set_page(&sg[i], page, page_len, 0);
2428                 length -= page_len;
2429                 i++;
2430         }
2431         *sgl = sg;
2432         *nents = nent;
2433         return 0;
2434
2435 out:
2436         while (i > 0) {
2437                 i--;
2438                 __free_page(sg_page(&sg[i]));
2439         }
2440         kfree(sg);
2441         return -ENOMEM;
2442 }
2443 EXPORT_SYMBOL(target_alloc_sgl);
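/*
 * Illustrative sketch: callers pair target_alloc_sgl() with
 * target_free_sgl(), e.g. when building a private scratch buffer:
 *
 *	struct scatterlist *sgl;
 *	unsigned int nents;
 *
 *	if (target_alloc_sgl(&sgl, &nents, len, true, false) < 0)
 *		return TCM_OUT_OF_RESOURCES;
 *	... use sgl / nents ...
 *	target_free_sgl(sgl, nents);
 *
 * Passing zero_page = true costs __GFP_ZERO on each page, but avoids
 * returning stale memory if the buffer is only partially filled.
 */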
2444
2445 /*
2446  * Allocate any required resources to execute the command.  For writes we
2447  * might not have the payload yet, so notify the fabric via a call to
2448  * ->write_pending instead. Otherwise place it on the execution queue.
2449  */
2450 sense_reason_t
2451 transport_generic_new_cmd(struct se_cmd *cmd)
2452 {
2453         int ret = 0;
2454         bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
2455
2456         if (cmd->prot_op != TARGET_PROT_NORMAL &&
2457             !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2458                 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
2459                                        cmd->prot_length, true, false);
2460                 if (ret < 0)
2461                         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2462         }
2463
2464         /*
2465          * Determine if the TCM fabric module has already allocated physical
2466          * memory, and is directly calling transport_generic_map_mem_to_cmd()
2467          * beforehand.
2468          */
2469         if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2470             cmd->data_length) {
2471
2472                 if ((cmd->se_cmd_flags & SCF_BIDI) ||
2473                     (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2474                         u32 bidi_length;
2475
2476                         if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2477                                 bidi_length = cmd->t_task_nolb *
2478                                               cmd->se_dev->dev_attrib.block_size;
2479                         else
2480                                 bidi_length = cmd->data_length;
2481
2482                         ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2483                                                &cmd->t_bidi_data_nents,
2484                                                bidi_length, zero_flag, false);
2485                         if (ret < 0)
2486                                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2487                 }
2488
2489                 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2490                                        cmd->data_length, zero_flag, false);
2491                 if (ret < 0)
2492                         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2493         } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2494                     cmd->data_length) {
2495                 /*
2496                  * Special case for COMPARE_AND_WRITE with fabrics
2497                  * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
2498                  */
2499                 u32 caw_length = cmd->t_task_nolb *
2500                                  cmd->se_dev->dev_attrib.block_size;
2501
2502                 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2503                                        &cmd->t_bidi_data_nents,
2504                                        caw_length, zero_flag, false);
2505                 if (ret < 0)
2506                         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2507         }
2508         /*
2509          * If this command is not a write we can execute it right here;
2510          * for write buffers we need to notify the fabric driver first
2511          * and let it call back once the write buffers are ready.
2512          */
2513         target_add_to_state_list(cmd);
2514         if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
2515                 target_execute_cmd(cmd);
2516                 return 0;
2517         }
2518         transport_cmd_check_stop(cmd, false, true);
2519
2520         ret = cmd->se_tfo->write_pending(cmd);
2521         if (ret == -EAGAIN || ret == -ENOMEM)
2522                 goto queue_full;
2523
2524         /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
2525         WARN_ON(ret);
2526
2527         return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2528
2529 queue_full:
2530         pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
2531         cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
2532         transport_handle_queue_full(cmd, cmd->se_dev);
2533         return 0;
2534 }
2535 EXPORT_SYMBOL(transport_generic_new_cmd);
2536
2537 static void transport_write_pending_qf(struct se_cmd *cmd)
2538 {
2539         int ret;
2540
2541         ret = cmd->se_tfo->write_pending(cmd);
2542         if (ret == -EAGAIN || ret == -ENOMEM) {
2543                 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
2544                          cmd);
2545                 transport_handle_queue_full(cmd, cmd->se_dev);
2546         }
2547 }
2548
2549 static bool
2550 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2551                            unsigned long *flags);
2552
2553 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2554 {
2555         unsigned long flags;
2556
2557         spin_lock_irqsave(&cmd->t_state_lock, flags);
2558         __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2559         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2560 }
2561
2562 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2563 {
2564         int ret = 0;
2565         bool aborted = false, tas = false;
2566
2567         if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
2568                 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2569                         target_wait_free_cmd(cmd, &aborted, &tas);
2570
2571                 if (!aborted || tas)
2572                         ret = transport_put_cmd(cmd);
2573         } else {
2574                 if (wait_for_tasks)
2575                         target_wait_free_cmd(cmd, &aborted, &tas);
2576                 /*
2577                  * Handle WRITE failure case where transport_generic_new_cmd()
2578                  * has already added se_cmd to state_list, but fabric has
2579                  * failed command before I/O submission.
2580                  */
2581                 if (cmd->state_active)
2582                         target_remove_from_state_list(cmd);
2583
2584                 if (cmd->se_lun)
2585                         transport_lun_remove_cmd(cmd);
2586
2587                 if (!aborted || tas)
2588                         ret = transport_put_cmd(cmd);
2589         }
2590         /*
2591          * If the task has been internally aborted due to TMR ABORT_TASK
2592          * or LUN_RESET, target_core_tmr.c is responsible for performing
2593          * the remaining calls to target_put_sess_cmd(), and not the
2594          * callers of this function.
2595          */
2596         if (aborted) {
2597                 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2598                 wait_for_completion(&cmd->cmd_wait_comp);
2599                 cmd->se_tfo->release_cmd(cmd);
2600                 ret = 1;
2601         }
2602         return ret;
2603 }
2604 EXPORT_SYMBOL(transport_generic_free_cmd);
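/*
 * Illustrative sketch (fc is a placeholder): a fabric completion path
 * normally drops its descriptor reference through this helper, e.g.
 *
 *	transport_generic_free_cmd(&fc->se_cmd, 0);
 *
 * Passing wait_for_tasks = 1 additionally blocks in
 * __transport_wait_for_tasks() until target-core has quiesced the
 * command, which sleeps and therefore requires process context.
 */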
2605
2606 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
2607  * @se_cmd:     command descriptor to add
2608  * @ack_kref:   Signal that fabric will perform an ack target_put_sess_cmd()
2609  */
2610 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2611 {
2612         struct se_session *se_sess = se_cmd->se_sess;
2613         unsigned long flags;
2614         int ret = 0;
2615
2616         /*
2617          * Add a second kref if the fabric caller is expecting to handle
2618          * fabric acknowledgement that requires two target_put_sess_cmd()
2619          * invocations before se_cmd descriptor release.
2620          */
2621         if (ack_kref) {
2622                 if (!kref_get_unless_zero(&se_cmd->cmd_kref))
2623                         return -EINVAL;
2624
2625                 se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2626         }
2627
2628         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2629         if (se_sess->sess_tearing_down) {
2630                 ret = -ESHUTDOWN;
2631                 goto out;
2632         }
2633         se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
2634         list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2635 out:
2636         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2637
2638         if (ret && ack_kref)
2639                 target_put_sess_cmd(se_cmd);
2640
2641         return ret;
2642 }
2643 EXPORT_SYMBOL(target_get_sess_cmd);
2644
2645 static void target_free_cmd_mem(struct se_cmd *cmd)
2646 {
2647         transport_free_pages(cmd);
2648
2649         if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2650                 core_tmr_release_req(cmd->se_tmr_req);
2651         if (cmd->t_task_cdb != cmd->__t_task_cdb)
2652                 kfree(cmd->t_task_cdb);
2653 }
2654
2655 static void target_release_cmd_kref(struct kref *kref)
2656 {
2657         struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2658         struct se_session *se_sess = se_cmd->se_sess;
2659         unsigned long flags;
2660         bool fabric_stop;
2661
2662         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2663
2664         spin_lock(&se_cmd->t_state_lock);
2665         fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
2666                       (se_cmd->transport_state & CMD_T_ABORTED);
2667         spin_unlock(&se_cmd->t_state_lock);
2668
2669         if (se_cmd->cmd_wait_set || fabric_stop) {
2670                 list_del_init(&se_cmd->se_cmd_list);
2671                 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2672                 target_free_cmd_mem(se_cmd);
2673                 complete(&se_cmd->cmd_wait_comp);
2674                 return;
2675         }
2676         list_del_init(&se_cmd->se_cmd_list);
2677         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2678
2679         target_free_cmd_mem(se_cmd);
2680         se_cmd->se_tfo->release_cmd(se_cmd);
2681 }
2682
2683 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put
2684  * @se_cmd:     command descriptor to drop
2685  */
2686 int target_put_sess_cmd(struct se_cmd *se_cmd)
2687 {
2688         struct se_session *se_sess = se_cmd->se_sess;
2689
2690         if (!se_sess) {
2691                 target_free_cmd_mem(se_cmd);
2692                 se_cmd->se_tfo->release_cmd(se_cmd);
2693                 return 1;
2694         }
2695         return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
2696 }
2697 EXPORT_SYMBOL(target_put_sess_cmd);
2698
2699 /* target_sess_cmd_list_set_waiting - Flag all commands in
2700  *         sess_cmd_list to complete cmd_wait_comp.  Set
2701  *         sess_tearing_down so no more commands are queued.
2702  * @se_sess:    session to flag
2703  */
2704 void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2705 {
2706         struct se_cmd *se_cmd, *tmp_cmd;
2707         unsigned long flags;
2708         int rc;
2709
2710         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2711         if (se_sess->sess_tearing_down) {
2712                 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2713                 return;
2714         }
2715         se_sess->sess_tearing_down = 1;
2716         list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
2717
2718         list_for_each_entry_safe(se_cmd, tmp_cmd,
2719                                  &se_sess->sess_wait_list, se_cmd_list) {
2720                 rc = kref_get_unless_zero(&se_cmd->cmd_kref);
2721                 if (rc) {
2722                         se_cmd->cmd_wait_set = 1;
2723                         spin_lock(&se_cmd->t_state_lock);
2724                         se_cmd->transport_state |= CMD_T_FABRIC_STOP;
2725                         spin_unlock(&se_cmd->t_state_lock);
2726                 } else
2727                         list_del_init(&se_cmd->se_cmd_list);
2728         }
2729
2730         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2731 }
2732 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2733
2734 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
2735  * @se_sess:    session to wait for active I/O
2736  */
2737 void target_wait_for_sess_cmds(struct se_session *se_sess)
2738 {
2739         struct se_cmd *se_cmd, *tmp_cmd;
2740         unsigned long flags;
2741         bool tas;
2742
2743         list_for_each_entry_safe(se_cmd, tmp_cmd,
2744                                 &se_sess->sess_wait_list, se_cmd_list) {
2745                 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
2746                         " %d\n", se_cmd, se_cmd->t_state,
2747                         se_cmd->se_tfo->get_cmd_state(se_cmd));
2748
2749                 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2750                 tas = (se_cmd->transport_state & CMD_T_TAS);
2751                 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2752
2753                 if (!target_put_sess_cmd(se_cmd)) {
2754                         if (tas)
2755                                 target_put_sess_cmd(se_cmd);
2756                 }
2757
2758                 wait_for_completion(&se_cmd->cmd_wait_comp);
2759                 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
2760                         " fabric state: %d\n", se_cmd, se_cmd->t_state,
2761                         se_cmd->se_tfo->get_cmd_state(se_cmd));
2762
2763                 se_cmd->se_tfo->release_cmd(se_cmd);
2764         }
2765
2766         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2767         WARN_ON(!list_empty(&se_sess->sess_cmd_list));
2768         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2769
2770 }
2771 EXPORT_SYMBOL(target_wait_for_sess_cmds);
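/*
 * Illustrative sketch: fabric session shutdown flags the session first
 * and then waits for outstanding descriptors before releasing it, e.g.
 *
 *	target_sess_cmd_list_set_waiting(se_sess);
 *	target_wait_for_sess_cmds(se_sess);
 *	transport_deregister_session(se_sess);
 *
 * transport_deregister_session() is defined elsewhere in target-core
 * and is shown only to place the wait within the teardown sequence.
 */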
2772
2773 static void target_lun_confirm(struct percpu_ref *ref)
2774 {
2775         struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
2776
2777         complete(&lun->lun_ref_comp);
2778 }
2779
2780 void transport_clear_lun_ref(struct se_lun *lun)
2781 {
2782         /*
2783          * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
2784          * the initial reference and schedule confirm kill to be
2785          * executed after one full RCU grace period has completed.
2786          */
2787         percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
2788         /*
2789          * The first completion waits for percpu_ref_switch_to_atomic_rcu()
2790          * to call target_lun_confirm after lun->lun_ref has been marked
2791          * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
2792          * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
2793          * fails for all new incoming I/O.
2794          */
2795         wait_for_completion(&lun->lun_ref_comp);
2796         /*
2797          * The second completion waits for percpu_ref_put_many() to
2798          * invoke ->release() after lun->lun_ref has switched to
2799          * atomic_t mode, and lun->lun_ref.count has reached zero.
2800          *
2801          * At this point all target-core lun->lun_ref references have
2802          * been dropped via transport_lun_remove_cmd(), and it's safe
2803          * to proceed with the remaining LUN shutdown.
2804          */
2805         wait_for_completion(&lun->lun_shutdown_comp);
2806 }
2807
2808 static bool
2809 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
2810                            bool *aborted, bool *tas, unsigned long *flags)
2811         __releases(&cmd->t_state_lock)
2812         __acquires(&cmd->t_state_lock)
2813 {
2814         lockdep_assert_held(&cmd->t_state_lock);
2815
2816         if (fabric_stop)
2817                 cmd->transport_state |= CMD_T_FABRIC_STOP;
2818
2819         if (cmd->transport_state & CMD_T_ABORTED)
2820                 *aborted = true;
2821
2822         if (cmd->transport_state & CMD_T_TAS)
2823                 *tas = true;
2824
2825         if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
2826             !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2827                 return false;
2828
2829         if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
2830             !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2831                 return false;
2832
2833         if (!(cmd->transport_state & CMD_T_ACTIVE))
2834                 return false;
2835
2836         if (fabric_stop && *aborted)
2837                 return false;
2838
2839         cmd->transport_state |= CMD_T_STOP;
2840
2841         pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
2842                  " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
2843                  cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
2844
2845         spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
2846
2847         wait_for_completion(&cmd->t_transport_stop_comp);
2848
2849         spin_lock_irqsave(&cmd->t_state_lock, *flags);
2850         cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
2851
2852         pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
2853                  "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
2854
2855         return true;
2856 }
2857
2858 /**
2859  * transport_wait_for_tasks - wait for completion to occur
2860  * @cmd:        command to wait on
2861  *
2862  * Called from frontend fabric context to wait for storage engine
2863  * to pause and/or release frontend generated struct se_cmd.
2864  */
2865 bool transport_wait_for_tasks(struct se_cmd *cmd)
2866 {
2867         unsigned long flags;
2868         bool ret, aborted = false, tas = false;
2869
2870         spin_lock_irqsave(&cmd->t_state_lock, flags);
2871         ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
2872         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2873
2874         return ret;
2875 }
2876 EXPORT_SYMBOL(transport_wait_for_tasks);
2877
2878 struct sense_info {
2879         u8 key;
2880         u8 asc;
2881         u8 ascq;
2882         bool add_sector_info;
2883 };
2884
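     /*
      * Fixed mapping from sense_reason_t values to the SCSI sense key,
      * additional sense code (ASC) and ASC qualifier returned to the
      * initiator by translate_sense_reason().
      */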
2885 static const struct sense_info sense_info_table[] = {
2886         [TCM_NO_SENSE] = {
2887                 .key = NOT_READY
2888         },
2889         [TCM_NON_EXISTENT_LUN] = {
2890                 .key = ILLEGAL_REQUEST,
2891                 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
2892         },
2893         [TCM_UNSUPPORTED_SCSI_OPCODE] = {
2894                 .key = ILLEGAL_REQUEST,
2895                 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
2896         },
2897         [TCM_SECTOR_COUNT_TOO_MANY] = {
2898                 .key = ILLEGAL_REQUEST,
2899                 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
2900         },
2901         [TCM_UNKNOWN_MODE_PAGE] = {
2902                 .key = ILLEGAL_REQUEST,
2903                 .asc = 0x24, /* INVALID FIELD IN CDB */
2904         },
2905         [TCM_CHECK_CONDITION_ABORT_CMD] = {
2906                 .key = ABORTED_COMMAND,
2907                 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
2908                 .ascq = 0x03,
2909         },
2910         [TCM_INCORRECT_AMOUNT_OF_DATA] = {
2911                 .key = ABORTED_COMMAND,
2912                 .asc = 0x0c, /* WRITE ERROR */
2913                 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
2914         },
2915         [TCM_INVALID_CDB_FIELD] = {
2916                 .key = ILLEGAL_REQUEST,
2917                 .asc = 0x24, /* INVALID FIELD IN CDB */
2918         },
2919         [TCM_INVALID_PARAMETER_LIST] = {
2920                 .key = ILLEGAL_REQUEST,
2921                 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
2922         },
2923         [TCM_TOO_MANY_TARGET_DESCS] = {
2924                 .key = ILLEGAL_REQUEST,
2925                 .asc = 0x26,
2926                 .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
2927         },
2928         [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
2929                 .key = ILLEGAL_REQUEST,
2930                 .asc = 0x26,
2931                 .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
2932         },
2933         [TCM_TOO_MANY_SEGMENT_DESCS] = {
2934                 .key = ILLEGAL_REQUEST,
2935                 .asc = 0x26,
2936                 .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
2937         },
2938         [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
2939                 .key = ILLEGAL_REQUEST,
2940                 .asc = 0x26,
2941                 .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
2942         },
2943         [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
2944                 .key = ILLEGAL_REQUEST,
2945                 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
2946         },
2947         [TCM_UNEXPECTED_UNSOLICITED_DATA] = {
2948                 .key = ILLEGAL_REQUEST,
2949                 .asc = 0x0c, /* WRITE ERROR */
2950                 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
2951         },
2952         [TCM_SERVICE_CRC_ERROR] = {
2953                 .key = ABORTED_COMMAND,
2954                 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
2955                 .ascq = 0x05, /* N/A */
2956         },
2957         [TCM_SNACK_REJECTED] = {
2958                 .key = ABORTED_COMMAND,
2959                 .asc = 0x11, /* READ ERROR */
2960                 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
2961         },
2962         [TCM_WRITE_PROTECTED] = {
2963                 .key = DATA_PROTECT,
2964                 .asc = 0x27, /* WRITE PROTECTED */
2965         },
2966         [TCM_ADDRESS_OUT_OF_RANGE] = {
2967                 .key = ILLEGAL_REQUEST,
2968                 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
2969         },
2970         [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
2971                 .key = UNIT_ATTENTION,
2972         },
2973         [TCM_CHECK_CONDITION_NOT_READY] = {
2974                 .key = NOT_READY,
2975         },
2976         [TCM_MISCOMPARE_VERIFY] = {
2977                 .key = MISCOMPARE,
2978                 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
2979                 .ascq = 0x00,
2980         },
2981         [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
2982                 .key = ABORTED_COMMAND,
2983                 .asc = 0x10,
2984                 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
2985                 .add_sector_info = true,
2986         },
2987         [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
2988                 .key = ABORTED_COMMAND,
2989                 .asc = 0x10,
2990                 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
2991                 .add_sector_info = true,
2992         },
2993         [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
2994                 .key = ABORTED_COMMAND,
2995                 .asc = 0x10,
2996                 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
2997                 .add_sector_info = true,
2998         },
2999         [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
3000                 .key = COPY_ABORTED,
3001                 .asc = 0x0d,
3002                 .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
3003
3004         },
3005         [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
3006                 /*
3007                  * Returning ILLEGAL REQUEST would cause immediate IO errors on
3008                  * Solaris initiators.  Returning NOT READY instead means the
3009                  * operations will be retried a finite number of times and we
3010                  * can survive intermittent errors.
3011                  */
3012                 .key = NOT_READY,
3013                 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
3014         },
3015 };
3016
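     /*
      * Build the sense payload in cmd->sense_buffer for @reason, falling back
      * to TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE for reasons without a table
      * entry.  Unit attentions take their ASC/ASCQ from the UA list, entries
      * with a zero ASC take them from the command itself, and DIF check
      * failures additionally record cmd->bad_sector via
      * scsi_set_sense_information().
      */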
3017 static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
3018 {
3019         const struct sense_info *si;
3020         u8 *buffer = cmd->sense_buffer;
3021         int r = (__force int)reason;
3022         u8 asc, ascq;
3023         bool desc_format = target_sense_desc_format(cmd->se_dev);
3024
3025         if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
3026                 si = &sense_info_table[r];
3027         else
3028                 si = &sense_info_table[(__force int)
3029                                        TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
3030
3031         if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
3032                 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
3033                 WARN_ON_ONCE(asc == 0);
3034         } else if (si->asc == 0) {
3035                 WARN_ON_ONCE(cmd->scsi_asc == 0);
3036                 asc = cmd->scsi_asc;
3037                 ascq = cmd->scsi_ascq;
3038         } else {
3039                 asc = si->asc;
3040                 ascq = si->ascq;
3041         }
3042
3043         scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
3044         if (si->add_sector_info)
3045                 return scsi_set_sense_information(buffer,
3046                                                   cmd->scsi_sense_length,
3047                                                   cmd->bad_sector);
3048
3049         return 0;
3050 }
3051
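     /*
      * Send CHECK_CONDITION status with sense data for @reason to the fabric.
      * Sense data is generated here unless @from_transport indicates the
      * backend already filled cmd->sense_buffer.  Only the first call per
      * command does anything; SCF_SENT_CHECK_CONDITION guards against
      * duplicate status.
      */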
3052 int
3053 transport_send_check_condition_and_sense(struct se_cmd *cmd,
3054                 sense_reason_t reason, int from_transport)
3055 {
3056         unsigned long flags;
3057
3058         spin_lock_irqsave(&cmd->t_state_lock, flags);
3059         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3060                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3061                 return 0;
3062         }
3063         cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
3064         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3065
3066         if (!from_transport) {
3067                 int rc;
3068
3069                 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
3070                 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
3071                 cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER;
3072                 rc = translate_sense_reason(cmd, reason);
3073                 if (rc)
3074                         return rc;
3075         }
3076
3077         trace_target_cmd_complete(cmd);
3078         return cmd->se_tfo->queue_status(cmd);
3079 }
3080 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
3081
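     /*
      * Called with cmd->t_state_lock held and interrupts disabled.  Returns 0
      * when the command has not been aborted and 1 otherwise; in the latter
      * case a delayed SAM_STAT_TASK_ABORTED is queued to the fabric (dropping
      * the lock around ->queue_status()) once @send_status is set and
      * SCF_SEND_DELAYED_TAS is pending.
      */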
3082 static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3083         __releases(&cmd->t_state_lock)
3084         __acquires(&cmd->t_state_lock)
3085 {
3086         assert_spin_locked(&cmd->t_state_lock);
3087         WARN_ON_ONCE(!irqs_disabled());
3088
3089         if (!(cmd->transport_state & CMD_T_ABORTED))
3090                 return 0;
3091         /*
3092          * If cmd has been aborted but either no status is to be sent or it has
3093          * already been sent, just return
3094          */
3095         if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
3096                 if (send_status)
3097                         cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3098                 return 1;
3099         }
3100
3101         pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
3102                 " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
3103
3104         cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
3105         cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3106         trace_target_cmd_complete(cmd);
3107
3108         spin_unlock_irq(&cmd->t_state_lock);
3109         cmd->se_tfo->queue_status(cmd);
3110         spin_lock_irq(&cmd->t_state_lock);
3111
3112         return 1;
3113 }
3114
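     /* Lock-taking wrapper around __transport_check_aborted_status(). */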
3115 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3116 {
3117         int ret;
3118
3119         spin_lock_irq(&cmd->t_state_lock);
3120         ret = __transport_check_aborted_status(cmd, send_status);
3121         spin_unlock_irq(&cmd->t_state_lock);
3122
3123         return ret;
3124 }
3125 EXPORT_SYMBOL(transport_check_aborted_status);
3126
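     /*
      * Report SAM_STAT_TASK_ABORTED for an aborted command, unless CHECK
      * CONDITION status has already been sent.  WRITEs that still expect
      * fabric data defer the status via SCF_SEND_DELAYED_TAS instead.
      */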
3127 void transport_send_task_abort(struct se_cmd *cmd)
3128 {
3129         unsigned long flags;
3130
3131         spin_lock_irqsave(&cmd->t_state_lock, flags);
3132         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3133                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3134                 return;
3135         }
3136         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3137
3138         /*
3139          * If there are still expected incoming fabric WRITEs, we wait
3140          * until they have completed before sending a TASK_ABORTED
3141          * response.  This response with TASK_ABORTED status will be
3142          * queued back to fabric module by transport_check_aborted_status().
3143          */
3144         if (cmd->data_direction == DMA_TO_DEVICE) {
3145                 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
3146                         spin_lock_irqsave(&cmd->t_state_lock, flags);
3147                         if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
3148                                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3149                                 goto send_abort;
3150                         }
3151                         cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3152                         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3153                         return;
3154                 }
3155         }
3156 send_abort:
3157         cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3158
3159         transport_lun_remove_cmd(cmd);
3160
3161         pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
3162                  cmd->t_task_cdb[0], cmd->tag);
3163
3164         trace_target_cmd_complete(cmd);
3165         cmd->se_tfo->queue_status(cmd);
3166 }
3167
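     /*
      * Workqueue handler for task management requests: dispatch the TMR
      * function against the backend device, then queue the TM response to
      * the fabric unless the TMR itself was aborted in the meantime.
      */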
3168 static void target_tmr_work(struct work_struct *work)
3169 {
3170         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3171         struct se_device *dev = cmd->se_dev;
3172         struct se_tmr_req *tmr = cmd->se_tmr_req;
3173         unsigned long flags;
3174         int ret;
3175
3176         spin_lock_irqsave(&cmd->t_state_lock, flags);
3177         if (cmd->transport_state & CMD_T_ABORTED) {
3178                 tmr->response = TMR_FUNCTION_REJECTED;
3179                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3180                 goto check_stop;
3181         }
3182         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3183
3184         switch (tmr->function) {
3185         case TMR_ABORT_TASK:
3186                 core_tmr_abort_task(dev, tmr, cmd->se_sess);
3187                 break;
3188         case TMR_ABORT_TASK_SET:
3189         case TMR_CLEAR_ACA:
3190         case TMR_CLEAR_TASK_SET:
3191                 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3192                 break;
3193         case TMR_LUN_RESET:
3194                 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3195                 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
3196                                          TMR_FUNCTION_REJECTED;
3197                 if (tmr->response == TMR_FUNCTION_COMPLETE) {
3198                         target_ua_allocate_lun(cmd->se_sess->se_node_acl,
3199                                                cmd->orig_fe_lun, 0x29,
3200                                                ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
3201                 }
3202                 break;
3203         case TMR_TARGET_WARM_RESET:
3204                 tmr->response = TMR_FUNCTION_REJECTED;
3205                 break;
3206         case TMR_TARGET_COLD_RESET:
3207                 tmr->response = TMR_FUNCTION_REJECTED;
3208                 break;
3209         default:
3210                 pr_err("Unknown TMR function: 0x%02x.\n",
3211                                 tmr->function);
3212                 tmr->response = TMR_FUNCTION_REJECTED;
3213                 break;
3214         }
3215
3216         spin_lock_irqsave(&cmd->t_state_lock, flags);
3217         if (cmd->transport_state & CMD_T_ABORTED) {
3218                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3219                 goto check_stop;
3220         }
3221         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3222
3223         cmd->se_tfo->queue_tm_rsp(cmd);
3224
3225 check_stop:
3226         transport_cmd_check_stop_to_fabric(cmd);
3227 }
3228
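     /*
      * Fabric entry point for handing a received TMR to target-core.  The
      * real work runs from the device tmr_wq via target_tmr_work(); TMRs
      * already marked CMD_T_ABORTED are completed back to the fabric right
      * away.
      */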
3229 int transport_generic_handle_tmr(
3230         struct se_cmd *cmd)
3231 {
3232         unsigned long flags;
3233         bool aborted = false;
3234
3235         spin_lock_irqsave(&cmd->t_state_lock, flags);
3236         if (cmd->transport_state & CMD_T_ABORTED) {
3237                 aborted = true;
3238         } else {
3239                 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3240                 cmd->transport_state |= CMD_T_ACTIVE;
3241         }
3242         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3243
3244         if (aborted) {
3245                 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
3246                         " ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
3247                         cmd->se_tmr_req->ref_task_tag, cmd->tag);
3248                 transport_cmd_check_stop_to_fabric(cmd);
3249                 return 0;
3250         }
3251
3252         INIT_WORK(&cmd->work, target_tmr_work);
3253         queue_work(cmd->se_dev->tmr_wq, &cmd->work);
3254         return 0;
3255 }
3256 EXPORT_SYMBOL(transport_generic_handle_tmr);
3257
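     /*
      * Report whether a volatile write cache is in effect, preferring the
      * backend's ->get_write_cache() over the emulate_write_cache attribute.
      */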
3258 bool
3259 target_check_wce(struct se_device *dev)
3260 {
3261         bool wce = false;
3262
3263         if (dev->transport->get_write_cache)
3264                 wce = dev->transport->get_write_cache(dev);
3265         else if (dev->dev_attrib.emulate_write_cache > 0)
3266                 wce = true;
3267
3268         return wce;
3269 }
3270
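     /* Forced Unit Access is only meaningful while a write cache is enabled. */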
3271 bool
3272 target_check_fua(struct se_device *dev)
3273 {
3274         return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
3275 }