source: src/linux/universal/linux-4.9/drivers/target/target_core_transport.c @ 31662

Last change on this file since 31662 was 31662, checked in by brainslayer, 4 months ago

use new squashfs in all kernels

File size: 85.6 KB
Line 
1/*******************************************************************************
2 * Filename:  target_core_transport.c
3 *
4 * This file contains the Generic Target Engine Core.
5 *
6 * (c) Copyright 2002-2013 Datera, Inc.
7 *
8 * Nicholas A. Bellinger <nab@kernel.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 *
24 ******************************************************************************/
25
26#include <linux/net.h>
27#include <linux/delay.h>
28#include <linux/string.h>
29#include <linux/timer.h>
30#include <linux/slab.h>
31#include <linux/spinlock.h>
32#include <linux/kthread.h>
33#include <linux/in.h>
34#include <linux/cdrom.h>
35#include <linux/module.h>
36#include <linux/ratelimit.h>
37#include <linux/vmalloc.h>
38#include <asm/unaligned.h>
39#include <net/sock.h>
40#include <net/tcp.h>
41#include <scsi/scsi_proto.h>
42#include <scsi/scsi_common.h>
43
44#include <target/target_core_base.h>
45#include <target/target_core_backend.h>
46#include <target/target_core_fabric.h>
47
48#include "target_core_internal.h"
49#include "target_core_alua.h"
50#include "target_core_pr.h"
51#include "target_core_ua.h"
52
53#define CREATE_TRACE_POINTS
54#include <trace/events/target.h>
55
56static struct workqueue_struct *target_completion_wq;
57static struct kmem_cache *se_sess_cache;
58struct kmem_cache *se_ua_cache;
59struct kmem_cache *t10_pr_reg_cache;
60struct kmem_cache *t10_alua_lu_gp_cache;
61struct kmem_cache *t10_alua_lu_gp_mem_cache;
62struct kmem_cache *t10_alua_tg_pt_gp_cache;
63struct kmem_cache *t10_alua_lba_map_cache;
64struct kmem_cache *t10_alua_lba_map_mem_cache;
65
66static void transport_complete_task_attr(struct se_cmd *cmd);
67static void transport_handle_queue_full(struct se_cmd *cmd,
68                struct se_device *dev);
69static int transport_put_cmd(struct se_cmd *cmd);
70static void target_complete_ok_work(struct work_struct *work);
71
72int init_se_kmem_caches(void)
73{
74        se_sess_cache = kmem_cache_create("se_sess_cache",
75                        sizeof(struct se_session), __alignof__(struct se_session),
76                        0, NULL);
77        if (!se_sess_cache) {
78                pr_err("kmem_cache_create() for struct se_session"
79                                " failed\n");
80                goto out;
81        }
82        se_ua_cache = kmem_cache_create("se_ua_cache",
83                        sizeof(struct se_ua), __alignof__(struct se_ua),
84                        0, NULL);
85        if (!se_ua_cache) {
86                pr_err("kmem_cache_create() for struct se_ua failed\n");
87                goto out_free_sess_cache;
88        }
89        t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
90                        sizeof(struct t10_pr_registration),
91                        __alignof__(struct t10_pr_registration), 0, NULL);
92        if (!t10_pr_reg_cache) {
93                pr_err("kmem_cache_create() for struct t10_pr_registration"
94                                " failed\n");
95                goto out_free_ua_cache;
96        }
97        t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
98                        sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
99                        0, NULL);
100        if (!t10_alua_lu_gp_cache) {
101                pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
102                                " failed\n");
103                goto out_free_pr_reg_cache;
104        }
105        t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
106                        sizeof(struct t10_alua_lu_gp_member),
107                        __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
108        if (!t10_alua_lu_gp_mem_cache) {
109                pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
110                                "cache failed\n");
111                goto out_free_lu_gp_cache;
112        }
113        t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
114                        sizeof(struct t10_alua_tg_pt_gp),
115                        __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
116        if (!t10_alua_tg_pt_gp_cache) {
117                pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
118                                "cache failed\n");
119                goto out_free_lu_gp_mem_cache;
120        }
121        t10_alua_lba_map_cache = kmem_cache_create(
122                        "t10_alua_lba_map_cache",
123                        sizeof(struct t10_alua_lba_map),
124                        __alignof__(struct t10_alua_lba_map), 0, NULL);
125        if (!t10_alua_lba_map_cache) {
126                pr_err("kmem_cache_create() for t10_alua_lba_map_"
127                                "cache failed\n");
128                goto out_free_tg_pt_gp_cache;
129        }
130        t10_alua_lba_map_mem_cache = kmem_cache_create(
131                        "t10_alua_lba_map_mem_cache",
132                        sizeof(struct t10_alua_lba_map_member),
133                        __alignof__(struct t10_alua_lba_map_member), 0, NULL);
134        if (!t10_alua_lba_map_mem_cache) {
135                pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
136                                "cache failed\n");
137                goto out_free_lba_map_cache;
138        }
139
140        target_completion_wq = alloc_workqueue("target_completion",
141                                               WQ_MEM_RECLAIM, 0);
142        if (!target_completion_wq)
143                goto out_free_lba_map_mem_cache;
144
145        return 0;
146
147out_free_lba_map_mem_cache:
148        kmem_cache_destroy(t10_alua_lba_map_mem_cache);
149out_free_lba_map_cache:
150        kmem_cache_destroy(t10_alua_lba_map_cache);
151out_free_tg_pt_gp_cache:
152        kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
153out_free_lu_gp_mem_cache:
154        kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
155out_free_lu_gp_cache:
156        kmem_cache_destroy(t10_alua_lu_gp_cache);
157out_free_pr_reg_cache:
158        kmem_cache_destroy(t10_pr_reg_cache);
159out_free_ua_cache:
160        kmem_cache_destroy(se_ua_cache);
161out_free_sess_cache:
162        kmem_cache_destroy(se_sess_cache);
163out:
164        return -ENOMEM;
165}
166
167void release_se_kmem_caches(void)
168{
169        destroy_workqueue(target_completion_wq);
170        kmem_cache_destroy(se_sess_cache);
171        kmem_cache_destroy(se_ua_cache);
172        kmem_cache_destroy(t10_pr_reg_cache);
173        kmem_cache_destroy(t10_alua_lu_gp_cache);
174        kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
175        kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
176        kmem_cache_destroy(t10_alua_lba_map_cache);
177        kmem_cache_destroy(t10_alua_lba_map_mem_cache);
178}
179
180/* This code ensures unique mib indexes are handed out. */
181static DEFINE_SPINLOCK(scsi_mib_index_lock);
182static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
183
184/*
185 * Allocate a new row index for the entry type specified
186 */
187u32 scsi_get_new_index(scsi_index_t type)
188{
189        u32 new_index;
190
191        BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
192
193        spin_lock(&scsi_mib_index_lock);
194        new_index = ++scsi_mib_index[type];
195        spin_unlock(&scsi_mib_index_lock);
196
197        return new_index;
198}
199
200void transport_subsystem_check_init(void)
201{
202        int ret;
203        static int sub_api_initialized;
204
205        if (sub_api_initialized)
206                return;
207
208        ret = request_module("target_core_iblock");
209        if (ret != 0)
210                pr_err("Unable to load target_core_iblock\n");
211
212        ret = request_module("target_core_file");
213        if (ret != 0)
214                pr_err("Unable to load target_core_file\n");
215
216        ret = request_module("target_core_pscsi");
217        if (ret != 0)
218                pr_err("Unable to load target_core_pscsi\n");
219
220        ret = request_module("target_core_user");
221        if (ret != 0)
222                pr_err("Unable to load target_core_user\n");
223
224        sub_api_initialized = 1;
225}
226
227struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
228{
229        struct se_session *se_sess;
230
231        se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
232        if (!se_sess) {
233                pr_err("Unable to allocate struct se_session from"
234                                " se_sess_cache\n");
235                return ERR_PTR(-ENOMEM);
236        }
237        INIT_LIST_HEAD(&se_sess->sess_list);
238        INIT_LIST_HEAD(&se_sess->sess_acl_list);
239        INIT_LIST_HEAD(&se_sess->sess_cmd_list);
240        INIT_LIST_HEAD(&se_sess->sess_wait_list);
241        spin_lock_init(&se_sess->sess_cmd_lock);
242        se_sess->sup_prot_ops = sup_prot_ops;
243
244        return se_sess;
245}
246EXPORT_SYMBOL(transport_init_session);
247
248int transport_alloc_session_tags(struct se_session *se_sess,
249                                 unsigned int tag_num, unsigned int tag_size)
250{
251        int rc;
252
253        se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
254                                        GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
255        if (!se_sess->sess_cmd_map) {
256                se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
257                if (!se_sess->sess_cmd_map) {
258                        pr_err("Unable to allocate se_sess->sess_cmd_map\n");
259                        return -ENOMEM;
260                }
261        }
262
263        rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
264        if (rc < 0) {
265                pr_err("Unable to init se_sess->sess_tag_pool,"
266                        " tag_num: %u\n", tag_num);
267                kvfree(se_sess->sess_cmd_map);
268                se_sess->sess_cmd_map = NULL;
269                return -ENOMEM;
270        }
271
272        return 0;
273}
274EXPORT_SYMBOL(transport_alloc_session_tags);
275
276struct se_session *transport_init_session_tags(unsigned int tag_num,
277                                               unsigned int tag_size,
278                                               enum target_prot_op sup_prot_ops)
279{
280        struct se_session *se_sess;
281        int rc;
282
283        if (tag_num != 0 && !tag_size) {
284                pr_err("init_session_tags called with percpu-ida tag_num:"
285                       " %u, but zero tag_size\n", tag_num);
286                return ERR_PTR(-EINVAL);
287        }
288        if (!tag_num && tag_size) {
289                pr_err("init_session_tags called with percpu-ida tag_size:"
290                       " %u, but zero tag_num\n", tag_size);
291                return ERR_PTR(-EINVAL);
292        }
293
294        se_sess = transport_init_session(sup_prot_ops);
295        if (IS_ERR(se_sess))
296                return se_sess;
297
298        rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
299        if (rc < 0) {
300                transport_free_session(se_sess);
301                return ERR_PTR(-ENOMEM);
302        }
303
304        return se_sess;
305}
306EXPORT_SYMBOL(transport_init_session_tags);
307
308/*
309 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called.
310 */
311void __transport_register_session(
312        struct se_portal_group *se_tpg,
313        struct se_node_acl *se_nacl,
314        struct se_session *se_sess,
315        void *fabric_sess_ptr)
316{
317        const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
318        unsigned char buf[PR_REG_ISID_LEN];
319
320        se_sess->se_tpg = se_tpg;
321        se_sess->fabric_sess_ptr = fabric_sess_ptr;
322        /*
323         * Used by struct se_node_acl's under ConfigFS to locate active se_session-t
324         *
325         * Only set for struct se_session's that will actually be moving I/O.
326         * eg: *NOT* discovery sessions.
327         */
328        if (se_nacl) {
329                /*
330                 *
331                 * Determine if fabric allows for T10-PI feature bits exposed to
332                 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
333                 *
334                 * If so, then always save prot_type on a per se_node_acl node
335                 * basis and re-instate the previous sess_prot_type to avoid
336                 * disabling PI from below any previously initiator side
337                 * registered LUNs.
338                 */
339                if (se_nacl->saved_prot_type)
340                        se_sess->sess_prot_type = se_nacl->saved_prot_type;
341                else if (tfo->tpg_check_prot_fabric_only)
342                        se_sess->sess_prot_type = se_nacl->saved_prot_type =
343                                        tfo->tpg_check_prot_fabric_only(se_tpg);
344                /*
345                 * If the fabric module supports an ISID based TransportID,
346                 * save this value in binary from the fabric I_T Nexus now.
347                 */
348                if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
349                        memset(&buf[0], 0, PR_REG_ISID_LEN);
350                        se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
351                                        &buf[0], PR_REG_ISID_LEN);
352                        se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
353                }
354
355                spin_lock_irq(&se_nacl->nacl_sess_lock);
356                /*
357                 * The se_nacl->nacl_sess pointer will be set to the
358                 * last active I_T Nexus for each struct se_node_acl.
359                 */
360                se_nacl->nacl_sess = se_sess;
361
362                list_add_tail(&se_sess->sess_acl_list,
363                              &se_nacl->acl_sess_list);
364                spin_unlock_irq(&se_nacl->nacl_sess_lock);
365        }
366        list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
367
368        pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
369                se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
370}
371EXPORT_SYMBOL(__transport_register_session);
372
373void transport_register_session(
374        struct se_portal_group *se_tpg,
375        struct se_node_acl *se_nacl,
376        struct se_session *se_sess,
377        void *fabric_sess_ptr)
378{
379        unsigned long flags;
380
381        spin_lock_irqsave(&se_tpg->session_lock, flags);
382        __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
383        spin_unlock_irqrestore(&se_tpg->session_lock, flags);
384}
385EXPORT_SYMBOL(transport_register_session);
386
387struct se_session *
388target_alloc_session(struct se_portal_group *tpg,
389                     unsigned int tag_num, unsigned int tag_size,
390                     enum target_prot_op prot_op,
391                     const char *initiatorname, void *private,
392                     int (*callback)(struct se_portal_group *,
393                                     struct se_session *, void *))
394{
395        struct se_session *sess;
396
397        /*
398         * If the fabric driver is using percpu-ida based pre allocation
399         * of I/O descriptor tags, go ahead and perform that setup now..
400         */
401        if (tag_num != 0)
402                sess = transport_init_session_tags(tag_num, tag_size, prot_op);
403        else
404                sess = transport_init_session(prot_op);
405
406        if (IS_ERR(sess))
407                return sess;
408
409        sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
410                                        (unsigned char *)initiatorname);
411        if (!sess->se_node_acl) {
412                transport_free_session(sess);
413                return ERR_PTR(-EACCES);
414        }
415        /*
416         * Go ahead and perform any remaining fabric setup that is
417         * required before transport_register_session().
418         */
419        if (callback != NULL) {
420                int rc = callback(tpg, sess, private);
421                if (rc) {
422                        transport_free_session(sess);
423                        return ERR_PTR(rc);
424                }
425        }
426
427        transport_register_session(tpg, sess->se_node_acl, sess, private);
428        return sess;
429}
430EXPORT_SYMBOL(target_alloc_session);
431
432ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
433{
434        struct se_session *se_sess;
435        ssize_t len = 0;
436
437        spin_lock_bh(&se_tpg->session_lock);
438        list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
439                if (!se_sess->se_node_acl)
440                        continue;
441                if (!se_sess->se_node_acl->dynamic_node_acl)
442                        continue;
443                if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
444                        break;
445
446                len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
447                                se_sess->se_node_acl->initiatorname);
448                len += 1; /* Include NULL terminator */
449        }
450        spin_unlock_bh(&se_tpg->session_lock);
451
452        return len;
453}
454EXPORT_SYMBOL(target_show_dynamic_sessions);
455
456static void target_complete_nacl(struct kref *kref)
457{
458        struct se_node_acl *nacl = container_of(kref,
459                                struct se_node_acl, acl_kref);
460        struct se_portal_group *se_tpg = nacl->se_tpg;
461
462        if (!nacl->dynamic_stop) {
463                complete(&nacl->acl_free_comp);
464                return;
465        }
466
467        mutex_lock(&se_tpg->acl_node_mutex);
468        list_del(&nacl->acl_list);
469        mutex_unlock(&se_tpg->acl_node_mutex);
470
471        core_tpg_wait_for_nacl_pr_ref(nacl);
472        core_free_device_list_for_node(nacl, se_tpg);
473        kfree(nacl);
474}
475
476void target_put_nacl(struct se_node_acl *nacl)
477{
478        kref_put(&nacl->acl_kref, target_complete_nacl);
479}
480EXPORT_SYMBOL(target_put_nacl);
481
482void transport_deregister_session_configfs(struct se_session *se_sess)
483{
484        struct se_node_acl *se_nacl;
485        unsigned long flags;
486        /*
487         * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
488         */
489        se_nacl = se_sess->se_node_acl;
490        if (se_nacl) {
491                spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
492                if (!list_empty(&se_sess->sess_acl_list))
493                        list_del_init(&se_sess->sess_acl_list);
494                /*
495                 * If the session list is empty, then clear the pointer.
496                 * Otherwise, set the struct se_session pointer from the tail
497                 * element of the per struct se_node_acl active session list.
498                 */
499                if (list_empty(&se_nacl->acl_sess_list))
500                        se_nacl->nacl_sess = NULL;
501                else {
502                        se_nacl->nacl_sess = container_of(
503                                        se_nacl->acl_sess_list.prev,
504                                        struct se_session, sess_acl_list);
505                }
506                spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
507        }
508}
509EXPORT_SYMBOL(transport_deregister_session_configfs);
510
511void transport_free_session(struct se_session *se_sess)
512{
513        struct se_node_acl *se_nacl = se_sess->se_node_acl;
514
515        /*
516         * Drop the se_node_acl->nacl_kref obtained from within
517         * core_tpg_get_initiator_node_acl().
518         */
519        if (se_nacl) {
520                struct se_portal_group *se_tpg = se_nacl->se_tpg;
521                const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
522                unsigned long flags;
523
524                se_sess->se_node_acl = NULL;
525
526                /*
527                 * Also determine if we need to drop the extra ->cmd_kref if
528                 * it had been previously dynamically generated, and
529                 * the endpoint is not caching dynamic ACLs.
530                 */
531                mutex_lock(&se_tpg->acl_node_mutex);
532                if (se_nacl->dynamic_node_acl &&
533                    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
534                        spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
535                        if (list_empty(&se_nacl->acl_sess_list))
536                                se_nacl->dynamic_stop = true;
537                        spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
538
539                        if (se_nacl->dynamic_stop)
540                                list_del(&se_nacl->acl_list);
541                }
542                mutex_unlock(&se_tpg->acl_node_mutex);
543
544                if (se_nacl->dynamic_stop)
545                        target_put_nacl(se_nacl);
546
547                target_put_nacl(se_nacl);
548        }
549        if (se_sess->sess_cmd_map) {
550                percpu_ida_destroy(&se_sess->sess_tag_pool);
551                kvfree(se_sess->sess_cmd_map);
552        }
553        kmem_cache_free(se_sess_cache, se_sess);
554}
555EXPORT_SYMBOL(transport_free_session);
556
557void transport_deregister_session(struct se_session *se_sess)
558{
559        struct se_portal_group *se_tpg = se_sess->se_tpg;
560        unsigned long flags;
561
562        if (!se_tpg) {
563                transport_free_session(se_sess);
564                return;
565        }
566
567        spin_lock_irqsave(&se_tpg->session_lock, flags);
568        list_del(&se_sess->sess_list);
569        se_sess->se_tpg = NULL;
570        se_sess->fabric_sess_ptr = NULL;
571        spin_unlock_irqrestore(&se_tpg->session_lock, flags);
572
573        pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
574                se_tpg->se_tpg_tfo->get_fabric_name());
575        /*
576         * If last kref is dropping now for an explicit NodeACL, awake sleeping
577         * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
578         * removal context from within transport_free_session() code.
579         *
580         * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
581         * to release all remaining generate_node_acl=1 created ACL resources.
582         */
583
584        transport_free_session(se_sess);
585}
586EXPORT_SYMBOL(transport_deregister_session);
587
588static void target_remove_from_state_list(struct se_cmd *cmd)
589{
590        struct se_device *dev = cmd->se_dev;
591        unsigned long flags;
592
593        if (!dev)
594                return;
595
596        if (cmd->transport_state & CMD_T_BUSY)
597                return;
598
599        spin_lock_irqsave(&dev->execute_task_lock, flags);
600        if (cmd->state_active) {
601                list_del(&cmd->state_list);
602                cmd->state_active = false;
603        }
604        spin_unlock_irqrestore(&dev->execute_task_lock, flags);
605}
606
607static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
608                                    bool write_pending)
609{
610        unsigned long flags;
611
612        if (remove_from_lists) {
613                target_remove_from_state_list(cmd);
614
615                /*
616                 * Clear struct se_cmd->se_lun before the handoff to FE.
617                 */
618                cmd->se_lun = NULL;
619        }
620
621        spin_lock_irqsave(&cmd->t_state_lock, flags);
622        if (write_pending)
623                cmd->t_state = TRANSPORT_WRITE_PENDING;
624
625        /*
626         * Determine if frontend context caller is requesting the stopping of
627         * this command for frontend exceptions.
628         */
629        if (cmd->transport_state & CMD_T_STOP) {
630                pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
631                        __func__, __LINE__, cmd->tag);
632
633                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
634
635                complete_all(&cmd->t_transport_stop_comp);
636                return 1;
637        }
638
639        cmd->transport_state &= ~CMD_T_ACTIVE;
640        if (remove_from_lists) {
641                /*
642                 * Some fabric modules like tcm_loop can release
643                 * their internally allocated I/O reference now and
644                 * struct se_cmd now.
645                 *
646                 * Fabric modules are expected to return '1' here if the
647                 * se_cmd being passed is released at this point,
648                 * or zero if not being released.
649                 */
650                if (cmd->se_tfo->check_stop_free != NULL) {
651                        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
652                        return cmd->se_tfo->check_stop_free(cmd);
653                }
654        }
655
656        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
657        return 0;
658}
659
660static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
661{
662        return transport_cmd_check_stop(cmd, true, false);
663}
664
665static void transport_lun_remove_cmd(struct se_cmd *cmd)
666{
667        struct se_lun *lun = cmd->se_lun;
668
669        if (!lun)
670                return;
671
672        if (cmpxchg(&cmd->lun_ref_active, true, false))
673                percpu_ref_put(&lun->lun_ref);
674}
675
676void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
677{
678        bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
679
680        if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
681                transport_lun_remove_cmd(cmd);
682        /*
683         * Allow the fabric driver to unmap any resources before
684         * releasing the descriptor via TFO->release_cmd()
685         */
686        if (remove)
687                cmd->se_tfo->aborted_task(cmd);
688
689        if (transport_cmd_check_stop_to_fabric(cmd))
690                return;
691        if (remove && ack_kref)
692                transport_put_cmd(cmd);
693}
694
695static void target_complete_failure_work(struct work_struct *work)
696{
697        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
698
699        transport_generic_request_failure(cmd,
700                        TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
701}
702
703/*
704 * Used when asking transport to copy Sense Data from the underlying
705 * Linux/SCSI struct scsi_cmnd
706 */
707static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
708{
709        struct se_device *dev = cmd->se_dev;
710
711        WARN_ON(!cmd->se_lun);
712
713        if (!dev)
714                return NULL;
715
716        if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
717                return NULL;
718
719        cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
720
721        pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
722                dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
723        return cmd->sense_buffer;
724}
725
726void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
727{
728        struct se_device *dev = cmd->se_dev;
729        int success = scsi_status == GOOD;
730        unsigned long flags;
731
732        cmd->scsi_status = scsi_status;
733
734
735        spin_lock_irqsave(&cmd->t_state_lock, flags);
736        cmd->transport_state &= ~CMD_T_BUSY;
737
738        if (dev && dev->transport->transport_complete) {
739                dev->transport->transport_complete(cmd,
740                                cmd->t_data_sg,
741                                transport_get_sense_buffer(cmd));
742                if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
743                        success = 1;
744        }
745
746        /*
747         * Check for case where an explicit ABORT_TASK has been received
748         * and transport_wait_for_tasks() will be waiting for completion..
749         */
750        if (cmd->transport_state & CMD_T_ABORTED ||
751            cmd->transport_state & CMD_T_STOP) {
752                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
753                complete_all(&cmd->t_transport_stop_comp);
754                return;
755        } else if (!success) {
756                INIT_WORK(&cmd->work, target_complete_failure_work);
757        } else {
758                INIT_WORK(&cmd->work, target_complete_ok_work);
759        }
760
761        cmd->t_state = TRANSPORT_COMPLETE;
762        cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
763        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
764
765        if (cmd->se_cmd_flags & SCF_USE_CPUID)
766                queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
767        else
768                queue_work(target_completion_wq, &cmd->work);
769}
770EXPORT_SYMBOL(target_complete_cmd);
771
772void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
773{
774        if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
775                if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
776                        cmd->residual_count += cmd->data_length - length;
777                } else {
778                        cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
779                        cmd->residual_count = cmd->data_length - length;
780                }
781
782                cmd->data_length = length;
783        }
784
785        target_complete_cmd(cmd, scsi_status);
786}
787EXPORT_SYMBOL(target_complete_cmd_with_length);
788
789static void target_add_to_state_list(struct se_cmd *cmd)
790{
791        struct se_device *dev = cmd->se_dev;
792        unsigned long flags;
793
794        spin_lock_irqsave(&dev->execute_task_lock, flags);
795        if (!cmd->state_active) {
796                list_add_tail(&cmd->state_list, &dev->state_list);
797                cmd->state_active = true;
798        }
799        spin_unlock_irqrestore(&dev->execute_task_lock, flags);
800}
801
802/*
803 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
804 */
805static void transport_write_pending_qf(struct se_cmd *cmd);
806static void transport_complete_qf(struct se_cmd *cmd);
807
808void target_qf_do_work(struct work_struct *work)
809{
810        struct se_device *dev = container_of(work, struct se_device,
811                                        qf_work_queue);
812        LIST_HEAD(qf_cmd_list);
813        struct se_cmd *cmd, *cmd_tmp;
814
815        spin_lock_irq(&dev->qf_cmd_lock);
816        list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
817        spin_unlock_irq(&dev->qf_cmd_lock);
818
819        list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
820                list_del(&cmd->se_qf_node);
821                atomic_dec_mb(&dev->dev_qf_count);
822
823                pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
824                        " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
825                        (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
826                        (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
827                        : "UNKNOWN");
828
829                if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
830                        transport_write_pending_qf(cmd);
831                else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
832                        transport_complete_qf(cmd);
833        }
834}
835
836unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
837{
838        switch (cmd->data_direction) {
839        case DMA_NONE:
840                return "NONE";
841        case DMA_FROM_DEVICE:
842                return "READ";
843        case DMA_TO_DEVICE:
844                return "WRITE";
845        case DMA_BIDIRECTIONAL:
846                return "BIDI";
847        default:
848                break;
849        }
850
851        return "UNKNOWN";
852}
853
854void transport_dump_dev_state(
855        struct se_device *dev,
856        char *b,
857        int *bl)
858{
859        *bl += sprintf(b + *bl, "Status: ");
860        if (dev->export_count)
861                *bl += sprintf(b + *bl, "ACTIVATED");
862        else
863                *bl += sprintf(b + *bl, "DEACTIVATED");
864
865        *bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
866        *bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
867                dev->dev_attrib.block_size,
868                dev->dev_attrib.hw_max_sectors);
869        *bl += sprintf(b + *bl, "        ");
870}
871
872void transport_dump_vpd_proto_id(
873        struct t10_vpd *vpd,
874        unsigned char *p_buf,
875        int p_buf_len)
876{
877        unsigned char buf[VPD_TMP_BUF_SIZE];
878        int len;
879
880        memset(buf, 0, VPD_TMP_BUF_SIZE);
881        len = sprintf(buf, "T10 VPD Protocol Identifier: ");
882
883        switch (vpd->protocol_identifier) {
884        case 0x00:
885                sprintf(buf+len, "Fibre Channel\n");
886                break;
887        case 0x10:
888                sprintf(buf+len, "Parallel SCSI\n");
889                break;
890        case 0x20:
891                sprintf(buf+len, "SSA\n");
892                break;
893        case 0x30:
894                sprintf(buf+len, "IEEE 1394\n");
895                break;
896        case 0x40:
897                sprintf(buf+len, "SCSI Remote Direct Memory Access"
898                                " Protocol\n");
899                break;
900        case 0x50:
901                sprintf(buf+len, "Internet SCSI (iSCSI)\n");
902                break;
903        case 0x60:
904                sprintf(buf+len, "SAS Serial SCSI Protocol\n");
905                break;
906        case 0x70:
907                sprintf(buf+len, "Automation/Drive Interface Transport"
908                                " Protocol\n");
909                break;
910        case 0x80:
911                sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
912                break;
913        default:
914                sprintf(buf+len, "Unknown 0x%02x\n",
915                                vpd->protocol_identifier);
916                break;
917        }
918
919        if (p_buf)
920                strncpy(p_buf, buf, p_buf_len);
921        else
922                pr_debug("%s", buf);
923}
924
925void
926transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
927{
928        /*
929         * Check if the Protocol Identifier Valid (PIV) bit is set..
930         *
931         * from spc3r23.pdf section 7.5.1
932         */
933         if (page_83[1] & 0x80) {
934                vpd->protocol_identifier = (page_83[0] & 0xf0);
935                vpd->protocol_identifier_set = 1;
936                transport_dump_vpd_proto_id(vpd, NULL, 0);
937        }
938}
939EXPORT_SYMBOL(transport_set_vpd_proto_id);
940
941int transport_dump_vpd_assoc(
942        struct t10_vpd *vpd,
943        unsigned char *p_buf,
944        int p_buf_len)
945{
946        unsigned char buf[VPD_TMP_BUF_SIZE];
947        int ret = 0;
948        int len;
949
950        memset(buf, 0, VPD_TMP_BUF_SIZE);
951        len = sprintf(buf, "T10 VPD Identifier Association: ");
952
953        switch (vpd->association) {
954        case 0x00:
955                sprintf(buf+len, "addressed logical unit\n");
956                break;
957        case 0x10:
958                sprintf(buf+len, "target port\n");
959                break;
960        case 0x20:
961                sprintf(buf+len, "SCSI target device\n");
962                break;
963        default:
964                sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
965                ret = -EINVAL;
966                break;
967        }
968
969        if (p_buf)
970                strncpy(p_buf, buf, p_buf_len);
971        else
972                pr_debug("%s", buf);
973
974        return ret;
975}
976
977int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
978{
979        /*
980         * The VPD identification association..
981         *
982         * from spc3r23.pdf Section 7.6.3.1 Table 297
983         */
984        vpd->association = (page_83[1] & 0x30);
985        return transport_dump_vpd_assoc(vpd, NULL, 0);
986}
987EXPORT_SYMBOL(transport_set_vpd_assoc);
988
989int transport_dump_vpd_ident_type(
990        struct t10_vpd *vpd,
991        unsigned char *p_buf,
992        int p_buf_len)
993{
994        unsigned char buf[VPD_TMP_BUF_SIZE];
995        int ret = 0;
996        int len;
997
998        memset(buf, 0, VPD_TMP_BUF_SIZE);
999        len = sprintf(buf, "T10 VPD Identifier Type: ");
1000
1001        switch (vpd->device_identifier_type) {
1002        case 0x00:
1003                sprintf(buf+len, "Vendor specific\n");
1004                break;
1005        case 0x01:
1006                sprintf(buf+len, "T10 Vendor ID based\n");
1007                break;
1008        case 0x02:
1009                sprintf(buf+len, "EUI-64 based\n");
1010                break;
1011        case 0x03:
1012                sprintf(buf+len, "NAA\n");
1013                break;
1014        case 0x04:
1015                sprintf(buf+len, "Relative target port identifier\n");
1016                break;
1017        case 0x08:
1018                sprintf(buf+len, "SCSI name string\n");
1019                break;
1020        default:
1021                sprintf(buf+len, "Unsupported: 0x%02x\n",
1022                                vpd->device_identifier_type);
1023                ret = -EINVAL;
1024                break;
1025        }
1026
1027        if (p_buf) {
1028                if (p_buf_len < strlen(buf)+1)
1029                        return -EINVAL;
1030                strncpy(p_buf, buf, p_buf_len);
1031        } else {
1032                pr_debug("%s", buf);
1033        }
1034
1035        return ret;
1036}
1037
1038int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1039{
1040        /*
1041         * The VPD identifier type..
1042         *
1043         * from spc3r23.pdf Section 7.6.3.1 Table 298
1044         */
1045        vpd->device_identifier_type = (page_83[1] & 0x0f);
1046        return transport_dump_vpd_ident_type(vpd, NULL, 0);
1047}
1048EXPORT_SYMBOL(transport_set_vpd_ident_type);
1049
1050int transport_dump_vpd_ident(
1051        struct t10_vpd *vpd,
1052        unsigned char *p_buf,
1053        int p_buf_len)
1054{
1055        unsigned char buf[VPD_TMP_BUF_SIZE];
1056        int ret = 0;
1057
1058        memset(buf, 0, VPD_TMP_BUF_SIZE);
1059
1060        switch (vpd->device_identifier_code_set) {
1061        case 0x01: /* Binary */
1062                snprintf(buf, sizeof(buf),
1063                        "T10 VPD Binary Device Identifier: %s\n",
1064                        &vpd->device_identifier[0]);
1065                break;
1066        case 0x02: /* ASCII */
1067                snprintf(buf, sizeof(buf),
1068                        "T10 VPD ASCII Device Identifier: %s\n",
1069                        &vpd->device_identifier[0]);
1070                break;
1071        case 0x03: /* UTF-8 */
1072                snprintf(buf, sizeof(buf),
1073                        "T10 VPD UTF-8 Device Identifier: %s\n",
1074                        &vpd->device_identifier[0]);
1075                break;
1076        default:
1077                sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1078                        " 0x%02x", vpd->device_identifier_code_set);
1079                ret = -EINVAL;
1080                break;
1081        }
1082
1083        if (p_buf)
1084                strncpy(p_buf, buf, p_buf_len);
1085        else
1086                pr_debug("%s", buf);
1087
1088        return ret;
1089}
1090
1091int
1092transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1093{
1094        static const char hex_str[] = "0123456789abcdef";
1095        int j = 0, i = 4; /* offset to start of the identifier */
1096
1097        /*
1098         * The VPD Code Set (encoding)
1099         *
1100         * from spc3r23.pdf Section 7.6.3.1 Table 296
1101         */
1102        vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1103        switch (vpd->device_identifier_code_set) {
1104        case 0x01: /* Binary */
1105                vpd->device_identifier[j++] =
1106                                hex_str[vpd->device_identifier_type];
1107                while (i < (4 + page_83[3])) {
1108                        vpd->device_identifier[j++] =
1109                                hex_str[(page_83[i] & 0xf0) >> 4];
1110                        vpd->device_identifier[j++] =
1111                                hex_str[page_83[i] & 0x0f];
1112                        i++;
1113                }
1114                break;
1115        case 0x02: /* ASCII */
1116        case 0x03: /* UTF-8 */
1117                while (i < (4 + page_83[3]))
1118                        vpd->device_identifier[j++] = page_83[i++];
1119                break;
1120        default:
1121                break;
1122        }
1123
1124        return transport_dump_vpd_ident(vpd, NULL, 0);
1125}
1126EXPORT_SYMBOL(transport_set_vpd_ident);
1127
1128static sense_reason_t
1129target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
1130                               unsigned int size)
1131{
1132        u32 mtl;
1133
1134        if (!cmd->se_tfo->max_data_sg_nents)
1135                return TCM_NO_SENSE;
1136        /*
1137         * Check if fabric enforced maximum SGL entries per I/O descriptor
1138         * exceeds se_cmd->data_length.  If true, set SCF_UNDERFLOW_BIT +
1139         * residual_count and reduce original cmd->data_length to maximum
1140         * length based on single PAGE_SIZE entry scatter-lists.
1141         */
1142        mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
1143        if (cmd->data_length > mtl) {
1144                /*
1145                 * If an existing CDB overflow is present, calculate new residual
1146                 * based on CDB size minus fabric maximum transfer length.
1147                 *
1148                 * If an existing CDB underflow is present, calculate new residual
1149                 * based on original cmd->data_length minus fabric maximum transfer
1150                 * length.
1151                 *
1152                 * Otherwise, set the underflow residual based on cmd->data_length
1153                 * minus fabric maximum transfer length.
1154                 */
1155                if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1156                        cmd->residual_count = (size - mtl);
1157                } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1158                        u32 orig_dl = size + cmd->residual_count;
1159                        cmd->residual_count = (orig_dl - mtl);
1160                } else {
1161                        cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1162                        cmd->residual_count = (cmd->data_length - mtl);
1163                }
1164                cmd->data_length = mtl;
1165                /*
1166                 * Reset sbc_check_prot() calculated protection payload
1167                 * length based upon the new smaller MTL.
1168                 */
1169                if (cmd->prot_length) {
1170                        u32 sectors = (mtl / dev->dev_attrib.block_size);
1171                        cmd->prot_length = dev->prot_length * sectors;
1172                }
1173        }
1174        return TCM_NO_SENSE;
1175}
1176
1177sense_reason_t
1178target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1179{
1180        struct se_device *dev = cmd->se_dev;
1181
1182        if (cmd->unknown_data_length) {
1183                cmd->data_length = size;
1184        } else if (size != cmd->data_length) {
1185                pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
1186                        " %u does not match SCSI CDB Length: %u for SAM Opcode:"
1187                        " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
1188                                cmd->data_length, size, cmd->t_task_cdb[0]);
1189
1190                if (cmd->data_direction == DMA_TO_DEVICE &&
1191                    cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
1192                        pr_err("Rejecting underflow/overflow WRITE data\n");
1193                        return TCM_INVALID_CDB_FIELD;
1194                }
1195                /*
1196                 * Reject READ_* or WRITE_* with overflow/underflow for
1197                 * type SCF_SCSI_DATA_CDB.
1198                 */
1199                if (dev->dev_attrib.block_size != 512)  {
1200                        pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
1201                                " CDB on non 512-byte sector setup subsystem"
1202                                " plugin: %s\n", dev->transport->name);
1203                        /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
1204                        return TCM_INVALID_CDB_FIELD;
1205                }
1206                /*
1207                 * For the overflow case keep the existing fabric provided
1208                 * ->data_length.  Otherwise for the underflow case, reset
1209                 * ->data_length to the smaller SCSI expected data transfer
1210                 * length.
1211                 */
1212                if (size > cmd->data_length) {
1213                        cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
1214                        cmd->residual_count = (size - cmd->data_length);
1215                } else {
1216                        cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1217                        cmd->residual_count = (cmd->data_length - size);
1218                        cmd->data_length = size;
1219                }
1220        }
1221
1222        return target_check_max_data_sg_nents(cmd, dev, size);
1223
1224}
1225
1226/*
1227 * Used by fabric modules containing a local struct se_cmd within their
1228 * fabric dependent per I/O descriptor.
1229 *
1230 * Preserves the value of @cmd->tag.
1231 */
1232void transport_init_se_cmd(
1233        struct se_cmd *cmd,
1234        const struct target_core_fabric_ops *tfo,
1235        struct se_session *se_sess,
1236        u32 data_length,
1237        int data_direction,
1238        int task_attr,
1239        unsigned char *sense_buffer)
1240{
1241        INIT_LIST_HEAD(&cmd->se_delayed_node);
1242        INIT_LIST_HEAD(&cmd->se_qf_node);
1243        INIT_LIST_HEAD(&cmd->se_cmd_list);
1244        INIT_LIST_HEAD(&cmd->state_list);
1245        init_completion(&cmd->t_transport_stop_comp);
1246        init_completion(&cmd->cmd_wait_comp);
1247        spin_lock_init(&cmd->t_state_lock);
1248        kref_init(&cmd->cmd_kref);
1249        cmd->transport_state = CMD_T_DEV_ACTIVE;
1250
1251        cmd->se_tfo = tfo;
1252        cmd->se_sess = se_sess;
1253        cmd->data_length = data_length;
1254        cmd->data_direction = data_direction;
1255        cmd->sam_task_attr = task_attr;
1256        cmd->sense_buffer = sense_buffer;
1257
1258        cmd->state_active = false;
1259}
1260EXPORT_SYMBOL(transport_init_se_cmd);
1261
1262static sense_reason_t
1263transport_check_alloc_task_attr(struct se_cmd *cmd)
1264{
1265        struct se_device *dev = cmd->se_dev;
1266
1267        /*
1268         * Check if SAM Task Attribute emulation is enabled for this
1269         * struct se_device storage object
1270         */
1271        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1272                return 0;
1273
1274        if (cmd->sam_task_attr == TCM_ACA_TAG) {
1275                pr_debug("SAM Task Attribute ACA"
1276                        " emulation is not supported\n");
1277                return TCM_INVALID_CDB_FIELD;
1278        }
1279
1280        return 0;
1281}
1282
1283sense_reason_t
1284target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
1285{
1286        struct se_device *dev = cmd->se_dev;
1287        sense_reason_t ret;
1288
1289        /*
1290         * Ensure that the received CDB is less than the max (252 + 8) bytes
1291         * for VARIABLE_LENGTH_CMD
1292         */
1293        if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1294                pr_err("Received SCSI CDB with command_size: %d that"
1295                        " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1296                        scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1297                return TCM_INVALID_CDB_FIELD;
1298        }
1299        /*
1300         * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1301         * allocate the additional extended CDB buffer now..  Otherwise
1302         * setup the pointer from __t_task_cdb to t_task_cdb.
1303         */
1304        if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1305                cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1306                                                GFP_KERNEL);
1307                if (!cmd->t_task_cdb) {
1308                        pr_err("Unable to allocate cmd->t_task_cdb"
1309                                " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1310                                scsi_command_size(cdb),
1311                                (unsigned long)sizeof(cmd->__t_task_cdb));
1312                        return TCM_OUT_OF_RESOURCES;
1313                }
1314        } else
1315                cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1316        /*
1317         * Copy the original CDB into cmd->
1318         */
1319        memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1320
1321        trace_target_sequencer_start(cmd);
1322
1323        ret = dev->transport->parse_cdb(cmd);
1324        if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
1325                pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
1326                                    cmd->se_tfo->get_fabric_name(),
1327                                    cmd->se_sess->se_node_acl->initiatorname,
1328                                    cmd->t_task_cdb[0]);
1329        if (ret)
1330                return ret;
1331
1332        ret = transport_check_alloc_task_attr(cmd);
1333        if (ret)
1334                return ret;
1335
1336        cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1337        atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
1338        return 0;
1339}
1340EXPORT_SYMBOL(target_setup_cmd_from_cdb);
1341
1342/*
1343 * Used by fabric module frontends to queue tasks directly.
1344 * May only be used from process context.
1345 */
1346int transport_handle_cdb_direct(
1347        struct se_cmd *cmd)
1348{
1349        sense_reason_t ret;
1350
1351        if (!cmd->se_lun) {
1352                dump_stack();
1353                pr_err("cmd->se_lun is NULL\n");
1354                return -EINVAL;
1355        }
1356        if (in_interrupt()) {
1357                dump_stack();
1358                pr_err("transport_generic_handle_cdb cannot be called"
1359                                " from interrupt context\n");
1360                return -EINVAL;
1361        }
1362        /*
1363         * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
1364         * outstanding descriptors are handled correctly during shutdown via
1365         * transport_wait_for_tasks()
1366         *
1367         * Also, we don't take cmd->t_state_lock here as we only expect
1368         * this to be called for initial descriptor submission.
1369         */
1370        cmd->t_state = TRANSPORT_NEW_CMD;
1371        cmd->transport_state |= CMD_T_ACTIVE;
1372
1373        /*
1374         * transport_generic_new_cmd() is already handling QUEUE_FULL,
1375         * so follow TRANSPORT_NEW_CMD processing thread context usage
1376         * and call transport_generic_request_failure() if necessary..
1377         */
1378        ret = transport_generic_new_cmd(cmd);
1379        if (ret)
1380                transport_generic_request_failure(cmd, ret);
1381        return 0;
1382}
1383EXPORT_SYMBOL(transport_handle_cdb_direct);
1384
1385sense_reason_t
1386transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1387                u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
1388{
1389        if (!sgl || !sgl_count)
1390                return 0;
1391
1392        /*
1393         * Reject SCSI data overflow with map_mem_to_cmd() as incoming
1394         * scatterlists already have been set to follow what the fabric
1395         * passes for the original expected data transfer length.
1396         */
1397        if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1398                pr_warn("Rejecting SCSI DATA overflow for fabric using"
1399                        " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
1400                return TCM_INVALID_CDB_FIELD;
1401        }
1402
1403        cmd->t_data_sg = sgl;
1404        cmd->t_data_nents = sgl_count;
1405        cmd->t_bidi_data_sg = sgl_bidi;
1406        cmd->t_bidi_data_nents = sgl_bidi_count;
1407
1408        cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1409        return 0;
1410}
1411
1412/*
1413 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
1414 *                       se_cmd + use pre-allocated SGL memory.
1415 *
1416 * @se_cmd: command descriptor to submit
1417 * @se_sess: associated se_sess for endpoint
1418 * @cdb: pointer to SCSI CDB
1419 * @sense: pointer to SCSI sense buffer
1420 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1421 * @data_length: fabric expected data transfer length
1422 * @task_addr: SAM task attribute
1423 * @data_dir: DMA data direction
1424 * @flags: flags for command submission from target_sc_flags_tables
1425 * @sgl: struct scatterlist memory for unidirectional mapping
1426 * @sgl_count: scatterlist count for unidirectional mapping
1427 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
1428 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
1429 * @sgl_prot: struct scatterlist memory protection information
1430 * @sgl_prot_count: scatterlist count for protection information
1431 *
1432 * Task tags are supported if the caller has set @se_cmd->tag.
1433 *
1434 * Returns non zero to signal active I/O shutdown failure.  All other
1435 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1436 * but still return zero here.
1437 *
1438 * This may only be called from process context, and also currently
1439 * assumes internal allocation of fabric payload buffer by target-core.
1440 */
1441int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
1442                unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1443                u32 data_length, int task_attr, int data_dir, int flags,
1444                struct scatterlist *sgl, u32 sgl_count,
1445                struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
1446                struct scatterlist *sgl_prot, u32 sgl_prot_count)
1447{
1448        struct se_portal_group *se_tpg;
1449        sense_reason_t rc;
1450        int ret;
1451
1452        se_tpg = se_sess->se_tpg;
1453        BUG_ON(!se_tpg);
1454        BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1455        BUG_ON(in_interrupt());
1456        /*
1457         * Initialize se_cmd for target operation.  From this point
1458         * exceptions are handled by sending exception status via
1459         * target_core_fabric_ops->queue_status() callback
1460         */
1461        transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1462                                data_length, data_dir, task_attr, sense);
1463
1464        if (flags & TARGET_SCF_USE_CPUID)
1465                se_cmd->se_cmd_flags |= SCF_USE_CPUID;
1466        else
1467                se_cmd->cpuid = WORK_CPU_UNBOUND;
1468
1469        if (flags & TARGET_SCF_UNKNOWN_SIZE)
1470                se_cmd->unknown_data_length = 1;
1471        /*
1472         * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1473         * se_sess->sess_cmd_list.  A second kref_get here is necessary
1474         * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1475         * kref_put() to happen during fabric packet acknowledgement.
1476         */
1477        ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1478        if (ret)
1479                return ret;
1480        /*
1481         * Signal bidirectional data payloads to target-core
1482         */
1483        if (flags & TARGET_SCF_BIDI_OP)
1484                se_cmd->se_cmd_flags |= SCF_BIDI;
1485        /*
1486         * Locate se_lun pointer and attach it to struct se_cmd
1487         */
1488        rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
1489        if (rc) {
1490                transport_send_check_condition_and_sense(se_cmd, rc, 0);
1491                target_put_sess_cmd(se_cmd);
1492                return 0;
1493        }
1494
1495        rc = target_setup_cmd_from_cdb(se_cmd, cdb);
1496        if (rc != 0) {
1497                transport_generic_request_failure(se_cmd, rc);
1498                return 0;
1499        }
1500
1501        /*
1502         * Save pointers for SGLs containing protection information,
1503         * if present.
1504         */
1505        if (sgl_prot_count) {
1506                se_cmd->t_prot_sg = sgl_prot;
1507                se_cmd->t_prot_nents = sgl_prot_count;
1508                se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
1509        }
1510
1511        /*
1512         * When a non-zero sgl_count has been passed, perform SGL passthrough
1513         * mapping for pre-allocated fabric memory instead of having target
1514         * core perform an internal SGL allocation.
1515         */
1516        if (sgl_count != 0) {
1517                BUG_ON(!sgl);
1518
1519                /*
1520                 * A work-around for tcm_loop, as some userspace code via
1521                 * scsi-generic does not memset its associated read buffers,
1522                 * so go ahead and do that here for non-data CDBs.  Also
1523                 * note that this is currently guaranteed to be a single SGL
1524                 * for this case by target core in target_setup_cmd_from_cdb()
1525                 * -> transport_generic_cmd_sequencer().
1526                 */
1527                if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
1528                     se_cmd->data_direction == DMA_FROM_DEVICE) {
1529                        unsigned char *buf = NULL;
1530
1531                        if (sgl)
1532                                buf = kmap(sg_page(sgl)) + sgl->offset;
1533
1534                        if (buf) {
1535                                memset(buf, 0, sgl->length);
1536                                kunmap(sg_page(sgl));
1537                        }
1538                }
1539
1540                rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
1541                                sgl_bidi, sgl_bidi_count);
1542                if (rc != 0) {
1543                        transport_generic_request_failure(se_cmd, rc);
1544                        return 0;
1545                }
1546        }
1547
1548        /*
1549         * Check if we need to delay processing because of ALUA
1550         * Active/NonOptimized primary access state.
1551         */
1552        core_alua_check_nonop_delay(se_cmd);
1553
1554        transport_handle_cdb_direct(se_cmd);
1555        return 0;
1556}
1557EXPORT_SYMBOL(target_submit_cmd_map_sgls);
1558
1559/*
1560 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1561 *
1562 * @se_cmd: command descriptor to submit
1563 * @se_sess: associated se_sess for endpoint
1564 * @cdb: pointer to SCSI CDB
1565 * @sense: pointer to SCSI sense buffer
1566 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1567 * @data_length: fabric expected data transfer length
1568 * @task_attr: SAM task attribute
1569 * @data_dir: DMA data direction
1570 * @flags: flags for command submission from target_sc_flags_tables
1571 *
1572 * Task tags are supported if the caller has set @se_cmd->tag.
1573 *
1574 * Returns non-zero to signal active I/O shutdown failure.  All other
1575 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1576 * but still return zero here.
1577 *
1578 * This may only be called from process context, and also currently
1579 * assumes internal allocation of fabric payload buffer by target-core.
1580 *
1581 * It also assumes internal target core SGL memory allocation.
1582 */
1583int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1584                unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1585                u32 data_length, int task_attr, int data_dir, int flags)
1586{
1587        return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
1588                        unpacked_lun, data_length, task_attr, data_dir,
1589                        flags, NULL, 0, NULL, 0, NULL, 0);
1590}
1591EXPORT_SYMBOL(target_submit_cmd);
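/*
 * Illustrative example (not part of this file): a fabric driver that has
 * already set se_cmd->tag could dispatch a newly received read command as
 *
 *	target_submit_cmd(se_cmd, se_sess, cdb, sense_buf, unpacked_lun,
 *			  data_length, TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
 *			  TARGET_SCF_ACK_KREF);
 *
 * where the local variable names above are hypothetical.
 */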
1592
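/*
 * Work item used when transport_lookup_tmr_lun() fails in target_submit_tmr():
 * report TMR_LUN_DOES_NOT_EXIST back to the fabric from process context.
 */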
1593static void target_complete_tmr_failure(struct work_struct *work)
1594{
1595        struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1596
1597        se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1598        se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1599
1600        transport_cmd_check_stop_to_fabric(se_cmd);
1601}
1602
1603/**
1604 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1605 *                     for TMR CDBs
1606 *
1607 * @se_cmd: command descriptor to submit
1608 * @se_sess: associated se_sess for endpoint
1609 * @sense: pointer to SCSI sense buffer
1610 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1611 * @fabric_tmr_ptr: fabric context for TMR req
1612 * @tm_type: Type of TM request
1613 * @gfp: gfp type for caller
1614 * @tag: referenced task tag for TMR_ABORT_TASK
1615 * @flags: submit cmd flags
1616 *
1617 * Callable from all contexts.
1618 **/
1619
1620int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1621                unsigned char *sense, u64 unpacked_lun,
1622                void *fabric_tmr_ptr, unsigned char tm_type,
1623                gfp_t gfp, u64 tag, int flags)
1624{
1625        struct se_portal_group *se_tpg;
1626        int ret;
1627
1628        se_tpg = se_sess->se_tpg;
1629        BUG_ON(!se_tpg);
1630
1631        transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1632                              0, DMA_NONE, TCM_SIMPLE_TAG, sense);
1633        /*
1634         * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1635         * allocation failure.
1636         */
1637        ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1638        if (ret < 0)
1639                return -ENOMEM;
1640
1641        if (tm_type == TMR_ABORT_TASK)
1642                se_cmd->se_tmr_req->ref_task_tag = tag;
1643
1644        /* See target_submit_cmd for commentary */
1645        ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1646        if (ret) {
1647                core_tmr_release_req(se_cmd->se_tmr_req);
1648                return ret;
1649        }
1650
1651        ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
1652        if (ret) {
1653                /*
1654                 * For callback during failure handling, push this work off
1655                 * to process context with TMR_LUN_DOES_NOT_EXIST status.
1656                 */
1657                INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1658                schedule_work(&se_cmd->work);
1659                return 0;
1660        }
1661        transport_generic_handle_tmr(se_cmd);
1662        return 0;
1663}
1664EXPORT_SYMBOL(target_submit_tmr);
1665
1666/*
1667 * Handle SAM-esque emulation for generic transport request failures.
1668 */
1669void transport_generic_request_failure(struct se_cmd *cmd,
1670                sense_reason_t sense_reason)
1671{
1672        int ret = 0, post_ret = 0;
1673
1674        pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
1675                " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
1676        pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
1677                cmd->se_tfo->get_cmd_state(cmd),
1678                cmd->t_state, sense_reason);
1679        pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
1680                (cmd->transport_state & CMD_T_ACTIVE) != 0,
1681                (cmd->transport_state & CMD_T_STOP) != 0,
1682                (cmd->transport_state & CMD_T_SENT) != 0);
1683
1684        /*
1685         * For SAM Task Attribute emulation for failed struct se_cmd
1686         */
1687        transport_complete_task_attr(cmd);
1688        /*
1689         * Handle special case for COMPARE_AND_WRITE failure, where the
1690         * callback is expected to drop the per device ->caw_sem.
1691         */
1692        if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
1693             cmd->transport_complete_callback)
1694                cmd->transport_complete_callback(cmd, false, &post_ret);
1695
1696        switch (sense_reason) {
1697        case TCM_NON_EXISTENT_LUN:
1698        case TCM_UNSUPPORTED_SCSI_OPCODE:
1699        case TCM_INVALID_CDB_FIELD:
1700        case TCM_INVALID_PARAMETER_LIST:
1701        case TCM_PARAMETER_LIST_LENGTH_ERROR:
1702        case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1703        case TCM_UNKNOWN_MODE_PAGE:
1704        case TCM_WRITE_PROTECTED:
1705        case TCM_ADDRESS_OUT_OF_RANGE:
1706        case TCM_CHECK_CONDITION_ABORT_CMD:
1707        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1708        case TCM_CHECK_CONDITION_NOT_READY:
1709        case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
1710        case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
1711        case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
1712        case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
1713                break;
1714        case TCM_OUT_OF_RESOURCES:
1715                sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1716                break;
1717        case TCM_RESERVATION_CONFLICT:
1718                /*
1719                 * No SENSE Data payload for this case, set SCSI Status
1720                 * and queue the response to $FABRIC_MOD.
1721                 *
1722                 * Uses linux/include/scsi/scsi.h SAM status codes defs
1723                 */
1724                cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1725                /*
1726                 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1727                 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1728                 * CONFLICT STATUS.
1729                 *
1730                 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1731                 */
1732                if (cmd->se_sess &&
1733                    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
1734                        target_ua_allocate_lun(cmd->se_sess->se_node_acl,
1735                                               cmd->orig_fe_lun, 0x2C,
1736                                        ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1737                }
1738                trace_target_cmd_complete(cmd);
1739                ret = cmd->se_tfo->queue_status(cmd);
1740                if (ret == -EAGAIN || ret == -ENOMEM)
1741                        goto queue_full;
1742                goto check_stop;
1743        default:
1744                pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1745                        cmd->t_task_cdb[0], sense_reason);
1746                sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1747                break;
1748        }
1749
1750        ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1751        if (ret == -EAGAIN || ret == -ENOMEM)
1752                goto queue_full;
1753
1754check_stop:
1755        transport_lun_remove_cmd(cmd);
1756        transport_cmd_check_stop_to_fabric(cmd);
1757        return;
1758
1759queue_full:
1760        cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1761        transport_handle_queue_full(cmd, cmd->se_dev);
1762}
1763EXPORT_SYMBOL(transport_generic_request_failure);
1764
1765void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
1766{
1767        sense_reason_t ret;
1768
1769        if (!cmd->execute_cmd) {
1770                ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1771                goto err;
1772        }
1773        if (do_checks) {
1774                /*
1775                 * Check for an existing UNIT ATTENTION condition after
1776                 * target_handle_task_attr() has done SAM task attr
1777                 * checking, and has possibly already deferred execution
1778                 * out to target_restart_delayed_cmds() context.
1779                 */
1780                ret = target_scsi3_ua_check(cmd);
1781                if (ret)
1782                        goto err;
1783
1784                ret = target_alua_state_check(cmd);
1785                if (ret)
1786                        goto err;
1787
1788                ret = target_check_reservation(cmd);
1789                if (ret) {
1790                        cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1791                        goto err;
1792                }
1793        }
1794
1795        ret = cmd->execute_cmd(cmd);
1796        if (!ret)
1797                return;
1798err:
1799        spin_lock_irq(&cmd->t_state_lock);
1800        cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1801        spin_unlock_irq(&cmd->t_state_lock);
1802
1803        transport_generic_request_failure(cmd, ret);
1804}
1805
1806static int target_write_prot_action(struct se_cmd *cmd)
1807{
1808        u32 sectors;
1809        /*
1810         * Perform WRITE_INSERT of PI using software emulation when backend
1811         * device has PI enabled, if the transport has not already generated
1812         * PI using hardware WRITE_INSERT offload.
1813         */
1814        switch (cmd->prot_op) {
1815        case TARGET_PROT_DOUT_INSERT:
1816                if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
1817                        sbc_dif_generate(cmd);
1818                break;
1819        case TARGET_PROT_DOUT_STRIP:
1820                if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
1821                        break;
1822
1823                sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
1824                cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
1825                                             sectors, 0, cmd->t_prot_sg, 0);
1826                if (unlikely(cmd->pi_err)) {
1827                        spin_lock_irq(&cmd->t_state_lock);
1828                        cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1829                        spin_unlock_irq(&cmd->t_state_lock);
1830                        transport_generic_request_failure(cmd, cmd->pi_err);
1831                        return -1;
1832                }
1833                break;
1834        default:
1835                break;
1836        }
1837
1838        return 0;
1839}
1840
1841static bool target_handle_task_attr(struct se_cmd *cmd)
1842{
1843        struct se_device *dev = cmd->se_dev;
1844
1845        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1846                return false;
1847
1848        cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
1849
1850        /*
1851         * Check for a HEAD_OF_QUEUE task attribute, which allows the passed
1852         * struct se_cmd to be executed immediately, ahead of queued commands.
1853         */
1854        switch (cmd->sam_task_attr) {
1855        case TCM_HEAD_TAG:
1856                pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
1857                         cmd->t_task_cdb[0]);
1858                return false;
1859        case TCM_ORDERED_TAG:
1860                atomic_inc_mb(&dev->dev_ordered_sync);
1861
1862                pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
1863                         cmd->t_task_cdb[0]);
1864
1865                /*
1866                 * Execute an ORDERED command if no other older commands
1867                 * exist that need to be completed first.
1868                 */
1869                if (!atomic_read(&dev->simple_cmds))
1870                        return false;
1871                break;
1872        default:
1873                /*
1874                 * For SIMPLE and UNTAGGED Task Attribute commands
1875                 */
1876                atomic_inc_mb(&dev->simple_cmds);
1877                break;
1878        }
1879
1880        if (atomic_read(&dev->dev_ordered_sync) == 0)
1881                return false;
1882
1883        spin_lock(&dev->delayed_cmd_lock);
1884        list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
1885        spin_unlock(&dev->delayed_cmd_lock);
1886
1887        pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
1888                cmd->t_task_cdb[0], cmd->sam_task_attr);
1889        return true;
1890}
1891
1892static int __transport_check_aborted_status(struct se_cmd *, int);
1893
1894void target_execute_cmd(struct se_cmd *cmd)
1895{
1896        /*
1897         * Determine if frontend context caller is requesting the stopping of
1898         * this command for frontend exceptions.
1899         *
1900         * If the received CDB has already been aborted, stop processing it here.
1901         */
1902        spin_lock_irq(&cmd->t_state_lock);
1903        if (__transport_check_aborted_status(cmd, 1)) {
1904                spin_unlock_irq(&cmd->t_state_lock);
1905                return;
1906        }
1907        if (cmd->transport_state & CMD_T_STOP) {
1908                pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
1909                        __func__, __LINE__, cmd->tag);
1910
1911                spin_unlock_irq(&cmd->t_state_lock);
1912                complete_all(&cmd->t_transport_stop_comp);
1913                return;
1914        }
1915
1916        cmd->t_state = TRANSPORT_PROCESSING;
1917        cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
1918        spin_unlock_irq(&cmd->t_state_lock);
1919
1920        if (target_write_prot_action(cmd))
1921                return;
1922
1923        if (target_handle_task_attr(cmd)) {
1924                spin_lock_irq(&cmd->t_state_lock);
1925                cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
1926                spin_unlock_irq(&cmd->t_state_lock);
1927                return;
1928        }
1929
1930        __target_execute_cmd(cmd, true);
1931}
1932EXPORT_SYMBOL(target_execute_cmd);
1933
1934/*
1935 * Process all commands up to the last received ORDERED task attribute which
1936 * requires another blocking boundary
1937 */
1938static void target_restart_delayed_cmds(struct se_device *dev)
1939{
1940        for (;;) {
1941                struct se_cmd *cmd;
1942
1943                spin_lock(&dev->delayed_cmd_lock);
1944                if (list_empty(&dev->delayed_cmd_list)) {
1945                        spin_unlock(&dev->delayed_cmd_lock);
1946                        break;
1947                }
1948
1949                cmd = list_entry(dev->delayed_cmd_list.next,
1950                                 struct se_cmd, se_delayed_node);
1951                list_del(&cmd->se_delayed_node);
1952                spin_unlock(&dev->delayed_cmd_lock);
1953
1954                __target_execute_cmd(cmd, true);
1955
1956                if (cmd->sam_task_attr == TCM_ORDERED_TAG)
1957                        break;
1958        }
1959}
1960
1961/*
1962 * Called from I/O completion to determine which dormant/delayed
1963 * and ordered cmds need to have their tasks added to the execution queue.
1964 */
1965static void transport_complete_task_attr(struct se_cmd *cmd)
1966{
1967        struct se_device *dev = cmd->se_dev;
1968
1969        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1970                return;
1971
1972        if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
1973                goto restart;
1974
1975        if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
1976                atomic_dec_mb(&dev->simple_cmds);
1977                dev->dev_cur_ordered_id++;
1978                pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
1979                         dev->dev_cur_ordered_id);
1980        } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
1981                dev->dev_cur_ordered_id++;
1982                pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
1983                         dev->dev_cur_ordered_id);
1984        } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
1985                atomic_dec_mb(&dev->dev_ordered_sync);
1986
1987                dev->dev_cur_ordered_id++;
1988                pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
1989                         dev->dev_cur_ordered_id);
1990        }
1991restart:
1992        target_restart_delayed_cmds(dev);
1993}
1994
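/*
 * Retry delivering a response for a command that previously hit a QUEUE_FULL
 * condition; if the fabric still cannot accept it, re-queue via
 * transport_handle_queue_full().
 */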
1995static void transport_complete_qf(struct se_cmd *cmd)
1996{
1997        int ret = 0;
1998
1999        transport_complete_task_attr(cmd);
2000
2001        if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2002                trace_target_cmd_complete(cmd);
2003                ret = cmd->se_tfo->queue_status(cmd);
2004                goto out;
2005        }
2006
2007        switch (cmd->data_direction) {
2008        case DMA_FROM_DEVICE:
2009                if (cmd->scsi_status)
2010                        goto queue_status;
2011
2012                trace_target_cmd_complete(cmd);
2013                ret = cmd->se_tfo->queue_data_in(cmd);
2014                break;
2015        case DMA_TO_DEVICE:
2016                if (cmd->se_cmd_flags & SCF_BIDI) {
2017                        ret = cmd->se_tfo->queue_data_in(cmd);
2018                        break;
2019                }
2020                /* Fall through for DMA_TO_DEVICE */
2021        case DMA_NONE:
2022queue_status:
2023                trace_target_cmd_complete(cmd);
2024                ret = cmd->se_tfo->queue_status(cmd);
2025                break;
2026        default:
2027                break;
2028        }
2029
2030out:
2031        if (ret < 0) {
2032                transport_handle_queue_full(cmd, cmd->se_dev);
2033                return;
2034        }
2035        transport_lun_remove_cmd(cmd);
2036        transport_cmd_check_stop_to_fabric(cmd);
2037}
2038
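/*
 * Park the command on the device queue-full list and schedule qf_work_queue
 * to retry delivery to the fabric later.
 */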
2039static void transport_handle_queue_full(
2040        struct se_cmd *cmd,
2041        struct se_device *dev)
2042{
2043        spin_lock_irq(&dev->qf_cmd_lock);
2044        list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
2045        atomic_inc_mb(&dev->dev_qf_count);
2046        spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
2047
2048        schedule_work(&cmd->se_dev->qf_work_queue);
2049}
2050
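/*
 * Handle protection information for reads: verify PI in software for
 * TARGET_PROT_DIN_STRIP when the fabric offers no hardware offload, or
 * generate PI for TARGET_PROT_DIN_INSERT.  Returns true when a PI error
 * was detected and stored in cmd->pi_err.
 */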
2051static bool target_read_prot_action(struct se_cmd *cmd)
2052{
2053        switch (cmd->prot_op) {
2054        case TARGET_PROT_DIN_STRIP:
2055                if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
2056                        u32 sectors = cmd->data_length >>
2057                                  ilog2(cmd->se_dev->dev_attrib.block_size);
2058
2059                        cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2060                                                     sectors, 0, cmd->t_prot_sg,
2061                                                     0);
2062                        if (cmd->pi_err)
2063                                return true;
2064                }
2065                break;
2066        case TARGET_PROT_DIN_INSERT:
2067                if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2068                        break;
2069
2070                sbc_dif_generate(cmd);
2071                break;
2072        default:
2073                break;
2074        }
2075
2076        return false;
2077}
2078
2079static void target_complete_ok_work(struct work_struct *work)
2080{
2081        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2082        int ret;
2083
2084        /*
2085         * Check if we need to move delayed/dormant tasks from cmds on the
2086         * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
2087         * Attribute.
2088         */
2089        transport_complete_task_attr(cmd);
2090
2091        /*
2092         * Check to schedule QUEUE_FULL work, or execute an existing
2093         * cmd->transport_qf_callback()
2094         */
2095        if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2096                schedule_work(&cmd->se_dev->qf_work_queue);
2097
2098        /*
2099         * Check if we need to send a sense buffer from
2100         * the struct se_cmd in question.
2101         */
2102        if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2103                WARN_ON(!cmd->scsi_status);
2104                ret = transport_send_check_condition_and_sense(
2105                                        cmd, 0, 1);
2106                if (ret == -EAGAIN || ret == -ENOMEM)
2107                        goto queue_full;
2108
2109                transport_lun_remove_cmd(cmd);
2110                transport_cmd_check_stop_to_fabric(cmd);
2111                return;
2112        }
2113        /*
2114         * Check for a callback, used by, amongst other things,
2115         * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
2116         */
2117        if (cmd->transport_complete_callback) {
2118                sense_reason_t rc;
2119                bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2120                bool zero_dl = !(cmd->data_length);
2121                int post_ret = 0;
2122
2123                rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2124                if (!rc && !post_ret) {
2125                        if (caw && zero_dl)
2126                                goto queue_rsp;
2127
2128                        return;
2129                } else if (rc) {
2130                        ret = transport_send_check_condition_and_sense(cmd,
2131                                                rc, 0);
2132                        if (ret == -EAGAIN || ret == -ENOMEM)
2133                                goto queue_full;
2134
2135                        transport_lun_remove_cmd(cmd);
2136                        transport_cmd_check_stop_to_fabric(cmd);
2137                        return;
2138                }
2139        }
2140
2141queue_rsp:
2142        switch (cmd->data_direction) {
2143        case DMA_FROM_DEVICE:
2144                if (cmd->scsi_status)
2145                        goto queue_status;
2146
2147                atomic_long_add(cmd->data_length,
2148                                &cmd->se_lun->lun_stats.tx_data_octets);
2149                /*
2150                 * Perform READ_STRIP of PI using software emulation when
2151                 * the backend device has PI enabled, if the transport will not be
2152                 * performing hardware READ_STRIP offload.
2153                 */
2154                if (target_read_prot_action(cmd)) {
2155                        ret = transport_send_check_condition_and_sense(cmd,
2156                                                cmd->pi_err, 0);
2157                        if (ret == -EAGAIN || ret == -ENOMEM)
2158                                goto queue_full;
2159
2160                        transport_lun_remove_cmd(cmd);
2161                        transport_cmd_check_stop_to_fabric(cmd);
2162                        return;
2163                }
2164
2165                trace_target_cmd_complete(cmd);
2166                ret = cmd->se_tfo->queue_data_in(cmd);
2167                if (ret == -EAGAIN || ret == -ENOMEM)
2168                        goto queue_full;
2169                break;
2170        case DMA_TO_DEVICE:
2171                atomic_long_add(cmd->data_length,
2172                                &cmd->se_lun->lun_stats.rx_data_octets);
2173                /*
2174                 * Check if we need to send READ payload for BIDI-COMMAND
2175                 */
2176                if (cmd->se_cmd_flags & SCF_BIDI) {
2177                        atomic_long_add(cmd->data_length,
2178                                        &cmd->se_lun->lun_stats.tx_data_octets);
2179                        ret = cmd->se_tfo->queue_data_in(cmd);
2180                        if (ret == -EAGAIN || ret == -ENOMEM)
2181                                goto queue_full;
2182                        break;
2183                }
2184                /* Fall through for DMA_TO_DEVICE */
2185        case DMA_NONE:
2186queue_status:
2187                trace_target_cmd_complete(cmd);
2188                ret = cmd->se_tfo->queue_status(cmd);
2189                if (ret == -EAGAIN || ret == -ENOMEM)
2190                        goto queue_full;
2191                break;
2192        default:
2193                break;
2194        }
2195
2196        transport_lun_remove_cmd(cmd);
2197        transport_cmd_check_stop_to_fabric(cmd);
2198        return;
2199
2200queue_full:
2201        pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2202                " data_direction: %d\n", cmd, cmd->data_direction);
2203        cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
2204        transport_handle_queue_full(cmd, cmd->se_dev);
2205}
2206
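/* Free every page referenced by @sgl and then the scatterlist array itself. */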
2207void target_free_sgl(struct scatterlist *sgl, int nents)
2208{
2209        struct scatterlist *sg;
2210        int count;
2211
2212        for_each_sg(sgl, sg, nents, count)
2213                __free_page(sg_page(sg));
2214
2215        kfree(sgl);
2216}
2217EXPORT_SYMBOL(target_free_sgl);
2218
2219static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2220{
2221        /*
2222         * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
2223         * emulation, and free + reset pointers if necessary.
2224         */
2225        if (!cmd->t_data_sg_orig)
2226                return;
2227
2228        kfree(cmd->t_data_sg);
2229        cmd->t_data_sg = cmd->t_data_sg_orig;
2230        cmd->t_data_sg_orig = NULL;
2231        cmd->t_data_nents = cmd->t_data_nents_orig;
2232        cmd->t_data_nents_orig = 0;
2233}
2234
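/*
 * Release internally allocated data, bidi and protection SGLs for @cmd,
 * leaving fabric-owned SG_TO_MEM_NOALLOC mappings untouched.
 */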
2235static inline void transport_free_pages(struct se_cmd *cmd)
2236{
2237        if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2238                target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
2239                cmd->t_prot_sg = NULL;
2240                cmd->t_prot_nents = 0;
2241        }
2242
2243        if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
2244                /*
2245                 * Release special case READ buffer payload required for
2246                 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
2247                 */
2248                if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
2249                        target_free_sgl(cmd->t_bidi_data_sg,
2250                                           cmd->t_bidi_data_nents);
2251                        cmd->t_bidi_data_sg = NULL;
2252                        cmd->t_bidi_data_nents = 0;
2253                }
2254                transport_reset_sgl_orig(cmd);
2255                return;
2256        }
2257        transport_reset_sgl_orig(cmd);
2258
2259        target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
2260        cmd->t_data_sg = NULL;
2261        cmd->t_data_nents = 0;
2262
2263        target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
2264        cmd->t_bidi_data_sg = NULL;
2265        cmd->t_bidi_data_nents = 0;
2266}
2267
2268/**
2269 * transport_put_cmd - release a reference to a command
2270 * @cmd:       command to release
2271 *
2272 * This routine releases our reference to the command and frees it if possible.
2273 */
2274static int transport_put_cmd(struct se_cmd *cmd)
2275{
2276        BUG_ON(!cmd->se_tfo);
2277        /*
2278         * If this cmd has been setup with target_get_sess_cmd(), drop
2279         * the kref and call ->release_cmd() in kref callback.
2280         */
2281        return target_put_sess_cmd(cmd);
2282}
2283
2284void *transport_kmap_data_sg(struct se_cmd *cmd)
2285{
2286        struct scatterlist *sg = cmd->t_data_sg;
2287        struct page **pages;
2288        int i;
2289
2290        /*
2291         * We need to take into account a possible offset here for fabrics like
2292         * tcm_loop, which may use a contiguous buffer from the SCSI midlayer for
2293         * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
2294         */
2295        if (!cmd->t_data_nents)
2296                return NULL;
2297
2298        BUG_ON(!sg);
2299        if (cmd->t_data_nents == 1)
2300                return kmap(sg_page(sg)) + sg->offset;
2301
2302        /* >1 page. use vmap */
2303        pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
2304        if (!pages)
2305                return NULL;
2306
2307        /* convert sg[] to pages[] */
2308        for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2309                pages[i] = sg_page(sg);
2310        }
2311
2312        cmd->t_data_vmap = vmap(pages, cmd->t_data_nents,  VM_MAP, PAGE_KERNEL);
2313        kfree(pages);
2314        if (!cmd->t_data_vmap)
2315                return NULL;
2316
2317        return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2318}
2319EXPORT_SYMBOL(transport_kmap_data_sg);
2320
2321void transport_kunmap_data_sg(struct se_cmd *cmd)
2322{
2323        if (!cmd->t_data_nents) {
2324                return;
2325        } else if (cmd->t_data_nents == 1) {
2326                kunmap(sg_page(cmd->t_data_sg));
2327                return;
2328        }
2329
2330        vunmap(cmd->t_data_vmap);
2331        cmd->t_data_vmap = NULL;
2332}
2333EXPORT_SYMBOL(transport_kunmap_data_sg);
2334
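/*
 * Allocate a scatterlist backed by freshly allocated pages covering @length
 * bytes; one extra entry is reserved when @chainable is set so the caller
 * can chain additional SGLs.
 */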
2335int
2336target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
2337                 bool zero_page, bool chainable)
2338{
2339        struct scatterlist *sg;
2340        struct page *page;
2341        gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
2342        unsigned int nalloc, nent;
2343        int i = 0;
2344
2345        nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
2346        if (chainable)
2347                nalloc++;
2348        sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
2349        if (!sg)
2350                return -ENOMEM;
2351
2352        sg_init_table(sg, nalloc);
2353
2354        while (length) {
2355                u32 page_len = min_t(u32, length, PAGE_SIZE);
2356                page = alloc_page(GFP_KERNEL | zero_flag);
2357                if (!page)
2358                        goto out;
2359
2360                sg_set_page(&sg[i], page, page_len, 0);
2361                length -= page_len;
2362                i++;
2363        }
2364        *sgl = sg;
2365        *nents = nent;
2366        return 0;
2367
2368out:
2369        while (i > 0) {
2370                i--;
2371                __free_page(sg_page(&sg[i]));
2372        }
2373        kfree(sg);
2374        return -ENOMEM;
2375}
2376EXPORT_SYMBOL(target_alloc_sgl);
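/*
 * Illustrative example (not part of this file): allocating a zeroed 8k
 * buffer described by an SGL, with hypothetical local variable names:
 *
 *	struct scatterlist *sg;
 *	unsigned int nents;
 *
 *	if (target_alloc_sgl(&sg, &nents, 8192, true, false) < 0)
 *		return TCM_OUT_OF_RESOURCES;
 */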
2377
2378/*
2379 * Allocate any required resources to execute the command.  For writes we
2380 * might not have the payload yet, so notify the fabric via a call to
2381 * ->write_pending instead. Otherwise place it on the execution queue.
2382 */
2383sense_reason_t
2384transport_generic_new_cmd(struct se_cmd *cmd)
2385{
2386        int ret = 0;
2387        bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
2388
2389        if (cmd->prot_op != TARGET_PROT_NORMAL &&
2390            !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2391                ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
2392                                       cmd->prot_length, true, false);
2393                if (ret < 0)
2394                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2395        }
2396
2397        /*
2398         * Determine if the TCM fabric module has already allocated physical
2399         * memory, and is directly calling transport_generic_map_mem_to_cmd()
2400         * beforehand.
2401         */
2402        if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2403            cmd->data_length) {
2404
2405                if ((cmd->se_cmd_flags & SCF_BIDI) ||
2406                    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2407                        u32 bidi_length;
2408
2409                        if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2410                                bidi_length = cmd->t_task_nolb *
2411                                              cmd->se_dev->dev_attrib.block_size;
2412                        else
2413                                bidi_length = cmd->data_length;
2414
2415                        ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2416                                               &cmd->t_bidi_data_nents,
2417                                               bidi_length, zero_flag, false);
2418                        if (ret < 0)
2419                                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2420                }
2421
2422                ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2423                                       cmd->data_length, zero_flag, false);
2424                if (ret < 0)
2425                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2426        } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2427                    cmd->data_length) {
2428                /*
2429                 * Special case for COMPARE_AND_WRITE with fabrics
2430                 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
2431                 */
2432                u32 caw_length = cmd->t_task_nolb *
2433                                 cmd->se_dev->dev_attrib.block_size;
2434
2435                ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2436                                       &cmd->t_bidi_data_nents,
2437                                       caw_length, zero_flag, false);
2438                if (ret < 0)
2439                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2440        }
2441        /*
2442         * If this command is not a write we can execute it right here;
2443         * for write buffers we need to notify the fabric driver first
2444         * and let it call back once the write buffers are ready.
2445         */
2446        target_add_to_state_list(cmd);
2447        if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
2448                target_execute_cmd(cmd);
2449                return 0;
2450        }
2451        transport_cmd_check_stop(cmd, false, true);
2452
2453        ret = cmd->se_tfo->write_pending(cmd);
2454        if (ret == -EAGAIN || ret == -ENOMEM)
2455                goto queue_full;
2456
2457        /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
2458        WARN_ON(ret);
2459
2460        return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2461
2462queue_full:
2463        pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
2464        cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
2465        transport_handle_queue_full(cmd, cmd->se_dev);
2466        return 0;
2467}
2468EXPORT_SYMBOL(transport_generic_new_cmd);
2469
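/*
 * Retry ->write_pending() for a command that previously hit QUEUE_FULL;
 * re-queue the command if the fabric still cannot accept it.
 */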
2470static void transport_write_pending_qf(struct se_cmd *cmd)
2471{
2472        int ret;
2473
2474        ret = cmd->se_tfo->write_pending(cmd);
2475        if (ret == -EAGAIN || ret == -ENOMEM) {
2476                pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
2477                         cmd);
2478                transport_handle_queue_full(cmd, cmd->se_dev);
2479        }
2480}
2481
2482static bool
2483__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2484                           unsigned long *flags);
2485
2486static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2487{
2488        unsigned long flags;
2489
2490        spin_lock_irqsave(&cmd->t_state_lock, flags);
2491        __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2492        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2493}
2494
2495int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2496{
2497        int ret = 0;
2498        bool aborted = false, tas = false;
2499
2500        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
2501                if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2502                        target_wait_free_cmd(cmd, &aborted, &tas);
2503
2504                if (!aborted || tas)
2505                        ret = transport_put_cmd(cmd);
2506        } else {
2507                if (wait_for_tasks)
2508                        target_wait_free_cmd(cmd, &aborted, &tas);
2509                /*
2510                 * Handle WRITE failure case where transport_generic_new_cmd()
2511                 * has already added se_cmd to state_list, but fabric has
2512                 * failed command before I/O submission.
2513                 */
2514                if (cmd->state_active)
2515                        target_remove_from_state_list(cmd);
2516
2517                if (cmd->se_lun)
2518                        transport_lun_remove_cmd(cmd);
2519
2520                if (!aborted || tas)
2521                        ret = transport_put_cmd(cmd);
2522        }
2523        /*
2524         * If the task has been internally aborted due to TMR ABORT_TASK
2525         * or LUN_RESET, target_core_tmr.c is responsible for performing
2526         * the remaining calls to target_put_sess_cmd(), and not the
2527         * callers of this function.
2528         */
2529        if (aborted) {
2530                pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2531                wait_for_completion(&cmd->cmd_wait_comp);
2532                cmd->se_tfo->release_cmd(cmd);
2533                ret = 1;
2534        }
2535        return ret;
2536}
2537EXPORT_SYMBOL(transport_generic_free_cmd);
2538
2539/* target_get_sess_cmd - Add command to active ->sess_cmd_list
2540 * @se_cmd:     command descriptor to add
2541 * @ack_kref:   Signal that fabric will perform an ack target_put_sess_cmd()
2542 */
2543int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2544{
2545        struct se_session *se_sess = se_cmd->se_sess;
2546        unsigned long flags;
2547        int ret = 0;
2548
2549        /*
2550         * Add a second kref if the fabric caller is expecting to handle
2551         * fabric acknowledgement that requires two target_put_sess_cmd()
2552         * invocations before se_cmd descriptor release.
2553         */
2554        if (ack_kref) {
2555                if (!kref_get_unless_zero(&se_cmd->cmd_kref))
2556                        return -EINVAL;
2557
2558                se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2559        }
2560
2561        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2562        if (se_sess->sess_tearing_down) {
2563                ret = -ESHUTDOWN;
2564                goto out;
2565        }
2566        list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2567out:
2568        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2569
2570        if (ret && ack_kref)
2571                target_put_sess_cmd(se_cmd);
2572
2573        return ret;
2574}
2575EXPORT_SYMBOL(target_get_sess_cmd);
2576
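/*
 * Free per-command memory: data/bidi/protection pages, the TMR request if
 * present, and any CDB buffer allocated beyond the embedded __t_task_cdb.
 */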
2577static void target_free_cmd_mem(struct se_cmd *cmd)
2578{
2579        transport_free_pages(cmd);
2580
2581        if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2582                core_tmr_release_req(cmd->se_tmr_req);
2583        if (cmd->t_task_cdb != cmd->__t_task_cdb)
2584                kfree(cmd->t_task_cdb);
2585}
2586
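/*
 * kref release callback for se_cmd->cmd_kref: remove the command from the
 * session command list, free its memory, and either complete ->cmd_wait_comp
 * (fabric stop / wait paths) or return the descriptor via ->release_cmd().
 */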
2587static void target_release_cmd_kref(struct kref *kref)
2588{
2589        struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2590        struct se_session *se_sess = se_cmd->se_sess;
2591        unsigned long flags;
2592        bool fabric_stop;
2593
2594        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2595
2596        spin_lock(&se_cmd->t_state_lock);
2597        fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
2598                      (se_cmd->transport_state & CMD_T_ABORTED);
2599        spin_unlock(&se_cmd->t_state_lock);
2600
2601        if (se_cmd->cmd_wait_set || fabric_stop) {
2602                list_del_init(&se_cmd->se_cmd_list);
2603                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2604                target_free_cmd_mem(se_cmd);
2605                complete(&se_cmd->cmd_wait_comp);
2606                return;
2607        }
2608        list_del_init(&se_cmd->se_cmd_list);
2609        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2610
2611        target_free_cmd_mem(se_cmd);
2612        se_cmd->se_tfo->release_cmd(se_cmd);
2613}
2614
2615/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
2616 * @se_cmd:     command descriptor to drop
2617 */
2618int target_put_sess_cmd(struct se_cmd *se_cmd)
2619{
2620        struct se_session *se_sess = se_cmd->se_sess;
2621
2622        if (!se_sess) {
2623                target_free_cmd_mem(se_cmd);
2624                se_cmd->se_tfo->release_cmd(se_cmd);
2625                return 1;
2626        }
2627        return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
2628}
2629EXPORT_SYMBOL(target_put_sess_cmd);
2630
2631/* target_sess_cmd_list_set_waiting - Flag all commands in
2632 *         sess_cmd_list to complete cmd_wait_comp.  Set
2633 *         sess_tearing_down so no more commands are queued.
2634 * @se_sess:    session to flag
2635 */
2636void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2637{
2638        struct se_cmd *se_cmd, *tmp_cmd;
2639        unsigned long flags;
2640        int rc;
2641
2642        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2643        if (se_sess->sess_tearing_down) {
2644                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2645                return;
2646        }
2647        se_sess->sess_tearing_down = 1;
2648        list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
2649
2650        list_for_each_entry_safe(se_cmd, tmp_cmd,
2651                                 &se_sess->sess_wait_list, se_cmd_list) {
2652                rc = kref_get_unless_zero(&se_cmd->cmd_kref);
2653                if (rc) {
2654                        se_cmd->cmd_wait_set = 1;
2655                        spin_lock(&se_cmd->t_state_lock);
2656                        se_cmd->transport_state |= CMD_T_FABRIC_STOP;
2657                        spin_unlock(&se_cmd->t_state_lock);
2658                } else
2659                        list_del_init(&se_cmd->se_cmd_list);
2660        }
2661
2662        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2663}
2664EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2665
2666/* target_wait_for_sess_cmds - Wait for outstanding descriptors
2667 * @se_sess:    session to wait for active I/O
2668 */
2669void target_wait_for_sess_cmds(struct se_session *se_sess)
2670{
2671        struct se_cmd *se_cmd, *tmp_cmd;
2672        unsigned long flags;
2673        bool tas;
2674
2675        list_for_each_entry_safe(se_cmd, tmp_cmd,
2676                                &se_sess->sess_wait_list, se_cmd_list) {
2677                pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
2678                        " %d\n", se_cmd, se_cmd->t_state,
2679                        se_cmd->se_tfo->get_cmd_state(se_cmd));
2680
2681                spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2682                tas = (se_cmd->transport_state & CMD_T_TAS);
2683                spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2684
2685                if (!target_put_sess_cmd(se_cmd)) {
2686                        if (tas)
2687                                target_put_sess_cmd(se_cmd);
2688                }
2689
2690                wait_for_completion(&se_cmd->cmd_wait_comp);
2691                pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
2692                        " fabric state: %d\n", se_cmd, se_cmd->t_state,
2693                        se_cmd->se_tfo->get_cmd_state(se_cmd));
2694
2695                se_cmd->se_tfo->release_cmd(se_cmd);
2696        }
2697
2698        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2699        WARN_ON(!list_empty(&se_sess->sess_cmd_list));
2700        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2701
2702}
2703EXPORT_SYMBOL(target_wait_for_sess_cmds);
2704
2705static void target_lun_confirm(struct percpu_ref *ref)
2706{
2707        struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
2708
2709        complete(&lun->lun_ref_comp);
2710}
2711
2712void transport_clear_lun_ref(struct se_lun *lun)
2713{
2714        /*
2715         * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
2716         * the initial reference and schedule confirm kill to be
2717         * executed after one full RCU grace period has completed.
2718         */
2719        percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
2720        /*
2721         * The first completion waits for percpu_ref_switch_to_atomic_rcu()
2722         * to call target_lun_confirm after lun->lun_ref has been marked
2723         * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
2724         * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
2725         * fails for all new incoming I/O.
2726         */
2727        wait_for_completion(&lun->lun_ref_comp);
2728        /*
2729         * The second completion waits for percpu_ref_put_many() to
2730         * invoke ->release() after lun->lun_ref has switched to
2731         * atomic_t mode, and lun->lun_ref.count has reached zero.
2732         *
2733         * At this point all target-core lun->lun_ref references have
2734         * been dropped via transport_lun_remove_cmd(), and it's safe
2735         * to proceed with the remaining LUN shutdown.
2736         */
2737        wait_for_completion(&lun->lun_shutdown_comp);
2738}
2739
2740static bool
2741__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
2742                           bool *aborted, bool *tas, unsigned long *flags)
2743        __releases(&cmd->t_state_lock)
2744        __acquires(&cmd->t_state_lock)
2745{
2746
2747        assert_spin_locked(&cmd->t_state_lock);
2748        WARN_ON_ONCE(!irqs_disabled());
2749
2750        if (fabric_stop)
2751                cmd->transport_state |= CMD_T_FABRIC_STOP;
2752
2753        if (cmd->transport_state & CMD_T_ABORTED)
2754                *aborted = true;
2755
2756        if (cmd->transport_state & CMD_T_TAS)
2757                *tas = true;
2758
2759        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
2760            !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2761                return false;
2762
2763        if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
2764            !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2765                return false;
2766
2767        if (!(cmd->transport_state & CMD_T_ACTIVE))
2768                return false;
2769
2770        if (fabric_stop && *aborted)
2771                return false;
2772
2773        cmd->transport_state |= CMD_T_STOP;
2774
2775        pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
2776                 " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
2777                 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
2778
2779        spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
2780
2781        wait_for_completion(&cmd->t_transport_stop_comp);
2782
2783        spin_lock_irqsave(&cmd->t_state_lock, *flags);
2784        cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
2785
2786        pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
2787                 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
2788
2789        return true;
2790}
2791
2792/**
2793 * transport_wait_for_tasks - wait for completion to occur
2794 * @cmd:        command to wait
2795 *
2796 * Called from frontend fabric context to wait for storage engine
2797 * to pause and/or release frontend generated struct se_cmd.
2798 */
2799bool transport_wait_for_tasks(struct se_cmd *cmd)
2800{
2801        unsigned long flags;
2802        bool ret, aborted = false, tas = false;
2803
2804        spin_lock_irqsave(&cmd->t_state_lock, flags);
2805        ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
2806        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2807
2808        return ret;
2809}
2810EXPORT_SYMBOL(transport_wait_for_tasks);
2811
2812struct sense_info {
2813        u8 key;
2814        u8 asc;
2815        u8 ascq;
2816        bool add_sector_info;
2817};
2818
2819static const struct sense_info sense_info_table[] = {
2820        [TCM_NO_SENSE] = {
2821                .key = NOT_READY
2822        },
2823        [TCM_NON_EXISTENT_LUN] = {
2824                .key = ILLEGAL_REQUEST,
2825                .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
2826        },
2827        [TCM_UNSUPPORTED_SCSI_OPCODE] = {
2828                .key = ILLEGAL_REQUEST,
2829                .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
2830        },
2831        [TCM_SECTOR_COUNT_TOO_MANY] = {
2832                .key = ILLEGAL_REQUEST,
2833                .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
2834        },
2835        [TCM_UNKNOWN_MODE_PAGE] = {
2836                .key = ILLEGAL_REQUEST,
2837                .asc = 0x24, /* INVALID FIELD IN CDB */
2838        },
2839        [TCM_CHECK_CONDITION_ABORT_CMD] = {
2840                .key = ABORTED_COMMAND,
2841                .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
2842                .ascq = 0x03,
2843        },
2844        [TCM_INCORRECT_AMOUNT_OF_DATA] = {
2845                .key = ABORTED_COMMAND,
2846                .asc = 0x0c, /* WRITE ERROR */
2847                .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
2848        },
2849        [TCM_INVALID_CDB_FIELD] = {
2850                .key = ILLEGAL_REQUEST,
2851                .asc = 0x24, /* INVALID FIELD IN CDB */
2852        },
2853        [TCM_INVALID_PARAMETER_LIST] = {
2854                .key = ILLEGAL_REQUEST,
2855                .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
2856        },
2857        [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
2858                .key = ILLEGAL_REQUEST,
2859                .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
2860        },
2861        [TCM_UNEXPECTED_UNSOLICITED_DATA] = {
2862                .key = ILLEGAL_REQUEST,
2863                .asc = 0x0c, /* WRITE ERROR */
2864                .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
2865        },
2866        [TCM_SERVICE_CRC_ERROR] = {
2867                .key = ABORTED_COMMAND,
2868                .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
2869                .ascq = 0x05, /* N/A */
2870        },
2871        [TCM_SNACK_REJECTED] = {
2872                .key = ABORTED_COMMAND,
2873                .asc = 0x11, /* READ ERROR */
2874                .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
2875        },
2876        [TCM_WRITE_PROTECTED] = {
2877                .key = DATA_PROTECT,
2878                .asc = 0x27, /* WRITE PROTECTED */
2879        },
2880        [TCM_ADDRESS_OUT_OF_RANGE] = {
2881                .key = ILLEGAL_REQUEST,
2882                .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
2883        },
2884        [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
2885                .key = UNIT_ATTENTION,
2886        },
2887        [TCM_CHECK_CONDITION_NOT_READY] = {
2888                .key = NOT_READY,
2889        },
2890        [TCM_MISCOMPARE_VERIFY] = {
2891                .key = MISCOMPARE,
2892                .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
2893                .ascq = 0x00,
2894        },
2895        [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
2896                .key = ABORTED_COMMAND,
2897                .asc = 0x10,
2898                .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
2899                .add_sector_info = true,
2900        },
2901        [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
2902                .key = ABORTED_COMMAND,
2903                .asc = 0x10,
2904                .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
2905                .add_sector_info = true,
2906        },
2907        [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
2908                .key = ABORTED_COMMAND,
2909                .asc = 0x10,
2910                .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
2911                .add_sector_info = true,
2912        },
2913        [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
2914                .key = COPY_ABORTED,
2915                .asc = 0x0d,
2916                .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
2918        },
2919        [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
2920                /*
2921                 * Returning ILLEGAL REQUEST would cause immediate IO errors on
2922                 * Solaris initiators.  Returning NOT READY instead means the
2923                 * operations will be retried a finite number of times and we
2924                 * can survive intermittent errors.
2925                 */
2926                .key = NOT_READY,
2927                .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
2928        },
2929};
2930
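/*
 * Map a sense_reason_t onto the SCSI sense key/ASC/ASCQ returned to the
 * initiator.  Reasons without a populated entry in sense_info_table[]
 * fall back to TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE.  For unit
 * attentions the ASC/ASCQ pair comes from the UA list; for entries with
 * asc == 0 the values are taken from cmd->scsi_asc/scsi_ascq, which the
 * backend is expected to have filled in.
 *
 * As a rough illustration only (not code used here), a fixed-format
 * sense buffer built by scsi_build_sense_buffer() for
 * TCM_INVALID_CDB_FIELD ends up laid out as:
 *
 *   buffer[0]  = 0x70;              // current error, fixed format
 *   buffer[2]  = ILLEGAL_REQUEST;   // sense key 0x05
 *   buffer[7]  = 0x0a;              // additional sense length
 *   buffer[12] = 0x24;              // ASC: INVALID FIELD IN CDB
 *   buffer[13] = 0x00;              // ASCQ
 */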
2931static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
2932{
2933        const struct sense_info *si;
2934        u8 *buffer = cmd->sense_buffer;
2935        int r = (__force int)reason;
2936        u8 asc, ascq;
2937        bool desc_format = target_sense_desc_format(cmd->se_dev);
2938
2939        if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
2940                si = &sense_info_table[r];
2941        else
2942                si = &sense_info_table[(__force int)
2943                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
2944
2945        if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
2946                core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
2947                WARN_ON_ONCE(asc == 0);
2948        } else if (si->asc == 0) {
2949                WARN_ON_ONCE(cmd->scsi_asc == 0);
2950                asc = cmd->scsi_asc;
2951                ascq = cmd->scsi_ascq;
2952        } else {
2953                asc = si->asc;
2954                ascq = si->ascq;
2955        }
2956
2957        scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
2958        if (si->add_sector_info)
2959                return scsi_set_sense_information(buffer,
2960                                                  cmd->scsi_sense_length,
2961                                                  cmd->bad_sector);
2962
2963        return 0;
2964}
2965
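/*
 * Report CHECK CONDITION status plus sense data for 'reason' back to the
 * initiator via the fabric's ->queue_status() callback.  The
 * SCF_SENT_CHECK_CONDITION flag guarantees this happens at most once per
 * command; with 'from_transport' set the sense buffer is assumed to have
 * been filled in already (e.g. by a pass-through backend) and is not
 * rebuilt here.
 *
 * Minimal usage sketch (illustrative only, not part of this file): a
 * fabric driver that hits a setup failure can report it roughly like
 *
 *   sense_reason_t rc = target_setup_cmd_from_cdb(se_cmd, cdb);
 *   if (rc) {
 *           transport_send_check_condition_and_sense(se_cmd, rc, 0);
 *           return;
 *   }
 */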
2966int
2967transport_send_check_condition_and_sense(struct se_cmd *cmd,
2968                sense_reason_t reason, int from_transport)
2969{
2970        unsigned long flags;
2971
2972        spin_lock_irqsave(&cmd->t_state_lock, flags);
2973        if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2974                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2975                return 0;
2976        }
2977        cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
2978        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2979
2980        if (!from_transport) {
2981                int rc;
2982
2983                cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
2984                cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
2985                cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
2986                rc = translate_sense_reason(cmd, reason);
2987                if (rc)
2988                        return rc;
2989        }
2990
2991        trace_target_cmd_complete(cmd);
2992        return cmd->se_tfo->queue_status(cmd);
2993}
2994EXPORT_SYMBOL(transport_send_check_condition_and_sense);
2995
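/*
 * Delayed TASK_ABORTED status handling, called with t_state_lock held.
 * Returns 0 if the command is not aborted.  If it is aborted, a call
 * with 'send_status' clear just reports that fact; the first call with
 * 'send_status' set only arms SCF_SEND_DELAYED_TAS and returns 1 without
 * sending anything, while a later call with the flag already armed
 * (once the expected WRITE data has arrived) drops the lock, queues
 * SAM_STAT_TASK_ABORTED to the fabric and re-takes the lock.
 */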
2996static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
2997        __releases(&cmd->t_state_lock)
2998        __acquires(&cmd->t_state_lock)
2999{
3000        assert_spin_locked(&cmd->t_state_lock);
3001        WARN_ON_ONCE(!irqs_disabled());
3002
3003        if (!(cmd->transport_state & CMD_T_ABORTED))
3004                return 0;
3005        /*
3006         * If cmd has been aborted but either no status is to be sent or it has
3007         * already been sent, just return
3008         */
3009        if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
3010                if (send_status)
3011                        cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3012                return 1;
3013        }
3014
3015        pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
3016                " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
3017
3018        cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
3019        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3020        trace_target_cmd_complete(cmd);
3021
3022        spin_unlock_irq(&cmd->t_state_lock);
3023        cmd->se_tfo->queue_status(cmd);
3024        spin_lock_irq(&cmd->t_state_lock);
3025
3026        return 1;
3027}
3028
3029int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3030{
3031        int ret;
3032
3033        spin_lock_irq(&cmd->t_state_lock);
3034        ret = __transport_check_aborted_status(cmd, send_status);
3035        spin_unlock_irq(&cmd->t_state_lock);
3036
3037        return ret;
3038}
3039EXPORT_SYMBOL(transport_check_aborted_status);
3040
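/*
 * Send SAM_STAT_TASK_ABORTED for an aborted command, unless a CHECK
 * CONDITION has already gone out for it.  For WRITEs whose data is still
 * expected from the fabric, the status is not sent immediately; instead
 * SCF_SEND_DELAYED_TAS is armed and transport_check_aborted_status()
 * delivers it once the data phase has finished.
 */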
3041void transport_send_task_abort(struct se_cmd *cmd)
3042{
3043        unsigned long flags;
3044
3045        spin_lock_irqsave(&cmd->t_state_lock, flags);
3046        if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3047                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3048                return;
3049        }
3050        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3051
3052        /*
3053         * If there are still expected incoming fabric WRITEs, we wait
3054         * until they have completed before sending a TASK_ABORTED
3055         * response.  This response with TASK_ABORTED status will be
3056         * queued back to the fabric module by transport_check_aborted_status().
3057         */
3058        if (cmd->data_direction == DMA_TO_DEVICE) {
3059                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
3060                        spin_lock_irqsave(&cmd->t_state_lock, flags);
3061                        if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
3062                                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3063                                goto send_abort;
3064                        }
3065                        cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3066                        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3067                        return;
3068                }
3069        }
3070send_abort:
3071        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3072
3073        transport_lun_remove_cmd(cmd);
3074
3075        pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
3076                 cmd->t_task_cdb[0], cmd->tag);
3077
3078        trace_target_cmd_complete(cmd);
3079        cmd->se_tfo->queue_status(cmd);
3080}
3081
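/*
 * TMR worker, run from the per-device tmr_wq.  Only ABORT_TASK and
 * LUN_RESET are implemented; ABORT_TASK_SET/CLEAR_ACA/CLEAR_TASK_SET are
 * reported as unsupported and the warm/cold target resets are rejected.
 * A successful LUN_RESET also raises a BUS DEVICE RESET FUNCTION
 * OCCURRED unit attention.  If the TMR itself has been aborted, the
 * response is not queued back to the fabric.
 */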
3082static void target_tmr_work(struct work_struct *work)
3083{
3084        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3085        struct se_device *dev = cmd->se_dev;
3086        struct se_tmr_req *tmr = cmd->se_tmr_req;
3087        unsigned long flags;
3088        int ret;
3089
3090        spin_lock_irqsave(&cmd->t_state_lock, flags);
3091        if (cmd->transport_state & CMD_T_ABORTED) {
3092                tmr->response = TMR_FUNCTION_REJECTED;
3093                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3094                goto check_stop;
3095        }
3096        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3097
3098        switch (tmr->function) {
3099        case TMR_ABORT_TASK:
3100                core_tmr_abort_task(dev, tmr, cmd->se_sess);
3101                break;
3102        case TMR_ABORT_TASK_SET:
3103        case TMR_CLEAR_ACA:
3104        case TMR_CLEAR_TASK_SET:
3105                tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3106                break;
3107        case TMR_LUN_RESET:
3108                ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3109                tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
3110                                         TMR_FUNCTION_REJECTED;
3111                if (tmr->response == TMR_FUNCTION_COMPLETE) {
3112                        target_ua_allocate_lun(cmd->se_sess->se_node_acl,
3113                                               cmd->orig_fe_lun, 0x29,
3114                                               ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
3115                }
3116                break;
3117        case TMR_TARGET_WARM_RESET:
3118                tmr->response = TMR_FUNCTION_REJECTED;
3119                break;
3120        case TMR_TARGET_COLD_RESET:
3121                tmr->response = TMR_FUNCTION_REJECTED;
3122                break;
3123        default:
3124                pr_err("Unknown TMR function: 0x%02x.\n",
3125                                tmr->function);
3126                tmr->response = TMR_FUNCTION_REJECTED;
3127                break;
3128        }
3129
3130        spin_lock_irqsave(&cmd->t_state_lock, flags);
3131        if (cmd->transport_state & CMD_T_ABORTED) {
3132                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3133                goto check_stop;
3134        }
3135        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3136
3137        cmd->se_tfo->queue_tm_rsp(cmd);
3138
3139check_stop:
3140        transport_cmd_check_stop_to_fabric(cmd);
3141}
3142
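/*
 * Entry point for task management requests; fabric drivers normally get
 * here through target_submit_tmr().  Unless the command has already been
 * aborted, the actual TMR processing is deferred to target_tmr_work() on
 * the device's tmr_wq so the reset/abort work does not run in the
 * fabric caller's context.
 */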
3143int transport_generic_handle_tmr(
3144        struct se_cmd *cmd)
3145{
3146        unsigned long flags;
3147        bool aborted = false;
3148
3149        spin_lock_irqsave(&cmd->t_state_lock, flags);
3150        if (cmd->transport_state & CMD_T_ABORTED) {
3151                aborted = true;
3152        } else {
3153                cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3154                cmd->transport_state |= CMD_T_ACTIVE;
3155        }
3156        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3157
3158        if (aborted) {
3159                pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
3160                        " ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
3161                        cmd->se_tmr_req->ref_task_tag, cmd->tag);
3162                transport_cmd_check_stop_to_fabric(cmd);
3163                return 0;
3164        }
3165
3166        INIT_WORK(&cmd->work, target_tmr_work);
3167        queue_work(cmd->se_dev->tmr_wq, &cmd->work);
3168        return 0;
3169}
3170EXPORT_SYMBOL(transport_generic_handle_tmr);
3171
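/*
 * Write cache / FUA helpers.  A device is treated as having a volatile
 * write cache (WCE) if the backend supplies a ->get_write_cache()
 * callback that returns true, or failing that if the emulate_write_cache
 * attribute is set; FUA writes are only honoured when a write cache is
 * present and emulate_fua_write is enabled.
 *
 * Illustrative sketch only (the name is made up): a backend whose
 * storage always runs write-back could report the cache via its
 * struct target_backend_ops as
 *
 *   static bool my_backend_get_write_cache(struct se_device *dev)
 *   {
 *           return true;
 *   }
 */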
3172bool
3173target_check_wce(struct se_device *dev)
3174{
3175        bool wce = false;
3176
3177        if (dev->transport->get_write_cache)
3178                wce = dev->transport->get_write_cache(dev);
3179        else if (dev->dev_attrib.emulate_write_cache > 0)
3180                wce = true;
3181
3182        return wce;
3183}
3184
3185bool
3186target_check_fua(struct se_device *dev)
3187{
3188        return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
3189}