/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
{
	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);

	wake_up(&sess->cmd_list_wq);
}

/**
 * transport_init_session - initialize a session object
 * @se_sess: Session object pointer.
 *
 * The caller must have zero-initialized @se_sess before calling this function.
 */
int transport_init_session(struct se_session *se_sess)
{
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	init_waitqueue_head(&se_sess->cmd_list_wq);
	return percpu_ref_init(&se_sess->cmd_count,
			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
}
EXPORT_SYMBOL(transport_init_session);
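
/*
 * Illustrative sketch (not part of this file): a fabric driver that embeds
 * struct se_session in its own connection object would zero-allocate the
 * containing structure first, as transport_init_session() above requires.
 * The "my_fabric_conn" type is hypothetical.
 *
 *	struct my_fabric_conn {
 *		struct se_session se_sess;
 *		...
 *	};
 *
 *	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
 *	if (!conn)
 *		return -ENOMEM;
 *	ret = transport_init_session(&conn->se_sess);
 *	if (ret) {
 *		kfree(conn);
 *		return ret;
 *	}
 */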

/**
 * transport_alloc_session - allocate a session object and initialize it
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int ret;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	ret = transport_init_session(se_sess);
	if (ret < 0) {
		kmem_cache_free(se_sess_cache, se_sess);
		return ERR_PTR(ret);
	}
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_alloc_session);

/**
 * transport_alloc_session_tags - allocate target driver private data
 * @se_sess:  Session pointer.
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 */
int transport_alloc_session_tags(struct se_session *se_sess,
				 unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
					 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!se_sess->sess_cmd_map) {
		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
		return -ENOMEM;
	}

	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
			false, GFP_KERNEL, NUMA_NO_NODE);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool, tag_num: %u\n",
		       tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);
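
/*
 * Illustrative sketch (not part of this file): a fabric driver with a
 * driver-private per-command structure sizes the tag pool from its queue
 * depth and per-command footprint; a tag taken from sess_tag_pool then
 * indexes into sess_cmd_map. "my_fabric_cmd" is a hypothetical type.
 *
 *	rc = transport_alloc_session_tags(se_sess, queue_depth,
 *					  sizeof(struct my_fabric_cmd));
 *	...
 *	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
 *	cmd = &((struct my_fabric_cmd *)se_sess->sess_cmd_map)[tag];
 */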

/**
 * transport_init_session_tags - allocate a session and target driver private data
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
static struct se_session *
transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
			    enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num: %u, but zero tag_size\n",
		       tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size: %u, but zero tag_num\n",
		       tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_alloc_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}

/*
 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];
	unsigned long flags;

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		 se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

struct se_session *
target_setup_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{
	struct se_session *sess;

	/*
	 * If the fabric driver is using percpu-ida based pre allocation
	 * of I/O descriptor tags, go ahead and perform that setup now..
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_alloc_session(prot_op);

	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
	 */
	if (callback != NULL) {
		int rc = callback(tpg, sess, private);

		if (rc) {
			transport_free_session(sess);
			return ERR_PTR(rc);
		}
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;
}
EXPORT_SYMBOL(target_setup_session);
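
/*
 * Illustrative sketch (not part of this file): typical fabric-driver usage
 * of target_setup_session() from a login path, with hypothetical "my_fabric"
 * names. The optional callback runs after ACL lookup but before the session
 * is registered with the TPG.
 *
 *	sess = target_setup_session(&tpg->se_tpg, 128,
 *				    sizeof(struct my_fabric_cmd),
 *				    TARGET_PROT_NORMAL, initiator_name,
 *				    conn, my_fabric_session_cb);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */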

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NULL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);
	struct se_portal_group *se_tpg = nacl->se_tpg;

	if (!nacl->dynamic_stop) {
		complete(&nacl->acl_free_comp);
		return;
	}

	mutex_lock(&se_tpg->acl_node_mutex);
	list_del_init(&nacl->acl_list);
	mutex_unlock(&se_tpg->acl_node_mutex);

	core_tpg_wait_for_nacl_pr_ref(nacl);
	core_free_device_list_for_node(nacl, se_tpg);
	kfree(nacl);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (!list_empty(&se_sess->sess_acl_list))
			list_del_init(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;

	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
		struct se_portal_group *se_tpg = se_nacl->se_tpg;
		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
		unsigned long flags;

		se_sess->se_node_acl = NULL;

		/*
		 * Also determine if we need to drop the extra ->cmd_kref if
		 * it had been previously dynamically generated, and
		 * the endpoint is not caching dynamic ACLs.
		 */
		mutex_lock(&se_tpg->acl_node_mutex);
		if (se_nacl->dynamic_node_acl &&
		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
			if (list_empty(&se_nacl->acl_sess_list))
				se_nacl->dynamic_stop = true;
			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);

			if (se_nacl->dynamic_stop)
				list_del_init(&se_nacl->acl_list);
		}
		mutex_unlock(&se_tpg->acl_node_mutex);

		if (se_nacl->dynamic_stop)
			target_put_nacl(se_nacl);

		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		sbitmap_queue_free(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	percpu_ref_exit(&se_sess->cmd_count);
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		 se_tpg->se_tpg_tfo->fabric_name);
	/*
	 * If the last kref is dropping now for an explicit NodeACL, wake up
	 * the ->acl_free_comp caller sleeping in the configfs
	 * se_node_acl->acl_group removal context, from within
	 * transport_free_session() code.
	 *
	 * For dynamic ACLs, target_put_nacl() uses target_complete_nacl()
	 * to release all remaining generate_node_acl=1 created ACL resources.
	 */

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

void target_remove_session(struct se_session *se_sess)
{
	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
}
EXPORT_SYMBOL(target_remove_session);
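
/*
 * Illustrative note (not part of this file): target_remove_session() is the
 * teardown counterpart of target_setup_session() above; a fabric driver's
 * logout/close path would simply call:
 *
 *	target_remove_session(sess);
 */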

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * This function is called by the target core after the target core has
 * finished processing a SCSI command or SCSI TMF. Both the regular command
 * processing code and the code for aborting commands can call this
 * function. CMD_T_STOP is set if and only if another thread is waiting
 * inside transport_wait_for_tasks() for t_transport_stop_comp.
 */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	unsigned long flags;

	target_remove_from_state_list(cmd);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			 __func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}
	cmd->transport_state &= ~CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Some fabric modules like tcm_loop can release their internally
	 * allocated I/O reference and struct se_cmd now.
	 *
	 * Fabric modules are expected to return '1' here if the se_cmd being
	 * passed is released at this point, or zero if not being released.
	 */
	return cmd->se_tfo->check_stop_free(cmd);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
{
	unsigned char *cmd_sense_buf;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd_sense_buf = transport_get_sense_buffer(cmd);
	if (!cmd_sense_buf) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
	memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
EXPORT_SYMBOL(transport_copy_sense_to_cmd);

static void target_handle_abort(struct se_cmd *cmd)
{
	bool tas = cmd->transport_state & CMD_T_TAS;
	bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
	int ret;

	pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);

	if (tas) {
		if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
				 cmd->t_task_cdb[0], cmd->tag);
			trace_target_cmd_complete(cmd);
			ret = cmd->se_tfo->queue_status(cmd);
			if (ret) {
				transport_handle_queue_full(cmd, cmd->se_dev,
							    ret, false);
				return;
			}
		} else {
			cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
			cmd->se_tfo->queue_tm_rsp(cmd);
		}
	} else {
		/*
		 * Allow the fabric driver to unmap any resources before
		 * releasing the descriptor via TFO->release_cmd().
		 */
		cmd->se_tfo->aborted_task(cmd);
		if (ack_kref)
			WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
		/*
		 * To do: establish a unit attention condition on the I_T
		 * nexus associated with cmd. See also the paragraph "Aborting
		 * commands" in SAM.
		 */
	}

	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);

	transport_cmd_check_stop_to_fabric(cmd);
}

static void target_abort_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	target_handle_abort(cmd);
}

static bool target_cmd_interrupted(struct se_cmd *cmd)
{
	int post_ret;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		INIT_WORK(&cmd->work, target_abort_work);
		queue_work(target_completion_wq, &cmd->work);
		return true;
	} else if (cmd->transport_state & CMD_T_STOP) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		complete_all(&cmd->t_transport_stop_comp);
		return true;
	}

	return false;
}

/* May be called from interrupt context so must not sleep. */
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	int success;
	unsigned long flags;

	if (target_cmd_interrupted(cmd))
		return;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	switch (cmd->scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
		else
			success = 0;
		break;
	default:
		success = 1;
		break;
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
		  target_complete_failure_work);
	if (cmd->se_cmd_flags & SCF_USE_CPUID)
		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
	else
		queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if ((scsi_status == SAM_STAT_GOOD ||
	     cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
	    length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);
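
/*
 * Worked example (illustrative): for a 512-byte allocation length where the
 * backend returned only 36 valid bytes, a backend would call
 * target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 36); the code above
 * then sets SCF_UNDERFLOW_BIT, residual_count = 512 - 36 = 476, and trims
 * cmd->data_length to 36 before completing the command.
 */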

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			 " context: %s\n", cmd->se_tfo->fabric_name, cmd,
			 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			 : "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	if (dev->export_count)
		*bl += sprintf(b + *bl, "ACTIVATED");
	else
		*bl += sprintf(b + *bl, "DEACTIVATED");

	*bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
		dev->dev_attrib.block_size,
		dev->dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
			vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
			vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			"T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			"T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			"T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
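
/*
 * Worked example (illustrative): given a designation descriptor whose first
 * four bytes are 0x61 0x93 0x00 0x08 followed by an 8-byte NAA value, the
 * helpers above decode: code set 0x01 (binary), PIV set with protocol
 * identifier 0x60 (SAS), association 0x10 (target port), and identifier
 * type 0x03 (NAA). transport_set_vpd_ident() then stores the type nibble
 * plus a hex encoding of the 8 identifier bytes starting at offset 4 into
 * vpd->device_identifier.
 */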
1209
8f9b5654
NB
1210static sense_reason_t
1211target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
1212 unsigned int size)
1213{
1214 u32 mtl;
1215
1216 if (!cmd->se_tfo->max_data_sg_nents)
1217 return TCM_NO_SENSE;
1218 /*
1219 * Check if fabric enforced maximum SGL entries per I/O descriptor
1220 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT +
1221 * residual_count and reduce original cmd->data_length to maximum
1222 * length based on single PAGE_SIZE entry scatter-lists.
1223 */
1224 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
1225 if (cmd->data_length > mtl) {
1226 /*
1227 * If an existing CDB overflow is present, calculate new residual
1228 * based on CDB size minus fabric maximum transfer length.
1229 *
1230 * If an existing CDB underflow is present, calculate new residual
1231 * based on original cmd->data_length minus fabric maximum transfer
1232 * length.
1233 *
1234 * Otherwise, set the underflow residual based on cmd->data_length
1235 * minus fabric maximum transfer length.
1236 */
1237 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1238 cmd->residual_count = (size - mtl);
1239 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1240 u32 orig_dl = size + cmd->residual_count;
1241 cmd->residual_count = (orig_dl - mtl);
1242 } else {
1243 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1244 cmd->residual_count = (cmd->data_length - mtl);
1245 }
1246 cmd->data_length = mtl;
1247 /*
1248 * Reset sbc_check_prot() calculated protection payload
1249 * length based upon the new smaller MTL.
1250 */
1251 if (cmd->prot_length) {
1252 u32 sectors = (mtl / dev->dev_attrib.block_size);
1253 cmd->prot_length = dev->prot_length * sectors;
1254 }
1255 }
1256 return TCM_NO_SENSE;
1257}
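
/*
 * Worked example (illustrative): with 4K pages and a fabric reporting
 * max_data_sg_nents = 32, mtl = 32 * 4096 = 128K. A 256K READ with no prior
 * OVERFLOW/UNDERFLOW state is trimmed above to data_length = 128K with
 * SCF_UNDERFLOW_BIT set and residual_count = 256K - 128K = 128K, and any
 * protection payload length is rescaled to the new number of sectors.
 */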

sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->fabric_name,
			cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
				pr_err_ratelimited("Rejecting underflow/overflow"
						   " for WRITE data CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
			/*
			 * Some fabric drivers like iscsi-target still expect to
			 * always reject overflow writes. Reject this case until
			 * full fabric driver level support for overflow writes
			 * is introduced tree-wide.
			 */
			if (size > cmd->data_length) {
				pr_err_ratelimited("Rejecting overflow for"
						   " WRITE control CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->dev_attrib.block_size != 512) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
			       " CDB on non 512-byte sector setup subsystem"
			       " plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length. Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);
}

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	cmd->free_compl = NULL;
	cmd->abrt_compl = NULL;
	spin_lock_init(&cmd->t_state_lock);
	INIT_WORK(&cmd->work, NULL);
	kref_init(&cmd->cmd_kref);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
		       " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
		       scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.. Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
			       " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
			       scsi_command_size(cdb),
			       (unsigned long)sizeof(cmd->__t_task_cdb));
			return TCM_OUT_OF_RESOURCES;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->fabric_name,
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
		       " from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
1480
c5ff8d6b 1481sense_reason_t
de103c93
CH
1482transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1483 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
1484{
1485 if (!sgl || !sgl_count)
1486 return 0;
1487
1488 /*
1489 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
1490 * scatterlists already have been set to follow what the fabric
1491 * passes for the original expected data transfer length.
1492 */
1493 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1494 pr_warn("Rejecting SCSI DATA overflow for fabric using"
1495 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
1496 return TCM_INVALID_CDB_FIELD;
1497 }
1498
1499 cmd->t_data_sg = sgl;
1500 cmd->t_data_nents = sgl_count;
b32bd0a8
IT
1501 cmd->t_bidi_data_sg = sgl_bidi;
1502 cmd->t_bidi_data_nents = sgl_bidi_count;
de103c93 1503
de103c93
CH
1504 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1505 return 0;
1506}
1507
9ad97b8b 1508/**
a026757f
NB
1509 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
1510 * se_cmd + use pre-allocated SGL memory.
a6360785
NB
1511 *
1512 * @se_cmd: command descriptor to submit
1513 * @se_sess: associated se_sess for endpoint
1514 * @cdb: pointer to SCSI CDB
1515 * @sense: pointer to SCSI sense buffer
1516 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1517 * @data_length: fabric expected data transfer length
9ad97b8b 1518 * @task_attr: SAM task attribute
a6360785
NB
1519 * @data_dir: DMA data direction
1520 * @flags: flags for command submission from target_sc_flags_tables
a026757f
NB
1521 * @sgl: struct scatterlist memory for unidirectional mapping
1522 * @sgl_count: scatterlist count for unidirectional mapping
1523 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
1524 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
def2b339
NB
1525 * @sgl_prot: struct scatterlist memory protection information
1526 * @sgl_prot_count: scatterlist count for protection information
a6360785 1527 *
649ee054
BVA
1528 * Task tags are supported if the caller has set @se_cmd->tag.
1529 *
d6dfc868
RD
1530 * Returns non zero to signal active I/O shutdown failure. All other
1531 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1532 * but still return zero here.
1533 *
a6360785
NB
1534 * This may only be called from process context, and also currently
1535 * assumes internal allocation of fabric payload buffer by target-core.
a026757f
NB
1536 */
1537int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
f2d30680 1538 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
a026757f
NB
1539 u32 data_length, int task_attr, int data_dir, int flags,
1540 struct scatterlist *sgl, u32 sgl_count,
def2b339
NB
1541 struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
1542 struct scatterlist *sgl_prot, u32 sgl_prot_count)
a6360785
NB
1543{
1544 struct se_portal_group *se_tpg;
de103c93
CH
1545 sense_reason_t rc;
1546 int ret;
a6360785
NB
1547
1548 se_tpg = se_sess->se_tpg;
1549 BUG_ON(!se_tpg);
1550 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1551 BUG_ON(in_interrupt());
1552 /*
1553 * Initialize se_cmd for target operation. From this point
1554 * exceptions are handled by sending exception status via
1555 * target_core_fabric_ops->queue_status() callback
1556 */
1557 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1558 data_length, data_dir, task_attr, sense);
9095adaa
QT
1559
1560 if (flags & TARGET_SCF_USE_CPUID)
1561 se_cmd->se_cmd_flags |= SCF_USE_CPUID;
1562 else
1563 se_cmd->cpuid = WORK_CPU_UNBOUND;
1564
b0d79946
SAS
1565 if (flags & TARGET_SCF_UNKNOWN_SIZE)
1566 se_cmd->unknown_data_length = 1;
a6360785
NB
1567 /*
1568 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1569 * se_sess->sess_cmd_list. A second kref_get here is necessary
1570 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1571 * kref_put() to happen during fabric packet acknowledgement.
1572 */
afc16604 1573 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
de103c93
CH
1574 if (ret)
1575 return ret;
a6360785
NB
1576 /*
1577 * Signal bidirectional data payloads to target-core
1578 */
1579 if (flags & TARGET_SCF_BIDI_OP)
1580 se_cmd->se_cmd_flags |= SCF_BIDI;
1581 /*
1582 * Locate se_lun pointer and attach it to struct se_cmd
1583 */
de103c93
CH
1584 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
1585 if (rc) {
1586 transport_send_check_condition_and_sense(se_cmd, rc, 0);
afc16604 1587 target_put_sess_cmd(se_cmd);
d6dfc868 1588 return 0;
735703ca 1589 }
b5b8e298
SG
1590
1591 rc = target_setup_cmd_from_cdb(se_cmd, cdb);
1592 if (rc != 0) {
1593 transport_generic_request_failure(se_cmd, rc);
1594 return 0;
1595 }
1596
def2b339
NB
1597 /*
1598 * Save pointers for SGLs containing protection information,
1599 * if present.
1600 */
1601 if (sgl_prot_count) {
1602 se_cmd->t_prot_sg = sgl_prot;
1603 se_cmd->t_prot_nents = sgl_prot_count;
5835812f 1604 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
def2b339 1605 }
d6e0175c 1606
a026757f
NB
1607 /*
1608 * When a non zero sgl_count has been passed perform SGL passthrough
1609 * mapping for pre-allocated fabric memory instead of having target
1610 * core perform an internal SGL allocation..
1611 */
1612 if (sgl_count != 0) {
1613 BUG_ON(!sgl);
1614
944981c7
NB
1615 /*
1616 * A work-around for tcm_loop as some userspace code via
1617 * scsi-generic do not memset their associated read buffers,
1618 * so go ahead and do that here for type non-data CDBs. Also
1619 * note that this is currently guaranteed to be a single SGL
1620 * for this case by target core in target_setup_cmd_from_cdb()
1621 * -> transport_generic_cmd_sequencer().
1622 */
1623 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
1624 se_cmd->data_direction == DMA_FROM_DEVICE) {
1625 unsigned char *buf = NULL;
1626
1627 if (sgl)
1628 buf = kmap(sg_page(sgl)) + sgl->offset;
1629
1630 if (buf) {
1631 memset(buf, 0, sgl->length);
1632 kunmap(sg_page(sgl));
1633 }
1634 }
1635
a026757f
NB
1636 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
1637 sgl_bidi, sgl_bidi_count);
1638 if (rc != 0) {
de103c93 1639 transport_generic_request_failure(se_cmd, rc);
a026757f
NB
1640 return 0;
1641 }
1642 }
def2b339 1643
11e319ed
AG
1644 /*
1645 * Check if we need to delay processing because of ALUA
1646 * Active/NonOptimized primary access state..
1647 */
1648 core_alua_check_nonop_delay(se_cmd);
1649
a6360785 1650 transport_handle_cdb_direct(se_cmd);
d6dfc868 1651 return 0;
a6360785 1652}
a026757f
NB
1653EXPORT_SYMBOL(target_submit_cmd_map_sgls);
1654
9ad97b8b 1655/**
a026757f
NB
1656 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1657 *
1658 * @se_cmd: command descriptor to submit
1659 * @se_sess: associated se_sess for endpoint
1660 * @cdb: pointer to SCSI CDB
1661 * @sense: pointer to SCSI sense buffer
1662 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1663 * @data_length: fabric expected data transfer length
9ad97b8b 1664 * @task_attr: SAM task attribute
a026757f
NB
1665 * @data_dir: DMA data direction
1666 * @flags: flags for command submission from target_sc_flags_tables
1667 *
649ee054
BVA
1668 * Task tags are supported if the caller has set @se_cmd->tag.
1669 *
a026757f
NB
1670 * Returns non-zero to signal active I/O shutdown failure. All other
1671 * setup exceptions are returned to the initiator as a SCSI
1672 * CHECK_CONDITION response, while this function still returns zero.
1673 *
1674 * This may only be called from process context, and also currently
1675 * assumes internal allocation of fabric payload buffer by target-core.
1676 *
1677 * It also assumes internal target core SGL memory allocation.
1678 */
1679int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
f2d30680 1680 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
a026757f
NB
1681 u32 data_length, int task_attr, int data_dir, int flags)
1682{
1683 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
1684 unpacked_lun, data_length, task_attr, data_dir,
def2b339 1685 flags, NULL, 0, NULL, 0, NULL, 0);
a026757f 1686}
a6360785
NB
1687EXPORT_SYMBOL(target_submit_cmd);
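/*
 * Illustrative sketch, not from the kernel tree: minimal submission via
 * target_submit_cmd() with target-core-internal payload allocation.
 * struct my_fabric_cmd and my_fabric_free_cmd() are hypothetical.
 */
static void my_fabric_handle_cmd(struct my_fabric_cmd *fc)
{
	fc->se_cmd.tag = fc->itt;	/* task tag, set by the caller */

	/* A non-zero return means active I/O shutdown, not a CDB failure. */
	if (target_submit_cmd(&fc->se_cmd, fc->se_sess, fc->cdb,
			      fc->sense_buf, fc->unpacked_lun, fc->data_len,
			      TCM_SIMPLE_TAG, DMA_TO_DEVICE,
			      TARGET_SCF_ACK_KREF))
		my_fabric_free_cmd(fc);
}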
1688
9f0d05c2
NB
1689static void target_complete_tmr_failure(struct work_struct *work)
1690{
1691 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1692
1693 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1694 se_cmd->se_tfo->queue_tm_rsp(se_cmd);
5a3b6fc0
RD
1695
1696 transport_cmd_check_stop_to_fabric(se_cmd);
9f0d05c2
NB
1697}
1698
5465e7d3
NB
1699static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
1700 u64 *unpacked_lun)
1701{
1702 struct se_cmd *se_cmd;
1703 unsigned long flags;
1704 bool ret = false;
1705
1706 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
1707 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
1708 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
1709 continue;
1710
1711 if (se_cmd->tag == tag) {
1712 *unpacked_lun = se_cmd->orig_fe_lun;
1713 ret = true;
1714 break;
1715 }
1716 }
1717 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
1718
1719 return ret;
1720}
1721
ea98d7f9
AG
1722/**
1723 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1724 * for TMR CDBs
1725 *
1726 * @se_cmd: command descriptor to submit
1727 * @se_sess: associated se_sess for endpoint
1728 * @sense: pointer to SCSI sense buffer
1729 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1e74aff1 1730 * @fabric_tmr_ptr: fabric context for TMR req
ea98d7f9 1731 * @tm_type: Type of TM request
c0974f89
NB
1732 * @gfp: gfp type for caller
1733 * @tag: referenced task tag for TMR_ABORT_TASK
c7042cae 1734 * @flags: submit cmd flags
ea98d7f9
AG
1735 *
1736 * Callable from all contexts.
1737 */
1738
c7042cae 1739int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
f2d30680 1740 unsigned char *sense, u64 unpacked_lun,
c0974f89 1741 void *fabric_tmr_ptr, unsigned char tm_type,
5261d86c 1742 gfp_t gfp, u64 tag, int flags)
ea98d7f9
AG
1743{
1744 struct se_portal_group *se_tpg;
1745 int ret;
1746
1747 se_tpg = se_sess->se_tpg;
1748 BUG_ON(!se_tpg);
1749
1750 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
68d81f40 1751 0, DMA_NONE, TCM_SIMPLE_TAG, sense);
c7042cae
NB
1752 /*
1753 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1754 * allocation failure.
1755 */
c0974f89 1756 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
c7042cae
NB
1757 if (ret < 0)
1758 return -ENOMEM;
ea98d7f9 1759
c0974f89
NB
1760 if (tm_type == TMR_ABORT_TASK)
1761 se_cmd->se_tmr_req->ref_task_tag = tag;
1762
ea98d7f9 1763 /* See target_submit_cmd for commentary */
afc16604 1764 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
bc187ea6
RD
1765 if (ret) {
1766 core_tmr_release_req(se_cmd->se_tmr_req);
1767 return ret;
1768 }
5465e7d3
NB
1769 /*
1770 * If this is ABORT_TASK with no explicit fabric provided LUN,
1771 * go ahead and search active session tags for a match to figure
1772 * out unpacked_lun for the original se_cmd.
1773 */
1774 if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
1775 if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun))
1776 goto failure;
1777 }
ea98d7f9 1778
ea98d7f9 1779 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
5465e7d3
NB
1780 if (ret)
1781 goto failure;
1782
ea98d7f9 1783 transport_generic_handle_tmr(se_cmd);
c7042cae 1784 return 0;
5465e7d3
NB
1785
1786 /*
1787 * For callback during failure handling, push this work off
1788 * to process context with TMR_LUN_DOES_NOT_EXIST status.
1789 */
1790failure:
1791 INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1792 schedule_work(&se_cmd->work);
1793 return 0;
ea98d7f9
AG
1794}
1795EXPORT_SYMBOL(target_submit_tmr);
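/*
 * Illustrative sketch, not from the kernel tree: aborting an earlier
 * command by task tag. With TARGET_SCF_LOOKUP_LUN_FROM_TAG the LUN
 * argument may be passed as zero and is resolved from the session's
 * command list, per target_lookup_lun_from_tag() above. The
 * my_fabric_tmr structure is hypothetical.
 */
static int my_fabric_abort_task(struct my_fabric_tmr *t)
{
	return target_submit_tmr(&t->se_cmd, t->se_sess, t->sense_buf,
				 0, t->fabric_tmr_ctx, TMR_ABORT_TASK,
				 GFP_KERNEL, t->ref_task_tag,
				 TARGET_SCF_ACK_KREF |
				 TARGET_SCF_LOOKUP_LUN_FROM_TAG);
}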
1796
c66ac9db
NB
1797/*
1798 * Handle SAM-esque emulation for generic transport request failures.
1799 */
de103c93
CH
1800void transport_generic_request_failure(struct se_cmd *cmd,
1801 sense_reason_t sense_reason)
c66ac9db 1802{
5841734f 1803 int ret = 0, post_ret;
07bde79a 1804
c00e6220
BVA
1805 pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
1806 sense_reason);
1807 target_show_cmd("-----[ ", cmd);
c66ac9db 1808
c66ac9db
NB
1809 /*
1810 * Handle SAM Task Attribute emulation for the failed struct se_cmd.
1811 */
019c4ca6 1812 transport_complete_task_attr(cmd);
fd2f928b 1813
aa73237d 1814 if (cmd->transport_complete_callback)
5841734f 1815 cmd->transport_complete_callback(cmd, false, &post_ret);
c66ac9db 1816
2c9fa49e
BVA
1817 if (cmd->transport_state & CMD_T_ABORTED) {
1818 INIT_WORK(&cmd->work, target_abort_work);
1819 queue_work(target_completion_wq, &cmd->work);
fd2f928b 1820 return;
2c9fa49e 1821 }
fd2f928b 1822
de103c93 1823 switch (sense_reason) {
03e98c9e
NB
1824 case TCM_NON_EXISTENT_LUN:
1825 case TCM_UNSUPPORTED_SCSI_OPCODE:
1826 case TCM_INVALID_CDB_FIELD:
1827 case TCM_INVALID_PARAMETER_LIST:
bb992e72 1828 case TCM_PARAMETER_LIST_LENGTH_ERROR:
03e98c9e
NB
1829 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1830 case TCM_UNKNOWN_MODE_PAGE:
1831 case TCM_WRITE_PROTECTED:
e2397c70 1832 case TCM_ADDRESS_OUT_OF_RANGE:
03e98c9e
NB
1833 case TCM_CHECK_CONDITION_ABORT_CMD:
1834 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1835 case TCM_CHECK_CONDITION_NOT_READY:
94387aa7
NB
1836 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
1837 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
1838 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
449a1378 1839 case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
e8642120
DD
1840 case TCM_TOO_MANY_TARGET_DESCS:
1841 case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
1842 case TCM_TOO_MANY_SEGMENT_DESCS:
1843 case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
c66ac9db 1844 break;
de103c93 1845 case TCM_OUT_OF_RESOURCES:
a271eac4
MC
1846 cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
1847 goto queue_status;
d120c708
MC
1848 case TCM_LUN_BUSY:
1849 cmd->scsi_status = SAM_STAT_BUSY;
1850 goto queue_status;
03e98c9e 1851 case TCM_RESERVATION_CONFLICT:
c66ac9db
NB
1852 /*
1853 * No SENSE Data payload for this case, set SCSI Status
1854 * and queue the response to $FABRIC_MOD.
1855 *
1857 * Uses the SAM status code definitions from scsi_proto.h.
1857 */
1858 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1859 /*
1860 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1861 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1862 * CONFLICT STATUS.
1863 *
1864 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1865 */
e3d6f909 1866 if (cmd->se_sess &&
c51c8e7b
HR
1867 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
1868 target_ua_allocate_lun(cmd->se_sess->se_node_acl,
1869 cmd->orig_fe_lun, 0x2C,
1870 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1871 }
a271eac4
MC
1872
1873 goto queue_status;
c66ac9db 1874 default:
6708bb27 1875 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
de103c93
CH
1876 cmd->t_task_cdb[0], sense_reason);
1877 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
c66ac9db
NB
1878 break;
1879 }
f3146437 1880
de103c93 1881 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
fa7e25cf 1882 if (ret)
03e98c9e 1883 goto queue_full;
07bde79a 1884
c66ac9db 1885check_stop:
06b967e4 1886 transport_cmd_check_stop_to_fabric(cmd);
07bde79a
NB
1887 return;
1888
a271eac4
MC
1889queue_status:
1890 trace_target_cmd_complete(cmd);
1891 ret = cmd->se_tfo->queue_status(cmd);
1892 if (!ret)
1893 goto check_stop;
07bde79a 1894queue_full:
fa7e25cf 1895 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
c66ac9db 1896}
2fbff127 1897EXPORT_SYMBOL(transport_generic_request_failure);
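/*
 * Illustrative sketch, not from the kernel tree: how a sense_reason_t
 * reaches the failure path above. A backend ->execute_cmd() returning
 * non-zero is routed to transport_generic_request_failure(), which maps
 * the reason onto CHECK_CONDITION sense data or a bare SCSI status.
 * my_backend_execute() is hypothetical.
 */
static sense_reason_t my_backend_execute(struct se_cmd *cmd)
{
	if (cmd->t_task_cdb[1] & 0xe0)		/* reserved CDB bits set */
		return TCM_INVALID_CDB_FIELD;

	/* ... start real I/O; completion calls target_complete_cmd() ... */
	return TCM_NO_SENSE;
}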
c66ac9db 1898
dff0ca9e 1899void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
c66ac9db 1900{
de103c93 1901 sense_reason_t ret;
5f41a31d 1902
dff0ca9e
NB
1903 if (!cmd->execute_cmd) {
1904 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1905 goto err;
1906 }
1907 if (do_checks) {
1908 /*
1909 * Check for an existing UNIT ATTENTION condition after
1910 * target_handle_task_attr() has done SAM task attr
1911 * checking, and possibly have already deferred execution
1912 * out to target_restart_delayed_cmds() context.
1913 */
1914 ret = target_scsi3_ua_check(cmd);
1915 if (ret)
1916 goto err;
1917
1918 ret = target_alua_state_check(cmd);
1919 if (ret)
1920 goto err;
5f41a31d 1921
dff0ca9e
NB
1922 ret = target_check_reservation(cmd);
1923 if (ret) {
1924 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1925 goto err;
de103c93 1926 }
5f41a31d 1927 }
dff0ca9e
NB
1928
1929 ret = cmd->execute_cmd(cmd);
1930 if (!ret)
1931 return;
1932err:
1933 spin_lock_irq(&cmd->t_state_lock);
fd5e64de 1934 cmd->transport_state &= ~CMD_T_SENT;
dff0ca9e
NB
1935 spin_unlock_irq(&cmd->t_state_lock);
1936
1937 transport_generic_request_failure(cmd, ret);
5f41a31d
CH
1938}
1939
aa58b531
NB
1940static int target_write_prot_action(struct se_cmd *cmd)
1941{
5132d1e6 1942 u32 sectors;
aa58b531
NB
1943 /*
1944 * Perform WRITE_INSERT of PI using software emulation when backend
1945 * device has PI enabled, if the transport has not already generated
1946 * PI using hardware WRITE_INSERT offload.
1947 */
1948 switch (cmd->prot_op) {
1949 case TARGET_PROT_DOUT_INSERT:
1950 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
1951 sbc_dif_generate(cmd);
1952 break;
5132d1e6
NB
1953 case TARGET_PROT_DOUT_STRIP:
1954 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
1955 break;
1956
1957 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
f75b6fae
SG
1958 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
1959 sectors, 0, cmd->t_prot_sg, 0);
5132d1e6
NB
1960 if (unlikely(cmd->pi_err)) {
1961 spin_lock_irq(&cmd->t_state_lock);
fd5e64de 1962 cmd->transport_state &= ~CMD_T_SENT;
5132d1e6
NB
1963 spin_unlock_irq(&cmd->t_state_lock);
1964 transport_generic_request_failure(cmd, cmd->pi_err);
1965 return -1;
1966 }
1967 break;
aa58b531
NB
1968 default:
1969 break;
1970 }
1971
1972 return 0;
1973}
1974
019c4ca6 1975static bool target_handle_task_attr(struct se_cmd *cmd)
5f41a31d
CH
1976{
1977 struct se_device *dev = cmd->se_dev;
1978
a3541703 1979 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
019c4ca6 1980 return false;
5f41a31d 1981
410c29df
NB
1982 cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
1983
c66ac9db 1984 /*
25985edc 1985 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
c66ac9db
NB
1986 * to allow the passed struct se_cmd list of tasks to the front of the list.
1987 */
5f41a31d 1988 switch (cmd->sam_task_attr) {
68d81f40 1989 case TCM_HEAD_TAG:
9c31820b
RD
1990 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
1991 cmd->t_task_cdb[0]);
019c4ca6 1992 return false;
68d81f40 1993 case TCM_ORDERED_TAG:
33940d09 1994 atomic_inc_mb(&dev->dev_ordered_sync);
c66ac9db 1995
9c31820b
RD
1996 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
1997 cmd->t_task_cdb[0]);
5f41a31d 1998
c66ac9db 1999 /*
5f41a31d
CH
2000 * Execute an ORDERED command if no other older commands
2001 * exist that need to be completed first.
c66ac9db 2002 */
5f41a31d 2003 if (!atomic_read(&dev->simple_cmds))
019c4ca6 2004 return false;
5f41a31d
CH
2005 break;
2006 default:
c66ac9db
NB
2007 /*
2008 * For SIMPLE and UNTAGGED Task Attribute commands
2009 */
33940d09 2010 atomic_inc_mb(&dev->simple_cmds);
5f41a31d 2011 break;
c66ac9db 2012 }
5f41a31d 2013
019c4ca6
CH
2014 if (atomic_read(&dev->dev_ordered_sync) == 0)
2015 return false;
c66ac9db 2016
019c4ca6
CH
2017 spin_lock(&dev->delayed_cmd_lock);
2018 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
2019 spin_unlock(&dev->delayed_cmd_lock);
2020
9c31820b
RD
2021 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
2022 cmd->t_task_cdb[0], cmd->sam_task_attr);
019c4ca6
CH
2023 return true;
2024}
2025
2026void target_execute_cmd(struct se_cmd *cmd)
2027{
019c4ca6
CH
2028 /*
2029 * Determine if frontend context caller is requesting the stopping of
2030 * this command for frontend exceptions.
310d3d31 2031 *
4240d448 2032 * If the received CDB has already been aborted, stop processing it here.
019c4ca6 2033 */
2c9fa49e 2034 if (target_cmd_interrupted(cmd))
019c4ca6 2035 return;
019c4ca6 2036
2c9fa49e 2037 spin_lock_irq(&cmd->t_state_lock);
019c4ca6 2038 cmd->t_state = TRANSPORT_PROCESSING;
fd5e64de 2039 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
019c4ca6 2040 spin_unlock_irq(&cmd->t_state_lock);
aa58b531
NB
2041
2042 if (target_write_prot_action(cmd))
2043 return;
019c4ca6 2044
1a398b97
NB
2045 if (target_handle_task_attr(cmd)) {
2046 spin_lock_irq(&cmd->t_state_lock);
fd5e64de 2047 cmd->transport_state &= ~CMD_T_SENT;
1a398b97
NB
2048 spin_unlock_irq(&cmd->t_state_lock);
2049 return;
2050 }
2051
dff0ca9e 2052 __target_execute_cmd(cmd, true);
c66ac9db 2053}
70baf0ab 2054EXPORT_SYMBOL(target_execute_cmd);
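/*
 * Illustrative sketch, not from the kernel tree: the fabric half of the
 * WRITE path. After ->write_pending() has solicited the data and the
 * full payload has landed in se_cmd->t_data_sg, the fabric driver hands
 * the command back for backend execution. Hypothetical function name.
 */
static void my_fabric_write_data_received(struct se_cmd *se_cmd)
{
	target_execute_cmd(se_cmd);	/* sets CMD_T_ACTIVE | CMD_T_SENT */
}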
c66ac9db 2055
5f41a31d
CH
2056/*
2057 * Process all commands up to the last received ORDERED task attribute which
2058 * requires another blocking boundary
2059 */
2060static void target_restart_delayed_cmds(struct se_device *dev)
2061{
2062 for (;;) {
2063 struct se_cmd *cmd;
2064
2065 spin_lock(&dev->delayed_cmd_lock);
2066 if (list_empty(&dev->delayed_cmd_list)) {
2067 spin_unlock(&dev->delayed_cmd_lock);
2068 break;
2069 }
2070
2071 cmd = list_entry(dev->delayed_cmd_list.next,
2072 struct se_cmd, se_delayed_node);
2073 list_del(&cmd->se_delayed_node);
2074 spin_unlock(&dev->delayed_cmd_lock);
2075
1c79df1f
NB
2076 cmd->transport_state |= CMD_T_SENT;
2077
dff0ca9e 2078 __target_execute_cmd(cmd, true);
5f41a31d 2079
68d81f40 2080 if (cmd->sam_task_attr == TCM_ORDERED_TAG)
5f41a31d
CH
2081 break;
2082 }
2083}
2084
c66ac9db 2085/*
35e0e757 2086 * Called from I/O completion to determine which dormant/delayed
c66ac9db
NB
2087 * and ordered cmds need to have their tasks added to the execution queue.
2088 */
2089static void transport_complete_task_attr(struct se_cmd *cmd)
2090{
5951146d 2091 struct se_device *dev = cmd->se_dev;
c66ac9db 2092
a3541703 2093 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
019c4ca6
CH
2094 return;
2095
410c29df
NB
2096 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
2097 goto restart;
2098
68d81f40 2099 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
33940d09 2100 atomic_dec_mb(&dev->simple_cmds);
c66ac9db 2101 dev->dev_cur_ordered_id++;
68d81f40 2102 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
c66ac9db 2103 dev->dev_cur_ordered_id++;
9c31820b
RD
2104 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
2105 dev->dev_cur_ordered_id);
68d81f40 2106 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
33940d09 2107 atomic_dec_mb(&dev->dev_ordered_sync);
c66ac9db
NB
2108
2109 dev->dev_cur_ordered_id++;
9c31820b
RD
2110 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
2111 dev->dev_cur_ordered_id);
c66ac9db 2112 }
1c79df1f
NB
2113 cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
2114
410c29df 2115restart:
5f41a31d 2116 target_restart_delayed_cmds(dev);
c66ac9db
NB
2117}
2118
e057f533 2119static void transport_complete_qf(struct se_cmd *cmd)
07bde79a
NB
2120{
2121 int ret = 0;
2122
019c4ca6 2123 transport_complete_task_attr(cmd);
fa7e25cf
NB
2124 /*
2125 * If a fabric driver ->write_pending() or ->queue_data_in() callback
2126 * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
2127 * the same callbacks should not be retried. Return CHECK_CONDITION
2128 * if a scsi_status is not already set.
2129 *
2130 * If a fabric driver ->queue_status() has returned non zero, always
2131 * keep retrying no matter what..
2132 */
2133 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
2134 if (cmd->scsi_status)
2135 goto queue_status;
e057f533 2136
fa7e25cf
NB
2137 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
2138 goto queue_status;
e057f533 2139 }
07bde79a 2140
bd813720
LD
2141 /*
2142 * Check if we need to send a sense buffer from
2143 * the struct se_cmd in question. We do NOT want
2144 * to take this path if the IO has been marked as
2145 * needing to be treated like a "normal read". This
2146 * is the case if it's a tape read, and either the
2147 * FM, EOM, or ILI bits are set, but there is no
2148 * sense data.
2149 */
2150 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2151 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
fa7e25cf
NB
2152 goto queue_status;
2153
07bde79a
NB
2154 switch (cmd->data_direction) {
2155 case DMA_FROM_DEVICE:
bd813720
LD
2156 /* queue status if not treating this as a normal read */
2157 if (cmd->scsi_status &&
2158 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
4347ab5a
NB
2159 goto queue_status;
2160
e5c0d6ad 2161 trace_target_cmd_complete(cmd);
07bde79a
NB
2162 ret = cmd->se_tfo->queue_data_in(cmd);
2163 break;
2164 case DMA_TO_DEVICE:
64577407 2165 if (cmd->se_cmd_flags & SCF_BIDI) {
07bde79a 2166 ret = cmd->se_tfo->queue_data_in(cmd);
63509c60 2167 break;
07bde79a 2168 }
d7e595dd 2169 /* fall through */
07bde79a 2170 case DMA_NONE:
4347ab5a 2171queue_status:
e5c0d6ad 2172 trace_target_cmd_complete(cmd);
07bde79a
NB
2173 ret = cmd->se_tfo->queue_status(cmd);
2174 break;
2175 default:
2176 break;
2177 }
2178
e057f533 2179 if (ret < 0) {
fa7e25cf 2180 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
e057f533
CH
2181 return;
2182 }
e057f533 2183 transport_cmd_check_stop_to_fabric(cmd);
07bde79a
NB
2184}
2185
fa7e25cf
NB
2186static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
2187 int err, bool write_pending)
07bde79a 2188{
fa7e25cf
NB
2189 /*
2190 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
2191 * ->queue_data_in() callbacks from new process context.
2192 *
2193 * Otherwise for other errors, transport_complete_qf() will send
2194 * CHECK_CONDITION via ->queue_status() instead of attempting to
2195 * retry associated fabric driver data-transfer callbacks.
2196 */
2197 if (err == -EAGAIN || err == -ENOMEM) {
2198 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
2199 TRANSPORT_COMPLETE_QF_OK;
2200 } else {
2201 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
2202 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
2203 }
2204
07bde79a 2205 spin_lock_irq(&dev->qf_cmd_lock);
07bde79a 2206 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
33940d09 2207 atomic_inc_mb(&dev->dev_qf_count);
07bde79a
NB
2208 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
2209
2210 schedule_work(&cmd->se_dev->qf_work_queue);
2211}
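/*
 * Illustrative sketch, not from the kernel tree: the fabric-driver side
 * of the queue-full contract above. Returning -EAGAIN or -ENOMEM from
 * ->queue_data_in()/->queue_status()/->write_pending() asks target core
 * to retry the callback later from process context; other non-zero
 * values are treated as fatal for the data-transfer callbacks.
 * my_hw_ring_full() is hypothetical.
 */
static int my_fabric_queue_data_in(struct se_cmd *cmd)
{
	if (my_hw_ring_full(cmd->se_sess))
		return -EAGAIN;	/* retried via dev->qf_work_queue */

	/* ... post DATA-IN frames to the hardware ... */
	return 0;
}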
2212
fdeab852 2213static bool target_read_prot_action(struct se_cmd *cmd)
bc005869 2214{
fdeab852
NB
2215 switch (cmd->prot_op) {
2216 case TARGET_PROT_DIN_STRIP:
2217 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
f75b6fae
SG
2218 u32 sectors = cmd->data_length >>
2219 ilog2(cmd->se_dev->dev_attrib.block_size);
2220
2221 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2222 sectors, 0, cmd->t_prot_sg,
2223 0);
2224 if (cmd->pi_err)
fdeab852 2225 return true;
bc005869 2226 }
fdeab852 2227 break;
72c03850
NB
2228 case TARGET_PROT_DIN_INSERT:
2229 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2230 break;
2231
2232 sbc_dif_generate(cmd);
2233 break;
fdeab852
NB
2234 default:
2235 break;
bc005869
NB
2236 }
2237
2238 return false;
2239}
2240
35e0e757 2241static void target_complete_ok_work(struct work_struct *work)
c66ac9db 2242{
35e0e757 2243 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
27a27099 2244 int ret;
35e0e757 2245
c66ac9db
NB
2246 /*
2247 * Check if we need to move delayed/dormant tasks from cmds on the
2248 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
2249 * Attribute.
2250 */
019c4ca6
CH
2251 transport_complete_task_attr(cmd);
2252
07bde79a
NB
2253 /*
2254 * Check whether we need to schedule QUEUE_FULL work for commands
2255 * still pending on this command's device.
2256 */
2257 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2258 schedule_work(&cmd->se_dev->qf_work_queue);
2259
c66ac9db 2260 /*
d5829eac 2261 * Check if we need to send a sense buffer from
bd813720
LD
2262 * the struct se_cmd in question. We do NOT want
2263 * to take this path if the IO has been marked as
2264 * needing to be treated like a "normal read". This
2265 * is the case if it's a tape read, and either the
2266 * FM, EOM, or ILI bits are set, but there is no
2267 * sense data.
c66ac9db 2268 */
bd813720
LD
2269 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2270 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
27a27099 2271 WARN_ON(!cmd->scsi_status);
27a27099
PB
2272 ret = transport_send_check_condition_and_sense(
2273 cmd, 0, 1);
fa7e25cf 2274 if (ret)
27a27099
PB
2275 goto queue_full;
2276
27a27099
PB
2277 transport_cmd_check_stop_to_fabric(cmd);
2278 return;
c66ac9db
NB
2279 }
2280 /*
25985edc 2281 * Check for a callback, used by, amongst other things,
a6b0133c 2282 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
c66ac9db 2283 */
a6b0133c
NB
2284 if (cmd->transport_complete_callback) {
2285 sense_reason_t rc;
057085e5
NB
2286 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2287 bool zero_dl = !(cmd->data_length);
2288 int post_ret = 0;
a6b0133c 2289
057085e5
NB
2290 rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2291 if (!rc && !post_ret) {
2292 if (caw && zero_dl)
c8e63985
NB
2293 goto queue_rsp;
2294
a6b0133c 2295 return;
a2890087
NB
2296 } else if (rc) {
2297 ret = transport_send_check_condition_and_sense(cmd,
2298 rc, 0);
fa7e25cf 2299 if (ret)
a2890087 2300 goto queue_full;
a6b0133c 2301
a2890087
NB
2302 transport_cmd_check_stop_to_fabric(cmd);
2303 return;
2304 }
a6b0133c 2305 }
c66ac9db 2306
c8e63985 2307queue_rsp:
c66ac9db
NB
2308 switch (cmd->data_direction) {
2309 case DMA_FROM_DEVICE:
bd813720
LD
2310 /*
2311 * if this is a READ-type IO, but SCSI status
2312 * is set, then skip returning data and just
2313 * return the status -- unless this IO is marked
2314 * as needing to be treated as a normal read,
2315 * in which case we want to go ahead and return
2316 * the data. This happens, for example, for tape
2317 * reads with the FM, EOM, or ILI bits set, with
2318 * no sense data.
2319 */
2320 if (cmd->scsi_status &&
2321 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
4347ab5a
NB
2322 goto queue_status;
2323
4cc987ea
NB
2324 atomic_long_add(cmd->data_length,
2325 &cmd->se_lun->lun_stats.tx_data_octets);
bc005869
NB
2326 /*
2327 * Perform READ_STRIP of PI using software emulation when
2328 * backend had PI enabled, if the transport will not be
2329 * performing hardware READ_STRIP offload.
2330 */
fdeab852 2331 if (target_read_prot_action(cmd)) {
bc005869
NB
2332 ret = transport_send_check_condition_and_sense(cmd,
2333 cmd->pi_err, 0);
fa7e25cf 2334 if (ret)
bc005869
NB
2335 goto queue_full;
2336
bc005869
NB
2337 transport_cmd_check_stop_to_fabric(cmd);
2338 return;
2339 }
c66ac9db 2340
e5c0d6ad 2341 trace_target_cmd_complete(cmd);
07bde79a 2342 ret = cmd->se_tfo->queue_data_in(cmd);
fa7e25cf 2343 if (ret)
07bde79a 2344 goto queue_full;
c66ac9db
NB
2345 break;
2346 case DMA_TO_DEVICE:
4cc987ea
NB
2347 atomic_long_add(cmd->data_length,
2348 &cmd->se_lun->lun_stats.rx_data_octets);
c66ac9db
NB
2349 /*
2350 * Check if we need to send READ payload for BIDI-COMMAND
2351 */
64577407 2352 if (cmd->se_cmd_flags & SCF_BIDI) {
4cc987ea
NB
2353 atomic_long_add(cmd->data_length,
2354 &cmd->se_lun->lun_stats.tx_data_octets);
07bde79a 2355 ret = cmd->se_tfo->queue_data_in(cmd);
fa7e25cf 2356 if (ret)
07bde79a 2357 goto queue_full;
c66ac9db
NB
2358 break;
2359 }
d7e595dd 2360 /* fall through */
c66ac9db 2361 case DMA_NONE:
4347ab5a 2362queue_status:
e5c0d6ad 2363 trace_target_cmd_complete(cmd);
07bde79a 2364 ret = cmd->se_tfo->queue_status(cmd);
fa7e25cf 2365 if (ret)
07bde79a 2366 goto queue_full;
c66ac9db
NB
2367 break;
2368 default:
2369 break;
2370 }
2371
c66ac9db 2372 transport_cmd_check_stop_to_fabric(cmd);
07bde79a
NB
2373 return;
2374
2375queue_full:
6708bb27 2376 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
07bde79a 2377 " data_direction: %d\n", cmd, cmd->data_direction);
fa7e25cf
NB
2378
2379 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
c66ac9db
NB
2380}
2381
e64aa657 2382void target_free_sgl(struct scatterlist *sgl, int nents)
c66ac9db 2383{
8c7a8d1c 2384 sgl_free_n_order(sgl, nents, 0);
6708bb27 2385}
e64aa657 2386EXPORT_SYMBOL(target_free_sgl);
c66ac9db 2387
47e459e6
NB
2388static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2389{
2390 /*
2391 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
2392 * emulation, and free + reset pointers if necessary..
2393 */
2394 if (!cmd->t_data_sg_orig)
2395 return;
2396
2397 kfree(cmd->t_data_sg);
2398 cmd->t_data_sg = cmd->t_data_sg_orig;
2399 cmd->t_data_sg_orig = NULL;
2400 cmd->t_data_nents = cmd->t_data_nents_orig;
2401 cmd->t_data_nents_orig = 0;
2402}
2403
6708bb27
AG
2404static inline void transport_free_pages(struct se_cmd *cmd)
2405{
5835812f 2406 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
e64aa657 2407 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
5835812f
AM
2408 cmd->t_prot_sg = NULL;
2409 cmd->t_prot_nents = 0;
2410 }
2411
47e459e6 2412 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
c8e63985
NB
2413 /*
2414 * Release special case READ buffer payload required for
2415 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
2416 */
2417 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
e64aa657 2418 target_free_sgl(cmd->t_bidi_data_sg,
c8e63985
NB
2419 cmd->t_bidi_data_nents);
2420 cmd->t_bidi_data_sg = NULL;
2421 cmd->t_bidi_data_nents = 0;
2422 }
47e459e6 2423 transport_reset_sgl_orig(cmd);
6708bb27 2424 return;
47e459e6
NB
2425 }
2426 transport_reset_sgl_orig(cmd);
6708bb27 2427
e64aa657 2428 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
ec98f782
AG
2429 cmd->t_data_sg = NULL;
2430 cmd->t_data_nents = 0;
c66ac9db 2431
e64aa657 2432 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
ec98f782
AG
2433 cmd->t_bidi_data_sg = NULL;
2434 cmd->t_bidi_data_nents = 0;
c66ac9db
NB
2435}
2436
4949314c 2437void *transport_kmap_data_sg(struct se_cmd *cmd)
05d1c7c0 2438{
ec98f782 2439 struct scatterlist *sg = cmd->t_data_sg;
4949314c
AG
2440 struct page **pages;
2441 int i;
05d1c7c0
AG
2442
2443 /*
ec98f782
AG
2444 * We need to take into account a possible offset here for fabrics like
2445 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
2446 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
05d1c7c0 2447 */
4949314c
AG
2448 if (!cmd->t_data_nents)
2449 return NULL;
3717ef0c
PB
2450
2451 BUG_ON(!sg);
2452 if (cmd->t_data_nents == 1)
4949314c
AG
2453 return kmap(sg_page(sg)) + sg->offset;
2454
2455 /* >1 page. use vmap */
df6751f3 2456 pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
de103c93 2457 if (!pages)
4949314c
AG
2458 return NULL;
2459
2460 /* convert sg[] to pages[] */
2461 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2462 pages[i] = sg_page(sg);
2463 }
2464
2465 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
2466 kfree(pages);
de103c93 2467 if (!cmd->t_data_vmap)
4949314c
AG
2468 return NULL;
2469
2470 return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
05d1c7c0 2471}
4949314c 2472EXPORT_SYMBOL(transport_kmap_data_sg);
05d1c7c0 2473
4949314c 2474void transport_kunmap_data_sg(struct se_cmd *cmd)
05d1c7c0 2475{
a1edf9cf 2476 if (!cmd->t_data_nents) {
4949314c 2477 return;
a1edf9cf 2478 } else if (cmd->t_data_nents == 1) {
4949314c 2479 kunmap(sg_page(cmd->t_data_sg));
a1edf9cf
AG
2480 return;
2481 }
4949314c
AG
2482
2483 vunmap(cmd->t_data_vmap);
2484 cmd->t_data_vmap = NULL;
05d1c7c0 2485}
4949314c 2486EXPORT_SYMBOL(transport_kunmap_data_sg);
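/*
 * Illustrative sketch, not from the kernel tree: pairing the two kmap
 * helpers above to parse a command's payload through one virtual
 * mapping, as CDB emulation code typically does. Hypothetical function.
 */
static sense_reason_t my_emulate_parameter_list(struct se_cmd *cmd)
{
	unsigned char *buf = transport_kmap_data_sg(cmd);

	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/* ... parse buf[0 .. cmd->data_length - 1] ... */

	transport_kunmap_data_sg(cmd);
	return TCM_NO_SENSE;
}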
05d1c7c0 2487
c5ff8d6b 2488int
20093994 2489target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
e64aa657 2490 bool zero_page, bool chainable)
c66ac9db 2491{
14db4917 2492 gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
c66ac9db 2493
14db4917
BVA
2494 *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
2495 return *sgl ? 0 : -ENOMEM;
c66ac9db 2496}
e64aa657 2497EXPORT_SYMBOL(target_alloc_sgl);
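/*
 * Illustrative sketch, not from the kernel tree: allocating a private,
 * zeroed, unchained scatterlist with the helper above and releasing it
 * with target_free_sgl(). Hypothetical wrapper name.
 */
static int my_alloc_scratch_sgl(struct scatterlist **sgl,
				unsigned int *nents, u32 length)
{
	int ret = target_alloc_sgl(sgl, nents, length, true, false);

	if (ret < 0)
		return ret;

	/* ... fill or DMA into the SGL; later: target_free_sgl(*sgl, *nents) */
	return 0;
}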
c66ac9db 2498
da0f7619 2499/*
b16a35b0
AG
2500 * Allocate any required resources to execute the command. For writes we
2501 * might not have the payload yet, so notify the fabric via a call to
2502 * ->write_pending instead. Otherwise place it on the execution queue.
c66ac9db 2503 */
de103c93
CH
2504sense_reason_t
2505transport_generic_new_cmd(struct se_cmd *cmd)
c66ac9db 2506{
b1a2ecda 2507 unsigned long flags;
c66ac9db 2508 int ret = 0;
c8e63985 2509 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
c66ac9db 2510
5835812f
AM
2511 if (cmd->prot_op != TARGET_PROT_NORMAL &&
2512 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2513 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
e64aa657 2514 cmd->prot_length, true, false);
5835812f
AM
2515 if (ret < 0)
2516 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2517 }
2518
c66ac9db 2519 /*
4240d448 2520 * Determine if the TCM fabric module has already allocated physical
c66ac9db 2521 * memory, and is directly calling transport_generic_map_mem_to_cmd()
ec98f782 2522 * beforehand.
c66ac9db 2523 */
ec98f782
AG
2524 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2525 cmd->data_length) {
20093994 2526
8cefe07b
NB
2527 if ((cmd->se_cmd_flags & SCF_BIDI) ||
2528 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2529 u32 bidi_length;
2530
2531 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2532 bidi_length = cmd->t_task_nolb *
2533 cmd->se_dev->dev_attrib.block_size;
2534 else
2535 bidi_length = cmd->data_length;
2536
2537 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2538 &cmd->t_bidi_data_nents,
e64aa657 2539 bidi_length, zero_flag, false);
8cefe07b
NB
2540 if (ret < 0)
2541 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2542 }
2543
20093994 2544 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
e64aa657 2545 cmd->data_length, zero_flag, false);
c66ac9db 2546 if (ret < 0)
de103c93 2547 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
c8e63985
NB
2548 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2549 cmd->data_length) {
2550 /*
2551 * Special case for COMPARE_AND_WRITE with fabrics
2552 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
2553 */
2554 u32 caw_length = cmd->t_task_nolb *
2555 cmd->se_dev->dev_attrib.block_size;
2556
2557 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2558 &cmd->t_bidi_data_nents,
e64aa657 2559 caw_length, zero_flag, false);
c8e63985
NB
2560 if (ret < 0)
2561 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
c66ac9db 2562 }
c66ac9db 2563 /*
c3196f0c
CH
2564 * If this command is not a write we can execute it right here,
2565 * for write buffers we need to notify the fabric driver first
2566 * and let it call back once the write buffers are ready.
c66ac9db 2567 */
5f41a31d 2568 target_add_to_state_list(cmd);
885e7b0e 2569 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
c3196f0c
CH
2570 target_execute_cmd(cmd);
2571 return 0;
2572 }
b1a2ecda
BVA
2573
2574 spin_lock_irqsave(&cmd->t_state_lock, flags);
2575 cmd->t_state = TRANSPORT_WRITE_PENDING;
2576 /*
2577 * Determine if frontend context caller is requesting the stopping of
2578 * this command for frontend exceptions.
2579 */
fbbd4923
BVA
2580 if (cmd->transport_state & CMD_T_STOP &&
2581 !cmd->se_tfo->write_pending_must_be_called) {
b1a2ecda
BVA
2582 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2583 __func__, __LINE__, cmd->tag);
2584
2585 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2586
2587 complete_all(&cmd->t_transport_stop_comp);
a5eb307f 2588 return 0;
b1a2ecda
BVA
2589 }
2590 cmd->transport_state &= ~CMD_T_ACTIVE;
2591 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c3196f0c
CH
2592
2593 ret = cmd->se_tfo->write_pending(cmd);
fa7e25cf 2594 if (ret)
c3196f0c
CH
2595 goto queue_full;
2596
fa7e25cf 2597 return 0;
da0f7619 2598
c3196f0c
CH
2599queue_full:
2600 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
fa7e25cf 2601 transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
c3196f0c 2602 return 0;
c66ac9db 2603}
a1d8b49a 2604EXPORT_SYMBOL(transport_generic_new_cmd);
c66ac9db 2605
e057f533 2606static void transport_write_pending_qf(struct se_cmd *cmd)
07bde79a 2607{
9574a497 2608 unsigned long flags;
f147abb4 2609 int ret;
9574a497
NB
2610 bool stop;
2611
2612 spin_lock_irqsave(&cmd->t_state_lock, flags);
2613 stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
2614 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2615
2616 if (stop) {
2617 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
2618 __func__, __LINE__, cmd->tag);
2619 complete_all(&cmd->t_transport_stop_comp);
2620 return;
2621 }
f147abb4
NB
2622
2623 ret = cmd->se_tfo->write_pending(cmd);
fa7e25cf 2624 if (ret) {
e057f533
CH
2625 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
2626 cmd);
fa7e25cf 2627 transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
e057f533 2628 }
07bde79a
NB
2629}
2630
0f4a9431
NB
2631static bool
2632__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2633 unsigned long *flags);
2634
2635static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2636{
2637 unsigned long flags;
2638
2639 spin_lock_irqsave(&cmd->t_state_lock, flags);
2640 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2641 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2642}
2643
2c9fa49e
BVA
2644/*
2645 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
2646 * finished.
2647 */
2648void target_put_cmd_and_wait(struct se_cmd *cmd)
2649{
2650 DECLARE_COMPLETION_ONSTACK(compl);
2651
2652 WARN_ON_ONCE(cmd->abrt_compl);
2653 cmd->abrt_compl = &compl;
2654 target_put_sess_cmd(cmd);
2655 wait_for_completion(&compl);
2656}
2657
953bcf7a
BVA
2658/*
2659 * This function is called by frontend drivers after processing of a command
2660 * has finished.
2661 *
2c9fa49e
BVA
2662 * The protocol for ensuring that either the regular frontend command
2663 * processing flow or target_handle_abort() code drops one reference is as
2664 * follows:
953bcf7a 2665 * - Calling .queue_data_in(), .queue_status() or .queue_tm_rsp() will cause
2c9fa49e
BVA
2666 * the frontend driver to call this function synchronously or asynchronously.
2667 * That will cause one reference to be dropped.
953bcf7a
BVA
2668 * - During regular command processing the target core sets CMD_T_COMPLETE
2669 * before invoking one of the .queue_*() functions.
2670 * - The code that aborts commands skips commands and TMFs for which
2671 * CMD_T_COMPLETE has been set.
2672 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
2673 * commands that will be aborted.
2674 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set
2675 * transport_generic_free_cmd() skips its call to target_put_sess_cmd().
2676 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
2677 * be called and will drop a reference.
2678 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
2c9fa49e 2679 * will be called. target_handle_abort() will drop the final reference.
953bcf7a 2680 */
d5ddad41 2681int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
c66ac9db 2682{
7b2cc7dc 2683 DECLARE_COMPLETION_ONSTACK(compl);
d5ddad41 2684 int ret = 0;
0f4a9431 2685 bool aborted = false, tas = false;
d5ddad41 2686
edf46eee
BVA
2687 if (wait_for_tasks)
2688 target_wait_free_cmd(cmd, &aborted, &tas);
2689
2690 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
c130480b
NB
2691 /*
2692 * Handle WRITE failure case where transport_generic_new_cmd()
2693 * has already added se_cmd to state_list, but fabric has
2694 * failed command before I/O submission.
2695 */
febe562c 2696 if (cmd->state_active)
c130480b 2697 target_remove_from_state_list(cmd);
0f4a9431 2698 }
7b2cc7dc 2699 if (aborted)
a014c364 2700 cmd->free_compl = &compl;
2c9fa49e 2701 ret = target_put_sess_cmd(cmd);
0f4a9431
NB
2702 if (aborted) {
2703 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
7b2cc7dc 2704 wait_for_completion(&compl);
0f4a9431 2705 ret = 1;
c66ac9db 2706 }
d5ddad41 2707 return ret;
c66ac9db
NB
2708}
2709EXPORT_SYMBOL(transport_generic_free_cmd);
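/*
 * Illustrative sketch, not from the kernel tree: a typical frontend
 * release path matching the protocol described above. Once the response
 * has been sent (and, for TARGET_SCF_ACK_KREF fabrics, acknowledged),
 * the driver drops its reference; wait_for_tasks is false because the
 * command has already completed. Hypothetical function name.
 */
static void my_fabric_release_cmd(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, false);
}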
2710
9ad97b8b
RD
2711/**
2712 * target_get_sess_cmd - Add command to active ->sess_cmd_list
a17f091d 2713 * @se_cmd: command descriptor to add
a6360785 2714 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
a17f091d 2715 */
afc16604 2716int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
a17f091d 2717{
afc16604 2718 struct se_session *se_sess = se_cmd->se_sess;
a17f091d 2719 unsigned long flags;
bc187ea6 2720 int ret = 0;
a17f091d 2721
a6360785
NB
2722 /*
2723 * Add a second kref if the fabric caller is expecting to handle
2724 * fabric acknowledgement that requires two target_put_sess_cmd()
2725 * invocations before se_cmd descriptor release.
2726 */
527268df 2727 if (ack_kref) {
1b4c59b7
HR
2728 if (!kref_get_unless_zero(&se_cmd->cmd_kref))
2729 return -EINVAL;
2730
527268df
NB
2731 se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2732 }
7481deb4 2733
a17f091d 2734 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
bc187ea6
RD
2735 if (se_sess->sess_tearing_down) {
2736 ret = -ESHUTDOWN;
2737 goto out;
2738 }
a17f091d 2739 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
ad669505 2740 percpu_ref_get(&se_sess->cmd_count);
bc187ea6 2741out:
a17f091d 2742 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
7544e597
BVA
2743
2744 if (ret && ack_kref)
afc16604 2745 target_put_sess_cmd(se_cmd);
7544e597 2746
bc187ea6 2747 return ret;
a17f091d 2748}
20361e69 2749EXPORT_SYMBOL(target_get_sess_cmd);
a17f091d 2750
febe562c
NB
2751static void target_free_cmd_mem(struct se_cmd *cmd)
2752{
2753 transport_free_pages(cmd);
2754
2755 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2756 core_tmr_release_req(cmd->se_tmr_req);
2757 if (cmd->t_task_cdb != cmd->__t_task_cdb)
2758 kfree(cmd->t_task_cdb);
2759}
2760
7481deb4 2761static void target_release_cmd_kref(struct kref *kref)
a17f091d 2762{
7481deb4
NB
2763 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2764 struct se_session *se_sess = se_cmd->se_sess;
a014c364
BVA
2765 struct completion *free_compl = se_cmd->free_compl;
2766 struct completion *abrt_compl = se_cmd->abrt_compl;
9ff9d15e 2767 unsigned long flags;
a17f091d 2768
83f85b8e
BVA
2769 if (se_cmd->lun_ref_active)
2770 percpu_ref_put(&se_cmd->se_lun->lun_ref);
2771
6b6427b6
BVA
2772 if (se_sess) {
2773 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3eeff198 2774 list_del_init(&se_cmd->se_cmd_list);
9ff9d15e 2775 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
a17f091d 2776 }
a17f091d 2777
febe562c 2778 target_free_cmd_mem(se_cmd);
7481deb4 2779 se_cmd->se_tfo->release_cmd(se_cmd);
a014c364
BVA
2780 if (free_compl)
2781 complete(free_compl);
2782 if (abrt_compl)
2783 complete(abrt_compl);
ad669505
BVA
2784
2785 percpu_ref_put(&se_sess->cmd_count);
7481deb4
NB
2786}
2787
6b6427b6
BVA
2788/**
2789 * target_put_sess_cmd - decrease the command reference count
2790 * @se_cmd: command to drop a reference from
2791 *
2792 * Returns 1 if and only if this target_put_sess_cmd() call caused the
2793 * refcount to drop to zero. Returns zero otherwise.
7481deb4 2794 */
afc16604 2795int target_put_sess_cmd(struct se_cmd *se_cmd)
7481deb4 2796{
9ff9d15e 2797 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
a17f091d
NB
2798}
2799EXPORT_SYMBOL(target_put_sess_cmd);
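/*
 * Illustrative sketch, not from the kernel tree: the put that balances
 * the extra reference taken by target_get_sess_cmd(..., ack_kref=true),
 * issued once the initiator's acknowledgement for the response arrives.
 * Hypothetical function name.
 */
static void my_fabric_ack_received(struct se_cmd *se_cmd)
{
	target_put_sess_cmd(se_cmd);	/* may be the final reference drop */
}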
2800
c00e6220
BVA
2801static const char *data_dir_name(enum dma_data_direction d)
2802{
2803 switch (d) {
2804 case DMA_BIDIRECTIONAL: return "BIDI";
2805 case DMA_TO_DEVICE: return "WRITE";
2806 case DMA_FROM_DEVICE: return "READ";
2807 case DMA_NONE: return "NONE";
2808 }
2809
2810 return "(?)";
2811}
2812
2813static const char *cmd_state_name(enum transport_state_table t)
2814{
2815 switch (t) {
2816 case TRANSPORT_NO_STATE: return "NO_STATE";
2817 case TRANSPORT_NEW_CMD: return "NEW_CMD";
2818 case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING";
2819 case TRANSPORT_PROCESSING: return "PROCESSING";
2820 case TRANSPORT_COMPLETE: return "COMPLETE";
2821 case TRANSPORT_ISTATE_PROCESSING:
2822 return "ISTATE_PROCESSING";
2823 case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP";
2824 case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK";
2825 case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR";
2826 }
2827
2828 return "(?)";
2829}
2830
2831static void target_append_str(char **str, const char *txt)
2832{
2833 char *prev = *str;
2834
2835 *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
2836 kstrdup(txt, GFP_ATOMIC);
2837 kfree(prev);
2838}
2839
2840/*
2841 * Convert a transport state bitmask into a string. The caller is
2842 * responsible for freeing the returned pointer.
2843 */
2844static char *target_ts_to_str(u32 ts)
2845{
2846 char *str = NULL;
2847
2848 if (ts & CMD_T_ABORTED)
2849 target_append_str(&str, "aborted");
2850 if (ts & CMD_T_ACTIVE)
2851 target_append_str(&str, "active");
2852 if (ts & CMD_T_COMPLETE)
2853 target_append_str(&str, "complete");
2854 if (ts & CMD_T_SENT)
2855 target_append_str(&str, "sent");
2856 if (ts & CMD_T_STOP)
2857 target_append_str(&str, "stop");
2858 if (ts & CMD_T_FABRIC_STOP)
2859 target_append_str(&str, "fabric_stop");
2860
2861 return str;
2862}
2863
2864static const char *target_tmf_name(enum tcm_tmreq_table tmf)
2865{
2866 switch (tmf) {
2867 case TMR_ABORT_TASK: return "ABORT_TASK";
2868 case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET";
2869 case TMR_CLEAR_ACA: return "CLEAR_ACA";
2870 case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET";
2871 case TMR_LUN_RESET: return "LUN_RESET";
2872 case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET";
2873 case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET";
2874 case TMR_UNKNOWN: break;
2875 }
2876 return "(?)";
2877}
2878
2879void target_show_cmd(const char *pfx, struct se_cmd *cmd)
2880{
2881 char *ts_str = target_ts_to_str(cmd->transport_state);
2882 const u8 *cdb = cmd->t_task_cdb;
2883 struct se_tmr_req *tmf = cmd->se_tmr_req;
2884
2885 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
2886 pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
2887 pfx, cdb[0], cdb[1], cmd->tag,
2888 data_dir_name(cmd->data_direction),
2889 cmd->se_tfo->get_cmd_state(cmd),
2890 cmd_state_name(cmd->t_state), cmd->data_length,
2891 kref_read(&cmd->cmd_kref), ts_str);
2892 } else {
2893 pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
2894 pfx, target_tmf_name(tmf->function), cmd->tag,
2895 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
2896 cmd_state_name(cmd->t_state),
2897 kref_read(&cmd->cmd_kref), ts_str);
2898 }
2899 kfree(ts_str);
2900}
2901EXPORT_SYMBOL(target_show_cmd);
2902
9ad97b8b 2903/**
00d909a1 2904 * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued.
1c7b13fe 2905 * @se_sess: session to flag
a17f091d 2906 */
1c7b13fe 2907void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
a17f091d 2908{
a17f091d
NB
2909 unsigned long flags;
2910
a17f091d 2911 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
1c7b13fe 2912 se_sess->sess_tearing_down = 1;
a17f091d 2913 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
ad669505
BVA
2914
2915 percpu_ref_kill(&se_sess->cmd_count);
a17f091d 2916}
1c7b13fe 2917EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
a17f091d 2918
9ad97b8b 2919/**
00d909a1 2920 * target_wait_for_sess_cmds - Wait for outstanding commands
a17f091d 2921 * @se_sess: session to wait for active I/O
a17f091d 2922 */
be646c2d 2923void target_wait_for_sess_cmds(struct se_session *se_sess)
a17f091d 2924{
00d909a1
BVA
2925 struct se_cmd *cmd;
2926 int ret;
9b31a328 2927
00d909a1
BVA
2928 WARN_ON_ONCE(!se_sess->sess_tearing_down);
2929
00d909a1 2930 do {
ad669505
BVA
2931 ret = wait_event_timeout(se_sess->cmd_list_wq,
2932 percpu_ref_is_zero(&se_sess->cmd_count),
2933 180 * HZ);
00d909a1
BVA
2934 list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
2935 target_show_cmd("session shutdown: still waiting for ",
2936 cmd);
2937 } while (ret <= 0);
a17f091d
NB
2938}
2939EXPORT_SYMBOL(target_wait_for_sess_cmds);
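/*
 * Illustrative sketch, not from the kernel tree: orderly session
 * teardown with the two helpers above: fence off new commands first,
 * then block until every outstanding command has dropped its session
 * reference. Hypothetical function name.
 */
static void my_fabric_close_session(struct se_session *se_sess)
{
	target_sess_cmd_list_set_waiting(se_sess);
	target_wait_for_sess_cmds(se_sess);
	/* ... now safe to deregister and free the session ... */
}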
2940
a95be384
BVA
2941/*
2942 * Prevent that new percpu_ref_tryget_live() calls succeed and wait until
2943 * all references to the LUN have been released. Called during LUN shutdown.
2944 */
b3eeea66 2945void transport_clear_lun_ref(struct se_lun *lun)
c66ac9db 2946{
a95be384 2947 percpu_ref_kill(&lun->lun_ref);
bd4e2d29 2948 wait_for_completion(&lun->lun_shutdown_comp);
c66ac9db
NB
2949}
2950
0f4a9431
NB
2951static bool
2952__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
2953 bool *aborted, bool *tas, unsigned long *flags)
2954 __releases(&cmd->t_state_lock)
2955 __acquires(&cmd->t_state_lock)
c66ac9db 2956{
c66ac9db 2957
0f4a9431
NB
2958 assert_spin_locked(&cmd->t_state_lock);
2959 WARN_ON_ONCE(!irqs_disabled());
2960
2961 if (fabric_stop)
2962 cmd->transport_state |= CMD_T_FABRIC_STOP;
2963
2964 if (cmd->transport_state & CMD_T_ABORTED)
2965 *aborted = true;
2966
2967 if (cmd->transport_state & CMD_T_TAS)
2968 *tas = true;
2969
c8e31f26 2970 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
0f4a9431 2971 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
a17f091d 2972 return false;
cb4f4d3c 2973
c8e31f26 2974 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
0f4a9431 2975 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
a17f091d 2976 return false;
7d680f3b 2977
0f4a9431
NB
2978 if (!(cmd->transport_state & CMD_T_ACTIVE))
2979 return false;
2980
2981 if (fabric_stop && *aborted)
a17f091d 2982 return false;
c66ac9db 2983
7d680f3b 2984 cmd->transport_state |= CMD_T_STOP;
c66ac9db 2985
c00e6220 2986 target_show_cmd("wait_for_tasks: Stopping ", cmd);
c66ac9db 2987
0f4a9431 2988 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
c66ac9db 2989
c00e6220
BVA
2990 while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
2991 180 * HZ))
2992 target_show_cmd("wait for tasks: ", cmd);
c66ac9db 2993
0f4a9431 2994 spin_lock_irqsave(&cmd->t_state_lock, *flags);
7d680f3b 2995 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
c66ac9db 2996
0f4a9431
NB
2997 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
2998 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
c66ac9db 2999
0f4a9431
NB
3000 return true;
3001}
3002
3003/**
2d4760ee
BVA
3004 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp
3005 * @cmd: command to wait on
0f4a9431
NB
3006 */
3007bool transport_wait_for_tasks(struct se_cmd *cmd)
3008{
3009 unsigned long flags;
3010 bool ret, aborted = false, tas = false;
3011
3012 spin_lock_irqsave(&cmd->t_state_lock, flags);
3013 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
d14921d6 3014 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
a17f091d 3015
0f4a9431 3016 return ret;
c66ac9db 3017}
d14921d6 3018EXPORT_SYMBOL(transport_wait_for_tasks);
c66ac9db 3019
ab78fef4
BVA
3020struct sense_info {
3021 u8 key;
3022 u8 asc;
3023 u8 ascq;
3024 bool add_sector_info;
3025};
3026
3027static const struct sense_info sense_info_table[] = {
3028 [TCM_NO_SENSE] = {
3029 .key = NOT_READY
3030 },
3031 [TCM_NON_EXISTENT_LUN] = {
3032 .key = ILLEGAL_REQUEST,
3033 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
3034 },
3035 [TCM_UNSUPPORTED_SCSI_OPCODE] = {
3036 .key = ILLEGAL_REQUEST,
3037 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
3038 },
3039 [TCM_SECTOR_COUNT_TOO_MANY] = {
3040 .key = ILLEGAL_REQUEST,
3041 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
3042 },
3043 [TCM_UNKNOWN_MODE_PAGE] = {
3044 .key = ILLEGAL_REQUEST,
3045 .asc = 0x24, /* INVALID FIELD IN CDB */
3046 },
3047 [TCM_CHECK_CONDITION_ABORT_CMD] = {
3048 .key = ABORTED_COMMAND,
3049 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
3050 .ascq = 0x03,
3051 },
3052 [TCM_INCORRECT_AMOUNT_OF_DATA] = {
3053 .key = ABORTED_COMMAND,
3054 .asc = 0x0c, /* WRITE ERROR */
3055 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
3056 },
3057 [TCM_INVALID_CDB_FIELD] = {
3058 .key = ILLEGAL_REQUEST,
3059 .asc = 0x24, /* INVALID FIELD IN CDB */
3060 },
3061 [TCM_INVALID_PARAMETER_LIST] = {
3062 .key = ILLEGAL_REQUEST,
3063 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
3064 },
e8642120
DD
3065 [TCM_TOO_MANY_TARGET_DESCS] = {
3066 .key = ILLEGAL_REQUEST,
3067 .asc = 0x26,
3068 .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
3069 },
3070 [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
3071 .key = ILLEGAL_REQUEST,
3072 .asc = 0x26,
3073 .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
3074 },
3075 [TCM_TOO_MANY_SEGMENT_DESCS] = {
3076 .key = ILLEGAL_REQUEST,
3077 .asc = 0x26,
3078 .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
3079 },
3080 [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
3081 .key = ILLEGAL_REQUEST,
3082 .asc = 0x26,
3083 .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
3084 },
ab78fef4
BVA
3085 [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
3086 .key = ILLEGAL_REQUEST,
3087 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
3088 },
3089 [TCM_UNEXPECTED_UNSOLICITED_DATA] = {
3090 .key = ILLEGAL_REQUEST,
3091 .asc = 0x0c, /* WRITE ERROR */
3092 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
3093 },
3094 [TCM_SERVICE_CRC_ERROR] = {
3095 .key = ABORTED_COMMAND,
3096 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
3097 .ascq = 0x05, /* N/A */
3098 },
3099 [TCM_SNACK_REJECTED] = {
3100 .key = ABORTED_COMMAND,
3101 .asc = 0x11, /* READ ERROR */
3102 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
3103 },
3104 [TCM_WRITE_PROTECTED] = {
3105 .key = DATA_PROTECT,
3106 .asc = 0x27, /* WRITE PROTECTED */
3107 },
3108 [TCM_ADDRESS_OUT_OF_RANGE] = {
3109 .key = ILLEGAL_REQUEST,
3110 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
3111 },
3112 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
3113 .key = UNIT_ATTENTION,
3114 },
3115 [TCM_CHECK_CONDITION_NOT_READY] = {
3116 .key = NOT_READY,
3117 },
3118 [TCM_MISCOMPARE_VERIFY] = {
3119 .key = MISCOMPARE,
3120 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
3121 .ascq = 0x00,
3122 },
3123 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
734ca5c4 3124 .key = ABORTED_COMMAND,
ab78fef4
BVA
3125 .asc = 0x10,
3126 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
3127 .add_sector_info = true,
3128 },
3129 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
734ca5c4 3130 .key = ABORTED_COMMAND,
ab78fef4
BVA
3131 .asc = 0x10,
3132 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
3133 .add_sector_info = true,
3134 },
3135 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
734ca5c4 3136 .key = ABORTED_COMMAND,
ab78fef4
BVA
3137 .asc = 0x10,
3138 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
3139 .add_sector_info = true,
3140 },
449a1378
NB
3141 [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
3142 .key = COPY_ABORTED,
3143 .asc = 0x0d,
3144 .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
3145
3146 },
ab78fef4
BVA
3147 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
3148 /*
3149 * Returning ILLEGAL REQUEST would cause immediate IO errors on
3150 * Solaris initiators. Returning NOT READY instead means the
3151 * operations will be retried a finite number of times and we
3152 * can survive intermittent errors.
3153 */
3154 .key = NOT_READY,
3155 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
3156 },
79dd6f2f 3157 [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
3158 /*
3159 * From spc4r22 sections 5.7.7 and 5.7.8:
3160 * If a PERSISTENT RESERVE OUT command with a REGISTER service action
3161 * or a REGISTER AND IGNORE EXISTING KEY service action or
3162 * REGISTER AND MOVE service action is attempted,
3163 * but there are insufficient device server resources to complete the
3164 * operation, then the command shall be terminated with CHECK CONDITION
3165 * status, with the sense key set to ILLEGAL REQUEST, and the additional
3166 * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
3167 */
3168 .key = ILLEGAL_REQUEST,
3169 .asc = 0x55,
3170 .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
3171 },
ab78fef4
BVA
3172};
3173
325c1e8b
BVA
3174/**
3175 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
3176 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
3177 * be stored.
3178 * @reason: LIO sense reason code. If this argument has the value
3179 * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
3180 * dequeuing a unit attention fails due to multiple commands being processed
3181 * concurrently, set the command status to BUSY.
3184 */
17e391dd 3185static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
ab78fef4
BVA
3186{
3187 const struct sense_info *si;
3188 u8 *buffer = cmd->sense_buffer;
3189 int r = (__force int)reason;
325c1e8b 3190 u8 key, asc, ascq;
4e4937e8 3191 bool desc_format = target_sense_desc_format(cmd->se_dev);
ab78fef4
BVA
3192
3193 if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
3194 si = &sense_info_table[r];
3195 else
3196 si = &sense_info_table[(__force int)
3197 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
3198
325c1e8b 3199 key = si->key;
ab78fef4 3200 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
325c1e8b
BVA
3201 if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
3202 &ascq)) {
3203 cmd->scsi_status = SAM_STAT_BUSY;
3204 return;
3205 }
ab78fef4
BVA
3206 } else if (si->asc == 0) {
3207 WARN_ON_ONCE(cmd->scsi_asc == 0);
3208 asc = cmd->scsi_asc;
3209 ascq = cmd->scsi_ascq;
3210 } else {
3211 asc = si->asc;
3212 ascq = si->ascq;
3213 }
9ec1e1ce 3214
89a104ed
BVA
3215 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
3216 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
3217 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
325c1e8b 3218 scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
ab78fef4 3219 if (si->add_sector_info)
17e391dd
BVA
3220 WARN_ON_ONCE(scsi_set_sense_information(buffer,
3221 cmd->scsi_sense_length,
3222 cmd->bad_sector) < 0);
ab78fef4
BVA
3223}
3224
de103c93
CH
3225int
3226transport_send_check_condition_and_sense(struct se_cmd *cmd,
3227 sense_reason_t reason, int from_transport)
c66ac9db 3228{
c66ac9db 3229 unsigned long flags;
c66ac9db 3230
2c9fa49e
BVA
3231 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
3232
a1d8b49a 3233 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 3234 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
a1d8b49a 3235 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
3236 return 0;
3237 }
3238 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
a1d8b49a 3239 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 3240
17e391dd
BVA
3241 if (!from_transport)
3242 translate_sense_reason(cmd, reason);
c66ac9db 3243
e5c0d6ad 3244 trace_target_cmd_complete(cmd);
07bde79a 3245 return cmd->se_tfo->queue_status(cmd);
c66ac9db
NB
3246}
3247EXPORT_SYMBOL(transport_send_check_condition_and_sense);
3248
94ebb471
BVA
3249/**
3250 * target_send_busy - Send SCSI BUSY status back to the initiator
3251 * @cmd: SCSI command for which to send a BUSY reply.
3252 *
3253 * Note: Only call this function if target_submit_cmd*() failed.
3254 */
3255int target_send_busy(struct se_cmd *cmd)
3256{
3257 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
3258
3259 cmd->scsi_status = SAM_STAT_BUSY;
3260 trace_target_cmd_complete(cmd);
3261 return cmd->se_tfo->queue_status(cmd);
3262}
3263EXPORT_SYMBOL(target_send_busy);
3264
af877292 3265static void target_tmr_work(struct work_struct *work)
c66ac9db 3266{
af877292 3267 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
5951146d 3268 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
3269 struct se_tmr_req *tmr = cmd->se_tmr_req;
3270 int ret;
3271
2c9fa49e
BVA
3272 if (cmd->transport_state & CMD_T_ABORTED)
3273 goto aborted;
a6d9bb1c 3274
c66ac9db 3275 switch (tmr->function) {
5c6cd613 3276 case TMR_ABORT_TASK:
3d28934a 3277 core_tmr_abort_task(dev, tmr, cmd->se_sess);
c66ac9db 3278 break;
5c6cd613
NB
3279 case TMR_ABORT_TASK_SET:
3280 case TMR_CLEAR_ACA:
3281 case TMR_CLEAR_TASK_SET:
c66ac9db
NB
3282 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3283 break;
5c6cd613 3284 case TMR_LUN_RESET:
c66ac9db
NB
3285 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3286 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
3287 TMR_FUNCTION_REJECTED;
b5aafb16
HR
3288 if (tmr->response == TMR_FUNCTION_COMPLETE) {
3289 target_ua_allocate_lun(cmd->se_sess->se_node_acl,
3290 cmd->orig_fe_lun, 0x29,
3291 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
3292 }
c66ac9db 3293 break;
5c6cd613 3294 case TMR_TARGET_WARM_RESET:
c66ac9db
NB
3295 tmr->response = TMR_FUNCTION_REJECTED;
3296 break;
5c6cd613 3297 case TMR_TARGET_COLD_RESET:
c66ac9db
NB
3298 tmr->response = TMR_FUNCTION_REJECTED;
3299 break;
c66ac9db 3300 default:
c1678280 3301 pr_err("Unknown TMR function: 0x%02x.\n",
c66ac9db
NB
3302 tmr->function);
3303 tmr->response = TMR_FUNCTION_REJECTED;
3304 break;
3305 }
3306
2c9fa49e
BVA
3307 if (cmd->transport_state & CMD_T_ABORTED)
3308 goto aborted;
a6d9bb1c 3309
e3d6f909 3310 cmd->se_tfo->queue_tm_rsp(cmd);
c66ac9db 3311
b7b8bef7 3312 transport_cmd_check_stop_to_fabric(cmd);
2c9fa49e
BVA
3313 return;
3314
3315aborted:
3316 target_handle_abort(cmd);
c66ac9db
NB
3317}
3318
af877292
CH
3319int transport_generic_handle_tmr(
3320 struct se_cmd *cmd)
c66ac9db 3321{
f15e9cd9 3322 unsigned long flags;
c54eeffb 3323 bool aborted = false;
f15e9cd9
NB
3324
3325 spin_lock_irqsave(&cmd->t_state_lock, flags);
c54eeffb
NB
3326 if (cmd->transport_state & CMD_T_ABORTED) {
3327 aborted = true;
3328 } else {
3329 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3330 cmd->transport_state |= CMD_T_ACTIVE;
3331 }
f15e9cd9
NB
3332 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3333
c54eeffb 3334 if (aborted) {
2c9fa49e
BVA
3335 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
3336 cmd->se_tmr_req->function,
3337 cmd->se_tmr_req->ref_task_tag, cmd->tag);
3338 target_handle_abort(cmd);
c54eeffb
NB
3339 return 0;
3340 }
3341
af877292 3342 INIT_WORK(&cmd->work, target_tmr_work);
db5b21a2 3343 schedule_work(&cmd->work);
c66ac9db
NB
3344 return 0;
3345}
af877292 3346EXPORT_SYMBOL(transport_generic_handle_tmr);
814e5b45
CH
3347
3348bool
3349target_check_wce(struct se_device *dev)
3350{
3351 bool wce = false;
3352
3353 if (dev->transport->get_write_cache)
3354 wce = dev->transport->get_write_cache(dev);
3355 else if (dev->dev_attrib.emulate_write_cache > 0)
3356 wce = true;
3357
3358 return wce;
3359}
3360
3361bool
3362target_check_fua(struct se_device *dev)
3363{
3364 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
3365}