target: Cleanup unused se_task bits
[linux-2.6-block.git] / drivers / target / target_core_transport.c
c66ac9db
NB
1/*******************************************************************************
2 * Filename: target_core_transport.c
3 *
4 * This file contains the Generic Target Engine Core.
5 *
6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
c66ac9db
NB
29#include <linux/net.h>
30#include <linux/delay.h>
31#include <linux/string.h>
32#include <linux/timer.h>
33#include <linux/slab.h>
34#include <linux/blkdev.h>
35#include <linux/spinlock.h>
c66ac9db
NB
36#include <linux/kthread.h>
37#include <linux/in.h>
38#include <linux/cdrom.h>
39#include <asm/unaligned.h>
40#include <net/sock.h>
41#include <net/tcp.h>
42#include <scsi/scsi.h>
43#include <scsi/scsi_cmnd.h>
e66ecd50 44#include <scsi/scsi_tcq.h>
c66ac9db
NB
45
46#include <target/target_core_base.h>
47#include <target/target_core_device.h>
48#include <target/target_core_tmr.h>
49#include <target/target_core_tpg.h>
50#include <target/target_core_transport.h>
51#include <target/target_core_fabric_ops.h>
52#include <target/target_core_configfs.h>
53
54#include "target_core_alua.h"
55#include "target_core_hba.h"
56#include "target_core_pr.h"
57#include "target_core_scdb.h"
58#include "target_core_ua.h"
59
e3d6f909 60static int sub_api_initialized;
c66ac9db
NB
61
62static struct kmem_cache *se_cmd_cache;
63static struct kmem_cache *se_sess_cache;
64struct kmem_cache *se_tmr_req_cache;
65struct kmem_cache *se_ua_cache;
c66ac9db
NB
66struct kmem_cache *t10_pr_reg_cache;
67struct kmem_cache *t10_alua_lu_gp_cache;
68struct kmem_cache *t10_alua_lu_gp_mem_cache;
69struct kmem_cache *t10_alua_tg_pt_gp_cache;
70struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
71
c66ac9db 72static int transport_generic_write_pending(struct se_cmd *);
5951146d 73static int transport_processing_thread(void *param);
c66ac9db
NB
74static int __transport_execute_tasks(struct se_device *dev);
75static void transport_complete_task_attr(struct se_cmd *cmd);
07bde79a
NB
76static int transport_complete_qf(struct se_cmd *cmd);
77static void transport_handle_queue_full(struct se_cmd *cmd,
78 struct se_device *dev, int (*qf_callback)(struct se_cmd *));
c66ac9db
NB
79static void transport_direct_request_timeout(struct se_cmd *cmd);
80static void transport_free_dev_tasks(struct se_cmd *cmd);
a1d8b49a 81static u32 transport_allocate_tasks(struct se_cmd *cmd,
ec98f782 82 unsigned long long starting_lba,
c66ac9db 83 enum dma_data_direction data_direction,
ec98f782 84 struct scatterlist *sgl, unsigned int nents);
05d1c7c0 85static int transport_generic_get_mem(struct se_cmd *cmd);
39c05f32 86static void transport_put_cmd(struct se_cmd *cmd);
c66ac9db
NB
87static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
88 struct se_queue_obj *qobj);
89static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
90static void transport_stop_all_task_timers(struct se_cmd *cmd);
91
e3d6f909 92int init_se_kmem_caches(void)
c66ac9db 93{
c66ac9db
NB
94 se_cmd_cache = kmem_cache_create("se_cmd_cache",
95 sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
6708bb27
AG
96 if (!se_cmd_cache) {
97 pr_err("kmem_cache_create for struct se_cmd failed\n");
c66ac9db
NB
98 goto out;
99 }
100 se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
101 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
102 0, NULL);
6708bb27
AG
103 if (!se_tmr_req_cache) {
104 pr_err("kmem_cache_create() for struct se_tmr_req"
c66ac9db
NB
105 " failed\n");
106 goto out;
107 }
108 se_sess_cache = kmem_cache_create("se_sess_cache",
109 sizeof(struct se_session), __alignof__(struct se_session),
110 0, NULL);
6708bb27
AG
111 if (!se_sess_cache) {
112 pr_err("kmem_cache_create() for struct se_session"
c66ac9db
NB
113 " failed\n");
114 goto out;
115 }
116 se_ua_cache = kmem_cache_create("se_ua_cache",
117 sizeof(struct se_ua), __alignof__(struct se_ua),
118 0, NULL);
6708bb27
AG
119 if (!se_ua_cache) {
120 pr_err("kmem_cache_create() for struct se_ua failed\n");
c66ac9db
NB
121 goto out;
122 }
c66ac9db
NB
123 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
124 sizeof(struct t10_pr_registration),
125 __alignof__(struct t10_pr_registration), 0, NULL);
6708bb27
AG
126 if (!t10_pr_reg_cache) {
127 pr_err("kmem_cache_create() for struct t10_pr_registration"
c66ac9db
NB
128 " failed\n");
129 goto out;
130 }
131 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
132 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
133 0, NULL);
6708bb27
AG
134 if (!t10_alua_lu_gp_cache) {
135 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
c66ac9db
NB
136 " failed\n");
137 goto out;
138 }
139 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
140 sizeof(struct t10_alua_lu_gp_member),
141 __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
6708bb27
AG
142 if (!t10_alua_lu_gp_mem_cache) {
143 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
c66ac9db
NB
144 "cache failed\n");
145 goto out;
146 }
147 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
148 sizeof(struct t10_alua_tg_pt_gp),
149 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
6708bb27
AG
150 if (!t10_alua_tg_pt_gp_cache) {
151 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
c66ac9db
NB
152 "cache failed\n");
153 goto out;
154 }
155 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
156 "t10_alua_tg_pt_gp_mem_cache",
157 sizeof(struct t10_alua_tg_pt_gp_member),
158 __alignof__(struct t10_alua_tg_pt_gp_member),
159 0, NULL);
6708bb27
AG
160 if (!t10_alua_tg_pt_gp_mem_cache) {
161 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
c66ac9db
NB
162 "mem_t failed\n");
163 goto out;
164 }
165
c66ac9db
NB
166 return 0;
167out:
168 if (se_cmd_cache)
169 kmem_cache_destroy(se_cmd_cache);
170 if (se_tmr_req_cache)
171 kmem_cache_destroy(se_tmr_req_cache);
172 if (se_sess_cache)
173 kmem_cache_destroy(se_sess_cache);
174 if (se_ua_cache)
175 kmem_cache_destroy(se_ua_cache);
c66ac9db
NB
176 if (t10_pr_reg_cache)
177 kmem_cache_destroy(t10_pr_reg_cache);
178 if (t10_alua_lu_gp_cache)
179 kmem_cache_destroy(t10_alua_lu_gp_cache);
180 if (t10_alua_lu_gp_mem_cache)
181 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
182 if (t10_alua_tg_pt_gp_cache)
183 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
184 if (t10_alua_tg_pt_gp_mem_cache)
185 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
e3d6f909 186 return -ENOMEM;
c66ac9db
NB
187}
188
e3d6f909 189void release_se_kmem_caches(void)
c66ac9db 190{
c66ac9db
NB
191 kmem_cache_destroy(se_cmd_cache);
192 kmem_cache_destroy(se_tmr_req_cache);
193 kmem_cache_destroy(se_sess_cache);
194 kmem_cache_destroy(se_ua_cache);
c66ac9db
NB
195 kmem_cache_destroy(t10_pr_reg_cache);
196 kmem_cache_destroy(t10_alua_lu_gp_cache);
197 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
198 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
199 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
c66ac9db
NB
200}
201
e3d6f909
AG
202/* This code ensures unique mib indexes are handed out. */
203static DEFINE_SPINLOCK(scsi_mib_index_lock);
204static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
e89d15ee
NB
205
206/*
207 * Allocate a new row index for the entry type specified
208 */
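/*
 * Usage sketch (see transport_add_device_to_core_hba() later in this file,
 * which does exactly this when registering a new backend device):
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 */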
209u32 scsi_get_new_index(scsi_index_t type)
210{
211 u32 new_index;
212
e3d6f909 213 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
e89d15ee 214
e3d6f909
AG
215 spin_lock(&scsi_mib_index_lock);
216 new_index = ++scsi_mib_index[type];
217 spin_unlock(&scsi_mib_index_lock);
e89d15ee
NB
218
219 return new_index;
220}
221
c66ac9db
NB
222void transport_init_queue_obj(struct se_queue_obj *qobj)
223{
224 atomic_set(&qobj->queue_cnt, 0);
225 INIT_LIST_HEAD(&qobj->qobj_list);
226 init_waitqueue_head(&qobj->thread_wq);
227 spin_lock_init(&qobj->cmd_queue_lock);
228}
229EXPORT_SYMBOL(transport_init_queue_obj);
230
231static int transport_subsystem_reqmods(void)
232{
233 int ret;
234
235 ret = request_module("target_core_iblock");
236 if (ret != 0)
6708bb27 237 pr_err("Unable to load target_core_iblock\n");
c66ac9db
NB
238
239 ret = request_module("target_core_file");
240 if (ret != 0)
6708bb27 241 pr_err("Unable to load target_core_file\n");
c66ac9db
NB
242
243 ret = request_module("target_core_pscsi");
244 if (ret != 0)
6708bb27 245 pr_err("Unable to load target_core_pscsi\n");
c66ac9db
NB
246
247 ret = request_module("target_core_stgt");
248 if (ret != 0)
6708bb27 249 pr_err("Unable to load target_core_stgt\n");
c66ac9db
NB
250
251 return 0;
252}
253
254int transport_subsystem_check_init(void)
255{
e3d6f909
AG
256 int ret;
257
258 if (sub_api_initialized)
c66ac9db
NB
259 return 0;
260 /*
261 * Request the loading of known TCM subsystem plugins..
262 */
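	/*
	 * Note: transport_subsystem_reqmods() above only logs request_module()
	 * failures and still returns 0, so a missing backend plugin (e.g.
	 * target_core_stgt) does not by itself fail this initialization.
	 */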
e3d6f909
AG
263 ret = transport_subsystem_reqmods();
264 if (ret < 0)
265 return ret;
c66ac9db 266
e3d6f909 267 sub_api_initialized = 1;
c66ac9db
NB
268 return 0;
269}
270
271struct se_session *transport_init_session(void)
272{
273 struct se_session *se_sess;
274
275 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
6708bb27
AG
276 if (!se_sess) {
277 pr_err("Unable to allocate struct se_session from"
c66ac9db
NB
278 " se_sess_cache\n");
279 return ERR_PTR(-ENOMEM);
280 }
281 INIT_LIST_HEAD(&se_sess->sess_list);
282 INIT_LIST_HEAD(&se_sess->sess_acl_list);
c66ac9db
NB
283
284 return se_sess;
285}
286EXPORT_SYMBOL(transport_init_session);
287
288/*
 289 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
290 */
291void __transport_register_session(
292 struct se_portal_group *se_tpg,
293 struct se_node_acl *se_nacl,
294 struct se_session *se_sess,
295 void *fabric_sess_ptr)
296{
297 unsigned char buf[PR_REG_ISID_LEN];
298
299 se_sess->se_tpg = se_tpg;
300 se_sess->fabric_sess_ptr = fabric_sess_ptr;
301 /*
 302 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
303 *
304 * Only set for struct se_session's that will actually be moving I/O.
305 * eg: *NOT* discovery sessions.
306 */
307 if (se_nacl) {
308 /*
309 * If the fabric module supports an ISID based TransportID,
310 * save this value in binary from the fabric I_T Nexus now.
311 */
e3d6f909 312 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
c66ac9db 313 memset(&buf[0], 0, PR_REG_ISID_LEN);
e3d6f909 314 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
c66ac9db
NB
315 &buf[0], PR_REG_ISID_LEN);
316 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
317 }
318 spin_lock_irq(&se_nacl->nacl_sess_lock);
319 /*
320 * The se_nacl->nacl_sess pointer will be set to the
321 * last active I_T Nexus for each struct se_node_acl.
322 */
323 se_nacl->nacl_sess = se_sess;
324
325 list_add_tail(&se_sess->sess_acl_list,
326 &se_nacl->acl_sess_list);
327 spin_unlock_irq(&se_nacl->nacl_sess_lock);
328 }
329 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
330
6708bb27 331 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
e3d6f909 332 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
c66ac9db
NB
333}
334EXPORT_SYMBOL(__transport_register_session);
335
336void transport_register_session(
337 struct se_portal_group *se_tpg,
338 struct se_node_acl *se_nacl,
339 struct se_session *se_sess,
340 void *fabric_sess_ptr)
341{
342 spin_lock_bh(&se_tpg->session_lock);
343 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
344 spin_unlock_bh(&se_tpg->session_lock);
345}
346EXPORT_SYMBOL(transport_register_session);
347
348void transport_deregister_session_configfs(struct se_session *se_sess)
349{
350 struct se_node_acl *se_nacl;
23388864 351 unsigned long flags;
c66ac9db
NB
352 /*
353 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
354 */
355 se_nacl = se_sess->se_node_acl;
6708bb27 356 if (se_nacl) {
23388864 357 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
c66ac9db
NB
358 list_del(&se_sess->sess_acl_list);
359 /*
360 * If the session list is empty, then clear the pointer.
361 * Otherwise, set the struct se_session pointer from the tail
362 * element of the per struct se_node_acl active session list.
363 */
364 if (list_empty(&se_nacl->acl_sess_list))
365 se_nacl->nacl_sess = NULL;
366 else {
367 se_nacl->nacl_sess = container_of(
368 se_nacl->acl_sess_list.prev,
369 struct se_session, sess_acl_list);
370 }
23388864 371 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
c66ac9db
NB
372 }
373}
374EXPORT_SYMBOL(transport_deregister_session_configfs);
375
376void transport_free_session(struct se_session *se_sess)
377{
378 kmem_cache_free(se_sess_cache, se_sess);
379}
380EXPORT_SYMBOL(transport_free_session);
381
382void transport_deregister_session(struct se_session *se_sess)
383{
384 struct se_portal_group *se_tpg = se_sess->se_tpg;
385 struct se_node_acl *se_nacl;
e63a8e19 386 unsigned long flags;
c66ac9db 387
6708bb27 388 if (!se_tpg) {
c66ac9db
NB
389 transport_free_session(se_sess);
390 return;
391 }
c66ac9db 392
e63a8e19 393 spin_lock_irqsave(&se_tpg->session_lock, flags);
c66ac9db
NB
394 list_del(&se_sess->sess_list);
395 se_sess->se_tpg = NULL;
396 se_sess->fabric_sess_ptr = NULL;
e63a8e19 397 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
c66ac9db
NB
398
399 /*
400 * Determine if we need to do extra work for this initiator node's
401 * struct se_node_acl if it had been previously dynamically generated.
402 */
403 se_nacl = se_sess->se_node_acl;
6708bb27 404 if (se_nacl) {
e63a8e19 405 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
c66ac9db 406 if (se_nacl->dynamic_node_acl) {
6708bb27
AG
407 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
408 se_tpg)) {
c66ac9db
NB
409 list_del(&se_nacl->acl_list);
410 se_tpg->num_node_acls--;
e63a8e19 411 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
c66ac9db
NB
412
413 core_tpg_wait_for_nacl_pr_ref(se_nacl);
c66ac9db 414 core_free_device_list_for_node(se_nacl, se_tpg);
e3d6f909 415 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
c66ac9db 416 se_nacl);
e63a8e19 417 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
c66ac9db
NB
418 }
419 }
e63a8e19 420 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
c66ac9db
NB
421 }
422
423 transport_free_session(se_sess);
424
6708bb27 425 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
e3d6f909 426 se_tpg->se_tpg_tfo->get_fabric_name());
c66ac9db
NB
427}
428EXPORT_SYMBOL(transport_deregister_session);
429
430/*
a1d8b49a 431 * Called with cmd->t_state_lock held.
c66ac9db
NB
432 */
433static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
434{
42bf829e 435 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
436 struct se_task *task;
437 unsigned long flags;
438
42bf829e
CH
439 if (!dev)
440 return;
c66ac9db 441
42bf829e 442 list_for_each_entry(task, &cmd->t_task_list, t_list) {
c66ac9db
NB
443 if (atomic_read(&task->task_active))
444 continue;
445
6708bb27 446 if (!atomic_read(&task->task_state_active))
c66ac9db
NB
447 continue;
448
449 spin_lock_irqsave(&dev->execute_task_lock, flags);
450 list_del(&task->t_state_list);
6708bb27
AG
451 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
452 cmd->se_tfo->get_task_tag(cmd), dev, task);
c66ac9db
NB
453 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
454
455 atomic_set(&task->task_state_active, 0);
a1d8b49a 456 atomic_dec(&cmd->t_task_cdbs_ex_left);
c66ac9db
NB
457 }
458}
459
460/* transport_cmd_check_stop():
461 *
462 * 'transport_off = 1' determines if t_transport_active should be cleared.
463 * 'transport_off = 2' determines if task_dev_state should be removed.
464 *
465 * A non-zero u8 t_state sets cmd->t_state.
466 * Returns 1 when command is stopped, else 0.
467 */
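/*
 * For example, transport_cmd_check_stop_to_fabric() below calls this as
 * transport_cmd_check_stop(cmd, 2, 0): clear t_transport_active, remove the
 * per-device task state and the se_lun pointer, and leave t_state untouched
 * before the descriptor is handed back to the fabric module.
 */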
468static int transport_cmd_check_stop(
469 struct se_cmd *cmd,
470 int transport_off,
471 u8 t_state)
472{
473 unsigned long flags;
474
a1d8b49a 475 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db
NB
476 /*
 477 * Determine if the IOCTL context caller is requesting the stopping of this
478 * command for LUN shutdown purposes.
479 */
a1d8b49a 480 if (atomic_read(&cmd->transport_lun_stop)) {
6708bb27 481 pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
c66ac9db 482 " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
e3d6f909 483 cmd->se_tfo->get_task_tag(cmd));
c66ac9db
NB
484
485 cmd->deferred_t_state = cmd->t_state;
486 cmd->t_state = TRANSPORT_DEFERRED_CMD;
a1d8b49a 487 atomic_set(&cmd->t_transport_active, 0);
c66ac9db
NB
488 if (transport_off == 2)
489 transport_all_task_dev_remove_state(cmd);
a1d8b49a 490 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 491
a1d8b49a 492 complete(&cmd->transport_lun_stop_comp);
c66ac9db
NB
493 return 1;
494 }
495 /*
496 * Determine if frontend context caller is requesting the stopping of
e3d6f909 497 * this command for frontend exceptions.
c66ac9db 498 */
a1d8b49a 499 if (atomic_read(&cmd->t_transport_stop)) {
6708bb27 500 pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
c66ac9db 501 " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
e3d6f909 502 cmd->se_tfo->get_task_tag(cmd));
c66ac9db
NB
503
504 cmd->deferred_t_state = cmd->t_state;
505 cmd->t_state = TRANSPORT_DEFERRED_CMD;
506 if (transport_off == 2)
507 transport_all_task_dev_remove_state(cmd);
508
509 /*
510 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
511 * to FE.
512 */
513 if (transport_off == 2)
514 cmd->se_lun = NULL;
a1d8b49a 515 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 516
a1d8b49a 517 complete(&cmd->t_transport_stop_comp);
c66ac9db
NB
518 return 1;
519 }
520 if (transport_off) {
a1d8b49a 521 atomic_set(&cmd->t_transport_active, 0);
c66ac9db
NB
522 if (transport_off == 2) {
523 transport_all_task_dev_remove_state(cmd);
524 /*
525 * Clear struct se_cmd->se_lun before the transport_off == 2
526 * handoff to fabric module.
527 */
528 cmd->se_lun = NULL;
529 /*
530 * Some fabric modules like tcm_loop can release
25985edc 531 * their internally allocated I/O reference and
c66ac9db
NB
532 * struct se_cmd now.
533 */
e3d6f909 534 if (cmd->se_tfo->check_stop_free != NULL) {
c66ac9db 535 spin_unlock_irqrestore(
a1d8b49a 536 &cmd->t_state_lock, flags);
c66ac9db 537
e3d6f909 538 cmd->se_tfo->check_stop_free(cmd);
c66ac9db
NB
539 return 1;
540 }
541 }
a1d8b49a 542 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
543
544 return 0;
545 } else if (t_state)
546 cmd->t_state = t_state;
a1d8b49a 547 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
548
549 return 0;
550}
551
552static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
553{
554 return transport_cmd_check_stop(cmd, 2, 0);
555}
556
557static void transport_lun_remove_cmd(struct se_cmd *cmd)
558{
e3d6f909 559 struct se_lun *lun = cmd->se_lun;
c66ac9db
NB
560 unsigned long flags;
561
562 if (!lun)
563 return;
564
a1d8b49a 565 spin_lock_irqsave(&cmd->t_state_lock, flags);
6708bb27 566 if (!atomic_read(&cmd->transport_dev_active)) {
a1d8b49a 567 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
568 goto check_lun;
569 }
a1d8b49a 570 atomic_set(&cmd->transport_dev_active, 0);
c66ac9db 571 transport_all_task_dev_remove_state(cmd);
a1d8b49a 572 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 573
c66ac9db
NB
574
575check_lun:
576 spin_lock_irqsave(&lun->lun_cmd_lock, flags);
a1d8b49a 577 if (atomic_read(&cmd->transport_lun_active)) {
5951146d 578 list_del(&cmd->se_lun_node);
a1d8b49a 579 atomic_set(&cmd->transport_lun_active, 0);
c66ac9db 580#if 0
6708bb27 581 pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n",
e3d6f909 582 cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
c66ac9db
NB
583#endif
584 }
585 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
586}
587
588void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
589{
8dc52b54
NB
590 if (!cmd->se_tmr_req)
591 transport_lun_remove_cmd(cmd);
c66ac9db
NB
592
593 if (transport_cmd_check_stop_to_fabric(cmd))
594 return;
77039d1e
NB
595 if (remove) {
596 transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
e6a2573f 597 transport_put_cmd(cmd);
77039d1e 598 }
c66ac9db
NB
599}
600
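/*
 * Queue a se_cmd onto its device's dev_queue_obj for the processing thread:
 * QUEUE_FULL retries (SCF_EMULATE_QUEUE_FULL) are re-added at the head of
 * qobj_list, everything else goes to the tail, and the processing thread is
 * woken via qobj->thread_wq.
 */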
5951146d 601static void transport_add_cmd_to_queue(
c66ac9db
NB
602 struct se_cmd *cmd,
603 int t_state)
604{
605 struct se_device *dev = cmd->se_dev;
e3d6f909 606 struct se_queue_obj *qobj = &dev->dev_queue_obj;
c66ac9db
NB
607 unsigned long flags;
608
c66ac9db 609 if (t_state) {
a1d8b49a 610 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 611 cmd->t_state = t_state;
a1d8b49a
AG
612 atomic_set(&cmd->t_transport_active, 1);
613 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
614 }
615
616 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
79a7fef2
RD
617
618 /* If the cmd is already on the list, remove it before we add it */
619 if (!list_empty(&cmd->se_queue_node))
620 list_del(&cmd->se_queue_node);
621 else
622 atomic_inc(&qobj->queue_cnt);
623
07bde79a
NB
624 if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
625 cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
626 list_add(&cmd->se_queue_node, &qobj->qobj_list);
627 } else
628 list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
79a7fef2 629 atomic_set(&cmd->t_transport_queue_active, 1);
c66ac9db
NB
630 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
631
c66ac9db 632 wake_up_interruptible(&qobj->thread_wq);
c66ac9db
NB
633}
634
5951146d
AG
635static struct se_cmd *
636transport_get_cmd_from_queue(struct se_queue_obj *qobj)
c66ac9db 637{
5951146d 638 struct se_cmd *cmd;
c66ac9db
NB
639 unsigned long flags;
640
641 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
642 if (list_empty(&qobj->qobj_list)) {
643 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
644 return NULL;
645 }
5951146d 646 cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
c66ac9db 647
79a7fef2 648 atomic_set(&cmd->t_transport_queue_active, 0);
c66ac9db 649
79a7fef2 650 list_del_init(&cmd->se_queue_node);
c66ac9db
NB
651 atomic_dec(&qobj->queue_cnt);
652 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
653
5951146d 654 return cmd;
c66ac9db
NB
655}
656
657static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
658 struct se_queue_obj *qobj)
659{
c66ac9db
NB
660 unsigned long flags;
661
662 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
6708bb27 663 if (!atomic_read(&cmd->t_transport_queue_active)) {
c66ac9db
NB
664 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
665 return;
666 }
79a7fef2
RD
667 atomic_set(&cmd->t_transport_queue_active, 0);
668 atomic_dec(&qobj->queue_cnt);
669 list_del_init(&cmd->se_queue_node);
c66ac9db
NB
670 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
671
a1d8b49a 672 if (atomic_read(&cmd->t_transport_queue_active)) {
6708bb27 673 pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
e3d6f909 674 cmd->se_tfo->get_task_tag(cmd),
a1d8b49a 675 atomic_read(&cmd->t_transport_queue_active));
c66ac9db
NB
676 }
677}
678
679/*
680 * Completion function used by TCM subsystem plugins (such as FILEIO)
681 * for queueing up response from struct se_subsystem_api->do_task()
682 */
683void transport_complete_sync_cache(struct se_cmd *cmd, int good)
684{
a1d8b49a 685 struct se_task *task = list_entry(cmd->t_task_list.next,
c66ac9db
NB
686 struct se_task, t_list);
687
688 if (good) {
689 cmd->scsi_status = SAM_STAT_GOOD;
690 task->task_scsi_status = GOOD;
691 } else {
692 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
693 task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
e3d6f909 694 task->task_se_cmd->transport_error_status =
c66ac9db
NB
695 PYX_TRANSPORT_ILLEGAL_REQUEST;
696 }
697
698 transport_complete_task(task, good);
699}
700EXPORT_SYMBOL(transport_complete_sync_cache);
701
702/* transport_complete_task():
703 *
704 * Called from interrupt and non interrupt context depending
705 * on the transport plugin.
706 */
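/*
 * Backends invoke this once per struct se_task, either directly or via
 * transport_complete_sync_cache() above.  The se_cmd is only queued back to
 * the processing thread (TRANSPORT_COMPLETE_OK/_FAILURE, or _TIMEOUT when the
 * task timeout handler has fired) once the matching t_task_cdbs_left /
 * t_task_cdbs_timeout_left counter drops to zero.
 */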
707void transport_complete_task(struct se_task *task, int success)
708{
e3d6f909 709 struct se_cmd *cmd = task->task_se_cmd;
42bf829e 710 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
711 int t_state;
712 unsigned long flags;
713#if 0
6708bb27 714 pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
a1d8b49a 715 cmd->t_task_cdb[0], dev);
c66ac9db 716#endif
e3d6f909 717 if (dev)
c66ac9db 718 atomic_inc(&dev->depth_left);
c66ac9db 719
a1d8b49a 720 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db
NB
721 atomic_set(&task->task_active, 0);
722
723 /*
724 * See if any sense data exists, if so set the TASK_SENSE flag.
725 * Also check for any other post completion work that needs to be
726 * done by the plugins.
727 */
728 if (dev && dev->transport->transport_complete) {
729 if (dev->transport->transport_complete(task) != 0) {
730 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
731 task->task_sense = 1;
732 success = 1;
733 }
734 }
735
736 /*
737 * See if we are waiting for outstanding struct se_task
738 * to complete for an exception condition
739 */
740 if (atomic_read(&task->task_stop)) {
741 /*
a1d8b49a 742 * Decrement cmd->t_se_count if this task had
c66ac9db
NB
743 * previously thrown its timeout exception handler.
744 */
745 if (atomic_read(&task->task_timeout)) {
a1d8b49a 746 atomic_dec(&cmd->t_se_count);
c66ac9db
NB
747 atomic_set(&task->task_timeout, 0);
748 }
a1d8b49a 749 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
750
751 complete(&task->task_stop_comp);
752 return;
753 }
754 /*
755 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
756 * left counter to determine when the struct se_cmd is ready to be queued to
757 * the processing thread.
758 */
759 if (atomic_read(&task->task_timeout)) {
6708bb27
AG
760 if (!atomic_dec_and_test(
761 &cmd->t_task_cdbs_timeout_left)) {
a1d8b49a 762 spin_unlock_irqrestore(&cmd->t_state_lock,
c66ac9db
NB
763 flags);
764 return;
765 }
766 t_state = TRANSPORT_COMPLETE_TIMEOUT;
a1d8b49a 767 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
768
769 transport_add_cmd_to_queue(cmd, t_state);
770 return;
771 }
a1d8b49a 772 atomic_dec(&cmd->t_task_cdbs_timeout_left);
c66ac9db
NB
773
774 /*
775 * Decrement the outstanding t_task_cdbs_left count. The last
776 * struct se_task from struct se_cmd will complete itself into the
777 * device queue depending upon int success.
778 */
6708bb27 779 if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
c66ac9db 780 if (!success)
a1d8b49a 781 cmd->t_tasks_failed = 1;
c66ac9db 782
a1d8b49a 783 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
784 return;
785 }
786
a1d8b49a 787 if (!success || cmd->t_tasks_failed) {
c66ac9db
NB
788 t_state = TRANSPORT_COMPLETE_FAILURE;
789 if (!task->task_error_status) {
790 task->task_error_status =
791 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
792 cmd->transport_error_status =
793 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
794 }
795 } else {
a1d8b49a 796 atomic_set(&cmd->t_transport_complete, 1);
c66ac9db
NB
797 t_state = TRANSPORT_COMPLETE_OK;
798 }
a1d8b49a 799 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
800
801 transport_add_cmd_to_queue(cmd, t_state);
802}
803EXPORT_SYMBOL(transport_complete_task);
804
805/*
 806 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 807 * struct se_task list is ready to be added to the active execution list
 808 * of the struct se_device.
 809 *
 810 * Called with se_dev_t->execute_task_lock held.
 811 */
812static inline int transport_add_task_check_sam_attr(
813 struct se_task *task,
814 struct se_task *task_prev,
815 struct se_device *dev)
816{
817 /*
818 * No SAM Task attribute emulation enabled, add to tail of
819 * execution queue
820 */
821 if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
822 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
823 return 0;
824 }
825 /*
826 * HEAD_OF_QUEUE attribute for received CDB, which means
827 * the first task that is associated with a struct se_cmd goes to
828 * head of the struct se_device->execute_task_list, and task_prev
829 * after that for each subsequent task
830 */
e66ecd50 831 if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
c66ac9db
NB
832 list_add(&task->t_execute_list,
833 (task_prev != NULL) ?
834 &task_prev->t_execute_list :
835 &dev->execute_task_list);
836
6708bb27 837 pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
c66ac9db 838 " in execution queue\n",
6708bb27 839 task->task_se_cmd->t_task_cdb[0]);
c66ac9db
NB
840 return 1;
841 }
842 /*
843 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
 844 * transitioned from Dormant -> Active state, and are added to the end
845 * of the struct se_device->execute_task_list
846 */
847 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
848 return 0;
849}
850
851/* __transport_add_task_to_execute_queue():
852 *
 853 * Called with se_dev_t->execute_task_lock held.
854 */
855static void __transport_add_task_to_execute_queue(
856 struct se_task *task,
857 struct se_task *task_prev,
858 struct se_device *dev)
859{
860 int head_of_queue;
861
862 head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
863 atomic_inc(&dev->execute_tasks);
864
865 if (atomic_read(&task->task_state_active))
866 return;
867 /*
868 * Determine if this task needs to go to HEAD_OF_QUEUE for the
869 * state list as well. Running with SAM Task Attribute emulation
870 * will always return head_of_queue == 0 here
871 */
872 if (head_of_queue)
873 list_add(&task->t_state_list, (task_prev) ?
874 &task_prev->t_state_list :
875 &dev->state_task_list);
876 else
877 list_add_tail(&task->t_state_list, &dev->state_task_list);
878
879 atomic_set(&task->task_state_active, 1);
880
6708bb27 881 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
e3d6f909 882 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
c66ac9db
NB
883 task, dev);
884}
885
886static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
887{
42bf829e 888 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
889 struct se_task *task;
890 unsigned long flags;
891
a1d8b49a
AG
892 spin_lock_irqsave(&cmd->t_state_lock, flags);
893 list_for_each_entry(task, &cmd->t_task_list, t_list) {
c66ac9db
NB
894 if (atomic_read(&task->task_state_active))
895 continue;
896
897 spin_lock(&dev->execute_task_lock);
898 list_add_tail(&task->t_state_list, &dev->state_task_list);
899 atomic_set(&task->task_state_active, 1);
900
6708bb27
AG
901 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
902 task->task_se_cmd->se_tfo->get_task_tag(
c66ac9db
NB
903 task->task_se_cmd), task, dev);
904
905 spin_unlock(&dev->execute_task_lock);
906 }
a1d8b49a 907 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
908}
909
910static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
911{
5951146d 912 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
913 struct se_task *task, *task_prev = NULL;
914 unsigned long flags;
915
916 spin_lock_irqsave(&dev->execute_task_lock, flags);
a1d8b49a 917 list_for_each_entry(task, &cmd->t_task_list, t_list) {
c66ac9db
NB
918 if (atomic_read(&task->task_execute_queue))
919 continue;
920 /*
921 * __transport_add_task_to_execute_queue() handles the
922 * SAM Task Attribute emulation if enabled
923 */
924 __transport_add_task_to_execute_queue(task, task_prev, dev);
925 atomic_set(&task->task_execute_queue, 1);
926 task_prev = task;
927 }
928 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
c66ac9db
NB
929}
930
931/* transport_remove_task_from_execute_queue():
932 *
933 *
934 */
52208ae3 935void transport_remove_task_from_execute_queue(
c66ac9db
NB
936 struct se_task *task,
937 struct se_device *dev)
938{
939 unsigned long flags;
940
af57c3ac
NB
941 if (atomic_read(&task->task_execute_queue) == 0) {
942 dump_stack();
943 return;
944 }
945
c66ac9db
NB
946 spin_lock_irqsave(&dev->execute_task_lock, flags);
947 list_del(&task->t_execute_list);
af57c3ac 948 atomic_set(&task->task_execute_queue, 0);
c66ac9db
NB
949 atomic_dec(&dev->execute_tasks);
950 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
951}
952
07bde79a
NB
953/*
954 * Handle QUEUE_FULL / -EAGAIN status
955 */
956
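/*
 * Commands are placed on dev->qf_cmd_list when a fabric queue callback
 * signals QUEUE_FULL/-EAGAIN (presumably via transport_handle_queue_full(),
 * declared above but defined elsewhere); this work item simply re-queues
 * them to the processing thread in their saved t_state.
 */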
957static void target_qf_do_work(struct work_struct *work)
958{
959 struct se_device *dev = container_of(work, struct se_device,
960 qf_work_queue);
bcac364a 961 LIST_HEAD(qf_cmd_list);
07bde79a
NB
962 struct se_cmd *cmd, *cmd_tmp;
963
964 spin_lock_irq(&dev->qf_cmd_lock);
bcac364a
RD
965 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
966 spin_unlock_irq(&dev->qf_cmd_lock);
07bde79a 967
bcac364a 968 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
07bde79a
NB
969 list_del(&cmd->se_qf_node);
970 atomic_dec(&dev->dev_qf_count);
971 smp_mb__after_atomic_dec();
07bde79a 972
6708bb27 973 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
07bde79a
NB
974 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
975 (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
976 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
977 : "UNKNOWN");
978 /*
979 * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd
980 * has been added to head of queue
981 */
982 transport_add_cmd_to_queue(cmd, cmd->t_state);
07bde79a 983 }
07bde79a
NB
984}
985
c66ac9db
NB
986unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
987{
988 switch (cmd->data_direction) {
989 case DMA_NONE:
990 return "NONE";
991 case DMA_FROM_DEVICE:
992 return "READ";
993 case DMA_TO_DEVICE:
994 return "WRITE";
995 case DMA_BIDIRECTIONAL:
996 return "BIDI";
997 default:
998 break;
999 }
1000
1001 return "UNKNOWN";
1002}
1003
1004void transport_dump_dev_state(
1005 struct se_device *dev,
1006 char *b,
1007 int *bl)
1008{
1009 *bl += sprintf(b + *bl, "Status: ");
1010 switch (dev->dev_status) {
1011 case TRANSPORT_DEVICE_ACTIVATED:
1012 *bl += sprintf(b + *bl, "ACTIVATED");
1013 break;
1014 case TRANSPORT_DEVICE_DEACTIVATED:
1015 *bl += sprintf(b + *bl, "DEACTIVATED");
1016 break;
1017 case TRANSPORT_DEVICE_SHUTDOWN:
1018 *bl += sprintf(b + *bl, "SHUTDOWN");
1019 break;
1020 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
1021 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
1022 *bl += sprintf(b + *bl, "OFFLINE");
1023 break;
1024 default:
1025 *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
1026 break;
1027 }
1028
1029 *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
1030 atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
1031 dev->queue_depth);
1032 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
e3d6f909 1033 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
c66ac9db
NB
1034 *bl += sprintf(b + *bl, " ");
1035}
1036
c66ac9db
NB
1037void transport_dump_vpd_proto_id(
1038 struct t10_vpd *vpd,
1039 unsigned char *p_buf,
1040 int p_buf_len)
1041{
1042 unsigned char buf[VPD_TMP_BUF_SIZE];
1043 int len;
1044
1045 memset(buf, 0, VPD_TMP_BUF_SIZE);
1046 len = sprintf(buf, "T10 VPD Protocol Identifier: ");
1047
1048 switch (vpd->protocol_identifier) {
1049 case 0x00:
1050 sprintf(buf+len, "Fibre Channel\n");
1051 break;
1052 case 0x10:
1053 sprintf(buf+len, "Parallel SCSI\n");
1054 break;
1055 case 0x20:
1056 sprintf(buf+len, "SSA\n");
1057 break;
1058 case 0x30:
1059 sprintf(buf+len, "IEEE 1394\n");
1060 break;
1061 case 0x40:
1062 sprintf(buf+len, "SCSI Remote Direct Memory Access"
1063 " Protocol\n");
1064 break;
1065 case 0x50:
1066 sprintf(buf+len, "Internet SCSI (iSCSI)\n");
1067 break;
1068 case 0x60:
1069 sprintf(buf+len, "SAS Serial SCSI Protocol\n");
1070 break;
1071 case 0x70:
1072 sprintf(buf+len, "Automation/Drive Interface Transport"
1073 " Protocol\n");
1074 break;
1075 case 0x80:
1076 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1077 break;
1078 default:
1079 sprintf(buf+len, "Unknown 0x%02x\n",
1080 vpd->protocol_identifier);
1081 break;
1082 }
1083
1084 if (p_buf)
1085 strncpy(p_buf, buf, p_buf_len);
1086 else
6708bb27 1087 pr_debug("%s", buf);
c66ac9db
NB
1088}
1089
1090void
1091transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1092{
1093 /*
1094 * Check if the Protocol Identifier Valid (PIV) bit is set..
1095 *
1096 * from spc3r23.pdf section 7.5.1
1097 */
1098 if (page_83[1] & 0x80) {
1099 vpd->protocol_identifier = (page_83[0] & 0xf0);
1100 vpd->protocol_identifier_set = 1;
1101 transport_dump_vpd_proto_id(vpd, NULL, 0);
1102 }
1103}
1104EXPORT_SYMBOL(transport_set_vpd_proto_id);
1105
1106int transport_dump_vpd_assoc(
1107 struct t10_vpd *vpd,
1108 unsigned char *p_buf,
1109 int p_buf_len)
1110{
1111 unsigned char buf[VPD_TMP_BUF_SIZE];
e3d6f909
AG
1112 int ret = 0;
1113 int len;
c66ac9db
NB
1114
1115 memset(buf, 0, VPD_TMP_BUF_SIZE);
1116 len = sprintf(buf, "T10 VPD Identifier Association: ");
1117
1118 switch (vpd->association) {
1119 case 0x00:
1120 sprintf(buf+len, "addressed logical unit\n");
1121 break;
1122 case 0x10:
1123 sprintf(buf+len, "target port\n");
1124 break;
1125 case 0x20:
1126 sprintf(buf+len, "SCSI target device\n");
1127 break;
1128 default:
1129 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
e3d6f909 1130 ret = -EINVAL;
c66ac9db
NB
1131 break;
1132 }
1133
1134 if (p_buf)
1135 strncpy(p_buf, buf, p_buf_len);
1136 else
6708bb27 1137 pr_debug("%s", buf);
c66ac9db
NB
1138
1139 return ret;
1140}
1141
1142int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1143{
1144 /*
1145 * The VPD identification association..
1146 *
1147 * from spc3r23.pdf Section 7.6.3.1 Table 297
1148 */
1149 vpd->association = (page_83[1] & 0x30);
1150 return transport_dump_vpd_assoc(vpd, NULL, 0);
1151}
1152EXPORT_SYMBOL(transport_set_vpd_assoc);
1153
1154int transport_dump_vpd_ident_type(
1155 struct t10_vpd *vpd,
1156 unsigned char *p_buf,
1157 int p_buf_len)
1158{
1159 unsigned char buf[VPD_TMP_BUF_SIZE];
e3d6f909
AG
1160 int ret = 0;
1161 int len;
c66ac9db
NB
1162
1163 memset(buf, 0, VPD_TMP_BUF_SIZE);
1164 len = sprintf(buf, "T10 VPD Identifier Type: ");
1165
1166 switch (vpd->device_identifier_type) {
1167 case 0x00:
1168 sprintf(buf+len, "Vendor specific\n");
1169 break;
1170 case 0x01:
1171 sprintf(buf+len, "T10 Vendor ID based\n");
1172 break;
1173 case 0x02:
1174 sprintf(buf+len, "EUI-64 based\n");
1175 break;
1176 case 0x03:
1177 sprintf(buf+len, "NAA\n");
1178 break;
1179 case 0x04:
1180 sprintf(buf+len, "Relative target port identifier\n");
1181 break;
1182 case 0x08:
1183 sprintf(buf+len, "SCSI name string\n");
1184 break;
1185 default:
1186 sprintf(buf+len, "Unsupported: 0x%02x\n",
1187 vpd->device_identifier_type);
e3d6f909 1188 ret = -EINVAL;
c66ac9db
NB
1189 break;
1190 }
1191
e3d6f909
AG
1192 if (p_buf) {
1193 if (p_buf_len < strlen(buf)+1)
1194 return -EINVAL;
c66ac9db 1195 strncpy(p_buf, buf, p_buf_len);
e3d6f909 1196 } else {
6708bb27 1197 pr_debug("%s", buf);
e3d6f909 1198 }
c66ac9db
NB
1199
1200 return ret;
1201}
1202
1203int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1204{
1205 /*
1206 * The VPD identifier type..
1207 *
1208 * from spc3r23.pdf Section 7.6.3.1 Table 298
1209 */
1210 vpd->device_identifier_type = (page_83[1] & 0x0f);
1211 return transport_dump_vpd_ident_type(vpd, NULL, 0);
1212}
1213EXPORT_SYMBOL(transport_set_vpd_ident_type);
1214
1215int transport_dump_vpd_ident(
1216 struct t10_vpd *vpd,
1217 unsigned char *p_buf,
1218 int p_buf_len)
1219{
1220 unsigned char buf[VPD_TMP_BUF_SIZE];
1221 int ret = 0;
1222
1223 memset(buf, 0, VPD_TMP_BUF_SIZE);
1224
1225 switch (vpd->device_identifier_code_set) {
1226 case 0x01: /* Binary */
1227 sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
1228 &vpd->device_identifier[0]);
1229 break;
1230 case 0x02: /* ASCII */
1231 sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
1232 &vpd->device_identifier[0]);
1233 break;
1234 case 0x03: /* UTF-8 */
1235 sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
1236 &vpd->device_identifier[0]);
1237 break;
1238 default:
1239 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1240 " 0x%02x", vpd->device_identifier_code_set);
e3d6f909 1241 ret = -EINVAL;
c66ac9db
NB
1242 break;
1243 }
1244
1245 if (p_buf)
1246 strncpy(p_buf, buf, p_buf_len);
1247 else
6708bb27 1248 pr_debug("%s", buf);
c66ac9db
NB
1249
1250 return ret;
1251}
1252
1253int
1254transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1255{
1256 static const char hex_str[] = "0123456789abcdef";
 1257 int j = 0, i = 4; /* offset to start of the identifier */
1258
1259 /*
1260 * The VPD Code Set (encoding)
1261 *
1262 * from spc3r23.pdf Section 7.6.3.1 Table 296
1263 */
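	/*
	 * For example, with the binary (0x01) code set each identifier byte
	 * is expanded to two hex characters below: a page_83[] byte of 0xA5
	 * becomes 'a' followed by '5' in vpd->device_identifier[].
	 */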
1264 vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1265 switch (vpd->device_identifier_code_set) {
1266 case 0x01: /* Binary */
1267 vpd->device_identifier[j++] =
1268 hex_str[vpd->device_identifier_type];
1269 while (i < (4 + page_83[3])) {
1270 vpd->device_identifier[j++] =
1271 hex_str[(page_83[i] & 0xf0) >> 4];
1272 vpd->device_identifier[j++] =
1273 hex_str[page_83[i] & 0x0f];
1274 i++;
1275 }
1276 break;
1277 case 0x02: /* ASCII */
1278 case 0x03: /* UTF-8 */
1279 while (i < (4 + page_83[3]))
1280 vpd->device_identifier[j++] = page_83[i++];
1281 break;
1282 default:
1283 break;
1284 }
1285
1286 return transport_dump_vpd_ident(vpd, NULL, 0);
1287}
1288EXPORT_SYMBOL(transport_set_vpd_ident);
1289
1290static void core_setup_task_attr_emulation(struct se_device *dev)
1291{
1292 /*
1293 * If this device is from Target_Core_Mod/pSCSI, disable the
1294 * SAM Task Attribute emulation.
1295 *
 1296 * This is currently not available in upstream Linux/SCSI Target
1297 * mode code, and is assumed to be disabled while using TCM/pSCSI.
1298 */
e3d6f909 1299 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
c66ac9db
NB
1300 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1301 return;
1302 }
1303
1304 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
6708bb27 1305 pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
e3d6f909
AG
1306 " device\n", dev->transport->name,
1307 dev->transport->get_device_rev(dev));
c66ac9db
NB
1308}
1309
1310static void scsi_dump_inquiry(struct se_device *dev)
1311{
e3d6f909 1312 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
c66ac9db
NB
1313 int i, device_type;
1314 /*
1315 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1316 */
6708bb27 1317 pr_debug(" Vendor: ");
c66ac9db
NB
1318 for (i = 0; i < 8; i++)
1319 if (wwn->vendor[i] >= 0x20)
6708bb27 1320 pr_debug("%c", wwn->vendor[i]);
c66ac9db 1321 else
6708bb27 1322 pr_debug(" ");
c66ac9db 1323
6708bb27 1324 pr_debug(" Model: ");
c66ac9db
NB
1325 for (i = 0; i < 16; i++)
1326 if (wwn->model[i] >= 0x20)
6708bb27 1327 pr_debug("%c", wwn->model[i]);
c66ac9db 1328 else
6708bb27 1329 pr_debug(" ");
c66ac9db 1330
6708bb27 1331 pr_debug(" Revision: ");
c66ac9db
NB
1332 for (i = 0; i < 4; i++)
1333 if (wwn->revision[i] >= 0x20)
6708bb27 1334 pr_debug("%c", wwn->revision[i]);
c66ac9db 1335 else
6708bb27 1336 pr_debug(" ");
c66ac9db 1337
6708bb27 1338 pr_debug("\n");
c66ac9db 1339
e3d6f909 1340 device_type = dev->transport->get_device_type(dev);
6708bb27
AG
1341 pr_debug(" Type: %s ", scsi_device_type(device_type));
1342 pr_debug(" ANSI SCSI revision: %02x\n",
e3d6f909 1343 dev->transport->get_device_rev(dev));
c66ac9db
NB
1344}
1345
1346struct se_device *transport_add_device_to_core_hba(
1347 struct se_hba *hba,
1348 struct se_subsystem_api *transport,
1349 struct se_subsystem_dev *se_dev,
1350 u32 device_flags,
1351 void *transport_dev,
1352 struct se_dev_limits *dev_limits,
1353 const char *inquiry_prod,
1354 const char *inquiry_rev)
1355{
12a18bdc 1356 int force_pt;
c66ac9db
NB
1357 struct se_device *dev;
1358
1359 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
6708bb27
AG
1360 if (!dev) {
1361 pr_err("Unable to allocate memory for se_dev_t\n");
c66ac9db
NB
1362 return NULL;
1363 }
c66ac9db 1364
e3d6f909 1365 transport_init_queue_obj(&dev->dev_queue_obj);
c66ac9db
NB
1366 dev->dev_flags = device_flags;
1367 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
5951146d 1368 dev->dev_ptr = transport_dev;
c66ac9db
NB
1369 dev->se_hba = hba;
1370 dev->se_sub_dev = se_dev;
1371 dev->transport = transport;
1372 atomic_set(&dev->active_cmds, 0);
1373 INIT_LIST_HEAD(&dev->dev_list);
1374 INIT_LIST_HEAD(&dev->dev_sep_list);
1375 INIT_LIST_HEAD(&dev->dev_tmr_list);
1376 INIT_LIST_HEAD(&dev->execute_task_list);
1377 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1378 INIT_LIST_HEAD(&dev->ordered_cmd_list);
1379 INIT_LIST_HEAD(&dev->state_task_list);
07bde79a 1380 INIT_LIST_HEAD(&dev->qf_cmd_list);
c66ac9db
NB
1381 spin_lock_init(&dev->execute_task_lock);
1382 spin_lock_init(&dev->delayed_cmd_lock);
1383 spin_lock_init(&dev->ordered_cmd_lock);
1384 spin_lock_init(&dev->state_task_lock);
1385 spin_lock_init(&dev->dev_alua_lock);
1386 spin_lock_init(&dev->dev_reservation_lock);
1387 spin_lock_init(&dev->dev_status_lock);
1388 spin_lock_init(&dev->dev_status_thr_lock);
1389 spin_lock_init(&dev->se_port_lock);
1390 spin_lock_init(&dev->se_tmr_lock);
07bde79a 1391 spin_lock_init(&dev->qf_cmd_lock);
c66ac9db
NB
1392
1393 dev->queue_depth = dev_limits->queue_depth;
1394 atomic_set(&dev->depth_left, dev->queue_depth);
1395 atomic_set(&dev->dev_ordered_id, 0);
1396
1397 se_dev_set_default_attribs(dev, dev_limits);
1398
1399 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1400 dev->creation_time = get_jiffies_64();
1401 spin_lock_init(&dev->stats_lock);
1402
1403 spin_lock(&hba->device_lock);
1404 list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1405 hba->dev_count++;
1406 spin_unlock(&hba->device_lock);
1407 /*
1408 * Setup the SAM Task Attribute emulation for struct se_device
1409 */
1410 core_setup_task_attr_emulation(dev);
1411 /*
1412 * Force PR and ALUA passthrough emulation with internal object use.
1413 */
1414 force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1415 /*
1416 * Setup the Reservations infrastructure for struct se_device
1417 */
1418 core_setup_reservations(dev, force_pt);
1419 /*
1420 * Setup the Asymmetric Logical Unit Assignment for struct se_device
1421 */
1422 if (core_setup_alua(dev, force_pt) < 0)
1423 goto out;
1424
1425 /*
1426 * Startup the struct se_device processing thread
1427 */
1428 dev->process_thread = kthread_run(transport_processing_thread, dev,
e3d6f909 1429 "LIO_%s", dev->transport->name);
c66ac9db 1430 if (IS_ERR(dev->process_thread)) {
6708bb27 1431 pr_err("Unable to create kthread: LIO_%s\n",
e3d6f909 1432 dev->transport->name);
c66ac9db
NB
1433 goto out;
1434 }
07bde79a
NB
1435 /*
1436 * Setup work_queue for QUEUE_FULL
1437 */
1438 INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
c66ac9db
NB
1439 /*
1440 * Preload the initial INQUIRY const values if we are doing
1441 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1442 * passthrough because this is being provided by the backend LLD.
1443 * This is required so that transport_get_inquiry() copies these
1444 * originals once back into DEV_T10_WWN(dev) for the virtual device
1445 * setup.
1446 */
e3d6f909 1447 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
f22c1196 1448 if (!inquiry_prod || !inquiry_rev) {
6708bb27 1449 pr_err("All non TCM/pSCSI plugins require"
c66ac9db
NB
1450 " INQUIRY consts\n");
1451 goto out;
1452 }
1453
e3d6f909
AG
1454 strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1455 strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
1456 strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
c66ac9db
NB
1457 }
1458 scsi_dump_inquiry(dev);
1459
12a18bdc 1460 return dev;
c66ac9db 1461out:
c66ac9db
NB
1462 kthread_stop(dev->process_thread);
1463
1464 spin_lock(&hba->device_lock);
1465 list_del(&dev->dev_list);
1466 hba->dev_count--;
1467 spin_unlock(&hba->device_lock);
1468
1469 se_release_vpd_for_dev(dev);
1470
c66ac9db
NB
1471 kfree(dev);
1472
1473 return NULL;
1474}
1475EXPORT_SYMBOL(transport_add_device_to_core_hba);
1476
1477/* transport_generic_prepare_cdb():
1478 *
1479 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
1480 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 1481 * The point of this is that since we are mapping iSCSI LUNs to
 1482 * SCSI Target IDs, having a non-zero LUN in the CDB will throw the
1483 * devices and HBAs for a loop.
1484 */
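/*
 * Illustration: a CDB byte 1 of 0x25 (LUN 1 encoded in bits 7-5) is rewritten
 * to 0x05 by the default case below, while the opcodes listed in the switch
 * keep byte 1 untouched because those bits carry RDProtect/VRProtect or
 * SELF-TEST code values instead.
 */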
1485static inline void transport_generic_prepare_cdb(
1486 unsigned char *cdb)
1487{
1488 switch (cdb[0]) {
1489 case READ_10: /* SBC - RDProtect */
1490 case READ_12: /* SBC - RDProtect */
1491 case READ_16: /* SBC - RDProtect */
1492 case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1493 case VERIFY: /* SBC - VRProtect */
1494 case VERIFY_16: /* SBC - VRProtect */
1495 case WRITE_VERIFY: /* SBC - VRProtect */
1496 case WRITE_VERIFY_12: /* SBC - VRProtect */
1497 break;
1498 default:
1499 cdb[1] &= 0x1f; /* clear logical unit number */
1500 break;
1501 }
1502}
1503
1504static struct se_task *
1505transport_generic_get_task(struct se_cmd *cmd,
1506 enum dma_data_direction data_direction)
1507{
1508 struct se_task *task;
5951146d 1509 struct se_device *dev = cmd->se_dev;
c66ac9db 1510
6708bb27 1511 task = dev->transport->alloc_task(cmd->t_task_cdb);
c66ac9db 1512 if (!task) {
6708bb27 1513 pr_err("Unable to allocate struct se_task\n");
c66ac9db
NB
1514 return NULL;
1515 }
1516
1517 INIT_LIST_HEAD(&task->t_list);
1518 INIT_LIST_HEAD(&task->t_execute_list);
1519 INIT_LIST_HEAD(&task->t_state_list);
1520 init_completion(&task->task_stop_comp);
c66ac9db 1521 task->task_se_cmd = cmd;
c66ac9db
NB
1522 task->task_data_direction = data_direction;
1523
c66ac9db
NB
1524 return task;
1525}
1526
1527static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1528
c66ac9db
NB
1529/*
1530 * Used by fabric modules containing a local struct se_cmd within their
1531 * fabric dependent per I/O descriptor.
1532 */
1533void transport_init_se_cmd(
1534 struct se_cmd *cmd,
1535 struct target_core_fabric_ops *tfo,
1536 struct se_session *se_sess,
1537 u32 data_length,
1538 int data_direction,
1539 int task_attr,
1540 unsigned char *sense_buffer)
1541{
5951146d
AG
1542 INIT_LIST_HEAD(&cmd->se_lun_node);
1543 INIT_LIST_HEAD(&cmd->se_delayed_node);
1544 INIT_LIST_HEAD(&cmd->se_ordered_node);
07bde79a 1545 INIT_LIST_HEAD(&cmd->se_qf_node);
79a7fef2 1546 INIT_LIST_HEAD(&cmd->se_queue_node);
c66ac9db 1547
a1d8b49a
AG
1548 INIT_LIST_HEAD(&cmd->t_task_list);
1549 init_completion(&cmd->transport_lun_fe_stop_comp);
1550 init_completion(&cmd->transport_lun_stop_comp);
1551 init_completion(&cmd->t_transport_stop_comp);
1552 spin_lock_init(&cmd->t_state_lock);
1553 atomic_set(&cmd->transport_dev_active, 1);
c66ac9db
NB
1554
1555 cmd->se_tfo = tfo;
1556 cmd->se_sess = se_sess;
1557 cmd->data_length = data_length;
1558 cmd->data_direction = data_direction;
1559 cmd->sam_task_attr = task_attr;
1560 cmd->sense_buffer = sense_buffer;
1561}
1562EXPORT_SYMBOL(transport_init_se_cmd);
1563
1564static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1565{
1566 /*
1567 * Check if SAM Task Attribute emulation is enabled for this
1568 * struct se_device storage object
1569 */
5951146d 1570 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
c66ac9db
NB
1571 return 0;
1572
e66ecd50 1573 if (cmd->sam_task_attr == MSG_ACA_TAG) {
6708bb27 1574 pr_debug("SAM Task Attribute ACA"
c66ac9db 1575 " emulation is not supported\n");
e3d6f909 1576 return -EINVAL;
c66ac9db
NB
1577 }
1578 /*
1579 * Used to determine when ORDERED commands should go from
1580 * Dormant to Active status.
1581 */
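	/*
	 * se_ordered_id comes from the per-device dev_ordered_id counter, so
	 * each command gets a monotonically increasing ordinal that is logged
	 * below for SAM task attribute debugging.
	 */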
5951146d 1582 cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
c66ac9db 1583 smp_mb__after_atomic_inc();
6708bb27 1584 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
c66ac9db 1585 cmd->se_ordered_id, cmd->sam_task_attr,
6708bb27 1586 cmd->se_dev->transport->name);
c66ac9db
NB
1587 return 0;
1588}
1589
c66ac9db
NB
1590/* transport_generic_allocate_tasks():
1591 *
1592 * Called from fabric RX Thread.
1593 */
1594int transport_generic_allocate_tasks(
1595 struct se_cmd *cmd,
1596 unsigned char *cdb)
1597{
1598 int ret;
1599
1600 transport_generic_prepare_cdb(cdb);
c66ac9db
NB
1601 /*
1602 * Ensure that the received CDB is less than the max (252 + 8) bytes
1603 * for VARIABLE_LENGTH_CMD
1604 */
1605 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
6708bb27 1606 pr_err("Received SCSI CDB with command_size: %d that"
c66ac9db
NB
1607 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1608 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
e3d6f909 1609 return -EINVAL;
c66ac9db
NB
1610 }
1611 /*
1612 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1613 * allocate the additional extended CDB buffer now.. Otherwise
1614 * setup the pointer from __t_task_cdb to t_task_cdb.
1615 */
a1d8b49a
AG
1616 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1617 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
c66ac9db 1618 GFP_KERNEL);
6708bb27
AG
1619 if (!cmd->t_task_cdb) {
1620 pr_err("Unable to allocate cmd->t_task_cdb"
a1d8b49a 1621 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
c66ac9db 1622 scsi_command_size(cdb),
a1d8b49a 1623 (unsigned long)sizeof(cmd->__t_task_cdb));
e3d6f909 1624 return -ENOMEM;
c66ac9db
NB
1625 }
1626 } else
a1d8b49a 1627 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
c66ac9db 1628 /*
a1d8b49a 1629 * Copy the original CDB into cmd->t_task_cdb.
c66ac9db 1630 */
a1d8b49a 1631 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
c66ac9db
NB
1632 /*
1633 * Setup the received CDB based on SCSI defined opcodes and
1634 * perform unit attention, persistent reservations and ALUA
a1d8b49a 1635 * checks for virtual device backends. The cmd->t_task_cdb
c66ac9db
NB
1636 * pointer is expected to be setup before we reach this point.
1637 */
1638 ret = transport_generic_cmd_sequencer(cmd, cdb);
1639 if (ret < 0)
1640 return ret;
1641 /*
1642 * Check for SAM Task Attribute Emulation
1643 */
1644 if (transport_check_alloc_task_attr(cmd) < 0) {
1645 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1646 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
5951146d 1647 return -EINVAL;
c66ac9db
NB
1648 }
1649 spin_lock(&cmd->se_lun->lun_sep_lock);
1650 if (cmd->se_lun->lun_sep)
1651 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1652 spin_unlock(&cmd->se_lun->lun_sep_lock);
1653 return 0;
1654}
1655EXPORT_SYMBOL(transport_generic_allocate_tasks);
1656
dd8ae59d
NB
1657static void transport_generic_request_failure(struct se_cmd *,
1658 struct se_device *, int, int);
695434e1
NB
1659/*
1660 * Used by fabric module frontends to queue tasks directly.
 1661 * May only be used from process context.
1662 */
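/*
 * A minimal sketch of the expected caller flow (an assumption, not spelled
 * out in this file): the fabric sets up the descriptor via
 * transport_init_se_cmd() and transport_generic_allocate_tasks() before
 * calling this from process context; QUEUE_FULL and setup failures are then
 * handled below through transport_generic_request_failure().
 */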
1663int transport_handle_cdb_direct(
1664 struct se_cmd *cmd)
1665{
dd8ae59d
NB
1666 int ret;
1667
695434e1
NB
1668 if (!cmd->se_lun) {
1669 dump_stack();
6708bb27 1670 pr_err("cmd->se_lun is NULL\n");
695434e1
NB
1671 return -EINVAL;
1672 }
1673 if (in_interrupt()) {
1674 dump_stack();
6708bb27 1675 pr_err("transport_generic_handle_cdb cannot be called"
695434e1
NB
1676 " from interrupt context\n");
1677 return -EINVAL;
1678 }
dd8ae59d
NB
1679 /*
1680 * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
1681 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
1682 * in existing usage to ensure that outstanding descriptors are handled
d14921d6 1683 * correctly during shutdown via transport_wait_for_tasks()
dd8ae59d
NB
1684 *
1685 * Also, we don't take cmd->t_state_lock here as we only expect
1686 * this to be called for initial descriptor submission.
1687 */
1688 cmd->t_state = TRANSPORT_NEW_CMD;
1689 atomic_set(&cmd->t_transport_active, 1);
1690 /*
1691 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1692 * so follow TRANSPORT_NEW_CMD processing thread context usage
1693 * and call transport_generic_request_failure() if necessary..
1694 */
1695 ret = transport_generic_new_cmd(cmd);
1696 if (ret == -EAGAIN)
1697 return 0;
1698 else if (ret < 0) {
1699 cmd->transport_error_status = ret;
1700 transport_generic_request_failure(cmd, NULL, 0,
1701 (cmd->data_direction != DMA_TO_DEVICE));
1702 }
1703 return 0;
695434e1
NB
1704}
1705EXPORT_SYMBOL(transport_handle_cdb_direct);
1706
c66ac9db
NB
1707/*
1708 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1709 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
1710 * complete setup in TCM process context w/ TFO->new_cmd_map().
1711 */
1712int transport_generic_handle_cdb_map(
1713 struct se_cmd *cmd)
1714{
e3d6f909 1715 if (!cmd->se_lun) {
c66ac9db 1716 dump_stack();
6708bb27 1717 pr_err("cmd->se_lun is NULL\n");
e3d6f909 1718 return -EINVAL;
c66ac9db
NB
1719 }
1720
1721 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
1722 return 0;
1723}
1724EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1725
1726/* transport_generic_handle_data():
1727 *
1728 *
1729 */
1730int transport_generic_handle_data(
1731 struct se_cmd *cmd)
1732{
1733 /*
 1734 * For the software fabric case, we assume the nexus is being
1735 * failed/shutdown when signals are pending from the kthread context
1736 * caller, so we return a failure. For the HW target mode case running
1737 * in interrupt code, the signal_pending() check is skipped.
1738 */
1739 if (!in_interrupt() && signal_pending(current))
e3d6f909 1740 return -EPERM;
c66ac9db
NB
1741 /*
 1742 * If the received CDB has already been ABORTED by the generic
1743 * target engine, we now call transport_check_aborted_status()
 1744 * to queue any delayed TASK_ABORTED status for the received CDB to the
25985edc 1745 * fabric module as we are expecting no further incoming DATA OUT
c66ac9db
NB
1746 * sequences at this point.
1747 */
1748 if (transport_check_aborted_status(cmd, 1) != 0)
1749 return 0;
1750
1751 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
1752 return 0;
1753}
1754EXPORT_SYMBOL(transport_generic_handle_data);
1755
1756/* transport_generic_handle_tmr():
1757 *
1758 *
1759 */
1760int transport_generic_handle_tmr(
1761 struct se_cmd *cmd)
1762{
c66ac9db
NB
1763 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
1764 return 0;
1765}
1766EXPORT_SYMBOL(transport_generic_handle_tmr);
1767
f4366772
NB
1768void transport_generic_free_cmd_intr(
1769 struct se_cmd *cmd)
1770{
1771 transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
1772}
1773EXPORT_SYMBOL(transport_generic_free_cmd_intr);
1774
c66ac9db
NB
1775static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1776{
1777 struct se_task *task, *task_tmp;
1778 unsigned long flags;
1779 int ret = 0;
1780
6708bb27 1781 pr_debug("ITT[0x%08x] - Stopping tasks\n",
e3d6f909 1782 cmd->se_tfo->get_task_tag(cmd));
c66ac9db
NB
1783
1784 /*
1785 * No tasks remain in the execution queue
1786 */
a1d8b49a 1787 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 1788 list_for_each_entry_safe(task, task_tmp,
a1d8b49a 1789 &cmd->t_task_list, t_list) {
6708bb27 1790 pr_debug("task_no[%d] - Processing task %p\n",
c66ac9db
NB
1791 task->task_no, task);
1792 /*
1793 * If the struct se_task has not been sent and is not active,
1794 * remove the struct se_task from the execution queue.
1795 */
1796 if (!atomic_read(&task->task_sent) &&
1797 !atomic_read(&task->task_active)) {
a1d8b49a 1798 spin_unlock_irqrestore(&cmd->t_state_lock,
c66ac9db
NB
1799 flags);
1800 transport_remove_task_from_execute_queue(task,
42bf829e 1801 cmd->se_dev);
c66ac9db 1802
6708bb27 1803 pr_debug("task_no[%d] - Removed from execute queue\n",
c66ac9db 1804 task->task_no);
a1d8b49a 1805 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db
NB
1806 continue;
1807 }
1808
1809 /*
1810 * If the struct se_task is active, sleep until it is returned
1811 * from the plugin.
1812 */
1813 if (atomic_read(&task->task_active)) {
1814 atomic_set(&task->task_stop, 1);
a1d8b49a 1815 spin_unlock_irqrestore(&cmd->t_state_lock,
c66ac9db
NB
1816 flags);
1817
6708bb27 1818 pr_debug("task_no[%d] - Waiting to complete\n",
c66ac9db
NB
1819 task->task_no);
1820 wait_for_completion(&task->task_stop_comp);
6708bb27 1821 pr_debug("task_no[%d] - Stopped successfully\n",
c66ac9db
NB
1822 task->task_no);
1823
a1d8b49a
AG
1824 spin_lock_irqsave(&cmd->t_state_lock, flags);
1825 atomic_dec(&cmd->t_task_cdbs_left);
c66ac9db
NB
1826
1827 atomic_set(&task->task_active, 0);
1828 atomic_set(&task->task_stop, 0);
1829 } else {
6708bb27 1830 pr_debug("task_no[%d] - Did nothing\n", task->task_no);
c66ac9db
NB
1831 ret++;
1832 }
1833
1834 __transport_stop_task_timer(task, &flags);
1835 }
a1d8b49a 1836 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
1837
1838 return ret;
1839}
1840
c66ac9db
NB
1841/*
1842 * Handle SAM-esque emulation for generic transport request failures.
1843 */
1844static void transport_generic_request_failure(
1845 struct se_cmd *cmd,
1846 struct se_device *dev,
1847 int complete,
1848 int sc)
1849{
07bde79a
NB
1850 int ret = 0;
1851
6708bb27 1852 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
e3d6f909 1853 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
a1d8b49a 1854 cmd->t_task_cdb[0]);
6708bb27 1855 pr_debug("-----[ i_state: %d t_state/def_t_state:"
c66ac9db 1856 " %d/%d transport_error_status: %d\n",
e3d6f909 1857 cmd->se_tfo->get_cmd_state(cmd),
c66ac9db
NB
1858 cmd->t_state, cmd->deferred_t_state,
1859 cmd->transport_error_status);
6708bb27 1860 pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
c66ac9db
NB
1861 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
1862 " t_transport_active: %d t_transport_stop: %d"
6708bb27 1863 " t_transport_sent: %d\n", cmd->t_task_list_num,
a1d8b49a
AG
1864 atomic_read(&cmd->t_task_cdbs_left),
1865 atomic_read(&cmd->t_task_cdbs_sent),
1866 atomic_read(&cmd->t_task_cdbs_ex_left),
1867 atomic_read(&cmd->t_transport_active),
1868 atomic_read(&cmd->t_transport_stop),
1869 atomic_read(&cmd->t_transport_sent));
c66ac9db
NB
1870
1871 transport_stop_all_task_timers(cmd);
1872
1873 if (dev)
e3d6f909 1874 atomic_inc(&dev->depth_left);
c66ac9db
NB
1875 /*
1876 * For SAM Task Attribute emulation for failed struct se_cmd
1877 */
1878 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1879 transport_complete_task_attr(cmd);
1880
1881 if (complete) {
1882 transport_direct_request_timeout(cmd);
1883 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
1884 }
1885
1886 switch (cmd->transport_error_status) {
1887 case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
1888 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1889 break;
1890 case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
1891 cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
1892 break;
1893 case PYX_TRANSPORT_INVALID_CDB_FIELD:
1894 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1895 break;
1896 case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
1897 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1898 break;
1899 case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
1900 if (!sc)
1901 transport_new_cmd_failure(cmd);
1902 /*
1903 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
1904 * we force this session to fall back to session
1905 * recovery.
1906 */
e3d6f909
AG
1907 cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
1908 cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
c66ac9db
NB
1909
1910 goto check_stop;
1911 case PYX_TRANSPORT_LU_COMM_FAILURE:
1912 case PYX_TRANSPORT_ILLEGAL_REQUEST:
1913 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1914 break;
1915 case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
1916 cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
1917 break;
1918 case PYX_TRANSPORT_WRITE_PROTECTED:
1919 cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
1920 break;
1921 case PYX_TRANSPORT_RESERVATION_CONFLICT:
1922 /*
1923 * No SENSE Data payload for this case, set SCSI Status
1924 * and queue the response to $FABRIC_MOD.
1925 *
1926 * Uses linux/include/scsi/scsi.h SAM status codes defs
1927 */
1928 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1929 /*
1930 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1931 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1932 * CONFLICT STATUS.
1933 *
1934 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1935 */
e3d6f909
AG
1936 if (cmd->se_sess &&
1937 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
1938 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
c66ac9db
NB
1939 cmd->orig_fe_lun, 0x2C,
1940 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1941
07bde79a
NB
1942 ret = cmd->se_tfo->queue_status(cmd);
1943 if (ret == -EAGAIN)
1944 goto queue_full;
c66ac9db
NB
1945 goto check_stop;
1946 case PYX_TRANSPORT_USE_SENSE_REASON:
1947 /*
1948 * struct se_cmd->scsi_sense_reason already set
1949 */
1950 break;
1951 default:
6708bb27 1952 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
a1d8b49a 1953 cmd->t_task_cdb[0],
c66ac9db
NB
1954 cmd->transport_error_status);
1955 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1956 break;
1957 }
16ab8e60
NB
1958 /*
1959 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
1960 * make the call to transport_send_check_condition_and_sense()
1961 * directly. Otherwise expect the fabric to make the call to
1962 * transport_send_check_condition_and_sense() after handling
 1963 * possible unsolicited write data payloads.
1964 */
1965 if (!sc && !cmd->se_tfo->new_cmd_map)
c66ac9db 1966 transport_new_cmd_failure(cmd);
07bde79a
NB
1967 else {
1968 ret = transport_send_check_condition_and_sense(cmd,
1969 cmd->scsi_sense_reason, 0);
1970 if (ret == -EAGAIN)
1971 goto queue_full;
1972 }
1973
c66ac9db
NB
1974check_stop:
1975 transport_lun_remove_cmd(cmd);
6708bb27 1976 if (!transport_cmd_check_stop_to_fabric(cmd))
c66ac9db 1977 ;
07bde79a
NB
1978 return;
1979
1980queue_full:
1981 cmd->t_state = TRANSPORT_COMPLETE_OK;
1982 transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
c66ac9db
NB
1983}
1984
1985static void transport_direct_request_timeout(struct se_cmd *cmd)
1986{
1987 unsigned long flags;
1988
a1d8b49a 1989 spin_lock_irqsave(&cmd->t_state_lock, flags);
6708bb27 1990 if (!atomic_read(&cmd->t_transport_timeout)) {
a1d8b49a 1991 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
1992 return;
1993 }
a1d8b49a
AG
1994 if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
1995 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
1996 return;
1997 }
1998
a1d8b49a
AG
1999 atomic_sub(atomic_read(&cmd->t_transport_timeout),
2000 &cmd->t_se_count);
2001 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2002}
2003
2004static void transport_generic_request_timeout(struct se_cmd *cmd)
2005{
2006 unsigned long flags;
2007
2008 /*
e6a2573f 2009 * Reset cmd->t_se_count to allow transport_put_cmd()
c66ac9db
NB
 2010 * to make its final call and free the memory resources.
2011 */
a1d8b49a
AG
2012 spin_lock_irqsave(&cmd->t_state_lock, flags);
2013 if (atomic_read(&cmd->t_transport_timeout) > 1) {
2014 int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
c66ac9db 2015
a1d8b49a 2016 atomic_sub(tmp, &cmd->t_se_count);
c66ac9db 2017 }
a1d8b49a 2018 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 2019
e6a2573f 2020 transport_put_cmd(cmd);
c66ac9db
NB
2021}
2022
c66ac9db
NB
2023static inline u32 transport_lba_21(unsigned char *cdb)
2024{
2025 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
2026}
2027
2028static inline u32 transport_lba_32(unsigned char *cdb)
2029{
2030 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2031}
2032
2033static inline unsigned long long transport_lba_64(unsigned char *cdb)
2034{
2035 unsigned int __v1, __v2;
2036
2037 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2038 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2039
2040 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2041}
2042
2043/*
2044 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
2045 */
2046static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
2047{
2048 unsigned int __v1, __v2;
2049
2050 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
2051 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
2052
2053 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2054}
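
/*
 * Illustrative sketch, not part of the original file: the helpers above
 * simply reassemble a big-endian LBA from fixed CDB offsets.  For a
 * READ_10 CDB carrying LBA bytes 0x00 0x12 0x34 0x56, transport_lba_32()
 * returns 0x00123456.
 */
#if 0
static void transport_lba_decode_example(void)
{
	unsigned char cdb[10] = { READ_10, 0, 0x00, 0x12, 0x34, 0x56, 0, 0, 8, 0 };
	u32 lba = transport_lba_32(cdb);

	pr_debug("example LBA: 0x%08x\n", lba);	/* 0x00123456 */
}
#endif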
2055
2056static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
2057{
2058 unsigned long flags;
2059
a1d8b49a 2060 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
c66ac9db 2061 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
a1d8b49a 2062 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
c66ac9db
NB
2063}
2064
2065/*
2066 * Called from interrupt context.
2067 */
2068static void transport_task_timeout_handler(unsigned long data)
2069{
2070 struct se_task *task = (struct se_task *)data;
e3d6f909 2071 struct se_cmd *cmd = task->task_se_cmd;
c66ac9db
NB
2072 unsigned long flags;
2073
6708bb27 2074 pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
c66ac9db 2075
a1d8b49a 2076 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 2077 if (task->task_flags & TF_STOP) {
a1d8b49a 2078 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2079 return;
2080 }
2081 task->task_flags &= ~TF_RUNNING;
2082
2083 /*
2084 * Determine if transport_complete_task() has already been called.
2085 */
6708bb27
AG
2086 if (!atomic_read(&task->task_active)) {
2087 pr_debug("transport task: %p cmd: %p timeout task_active"
c66ac9db 2088 " == 0\n", task, cmd);
a1d8b49a 2089 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2090 return;
2091 }
2092
a1d8b49a
AG
2093 atomic_inc(&cmd->t_se_count);
2094 atomic_inc(&cmd->t_transport_timeout);
2095 cmd->t_tasks_failed = 1;
c66ac9db
NB
2096
2097 atomic_set(&task->task_timeout, 1);
2098 task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
2099 task->task_scsi_status = 1;
2100
2101 if (atomic_read(&task->task_stop)) {
6708bb27 2102 pr_debug("transport task: %p cmd: %p timeout task_stop"
c66ac9db 2103 " == 1\n", task, cmd);
a1d8b49a 2104 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2105 complete(&task->task_stop_comp);
2106 return;
2107 }
2108
6708bb27
AG
2109 if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
2110 pr_debug("transport task: %p cmd: %p timeout non zero"
c66ac9db 2111 " t_task_cdbs_left\n", task, cmd);
a1d8b49a 2112 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2113 return;
2114 }
6708bb27 2115 pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
c66ac9db
NB
2116 task, cmd);
2117
2118 cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
a1d8b49a 2119 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2120
2121 transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
2122}
2123
2124/*
a1d8b49a 2125 * Called with cmd->t_state_lock held.
c66ac9db
NB
2126 */
2127static void transport_start_task_timer(struct se_task *task)
2128{
42bf829e 2129 struct se_device *dev = task->task_se_cmd->se_dev;
c66ac9db
NB
2130 int timeout;
2131
2132 if (task->task_flags & TF_RUNNING)
2133 return;
2134 /*
2135 * If the task_timeout is disabled, exit now.
2136 */
e3d6f909 2137 timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
6708bb27 2138 if (!timeout)
c66ac9db
NB
2139 return;
2140
2141 init_timer(&task->task_timer);
2142 task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
2143 task->task_timer.data = (unsigned long) task;
2144 task->task_timer.function = transport_task_timeout_handler;
2145
2146 task->task_flags |= TF_RUNNING;
2147 add_timer(&task->task_timer);
2148#if 0
6708bb27 2149 pr_debug("Starting task timer for cmd: %p task: %p seconds:"
c66ac9db
NB
2150 " %d\n", task->task_se_cmd, task, timeout);
2151#endif
2152}
2153
2154/*
a1d8b49a 2155 * Called with spin_lock_irq(&cmd->t_state_lock) held.
c66ac9db
NB
2156 */
2157void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
2158{
e3d6f909 2159 struct se_cmd *cmd = task->task_se_cmd;
c66ac9db 2160
6708bb27 2161 if (!(task->task_flags & TF_RUNNING))
c66ac9db
NB
2162 return;
2163
2164 task->task_flags |= TF_STOP;
a1d8b49a 2165 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
c66ac9db
NB
2166
2167 del_timer_sync(&task->task_timer);
2168
a1d8b49a 2169 spin_lock_irqsave(&cmd->t_state_lock, *flags);
c66ac9db
NB
2170 task->task_flags &= ~TF_RUNNING;
2171 task->task_flags &= ~TF_STOP;
2172}
2173
2174static void transport_stop_all_task_timers(struct se_cmd *cmd)
2175{
2176 struct se_task *task = NULL, *task_tmp;
2177 unsigned long flags;
2178
a1d8b49a 2179 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 2180 list_for_each_entry_safe(task, task_tmp,
a1d8b49a 2181 &cmd->t_task_list, t_list)
c66ac9db 2182 __transport_stop_task_timer(task, &flags);
a1d8b49a 2183 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2184}
2185
2186static inline int transport_tcq_window_closed(struct se_device *dev)
2187{
2188 if (dev->dev_tcq_window_closed++ <
2189 PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
2190 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
2191 } else
2192 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
2193
e3d6f909 2194 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
c66ac9db
NB
2195 return 0;
2196}
2197
2198/*
2199 * Called from Fabric Module context from transport_execute_tasks()
2200 *
 2201 * The return of this function determines if the tasks from struct se_cmd
2202 * get added to the execution queue in transport_execute_tasks(),
2203 * or are added to the delayed or ordered lists here.
2204 */
2205static inline int transport_execute_task_attr(struct se_cmd *cmd)
2206{
5951146d 2207 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
c66ac9db
NB
2208 return 1;
2209 /*
25985edc 2210 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
c66ac9db
NB
 2211 * to add the passed struct se_cmd's list of tasks to the front of the list.
2212 */
e66ecd50 2213 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
5951146d 2214 atomic_inc(&cmd->se_dev->dev_hoq_count);
c66ac9db 2215 smp_mb__after_atomic_inc();
6708bb27 2216 pr_debug("Added HEAD_OF_QUEUE for CDB:"
c66ac9db 2217 " 0x%02x, se_ordered_id: %u\n",
6708bb27 2218 cmd->t_task_cdb[0],
c66ac9db
NB
2219 cmd->se_ordered_id);
2220 return 1;
e66ecd50 2221 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
5951146d
AG
2222 spin_lock(&cmd->se_dev->ordered_cmd_lock);
2223 list_add_tail(&cmd->se_ordered_node,
2224 &cmd->se_dev->ordered_cmd_list);
2225 spin_unlock(&cmd->se_dev->ordered_cmd_lock);
c66ac9db 2226
5951146d 2227 atomic_inc(&cmd->se_dev->dev_ordered_sync);
c66ac9db
NB
2228 smp_mb__after_atomic_inc();
2229
6708bb27 2230 pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
c66ac9db 2231 " list, se_ordered_id: %u\n",
a1d8b49a 2232 cmd->t_task_cdb[0],
c66ac9db
NB
2233 cmd->se_ordered_id);
2234 /*
2235 * Add ORDERED command to tail of execution queue if
2236 * no other older commands exist that need to be
2237 * completed first.
2238 */
6708bb27 2239 if (!atomic_read(&cmd->se_dev->simple_cmds))
c66ac9db
NB
2240 return 1;
2241 } else {
2242 /*
2243 * For SIMPLE and UNTAGGED Task Attribute commands
2244 */
5951146d 2245 atomic_inc(&cmd->se_dev->simple_cmds);
c66ac9db
NB
2246 smp_mb__after_atomic_inc();
2247 }
2248 /*
 2249 * Otherwise, if one or more outstanding ORDERED task attributes exist,
 2250 * add the dormant task(s) built for the passed struct se_cmd to the
 2251 * execution queue and enter the Active state for this struct se_device.
2252 */
5951146d 2253 if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
c66ac9db
NB
2254 /*
2255 * Otherwise, add cmd w/ tasks to delayed cmd queue that
25985edc 2256 * will be drained upon completion of HEAD_OF_QUEUE task.
c66ac9db 2257 */
5951146d 2258 spin_lock(&cmd->se_dev->delayed_cmd_lock);
c66ac9db 2259 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
5951146d
AG
2260 list_add_tail(&cmd->se_delayed_node,
2261 &cmd->se_dev->delayed_cmd_list);
2262 spin_unlock(&cmd->se_dev->delayed_cmd_lock);
c66ac9db 2263
6708bb27 2264 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
c66ac9db 2265 " delayed CMD list, se_ordered_id: %u\n",
a1d8b49a 2266 cmd->t_task_cdb[0], cmd->sam_task_attr,
c66ac9db
NB
2267 cmd->se_ordered_id);
2268 /*
2269 * Return zero to let transport_execute_tasks() know
2270 * not to add the delayed tasks to the execution list.
2271 */
2272 return 0;
2273 }
2274 /*
2275 * Otherwise, no ORDERED task attributes exist..
2276 */
2277 return 1;
2278}
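
/*
 * Illustrative sketch, not part of the original file: how a caller such
 * as transport_execute_tasks() below interprets the return value of
 * transport_execute_task_attr().  A return of 1 means the tasks may be
 * added to the execution queue now; 0 means the command was parked on
 * the delayed list behind an outstanding ORDERED command and will be
 * drained later by transport_complete_task_attr().
 */
#if 0
static void task_attr_dispatch_example(struct se_cmd *cmd)
{
	if (transport_execute_task_attr(cmd))
		transport_add_tasks_from_cmd(cmd);	/* run immediately */
	/* else: left on se_dev->delayed_cmd_list for later execution */
}
#endif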
2279
2280/*
2281 * Called from fabric module context in transport_generic_new_cmd() and
2282 * transport_generic_process_write()
2283 */
2284static int transport_execute_tasks(struct se_cmd *cmd)
2285{
2286 int add_tasks;
2287
db1620a2
CH
2288 if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
2289 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
2290 transport_generic_request_failure(cmd, NULL, 0, 1);
2291 return 0;
c66ac9db 2292 }
db1620a2 2293
c66ac9db
NB
2294 /*
2295 * Call transport_cmd_check_stop() to see if a fabric exception
25985edc 2296 * has occurred that prevents execution.
c66ac9db 2297 */
6708bb27 2298 if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
c66ac9db
NB
2299 /*
2300 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2301 * attribute for the tasks of the received struct se_cmd CDB
2302 */
2303 add_tasks = transport_execute_task_attr(cmd);
e3d6f909 2304 if (!add_tasks)
c66ac9db
NB
2305 goto execute_tasks;
2306 /*
2307 * This calls transport_add_tasks_from_cmd() to handle
2308 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
2309 * (if enabled) in __transport_add_task_to_execute_queue() and
2310 * transport_add_task_check_sam_attr().
2311 */
2312 transport_add_tasks_from_cmd(cmd);
2313 }
2314 /*
2315 * Kick the execution queue for the cmd associated struct se_device
2316 * storage object.
2317 */
2318execute_tasks:
5951146d 2319 __transport_execute_tasks(cmd->se_dev);
c66ac9db
NB
2320 return 0;
2321}
2322
2323/*
2324 * Called to check struct se_device tcq depth window, and once open pull struct se_task
 2325 * from struct se_device->execute_task_list and send it to the device backend.
2326 *
2327 * Called from transport_processing_thread()
2328 */
2329static int __transport_execute_tasks(struct se_device *dev)
2330{
2331 int error;
2332 struct se_cmd *cmd = NULL;
e3d6f909 2333 struct se_task *task = NULL;
c66ac9db
NB
2334 unsigned long flags;
2335
2336 /*
2337 * Check if there is enough room in the device and HBA queue to send
a1d8b49a 2338 * struct se_tasks to the selected transport.
c66ac9db
NB
2339 */
2340check_depth:
e3d6f909 2341 if (!atomic_read(&dev->depth_left))
c66ac9db 2342 return transport_tcq_window_closed(dev);
c66ac9db 2343
e3d6f909 2344 dev->dev_tcq_window_closed = 0;
c66ac9db 2345
e3d6f909
AG
2346 spin_lock_irq(&dev->execute_task_lock);
2347 if (list_empty(&dev->execute_task_list)) {
2348 spin_unlock_irq(&dev->execute_task_lock);
c66ac9db
NB
2349 return 0;
2350 }
e3d6f909
AG
2351 task = list_first_entry(&dev->execute_task_list,
2352 struct se_task, t_execute_list);
2353 list_del(&task->t_execute_list);
2354 atomic_set(&task->task_execute_queue, 0);
2355 atomic_dec(&dev->execute_tasks);
2356 spin_unlock_irq(&dev->execute_task_lock);
c66ac9db
NB
2357
2358 atomic_dec(&dev->depth_left);
c66ac9db 2359
e3d6f909 2360 cmd = task->task_se_cmd;
c66ac9db 2361
a1d8b49a 2362 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db
NB
2363 atomic_set(&task->task_active, 1);
2364 atomic_set(&task->task_sent, 1);
a1d8b49a 2365 atomic_inc(&cmd->t_task_cdbs_sent);
c66ac9db 2366
a1d8b49a
AG
2367 if (atomic_read(&cmd->t_task_cdbs_sent) ==
2368 cmd->t_task_list_num)
c66ac9db
NB
2369 atomic_set(&cmd->transport_sent, 1);
2370
2371 transport_start_task_timer(task);
a1d8b49a 2372 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2373 /*
2374 * The struct se_cmd->transport_emulate_cdb() function pointer is used
e3d6f909 2375 * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
c66ac9db
NB
2376 * struct se_subsystem_api->do_task() caller below.
2377 */
2378 if (cmd->transport_emulate_cdb) {
2379 error = cmd->transport_emulate_cdb(cmd);
2380 if (error != 0) {
2381 cmd->transport_error_status = error;
2382 atomic_set(&task->task_active, 0);
2383 atomic_set(&cmd->transport_sent, 0);
2384 transport_stop_tasks_for_cmd(cmd);
2385 transport_generic_request_failure(cmd, dev, 0, 1);
2386 goto check_depth;
2387 }
2388 /*
2389 * Handle the successful completion for transport_emulate_cdb()
2390 * for synchronous operation, following SCF_EMULATE_CDB_ASYNC
2391 * Otherwise the caller is expected to complete the task with
2392 * proper status.
2393 */
2394 if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
2395 cmd->scsi_status = SAM_STAT_GOOD;
2396 task->task_scsi_status = GOOD;
2397 transport_complete_task(task, 1);
2398 }
2399 } else {
2400 /*
2401 * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
2402 * RAMDISK we use the internal transport_emulate_control_cdb() logic
2403 * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
2404 * LUN emulation code.
2405 *
2406 * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
2407 * call ->do_task() directly and let the underlying TCM subsystem plugin
2408 * code handle the CDB emulation.
2409 */
e3d6f909
AG
2410 if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
2411 (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
c66ac9db
NB
2412 error = transport_emulate_control_cdb(task);
2413 else
e3d6f909 2414 error = dev->transport->do_task(task);
c66ac9db
NB
2415
2416 if (error != 0) {
2417 cmd->transport_error_status = error;
2418 atomic_set(&task->task_active, 0);
2419 atomic_set(&cmd->transport_sent, 0);
2420 transport_stop_tasks_for_cmd(cmd);
2421 transport_generic_request_failure(cmd, dev, 0, 1);
2422 }
2423 }
2424
2425 goto check_depth;
2426
2427 return 0;
2428}
2429
2430void transport_new_cmd_failure(struct se_cmd *se_cmd)
2431{
2432 unsigned long flags;
2433 /*
2434 * Any unsolicited data will get dumped for failed command inside of
2435 * the fabric plugin
2436 */
a1d8b49a 2437 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
c66ac9db
NB
2438 se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
2439 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
a1d8b49a 2440 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
c66ac9db
NB
2441}
2442
c66ac9db
NB
2443static inline u32 transport_get_sectors_6(
2444 unsigned char *cdb,
2445 struct se_cmd *cmd,
2446 int *ret)
2447{
5951146d 2448 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2449
2450 /*
2451 * Assume TYPE_DISK for non struct se_device objects.
2452 * Use 8-bit sector value.
2453 */
2454 if (!dev)
2455 goto type_disk;
2456
2457 /*
2458 * Use 24-bit allocation length for TYPE_TAPE.
2459 */
e3d6f909 2460 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
c66ac9db
NB
2461 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2462
2463 /*
2464 * Everything else assume TYPE_DISK Sector CDB location.
2465 * Use 8-bit sector value.
2466 */
2467type_disk:
2468 return (u32)cdb[4];
2469}
2470
2471static inline u32 transport_get_sectors_10(
2472 unsigned char *cdb,
2473 struct se_cmd *cmd,
2474 int *ret)
2475{
5951146d 2476 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2477
2478 /*
2479 * Assume TYPE_DISK for non struct se_device objects.
2480 * Use 16-bit sector value.
2481 */
2482 if (!dev)
2483 goto type_disk;
2484
2485 /*
2486 * XXX_10 is not defined in SSC, throw an exception
2487 */
e3d6f909
AG
2488 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2489 *ret = -EINVAL;
c66ac9db
NB
2490 return 0;
2491 }
2492
2493 /*
2494 * Everything else assume TYPE_DISK Sector CDB location.
2495 * Use 16-bit sector value.
2496 */
2497type_disk:
2498 return (u32)(cdb[7] << 8) + cdb[8];
2499}
2500
2501static inline u32 transport_get_sectors_12(
2502 unsigned char *cdb,
2503 struct se_cmd *cmd,
2504 int *ret)
2505{
5951146d 2506 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2507
2508 /*
2509 * Assume TYPE_DISK for non struct se_device objects.
2510 * Use 32-bit sector value.
2511 */
2512 if (!dev)
2513 goto type_disk;
2514
2515 /*
2516 * XXX_12 is not defined in SSC, throw an exception
2517 */
e3d6f909
AG
2518 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
2519 *ret = -EINVAL;
c66ac9db
NB
2520 return 0;
2521 }
2522
2523 /*
2524 * Everything else assume TYPE_DISK Sector CDB location.
2525 * Use 32-bit sector value.
2526 */
2527type_disk:
2528 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2529}
2530
2531static inline u32 transport_get_sectors_16(
2532 unsigned char *cdb,
2533 struct se_cmd *cmd,
2534 int *ret)
2535{
5951146d 2536 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2537
2538 /*
2539 * Assume TYPE_DISK for non struct se_device objects.
2540 * Use 32-bit sector value.
2541 */
2542 if (!dev)
2543 goto type_disk;
2544
2545 /*
2546 * Use 24-bit allocation length for TYPE_TAPE.
2547 */
e3d6f909 2548 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
c66ac9db
NB
2549 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2550
2551type_disk:
2552 return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2553 (cdb[12] << 8) + cdb[13];
2554}
2555
2556/*
2557 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2558 */
2559static inline u32 transport_get_sectors_32(
2560 unsigned char *cdb,
2561 struct se_cmd *cmd,
2562 int *ret)
2563{
2564 /*
2565 * Assume TYPE_DISK for non struct se_device objects.
2566 * Use 32-bit sector value.
2567 */
2568 return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2569 (cdb[30] << 8) + cdb[31];
2570
2571}
2572
2573static inline u32 transport_get_size(
2574 u32 sectors,
2575 unsigned char *cdb,
2576 struct se_cmd *cmd)
2577{
5951146d 2578 struct se_device *dev = cmd->se_dev;
c66ac9db 2579
e3d6f909 2580 if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
c66ac9db 2581 if (cdb[1] & 1) { /* sectors */
e3d6f909 2582 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
c66ac9db
NB
2583 } else /* bytes */
2584 return sectors;
2585 }
2586#if 0
6708bb27 2587 pr_debug("Returning block_size: %u, sectors: %u == %u for"
e3d6f909
AG
2588 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
2589 dev->se_sub_dev->se_dev_attrib.block_size * sectors,
2590 dev->transport->name);
c66ac9db 2591#endif
e3d6f909 2592 return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
c66ac9db
NB
2593}
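
/*
 * Illustrative sketch, not part of the original file: putting the two
 * helpers together for a TYPE_DISK backend.  A READ_10 with a transfer
 * length of 8 yields sectors == 8 and size == 8 * block_size, which the
 * sequencer later compares against the fabric-provided cmd->data_length.
 */
#if 0
static void transport_size_example(struct se_cmd *cmd)
{
	unsigned char cdb[10] = { READ_10, 0, 0, 0, 0, 0, 0, 0, 8, 0 };
	int sector_ret = 0;
	u32 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
	u32 size = transport_get_size(sectors, cdb, cmd);

	pr_debug("example sectors: %u size: %u\n", sectors, size);
}
#endif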
2594
c66ac9db
NB
2595static void transport_xor_callback(struct se_cmd *cmd)
2596{
2597 unsigned char *buf, *addr;
ec98f782 2598 struct scatterlist *sg;
c66ac9db
NB
2599 unsigned int offset;
2600 int i;
ec98f782 2601 int count;
c66ac9db
NB
2602 /*
2603 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2604 *
2605 * 1) read the specified logical block(s);
2606 * 2) transfer logical blocks from the data-out buffer;
2607 * 3) XOR the logical blocks transferred from the data-out buffer with
2608 * the logical blocks read, storing the resulting XOR data in a buffer;
2609 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2610 * blocks transferred from the data-out buffer; and
2611 * 5) transfer the resulting XOR data to the data-in buffer.
2612 */
2613 buf = kmalloc(cmd->data_length, GFP_KERNEL);
6708bb27
AG
2614 if (!buf) {
2615 pr_err("Unable to allocate xor_callback buf\n");
c66ac9db
NB
2616 return;
2617 }
2618 /*
ec98f782 2619 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
c66ac9db
NB
2620 * into the locally allocated *buf
2621 */
ec98f782
AG
2622 sg_copy_to_buffer(cmd->t_data_sg,
2623 cmd->t_data_nents,
2624 buf,
2625 cmd->data_length);
2626
c66ac9db
NB
2627 /*
2628 * Now perform the XOR against the BIDI read memory located at
a1d8b49a 2629 * cmd->t_mem_bidi_list
c66ac9db
NB
2630 */
2631
2632 offset = 0;
ec98f782
AG
2633 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
2634 addr = kmap_atomic(sg_page(sg), KM_USER0);
2635 if (!addr)
c66ac9db
NB
2636 goto out;
2637
ec98f782
AG
2638 for (i = 0; i < sg->length; i++)
2639 *(addr + sg->offset + i) ^= *(buf + offset + i);
c66ac9db 2640
ec98f782 2641 offset += sg->length;
c66ac9db
NB
2642 kunmap_atomic(addr, KM_USER0);
2643 }
ec98f782 2644
c66ac9db
NB
2645out:
2646 kfree(buf);
2647}
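
/*
 * Illustrative sketch, not part of the original file: step 3) above is a
 * plain byte-wise XOR of the data-out (WRITE) payload against the blocks
 * that were read, with the result returned in the data-in buffer.
 */
#if 0
static void xdwriteread_xor_example(unsigned char *read_buf,
				    const unsigned char *write_buf, u32 len)
{
	u32 i;

	for (i = 0; i < len; i++)
		read_buf[i] ^= write_buf[i];
}
#endif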
2648
2649/*
2650 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2651 */
2652static int transport_get_sense_data(struct se_cmd *cmd)
2653{
2654 unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
42bf829e 2655 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2656 struct se_task *task = NULL, *task_tmp;
2657 unsigned long flags;
2658 u32 offset = 0;
2659
e3d6f909
AG
2660 WARN_ON(!cmd->se_lun);
2661
42bf829e
CH
2662 if (!dev)
2663 return 0;
2664
a1d8b49a 2665 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 2666 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
a1d8b49a 2667 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2668 return 0;
2669 }
2670
2671 list_for_each_entry_safe(task, task_tmp,
a1d8b49a 2672 &cmd->t_task_list, t_list) {
c66ac9db
NB
2673 if (!task->task_sense)
2674 continue;
2675
e3d6f909 2676 if (!dev->transport->get_sense_buffer) {
6708bb27 2677 pr_err("dev->transport->get_sense_buffer"
c66ac9db
NB
2678 " is NULL\n");
2679 continue;
2680 }
2681
e3d6f909 2682 sense_buffer = dev->transport->get_sense_buffer(task);
6708bb27
AG
2683 if (!sense_buffer) {
2684 pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate"
c66ac9db 2685 " sense buffer for task with sense\n",
e3d6f909 2686 cmd->se_tfo->get_task_tag(cmd), task->task_no);
c66ac9db
NB
2687 continue;
2688 }
a1d8b49a 2689 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 2690
e3d6f909 2691 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
c66ac9db
NB
2692 TRANSPORT_SENSE_BUFFER);
2693
5951146d 2694 memcpy(&buffer[offset], sense_buffer,
c66ac9db
NB
2695 TRANSPORT_SENSE_BUFFER);
2696 cmd->scsi_status = task->task_scsi_status;
2697 /* Automatically padded */
2698 cmd->scsi_sense_length =
2699 (TRANSPORT_SENSE_BUFFER + offset);
2700
6708bb27 2701 pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
c66ac9db 2702 " and sense\n",
e3d6f909 2703 dev->se_hba->hba_id, dev->transport->name,
c66ac9db
NB
2704 cmd->scsi_status);
2705 return 0;
2706 }
a1d8b49a 2707 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
2708
2709 return -1;
2710}
2711
c66ac9db
NB
2712static int
2713transport_handle_reservation_conflict(struct se_cmd *cmd)
2714{
c66ac9db
NB
2715 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2716 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2717 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2718 /*
2719 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2720 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2721 * CONFLICT STATUS.
2722 *
2723 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2724 */
e3d6f909
AG
2725 if (cmd->se_sess &&
2726 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
2727 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
c66ac9db
NB
2728 cmd->orig_fe_lun, 0x2C,
2729 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
5951146d 2730 return -EINVAL;
c66ac9db
NB
2731}
2732
ec98f782
AG
2733static inline long long transport_dev_end_lba(struct se_device *dev)
2734{
2735 return dev->transport->get_blocks(dev) + 1;
2736}
2737
2738static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2739{
2740 struct se_device *dev = cmd->se_dev;
2741 u32 sectors;
2742
2743 if (dev->transport->get_device_type(dev) != TYPE_DISK)
2744 return 0;
2745
2746 sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
2747
6708bb27
AG
2748 if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
2749 pr_err("LBA: %llu Sectors: %u exceeds"
ec98f782
AG
2750 " transport_dev_end_lba(): %llu\n",
2751 cmd->t_task_lba, sectors,
2752 transport_dev_end_lba(dev));
7abbe7f3 2753 return -EINVAL;
ec98f782
AG
2754 }
2755
7abbe7f3 2756 return 0;
ec98f782
AG
2757}
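
/*
 * Illustrative sketch, not part of the original file: with a backend
 * whose ->get_blocks() returns 2047 (the last addressable LBA),
 * transport_dev_end_lba() evaluates to 2048, so an 8-sector request at
 * LBA 2040 (2040 + 8 == 2048) passes the check above, while a 9-sector
 * request at the same LBA is rejected with -EINVAL.
 */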
2758
706d5860
NB
2759static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2760{
2761 /*
 2762 * Determine if the received WRITE_SAME is used for direct
2763 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
2764 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
 2765 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code.
2766 */
2767 int passthrough = (dev->transport->transport_type ==
2768 TRANSPORT_PLUGIN_PHBA_PDEV);
2769
2770 if (!passthrough) {
2771 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2772 pr_err("WRITE_SAME PBDATA and LBDATA"
2773 " bits not supported for Block Discard"
2774 " Emulation\n");
2775 return -ENOSYS;
2776 }
2777 /*
2778 * Currently for the emulated case we only accept
2779 * tpws with the UNMAP=1 bit set.
2780 */
2781 if (!(flags[0] & 0x08)) {
2782 pr_err("WRITE_SAME w/o UNMAP bit not"
2783 " supported for Block Discard Emulation\n");
2784 return -ENOSYS;
2785 }
2786 }
2787
2788 return 0;
2789}
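
/*
 * Illustrative sketch, not part of the original file: for the emulated
 * (non-pSCSI) path the check above accepts a WRITE_SAME whose flags byte
 * has only the UNMAP bit (0x08) set, and rejects PBDATA (0x04) and
 * LBDATA (0x02).
 */
#if 0
static int write_same_flags_example(struct se_device *dev)
{
	unsigned char flags = 0x08;	/* UNMAP=1, PBDATA=0, LBDATA=0 */

	return target_check_write_same_discard(&flags, dev);	/* 0 == accepted */
}
#endif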
2790
c66ac9db
NB
2791/* transport_generic_cmd_sequencer():
2792 *
2793 * Generic Command Sequencer that should work for most DAS transport
2794 * drivers.
2795 *
2796 * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
2797 * RX Thread.
2798 *
 2799 * FIXME: Need to support other SCSI OPCODES here as well.
2800 */
2801static int transport_generic_cmd_sequencer(
2802 struct se_cmd *cmd,
2803 unsigned char *cdb)
2804{
5951146d 2805 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
2806 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2807 int ret = 0, sector_ret = 0, passthrough;
2808 u32 sectors = 0, size = 0, pr_reg_type = 0;
2809 u16 service_action;
2810 u8 alua_ascq = 0;
2811 /*
2812 * Check for an existing UNIT ATTENTION condition
2813 */
2814 if (core_scsi3_ua_check(cmd, cdb) < 0) {
c66ac9db
NB
2815 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2816 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
5951146d 2817 return -EINVAL;
c66ac9db
NB
2818 }
2819 /*
2820 * Check status of Asymmetric Logical Unit Assignment port
2821 */
e3d6f909 2822 ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
c66ac9db 2823 if (ret != 0) {
c66ac9db 2824 /*
25985edc 2825 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
c66ac9db
NB
2826 * The ALUA additional sense code qualifier (ASCQ) is determined
2827 * by the ALUA primary or secondary access state..
2828 */
2829 if (ret > 0) {
2830#if 0
6708bb27 2831 pr_debug("[%s]: ALUA TG Port not available,"
c66ac9db 2832 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
e3d6f909 2833 cmd->se_tfo->get_fabric_name(), alua_ascq);
c66ac9db
NB
2834#endif
2835 transport_set_sense_codes(cmd, 0x04, alua_ascq);
2836 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2837 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
5951146d 2838 return -EINVAL;
c66ac9db
NB
2839 }
2840 goto out_invalid_cdb_field;
2841 }
2842 /*
2843 * Check status for SPC-3 Persistent Reservations
2844 */
e3d6f909
AG
2845 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2846 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
c66ac9db
NB
2847 cmd, cdb, pr_reg_type) != 0)
2848 return transport_handle_reservation_conflict(cmd);
2849 /*
2850 * This means the CDB is allowed for the SCSI Initiator port
2851 * when said port is *NOT* holding the legacy SPC-2 or
2852 * SPC-3 Persistent Reservation.
2853 */
2854 }
2855
2856 switch (cdb[0]) {
2857 case READ_6:
2858 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2859 if (sector_ret)
2860 goto out_unsupported_cdb;
2861 size = transport_get_size(sectors, cdb, cmd);
2862 cmd->transport_split_cdb = &split_cdb_XX_6;
a1d8b49a 2863 cmd->t_task_lba = transport_lba_21(cdb);
c66ac9db
NB
2864 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2865 break;
2866 case READ_10:
2867 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2868 if (sector_ret)
2869 goto out_unsupported_cdb;
2870 size = transport_get_size(sectors, cdb, cmd);
2871 cmd->transport_split_cdb = &split_cdb_XX_10;
a1d8b49a 2872 cmd->t_task_lba = transport_lba_32(cdb);
c66ac9db
NB
2873 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2874 break;
2875 case READ_12:
2876 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2877 if (sector_ret)
2878 goto out_unsupported_cdb;
2879 size = transport_get_size(sectors, cdb, cmd);
2880 cmd->transport_split_cdb = &split_cdb_XX_12;
a1d8b49a 2881 cmd->t_task_lba = transport_lba_32(cdb);
c66ac9db
NB
2882 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2883 break;
2884 case READ_16:
2885 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2886 if (sector_ret)
2887 goto out_unsupported_cdb;
2888 size = transport_get_size(sectors, cdb, cmd);
2889 cmd->transport_split_cdb = &split_cdb_XX_16;
a1d8b49a 2890 cmd->t_task_lba = transport_lba_64(cdb);
c66ac9db
NB
2891 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2892 break;
2893 case WRITE_6:
2894 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2895 if (sector_ret)
2896 goto out_unsupported_cdb;
2897 size = transport_get_size(sectors, cdb, cmd);
2898 cmd->transport_split_cdb = &split_cdb_XX_6;
a1d8b49a 2899 cmd->t_task_lba = transport_lba_21(cdb);
c66ac9db
NB
2900 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2901 break;
2902 case WRITE_10:
2903 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2904 if (sector_ret)
2905 goto out_unsupported_cdb;
2906 size = transport_get_size(sectors, cdb, cmd);
2907 cmd->transport_split_cdb = &split_cdb_XX_10;
a1d8b49a
AG
2908 cmd->t_task_lba = transport_lba_32(cdb);
2909 cmd->t_tasks_fua = (cdb[1] & 0x8);
c66ac9db
NB
2910 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2911 break;
2912 case WRITE_12:
2913 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2914 if (sector_ret)
2915 goto out_unsupported_cdb;
2916 size = transport_get_size(sectors, cdb, cmd);
2917 cmd->transport_split_cdb = &split_cdb_XX_12;
a1d8b49a
AG
2918 cmd->t_task_lba = transport_lba_32(cdb);
2919 cmd->t_tasks_fua = (cdb[1] & 0x8);
c66ac9db
NB
2920 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2921 break;
2922 case WRITE_16:
2923 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2924 if (sector_ret)
2925 goto out_unsupported_cdb;
2926 size = transport_get_size(sectors, cdb, cmd);
2927 cmd->transport_split_cdb = &split_cdb_XX_16;
a1d8b49a
AG
2928 cmd->t_task_lba = transport_lba_64(cdb);
2929 cmd->t_tasks_fua = (cdb[1] & 0x8);
c66ac9db
NB
2930 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2931 break;
2932 case XDWRITEREAD_10:
2933 if ((cmd->data_direction != DMA_TO_DEVICE) ||
a1d8b49a 2934 !(cmd->t_tasks_bidi))
c66ac9db
NB
2935 goto out_invalid_cdb_field;
2936 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2937 if (sector_ret)
2938 goto out_unsupported_cdb;
2939 size = transport_get_size(sectors, cdb, cmd);
2940 cmd->transport_split_cdb = &split_cdb_XX_10;
a1d8b49a 2941 cmd->t_task_lba = transport_lba_32(cdb);
c66ac9db 2942 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
e3d6f909 2943 passthrough = (dev->transport->transport_type ==
c66ac9db
NB
2944 TRANSPORT_PLUGIN_PHBA_PDEV);
2945 /*
2946 * Skip the remaining assignments for TCM/PSCSI passthrough
2947 */
2948 if (passthrough)
2949 break;
2950 /*
2951 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
2952 */
2953 cmd->transport_complete_callback = &transport_xor_callback;
a1d8b49a 2954 cmd->t_tasks_fua = (cdb[1] & 0x8);
c66ac9db
NB
2955 break;
2956 case VARIABLE_LENGTH_CMD:
2957 service_action = get_unaligned_be16(&cdb[8]);
2958 /*
2959 * Determine if this is TCM/PSCSI device and we should disable
2960 * internal emulation for this CDB.
2961 */
e3d6f909 2962 passthrough = (dev->transport->transport_type ==
c66ac9db
NB
2963 TRANSPORT_PLUGIN_PHBA_PDEV);
2964
2965 switch (service_action) {
2966 case XDWRITEREAD_32:
2967 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2968 if (sector_ret)
2969 goto out_unsupported_cdb;
2970 size = transport_get_size(sectors, cdb, cmd);
2971 /*
2972 * Use WRITE_32 and READ_32 opcodes for the emulated
2973 * XDWRITE_READ_32 logic.
2974 */
2975 cmd->transport_split_cdb = &split_cdb_XX_32;
a1d8b49a 2976 cmd->t_task_lba = transport_lba_64_ext(cdb);
c66ac9db
NB
2977 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2978
2979 /*
2980 * Skip the remaining assignments for TCM/PSCSI passthrough
2981 */
2982 if (passthrough)
2983 break;
2984
2985 /*
2986 * Setup BIDI XOR callback to be run during
2987 * transport_generic_complete_ok()
2988 */
2989 cmd->transport_complete_callback = &transport_xor_callback;
a1d8b49a 2990 cmd->t_tasks_fua = (cdb[10] & 0x8);
c66ac9db
NB
2991 break;
2992 case WRITE_SAME_32:
2993 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2994 if (sector_ret)
2995 goto out_unsupported_cdb;
dd3a5ad8 2996
6708bb27 2997 if (sectors)
12850626 2998 size = transport_get_size(1, cdb, cmd);
6708bb27
AG
2999 else {
3000 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
3001 " supported\n");
3002 goto out_invalid_cdb_field;
3003 }
dd3a5ad8 3004
a1d8b49a 3005 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
c66ac9db
NB
3006 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3007
706d5860 3008 if (target_check_write_same_discard(&cdb[10], dev) < 0)
c66ac9db 3009 goto out_invalid_cdb_field;
706d5860 3010
c66ac9db
NB
3011 break;
3012 default:
6708bb27 3013 pr_err("VARIABLE_LENGTH_CMD service action"
c66ac9db
NB
3014 " 0x%04x not supported\n", service_action);
3015 goto out_unsupported_cdb;
3016 }
3017 break;
e434f1f1 3018 case MAINTENANCE_IN:
e3d6f909 3019 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
c66ac9db
NB
3020 /* MAINTENANCE_IN from SCC-2 */
3021 /*
3022 * Check for emulated MI_REPORT_TARGET_PGS.
3023 */
3024 if (cdb[1] == MI_REPORT_TARGET_PGS) {
3025 cmd->transport_emulate_cdb =
e3d6f909 3026 (su_dev->t10_alua.alua_type ==
c66ac9db 3027 SPC3_ALUA_EMULATED) ?
e3d6f909 3028 core_emulate_report_target_port_groups :
c66ac9db
NB
3029 NULL;
3030 }
3031 size = (cdb[6] << 24) | (cdb[7] << 16) |
3032 (cdb[8] << 8) | cdb[9];
3033 } else {
3034 /* GPCMD_SEND_KEY from multi media commands */
3035 size = (cdb[8] << 8) + cdb[9];
3036 }
05d1c7c0 3037 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3038 break;
3039 case MODE_SELECT:
3040 size = cdb[4];
3041 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3042 break;
3043 case MODE_SELECT_10:
3044 size = (cdb[7] << 8) + cdb[8];
3045 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3046 break;
3047 case MODE_SENSE:
3048 size = cdb[4];
05d1c7c0 3049 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3050 break;
3051 case MODE_SENSE_10:
3052 case GPCMD_READ_BUFFER_CAPACITY:
3053 case GPCMD_SEND_OPC:
3054 case LOG_SELECT:
3055 case LOG_SENSE:
3056 size = (cdb[7] << 8) + cdb[8];
05d1c7c0 3057 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3058 break;
3059 case READ_BLOCK_LIMITS:
3060 size = READ_BLOCK_LEN;
05d1c7c0 3061 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3062 break;
3063 case GPCMD_GET_CONFIGURATION:
3064 case GPCMD_READ_FORMAT_CAPACITIES:
3065 case GPCMD_READ_DISC_INFO:
3066 case GPCMD_READ_TRACK_RZONE_INFO:
3067 size = (cdb[7] << 8) + cdb[8];
3068 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3069 break;
3070 case PERSISTENT_RESERVE_IN:
3071 case PERSISTENT_RESERVE_OUT:
3072 cmd->transport_emulate_cdb =
e3d6f909 3073 (su_dev->t10_pr.res_type ==
c66ac9db 3074 SPC3_PERSISTENT_RESERVATIONS) ?
e3d6f909 3075 core_scsi3_emulate_pr : NULL;
c66ac9db 3076 size = (cdb[7] << 8) + cdb[8];
05d1c7c0 3077 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3078 break;
3079 case GPCMD_MECHANISM_STATUS:
3080 case GPCMD_READ_DVD_STRUCTURE:
3081 size = (cdb[8] << 8) + cdb[9];
3082 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3083 break;
3084 case READ_POSITION:
3085 size = READ_POSITION_LEN;
05d1c7c0 3086 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db 3087 break;
e434f1f1 3088 case MAINTENANCE_OUT:
e3d6f909 3089 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
c66ac9db
NB
3090 /* MAINTENANCE_OUT from SCC-2
3091 *
3092 * Check for emulated MO_SET_TARGET_PGS.
3093 */
3094 if (cdb[1] == MO_SET_TARGET_PGS) {
3095 cmd->transport_emulate_cdb =
e3d6f909 3096 (su_dev->t10_alua.alua_type ==
c66ac9db 3097 SPC3_ALUA_EMULATED) ?
e3d6f909 3098 core_emulate_set_target_port_groups :
c66ac9db
NB
3099 NULL;
3100 }
3101
3102 size = (cdb[6] << 24) | (cdb[7] << 16) |
3103 (cdb[8] << 8) | cdb[9];
3104 } else {
3105 /* GPCMD_REPORT_KEY from multi media commands */
3106 size = (cdb[8] << 8) + cdb[9];
3107 }
05d1c7c0 3108 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3109 break;
3110 case INQUIRY:
3111 size = (cdb[3] << 8) + cdb[4];
3112 /*
 3113 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
3114 * See spc4r17 section 5.3
3115 */
5951146d 3116 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
e66ecd50 3117 cmd->sam_task_attr = MSG_HEAD_TAG;
05d1c7c0 3118 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3119 break;
3120 case READ_BUFFER:
3121 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
05d1c7c0 3122 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3123 break;
3124 case READ_CAPACITY:
3125 size = READ_CAP_LEN;
05d1c7c0 3126 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3127 break;
3128 case READ_MEDIA_SERIAL_NUMBER:
3129 case SECURITY_PROTOCOL_IN:
3130 case SECURITY_PROTOCOL_OUT:
3131 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
05d1c7c0 3132 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3133 break;
3134 case SERVICE_ACTION_IN:
3135 case ACCESS_CONTROL_IN:
3136 case ACCESS_CONTROL_OUT:
3137 case EXTENDED_COPY:
3138 case READ_ATTRIBUTE:
3139 case RECEIVE_COPY_RESULTS:
3140 case WRITE_ATTRIBUTE:
3141 size = (cdb[10] << 24) | (cdb[11] << 16) |
3142 (cdb[12] << 8) | cdb[13];
05d1c7c0 3143 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3144 break;
3145 case RECEIVE_DIAGNOSTIC:
3146 case SEND_DIAGNOSTIC:
3147 size = (cdb[3] << 8) | cdb[4];
05d1c7c0 3148 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3149 break;
3150/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
3151#if 0
3152 case GPCMD_READ_CD:
3153 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3154 size = (2336 * sectors);
05d1c7c0 3155 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3156 break;
3157#endif
3158 case READ_TOC:
3159 size = cdb[8];
05d1c7c0 3160 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3161 break;
3162 case REQUEST_SENSE:
3163 size = cdb[4];
05d1c7c0 3164 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3165 break;
3166 case READ_ELEMENT_STATUS:
3167 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
05d1c7c0 3168 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3169 break;
3170 case WRITE_BUFFER:
3171 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
05d1c7c0 3172 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3173 break;
3174 case RESERVE:
3175 case RESERVE_10:
3176 /*
3177 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
3178 * Assume the passthrough or $FABRIC_MOD will tell us about it.
3179 */
3180 if (cdb[0] == RESERVE_10)
3181 size = (cdb[7] << 8) | cdb[8];
3182 else
3183 size = cmd->data_length;
3184
3185 /*
3186 * Setup the legacy emulated handler for SPC-2 and
3187 * >= SPC-3 compatible reservation handling (CRH=1)
3188 * Otherwise, we assume the underlying SCSI logic is
3189 * is running in SPC_PASSTHROUGH, and wants reservations
3190 * emulation disabled.
3191 */
3192 cmd->transport_emulate_cdb =
e3d6f909 3193 (su_dev->t10_pr.res_type !=
c66ac9db 3194 SPC_PASSTHROUGH) ?
e3d6f909 3195 core_scsi2_emulate_crh : NULL;
c66ac9db
NB
3196 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3197 break;
3198 case RELEASE:
3199 case RELEASE_10:
3200 /*
3201 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
3202 * Assume the passthrough or $FABRIC_MOD will tell us about it.
3203 */
3204 if (cdb[0] == RELEASE_10)
3205 size = (cdb[7] << 8) | cdb[8];
3206 else
3207 size = cmd->data_length;
3208
3209 cmd->transport_emulate_cdb =
e3d6f909 3210 (su_dev->t10_pr.res_type !=
c66ac9db 3211 SPC_PASSTHROUGH) ?
e3d6f909 3212 core_scsi2_emulate_crh : NULL;
c66ac9db
NB
3213 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3214 break;
3215 case SYNCHRONIZE_CACHE:
3216 case 0x91: /* SYNCHRONIZE_CACHE_16: */
3217 /*
3218 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
3219 */
3220 if (cdb[0] == SYNCHRONIZE_CACHE) {
3221 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
a1d8b49a 3222 cmd->t_task_lba = transport_lba_32(cdb);
c66ac9db
NB
3223 } else {
3224 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
a1d8b49a 3225 cmd->t_task_lba = transport_lba_64(cdb);
c66ac9db
NB
3226 }
3227 if (sector_ret)
3228 goto out_unsupported_cdb;
3229
3230 size = transport_get_size(sectors, cdb, cmd);
3231 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3232
3233 /*
3234 * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
3235 */
e3d6f909 3236 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
c66ac9db
NB
3237 break;
3238 /*
3239 * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
3240 * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
3241 */
3242 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
3243 /*
3244 * Check to ensure that LBA + Range does not exceed past end of
7abbe7f3 3245 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
c66ac9db 3246 */
7abbe7f3
NB
3247 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
3248 if (transport_cmd_get_valid_sectors(cmd) < 0)
3249 goto out_invalid_cdb_field;
3250 }
c66ac9db
NB
3251 break;
3252 case UNMAP:
3253 size = get_unaligned_be16(&cdb[7]);
05d1c7c0 3254 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3255 break;
3256 case WRITE_SAME_16:
3257 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3258 if (sector_ret)
3259 goto out_unsupported_cdb;
dd3a5ad8 3260
6708bb27 3261 if (sectors)
12850626 3262 size = transport_get_size(1, cdb, cmd);
6708bb27
AG
3263 else {
3264 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3265 goto out_invalid_cdb_field;
3266 }
dd3a5ad8 3267
5db0753b 3268 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
706d5860
NB
3269 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3270
3271 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3272 goto out_invalid_cdb_field;
3273 break;
3274 case WRITE_SAME:
3275 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3276 if (sector_ret)
3277 goto out_unsupported_cdb;
3278
3279 if (sectors)
12850626 3280 size = transport_get_size(1, cdb, cmd);
706d5860
NB
3281 else {
3282 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3283 goto out_invalid_cdb_field;
c66ac9db 3284 }
706d5860
NB
3285
3286 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
c66ac9db 3287 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
706d5860
NB
3288 /*
 3289 * Follow sbc3r26 with WRITE_SAME (10) and check for the existence
3290 * of byte 1 bit 3 UNMAP instead of original reserved field
3291 */
3292 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3293 goto out_invalid_cdb_field;
c66ac9db
NB
3294 break;
3295 case ALLOW_MEDIUM_REMOVAL:
3296 case GPCMD_CLOSE_TRACK:
3297 case ERASE:
3298 case INITIALIZE_ELEMENT_STATUS:
3299 case GPCMD_LOAD_UNLOAD:
3300 case REZERO_UNIT:
3301 case SEEK_10:
3302 case GPCMD_SET_SPEED:
3303 case SPACE:
3304 case START_STOP:
3305 case TEST_UNIT_READY:
3306 case VERIFY:
3307 case WRITE_FILEMARKS:
3308 case MOVE_MEDIUM:
3309 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3310 break;
3311 case REPORT_LUNS:
3312 cmd->transport_emulate_cdb =
e3d6f909 3313 transport_core_report_lun_response;
c66ac9db
NB
3314 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3315 /*
 3316 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
3317 * See spc4r17 section 5.3
3318 */
5951146d 3319 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
e66ecd50 3320 cmd->sam_task_attr = MSG_HEAD_TAG;
05d1c7c0 3321 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
c66ac9db
NB
3322 break;
3323 default:
6708bb27 3324 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
c66ac9db 3325 " 0x%02x, sending CHECK_CONDITION.\n",
e3d6f909 3326 cmd->se_tfo->get_fabric_name(), cdb[0]);
c66ac9db
NB
3327 goto out_unsupported_cdb;
3328 }
3329
3330 if (size != cmd->data_length) {
6708bb27 3331 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
c66ac9db 3332 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
e3d6f909 3333 " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
c66ac9db
NB
3334 cmd->data_length, size, cdb[0]);
3335
3336 cmd->cmd_spdtl = size;
3337
3338 if (cmd->data_direction == DMA_TO_DEVICE) {
6708bb27 3339 pr_err("Rejecting underflow/overflow"
c66ac9db
NB
3340 " WRITE data\n");
3341 goto out_invalid_cdb_field;
3342 }
3343 /*
3344 * Reject READ_* or WRITE_* with overflow/underflow for
3345 * type SCF_SCSI_DATA_SG_IO_CDB.
3346 */
6708bb27
AG
3347 if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
3348 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
c66ac9db 3349 " CDB on non 512-byte sector setup subsystem"
e3d6f909 3350 " plugin: %s\n", dev->transport->name);
c66ac9db
NB
3351 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3352 goto out_invalid_cdb_field;
3353 }
3354
3355 if (size > cmd->data_length) {
3356 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3357 cmd->residual_count = (size - cmd->data_length);
3358 } else {
3359 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3360 cmd->residual_count = (cmd->data_length - size);
3361 }
3362 cmd->data_length = size;
3363 }
3364
d0229ae3
AG
3365 /* Let's limit control cdbs to a page, for simplicity's sake. */
3366 if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3367 size > PAGE_SIZE)
3368 goto out_invalid_cdb_field;
3369
c66ac9db
NB
3370 transport_set_supported_SAM_opcode(cmd);
3371 return ret;
3372
3373out_unsupported_cdb:
3374 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3375 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
5951146d 3376 return -EINVAL;
c66ac9db
NB
3377out_invalid_cdb_field:
3378 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3379 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
5951146d 3380 return -EINVAL;
c66ac9db
NB
3381}
3382
c66ac9db
NB
3383/*
3384 * Called from transport_generic_complete_ok() and
3385 * transport_generic_request_failure() to determine which dormant/delayed
3386 * and ordered cmds need to have their tasks added to the execution queue.
3387 */
3388static void transport_complete_task_attr(struct se_cmd *cmd)
3389{
5951146d 3390 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
3391 struct se_cmd *cmd_p, *cmd_tmp;
3392 int new_active_tasks = 0;
3393
e66ecd50 3394 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
c66ac9db
NB
3395 atomic_dec(&dev->simple_cmds);
3396 smp_mb__after_atomic_dec();
3397 dev->dev_cur_ordered_id++;
6708bb27 3398 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
c66ac9db
NB
3399 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3400 cmd->se_ordered_id);
e66ecd50 3401 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
c66ac9db
NB
3402 atomic_dec(&dev->dev_hoq_count);
3403 smp_mb__after_atomic_dec();
3404 dev->dev_cur_ordered_id++;
6708bb27 3405 pr_debug("Incremented dev_cur_ordered_id: %u for"
c66ac9db
NB
3406 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3407 cmd->se_ordered_id);
e66ecd50 3408 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
c66ac9db 3409 spin_lock(&dev->ordered_cmd_lock);
5951146d 3410 list_del(&cmd->se_ordered_node);
c66ac9db
NB
3411 atomic_dec(&dev->dev_ordered_sync);
3412 smp_mb__after_atomic_dec();
3413 spin_unlock(&dev->ordered_cmd_lock);
3414
3415 dev->dev_cur_ordered_id++;
6708bb27 3416 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
c66ac9db
NB
3417 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3418 }
3419 /*
3420 * Process all commands up to the last received
3421 * ORDERED task attribute which requires another blocking
3422 * boundary
3423 */
3424 spin_lock(&dev->delayed_cmd_lock);
3425 list_for_each_entry_safe(cmd_p, cmd_tmp,
5951146d 3426 &dev->delayed_cmd_list, se_delayed_node) {
c66ac9db 3427
5951146d 3428 list_del(&cmd_p->se_delayed_node);
c66ac9db
NB
3429 spin_unlock(&dev->delayed_cmd_lock);
3430
6708bb27 3431 pr_debug("Calling add_tasks() for"
c66ac9db
NB
3432 " cmd_p: 0x%02x Task Attr: 0x%02x"
3433 " Dormant -> Active, se_ordered_id: %u\n",
6708bb27 3434 cmd_p->t_task_cdb[0],
c66ac9db
NB
3435 cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3436
3437 transport_add_tasks_from_cmd(cmd_p);
3438 new_active_tasks++;
3439
3440 spin_lock(&dev->delayed_cmd_lock);
e66ecd50 3441 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
c66ac9db
NB
3442 break;
3443 }
3444 spin_unlock(&dev->delayed_cmd_lock);
3445 /*
3446 * If new tasks have become active, wake up the transport thread
3447 * to do the processing of the Active tasks.
3448 */
3449 if (new_active_tasks != 0)
e3d6f909 3450 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
c66ac9db
NB
3451}
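/*
 * Hedged worked example of the ORDERED boundary handling above: if the
 * delayed list holds S1 (SIMPLE), S2 (SIMPLE), O1 (ORDERED), S3 (SIMPLE),
 * completing the current command re-activates S1, S2 and O1, then breaks
 * at the ORDERED entry, leaving S3 delayed until O1 itself completes and
 * runs this function again.
 */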
3452
07bde79a
NB
3453static int transport_complete_qf(struct se_cmd *cmd)
3454{
3455 int ret = 0;
3456
3457 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
3458 return cmd->se_tfo->queue_status(cmd);
3459
3460 switch (cmd->data_direction) {
3461 case DMA_FROM_DEVICE:
3462 ret = cmd->se_tfo->queue_data_in(cmd);
3463 break;
3464 case DMA_TO_DEVICE:
ec98f782 3465 if (cmd->t_bidi_data_sg) {
07bde79a
NB
3466 ret = cmd->se_tfo->queue_data_in(cmd);
3467 if (ret < 0)
3468 return ret;
3469 }
3470 /* Fall through for DMA_TO_DEVICE */
3471 case DMA_NONE:
3472 ret = cmd->se_tfo->queue_status(cmd);
3473 break;
3474 default:
3475 break;
3476 }
3477
3478 return ret;
3479}
3480
3481static void transport_handle_queue_full(
3482 struct se_cmd *cmd,
3483 struct se_device *dev,
3484 int (*qf_callback)(struct se_cmd *))
3485{
3486 spin_lock_irq(&dev->qf_cmd_lock);
3487 cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL;
3488 cmd->transport_qf_callback = qf_callback;
3489 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
3490 atomic_inc(&dev->dev_qf_count);
3491 smp_mb__after_atomic_inc();
3492 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
3493
3494 schedule_work(&cmd->se_dev->qf_work_queue);
3495}
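/*
 * Hedged sketch of the fabric side of this contract (not part of this
 * file): a fabric's queue_data_in()/queue_status() callback reports a
 * transient queue-full condition by returning -EAGAIN, which makes the
 * completion paths below park the command here and retry it from
 * dev->qf_work_queue.  example_fabric_queue_data_in() and the ring check
 * are illustrative names only.
 */
#if 0
static int example_fabric_queue_data_in(struct se_cmd *cmd)
{
	if (example_ring_full(cmd))	/* hypothetical HW ring check */
		return -EAGAIN;		/* core requeues via QUEUE_FULL */

	/* ... otherwise post the DATA-IN frames to the wire ... */
	return 0;
}
#endif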
3496
c66ac9db
NB
3497static void transport_generic_complete_ok(struct se_cmd *cmd)
3498{
07bde79a 3499 int reason = 0, ret;
c66ac9db
NB
3500 /*
3501 * Check if we need to move delayed/dormant tasks from cmds on the
3502 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3503 * Attribute.
3504 */
5951146d 3505 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
c66ac9db 3506 transport_complete_task_attr(cmd);
07bde79a
NB
3507 /*
3508 * Check to schedule QUEUE_FULL work, or execute an existing
3509 * cmd->transport_qf_callback()
3510 */
3511 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
3512 schedule_work(&cmd->se_dev->qf_work_queue);
3513
3514 if (cmd->transport_qf_callback) {
3515 ret = cmd->transport_qf_callback(cmd);
3516 if (ret < 0)
3517 goto queue_full;
3518
3519 cmd->transport_qf_callback = NULL;
3520 goto done;
3521 }
c66ac9db
NB
3522 /*
3523 * Check if we need to retrieve a sense buffer from
3524 * the struct se_cmd in question.
3525 */
3526 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3527 if (transport_get_sense_data(cmd) < 0)
3528 reason = TCM_NON_EXISTENT_LUN;
3529
3530 /*
3531 * Only set when a struct se_task->task_scsi_status returned
3532 * a non GOOD status.
3533 */
3534 if (cmd->scsi_status) {
07bde79a 3535 ret = transport_send_check_condition_and_sense(
c66ac9db 3536 cmd, reason, 1);
07bde79a
NB
3537 if (ret == -EAGAIN)
3538 goto queue_full;
3539
c66ac9db
NB
3540 transport_lun_remove_cmd(cmd);
3541 transport_cmd_check_stop_to_fabric(cmd);
3542 return;
3543 }
3544 }
3545 /*
25985edc 3546 * Check for a callback, used by, amongst other things,
c66ac9db
NB
3547 * XDWRITE_READ_10 emulation.
3548 */
3549 if (cmd->transport_complete_callback)
3550 cmd->transport_complete_callback(cmd);
3551
3552 switch (cmd->data_direction) {
3553 case DMA_FROM_DEVICE:
3554 spin_lock(&cmd->se_lun->lun_sep_lock);
e3d6f909
AG
3555 if (cmd->se_lun->lun_sep) {
3556 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
c66ac9db
NB
3557 cmd->data_length;
3558 }
3559 spin_unlock(&cmd->se_lun->lun_sep_lock);
c66ac9db 3560
07bde79a
NB
3561 ret = cmd->se_tfo->queue_data_in(cmd);
3562 if (ret == -EAGAIN)
3563 goto queue_full;
c66ac9db
NB
3564 break;
3565 case DMA_TO_DEVICE:
3566 spin_lock(&cmd->se_lun->lun_sep_lock);
e3d6f909
AG
3567 if (cmd->se_lun->lun_sep) {
3568 cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
c66ac9db
NB
3569 cmd->data_length;
3570 }
3571 spin_unlock(&cmd->se_lun->lun_sep_lock);
3572 /*
3573 * Check if we need to send READ payload for BIDI-COMMAND
3574 */
ec98f782 3575 if (cmd->t_bidi_data_sg) {
c66ac9db 3576 spin_lock(&cmd->se_lun->lun_sep_lock);
e3d6f909
AG
3577 if (cmd->se_lun->lun_sep) {
3578 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
c66ac9db
NB
3579 cmd->data_length;
3580 }
3581 spin_unlock(&cmd->se_lun->lun_sep_lock);
07bde79a
NB
3582 ret = cmd->se_tfo->queue_data_in(cmd);
3583 if (ret == -EAGAIN)
3584 goto queue_full;
c66ac9db
NB
3585 break;
3586 }
3587 /* Fall through for DMA_TO_DEVICE */
3588 case DMA_NONE:
07bde79a
NB
3589 ret = cmd->se_tfo->queue_status(cmd);
3590 if (ret == -EAGAIN)
3591 goto queue_full;
c66ac9db
NB
3592 break;
3593 default:
3594 break;
3595 }
3596
07bde79a 3597done:
c66ac9db
NB
3598 transport_lun_remove_cmd(cmd);
3599 transport_cmd_check_stop_to_fabric(cmd);
07bde79a
NB
3600 return;
3601
3602queue_full:
6708bb27 3603 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
07bde79a
NB
3604 " data_direction: %d\n", cmd, cmd->data_direction);
3605 transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
c66ac9db
NB
3606}
3607
3608static void transport_free_dev_tasks(struct se_cmd *cmd)
3609{
3610 struct se_task *task, *task_tmp;
3611 unsigned long flags;
3612
a1d8b49a 3613 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 3614 list_for_each_entry_safe(task, task_tmp,
a1d8b49a 3615 &cmd->t_task_list, t_list) {
c66ac9db
NB
3616 if (atomic_read(&task->task_active))
3617 continue;
3618
3619 kfree(task->task_sg_bidi);
3620 kfree(task->task_sg);
3621
3622 list_del(&task->t_list);
3623
a1d8b49a 3624 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
42bf829e 3625 cmd->se_dev->transport->free_task(task);
a1d8b49a 3626 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 3627 }
a1d8b49a 3628 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
3629}
3630
6708bb27 3631static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
c66ac9db 3632{
ec98f782 3633 struct scatterlist *sg;
ec98f782 3634 int count;
c66ac9db 3635
6708bb27
AG
3636 for_each_sg(sgl, sg, nents, count)
3637 __free_page(sg_page(sg));
c66ac9db 3638
6708bb27
AG
3639 kfree(sgl);
3640}
c66ac9db 3641
6708bb27
AG
3642static inline void transport_free_pages(struct se_cmd *cmd)
3643{
3644 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3645 return;
3646
3647 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
ec98f782
AG
3648 cmd->t_data_sg = NULL;
3649 cmd->t_data_nents = 0;
c66ac9db 3650
6708bb27 3651 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
ec98f782
AG
3652 cmd->t_bidi_data_sg = NULL;
3653 cmd->t_bidi_data_nents = 0;
c66ac9db
NB
3654}
3655
d3df7825
CH
3656/**
3657 * transport_put_cmd - release a reference to a command
3658 * @cmd: command to release
3659 *
3660 * This routine releases our reference to the command and frees it if possible.
3661 */
39c05f32 3662static void transport_put_cmd(struct se_cmd *cmd)
c66ac9db
NB
3663{
3664 unsigned long flags;
4911e3cc 3665 int free_tasks = 0;
c66ac9db 3666
a1d8b49a 3667 spin_lock_irqsave(&cmd->t_state_lock, flags);
4911e3cc
CH
3668 if (atomic_read(&cmd->t_fe_count)) {
3669 if (!atomic_dec_and_test(&cmd->t_fe_count))
3670 goto out_busy;
3671 }
3672
3673 if (atomic_read(&cmd->t_se_count)) {
3674 if (!atomic_dec_and_test(&cmd->t_se_count))
3675 goto out_busy;
3676 }
3677
3678 if (atomic_read(&cmd->transport_dev_active)) {
3679 atomic_set(&cmd->transport_dev_active, 0);
3680 transport_all_task_dev_remove_state(cmd);
3681 free_tasks = 1;
c66ac9db 3682 }
a1d8b49a 3683 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 3684
4911e3cc
CH
3685 if (free_tasks != 0)
3686 transport_free_dev_tasks(cmd);
d3df7825 3687
c66ac9db 3688 transport_free_pages(cmd);
31afc39c 3689 transport_release_cmd(cmd);
39c05f32 3690 return;
4911e3cc
CH
3691out_busy:
3692 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
3693}
3694
c66ac9db 3695/*
ec98f782
AG
3696 * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
3697 * allocating in the core.
c66ac9db
NB
3698 * @cmd: Associated se_cmd descriptor
3699 * @sgl: SGL style memory for TCM WRITE / READ
3700 * @sgl_count: Number of SGL elements
3701 * @sgl_bidi: SGL style memory for TCM BIDI READ
3702 * @sgl_bidi_count: Number of BIDI READ SGL elements
3703 *
3704 * Return: nonzero return means the cmd was rejected for -ENOMEM or improper
3705 * usage of parameters.
3706 */
3707int transport_generic_map_mem_to_cmd(
3708 struct se_cmd *cmd,
5951146d
AG
3709 struct scatterlist *sgl,
3710 u32 sgl_count,
3711 struct scatterlist *sgl_bidi,
3712 u32 sgl_bidi_count)
c66ac9db 3713{
5951146d 3714 if (!sgl || !sgl_count)
c66ac9db 3715 return 0;
c66ac9db 3716
c66ac9db
NB
3717 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3718 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
c66ac9db 3719
ec98f782
AG
3720 cmd->t_data_sg = sgl;
3721 cmd->t_data_nents = sgl_count;
c66ac9db 3722
ec98f782
AG
3723 if (sgl_bidi && sgl_bidi_count) {
3724 cmd->t_bidi_data_sg = sgl_bidi;
3725 cmd->t_bidi_data_nents = sgl_bidi_count;
c66ac9db
NB
3726 }
3727 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
c66ac9db
NB
3728 }
3729
3730 return 0;
3731}
3732EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
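/*
 * Hedged usage sketch (not part of this file): a fabric module that already
 * owns the payload pages can hand them to the core with the call above
 * instead of letting transport_generic_get_mem() allocate a second copy.
 * example_fabric_map_pages(), "pages" and "npages" are illustrative names.
 */
#if 0
static int example_fabric_map_pages(struct se_cmd *cmd, struct page **pages,
				    u32 npages, u32 length)
{
	struct scatterlist *sgl;
	u32 left = length, i;

	sgl = kmalloc(sizeof(struct scatterlist) * npages, GFP_KERNEL);
	if (!sgl)
		return -ENOMEM;
	sg_init_table(sgl, npages);

	for (i = 0; i < npages; i++) {
		u32 len = min_t(u32, left, PAGE_SIZE);

		sg_set_page(&sgl[i], pages[i], len, 0);
		left -= len;
	}
	/* No BIDI READ payload in this example, so pass NULL/0 */
	return transport_generic_map_mem_to_cmd(cmd, sgl, npages, NULL, 0);
}
#endif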
3733
c66ac9db
NB
3734static int transport_new_cmd_obj(struct se_cmd *cmd)
3735{
5951146d 3736 struct se_device *dev = cmd->se_dev;
01cde4d5 3737 int set_counts = 1, rc, task_cdbs;
c66ac9db 3738
ec98f782
AG
3739 /*
3740 * Setup any BIDI READ tasks and memory from
3741 * cmd->t_bidi_data_sg so the READ struct se_tasks
3742 * are queued first for the non pSCSI passthrough case.
3743 */
3744 if (cmd->t_bidi_data_sg &&
3745 (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
3746 rc = transport_allocate_tasks(cmd,
3747 cmd->t_task_lba,
3748 DMA_FROM_DEVICE,
3749 cmd->t_bidi_data_sg,
3750 cmd->t_bidi_data_nents);
6708bb27 3751 if (rc <= 0) {
c66ac9db
NB
3752 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3753 cmd->scsi_sense_reason =
ec98f782 3754 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
01cde4d5 3755 return -EINVAL;
c66ac9db 3756 }
ec98f782
AG
3757 atomic_inc(&cmd->t_fe_count);
3758 atomic_inc(&cmd->t_se_count);
3759 set_counts = 0;
3760 }
3761 /*
3762 * Setup the tasks and memory from cmd->t_data_sg
3763 * Note for BIDI transfers this will contain the WRITE payload
3764 */
3765 task_cdbs = transport_allocate_tasks(cmd,
3766 cmd->t_task_lba,
3767 cmd->data_direction,
3768 cmd->t_data_sg,
3769 cmd->t_data_nents);
6708bb27 3770 if (task_cdbs <= 0) {
ec98f782
AG
3771 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3772 cmd->scsi_sense_reason =
3773 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
01cde4d5 3774 return -EINVAL;
ec98f782 3775 }
c66ac9db 3776
ec98f782
AG
3777 if (set_counts) {
3778 atomic_inc(&cmd->t_fe_count);
3779 atomic_inc(&cmd->t_se_count);
c66ac9db
NB
3780 }
3781
ec98f782
AG
3782 cmd->t_task_list_num = task_cdbs;
3783
a1d8b49a
AG
3784 atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
3785 atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
3786 atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
c66ac9db
NB
3787 return 0;
3788}
3789
05d1c7c0
AG
3790void *transport_kmap_first_data_page(struct se_cmd *cmd)
3791{
ec98f782 3792 struct scatterlist *sg = cmd->t_data_sg;
05d1c7c0 3793
ec98f782 3794 BUG_ON(!sg);
05d1c7c0 3795 /*
ec98f782
AG
3796 * We need to take into account a possible offset here for fabrics like
3797 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
3798 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
05d1c7c0 3799 */
ec98f782 3800 return kmap(sg_page(sg)) + sg->offset;
05d1c7c0
AG
3801}
3802EXPORT_SYMBOL(transport_kmap_first_data_page);
3803
3804void transport_kunmap_first_data_page(struct se_cmd *cmd)
3805{
ec98f782 3806 kunmap(sg_page(cmd->t_data_sg));
05d1c7c0
AG
3807}
3808EXPORT_SYMBOL(transport_kunmap_first_data_page);
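/*
 * Hedged usage sketch: emulation or fabric code that only needs to peek at
 * a small control payload can map the first data page, inspect it, and
 * unmap it again.  example_peek_payload() is an illustrative name only.
 */
#if 0
static void example_peek_payload(struct se_cmd *cmd)
{
	unsigned char *buf = transport_kmap_first_data_page(cmd);

	pr_debug("First byte of payload: 0x%02x\n", buf[0]);

	transport_kunmap_first_data_page(cmd);
}
#endif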
3809
c66ac9db 3810static int
05d1c7c0 3811transport_generic_get_mem(struct se_cmd *cmd)
c66ac9db 3812{
ec98f782
AG
3813 u32 length = cmd->data_length;
3814 unsigned int nents;
3815 struct page *page;
3816 int i = 0;
c66ac9db 3817
ec98f782
AG
3818 nents = DIV_ROUND_UP(length, PAGE_SIZE);
3819 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
3820 if (!cmd->t_data_sg)
3821 return -ENOMEM;
c66ac9db 3822
ec98f782
AG
3823 cmd->t_data_nents = nents;
3824 sg_init_table(cmd->t_data_sg, nents);
c66ac9db 3825
ec98f782
AG
3826 while (length) {
3827 u32 page_len = min_t(u32, length, PAGE_SIZE);
3828 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3829 if (!page)
3830 goto out;
c66ac9db 3831
ec98f782
AG
3832 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
3833 length -= page_len;
3834 i++;
c66ac9db 3835 }
c66ac9db 3836 return 0;
c66ac9db 3837
ec98f782
AG
3838out:
3839 while (i >= 0) {
3840 __free_page(sg_page(&cmd->t_data_sg[i]));
3841 i--;
c66ac9db 3842 }
ec98f782
AG
3843 kfree(cmd->t_data_sg);
3844 cmd->t_data_sg = NULL;
3845 return -ENOMEM;
c66ac9db
NB
3846}
3847
a1d8b49a
AG
3848/* Reduce sectors if the request is too large for the device */
3849static inline sector_t transport_limit_task_sectors(
c66ac9db
NB
3850 struct se_device *dev,
3851 unsigned long long lba,
a1d8b49a 3852 sector_t sectors)
c66ac9db 3853{
a1d8b49a 3854 sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
c66ac9db 3855
a1d8b49a
AG
3856 if (dev->transport->get_device_type(dev) == TYPE_DISK)
3857 if ((lba + sectors) > transport_dev_end_lba(dev))
3858 sectors = ((transport_dev_end_lba(dev) - lba) + 1);
c66ac9db 3859
a1d8b49a 3860 return sectors;
c66ac9db
NB
3861}
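/*
 * Hedged worked example of the clamp above: with max_sectors = 1024, a
 * request for 4096 sectors is first cut to 1024; if the TYPE_DISK device
 * ends at LBA 1000 and the request starts at LBA 500, it is further cut to
 * (1000 - 500) + 1 = 501 sectors.
 */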
3862
c66ac9db
NB
3863
3864/*
3865 * This function can be used by HW target mode drivers to create a linked
3866 * scatterlist from all contiguously allocated struct se_task->task_sg[].
3867 * This is intended to be called during the completion path by TCM Core
3868 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
3869 */
3870void transport_do_task_sg_chain(struct se_cmd *cmd)
3871{
ec98f782
AG
3872 struct scatterlist *sg_first = NULL;
3873 struct scatterlist *sg_prev = NULL;
3874 int sg_prev_nents = 0;
3875 struct scatterlist *sg;
c66ac9db 3876 struct se_task *task;
ec98f782 3877 u32 chained_nents = 0;
c66ac9db
NB
3878 int i;
3879
ec98f782
AG
3880 BUG_ON(!cmd->se_tfo->task_sg_chaining);
3881
c66ac9db
NB
3882 /*
3883 * Walk the struct se_task list and setup scatterlist chains
a1d8b49a 3884 * for each contiguously allocated struct se_task->task_sg[].
c66ac9db 3885 */
a1d8b49a 3886 list_for_each_entry(task, &cmd->t_task_list, t_list) {
ec98f782 3887 if (!task->task_sg)
c66ac9db
NB
3888 continue;
3889
ec98f782
AG
3890 if (!sg_first) {
3891 sg_first = task->task_sg;
6708bb27 3892 chained_nents = task->task_sg_nents;
97868c89 3893 } else {
ec98f782 3894 sg_chain(sg_prev, sg_prev_nents, task->task_sg);
6708bb27 3895 chained_nents += task->task_sg_nents;
97868c89 3896 }
c3c74c7a
NB
3897 /*
3898 * For the padded tasks, use the extra SGL vector allocated
3899 * in transport_allocate_data_tasks() for the sg_prev_nents
3900 * offset into sg_chain() above.. The last task of a
3901 * multi-task list, or a single task will not have
3902 * task->task_padded_sg set..
3903 */
3904 if (task->task_padded_sg)
3905 sg_prev_nents = (task->task_sg_nents + 1);
3906 else
3907 sg_prev_nents = task->task_sg_nents;
ec98f782
AG
3908
3909 sg_prev = task->task_sg;
c66ac9db
NB
3910 }
3911 /*
3912 * Setup the starting pointer and total t_tasks_sg_linked_no including
3913 * padding SGs for linking and to mark the end.
3914 */
a1d8b49a 3915 cmd->t_tasks_sg_chained = sg_first;
ec98f782 3916 cmd->t_tasks_sg_chained_no = chained_nents;
c66ac9db 3917
6708bb27 3918 pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
a1d8b49a
AG
3919 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
3920 cmd->t_tasks_sg_chained_no);
c66ac9db 3921
a1d8b49a
AG
3922 for_each_sg(cmd->t_tasks_sg_chained, sg,
3923 cmd->t_tasks_sg_chained_no, i) {
c66ac9db 3924
6708bb27 3925 pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
5951146d 3926 i, sg, sg_page(sg), sg->length, sg->offset);
c66ac9db 3927 if (sg_is_chain(sg))
6708bb27 3928 pr_debug("SG: %p sg_is_chain=1\n", sg);
c66ac9db 3929 if (sg_is_last(sg))
6708bb27 3930 pr_debug("SG: %p sg_is_last=1\n", sg);
c66ac9db 3931 }
c66ac9db
NB
3932}
3933EXPORT_SYMBOL(transport_do_task_sg_chain);
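/*
 * Hedged usage sketch (not part of this file): a HW target mode fabric with
 * check_task_sg_chaining enabled could build the single chained SGL and
 * then walk it exactly like the debug loop above when programming its DMA
 * engine.  example_hw_fabric_map_sgl() is an illustrative name only.
 */
#if 0
static void example_hw_fabric_map_sgl(struct se_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	transport_do_task_sg_chain(cmd);

	for_each_sg(cmd->t_tasks_sg_chained, sg,
			cmd->t_tasks_sg_chained_no, i)
		pr_debug("DMA segment %d: page: %p len: %u\n",
			i, sg_page(sg), sg->length);
}
#endif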
3934
a1d8b49a
AG
3935/*
3936 * Break up cmd into chunks transport can handle
3937 */
ec98f782 3938static int transport_allocate_data_tasks(
c66ac9db
NB
3939 struct se_cmd *cmd,
3940 unsigned long long lba,
c66ac9db 3941 enum dma_data_direction data_direction,
ec98f782
AG
3942 struct scatterlist *sgl,
3943 unsigned int sgl_nents)
c66ac9db
NB
3944{
3945 unsigned char *cdb = NULL;
3946 struct se_task *task;
5951146d 3947 struct se_device *dev = cmd->se_dev;
ec98f782 3948 unsigned long flags;
a3eedc22 3949 int task_count, i;
277c5f27 3950 sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
ec98f782
AG
3951 u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
3952 struct scatterlist *sg;
3953 struct scatterlist *cmd_sg;
a1d8b49a 3954
ec98f782
AG
3955 WARN_ON(cmd->data_length % sector_size);
3956 sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
277c5f27
NB
3957 task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
3958
ec98f782
AG
3959 cmd_sg = sgl;
3960 for (i = 0; i < task_count; i++) {
c3c74c7a 3961 unsigned int task_size, task_sg_nents_padded;
ec98f782 3962 int count;
a1d8b49a 3963
c66ac9db 3964 task = transport_generic_get_task(cmd, data_direction);
a1d8b49a 3965 if (!task)
ec98f782 3966 return -ENOMEM;
c66ac9db 3967
c66ac9db 3968 task->task_lba = lba;
ec98f782
AG
3969 task->task_sectors = min(sectors, dev_max_sectors);
3970 task->task_size = task->task_sectors * sector_size;
c66ac9db 3971
e3d6f909 3972 cdb = dev->transport->get_cdb(task);
a1d8b49a
AG
3973 BUG_ON(!cdb);
3974
3975 memcpy(cdb, cmd->t_task_cdb,
3976 scsi_command_size(cmd->t_task_cdb));
3977
3978 /* Update new cdb with updated lba/sectors */
3a867205 3979 cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
525a48a2
NB
3980 /*
3981 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
3982 * in order to calculate the number of per-task SGL entries
3983 */
3984 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
c66ac9db 3985 /*
ec98f782
AG
3986 * Check if the fabric module driver is requesting that all
3987 * struct se_task->task_sg[] be chained together.. If so,
3988 * then allocate an extra padding SG entry for linking and
c3c74c7a
NB
3989 * marking the end of the chained SGL for every task except
3990 * the last one for (task_count > 1) operation, or skipping
3991 * the extra padding for the (task_count == 1) case.
c66ac9db 3992 */
c3c74c7a
NB
3993 if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
3994 task_sg_nents_padded = (task->task_sg_nents + 1);
ec98f782 3995 task->task_padded_sg = 1;
c3c74c7a
NB
3996 } else
3997 task_sg_nents_padded = task->task_sg_nents;
c66ac9db 3998
1d20bb61 3999 task->task_sg = kmalloc(sizeof(struct scatterlist) *
c3c74c7a 4000 task_sg_nents_padded, GFP_KERNEL);
ec98f782
AG
4001 if (!task->task_sg) {
4002 cmd->se_dev->transport->free_task(task);
4003 return -ENOMEM;
4004 }
4005
c3c74c7a 4006 sg_init_table(task->task_sg, task_sg_nents_padded);
c66ac9db 4007
ec98f782
AG
4008 task_size = task->task_size;
4009
4010 /* Build new sgl, only up to task_size */
6708bb27 4011 for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
ec98f782
AG
4012 if (cmd_sg->length > task_size)
4013 break;
4014
4015 *sg = *cmd_sg;
4016 task_size -= cmd_sg->length;
4017 cmd_sg = sg_next(cmd_sg);
c66ac9db 4018 }
c66ac9db 4019
ec98f782
AG
4020 lba += task->task_sectors;
4021 sectors -= task->task_sectors;
c66ac9db 4022
ec98f782
AG
4023 spin_lock_irqsave(&cmd->t_state_lock, flags);
4024 list_add_tail(&task->t_list, &cmd->t_task_list);
4025 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
4026 }
4027
ec98f782 4028 return task_count;
c66ac9db
NB
4029}
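/*
 * Hedged worked example of the split above: with a 512 byte block_size and
 * max_sectors = 1024, a 1 MiB WRITE is 2048 sectors, so
 * task_count = DIV_ROUND_UP_SECTOR_T(2048, 1024) = 2 and each se_task
 * carries 1024 sectors (512 KiB) of the command's scatterlist.
 */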
4030
4031static int
ec98f782 4032transport_allocate_control_task(struct se_cmd *cmd)
c66ac9db 4033{
5951146d 4034 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
4035 unsigned char *cdb;
4036 struct se_task *task;
ec98f782 4037 unsigned long flags;
c66ac9db
NB
4038
4039 task = transport_generic_get_task(cmd, cmd->data_direction);
4040 if (!task)
ec98f782 4041 return -ENOMEM;
c66ac9db 4042
e3d6f909 4043 cdb = dev->transport->get_cdb(task);
a1d8b49a
AG
4044 BUG_ON(!cdb);
4045 memcpy(cdb, cmd->t_task_cdb,
4046 scsi_command_size(cmd->t_task_cdb));
c66ac9db 4047
ec98f782
AG
4048 task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
4049 GFP_KERNEL);
4050 if (!task->task_sg) {
4051 cmd->se_dev->transport->free_task(task);
4052 return -ENOMEM;
4053 }
4054
4055 memcpy(task->task_sg, cmd->t_data_sg,
4056 sizeof(struct scatterlist) * cmd->t_data_nents);
c66ac9db 4057 task->task_size = cmd->data_length;
6708bb27 4058 task->task_sg_nents = cmd->t_data_nents;
c66ac9db 4059
ec98f782
AG
4060 spin_lock_irqsave(&cmd->t_state_lock, flags);
4061 list_add_tail(&task->t_list, &cmd->t_task_list);
4062 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 4063
6708bb27 4064 /* Success! Return number of tasks allocated */
a3eedc22 4065 return 1;
ec98f782
AG
4066}
4067
4068static u32 transport_allocate_tasks(
4069 struct se_cmd *cmd,
4070 unsigned long long lba,
4071 enum dma_data_direction data_direction,
4072 struct scatterlist *sgl,
4073 unsigned int sgl_nents)
4074{
01cde4d5
NB
4075 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
4076 if (transport_cmd_get_valid_sectors(cmd) < 0)
4077 return -EINVAL;
4078
ec98f782
AG
4079 return transport_allocate_data_tasks(cmd, lba, data_direction,
4080 sgl, sgl_nents);
01cde4d5 4081 } else
6708bb27
AG
4082 return transport_allocate_control_task(cmd);
4083
c66ac9db
NB
4084}
4085
ec98f782 4086
c66ac9db
NB
4087/* transport_generic_new_cmd(): Called from transport_processing_thread()
4088 *
4089 * Allocate storage transport resources from a set of values predefined
4090 * by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
4091 * Any non zero return here is treated as an "out of resource" op.
4092 */
4093 /*
4094 * Generate struct se_task(s) and/or their payloads for this CDB.
4095 */
a1d8b49a 4096int transport_generic_new_cmd(struct se_cmd *cmd)
c66ac9db 4097{
c66ac9db
NB
4098 int ret = 0;
4099
4100 /*
4101 * Determine if the TCM fabric module has already allocated physical
4102 * memory, and is directly calling transport_generic_map_mem_to_cmd()
ec98f782 4103 * beforehand.
c66ac9db 4104 */
ec98f782
AG
4105 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
4106 cmd->data_length) {
05d1c7c0 4107 ret = transport_generic_get_mem(cmd);
c66ac9db
NB
4108 if (ret < 0)
4109 return ret;
4110 }
1d20bb61
NB
4111 /*
4112 * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for
4113 * control or data CDB types, and perform the map to backend subsystem
4114 * code from SGL memory allocated here by transport_generic_get_mem(), or
4115 * via pre-existing SGL memory set up explicitly by fabric module code with
4116 * transport_generic_map_mem_to_cmd().
4117 */
c66ac9db
NB
4118 ret = transport_new_cmd_obj(cmd);
4119 if (ret < 0)
4120 return ret;
c66ac9db 4121 /*
a1d8b49a 4122 * For WRITEs, let the fabric know its buffer is ready..
c66ac9db
NB
4123 * This WRITE struct se_cmd (and all of its associated struct se_task's)
4124 * will be added to the struct se_device execution queue after its WRITE
4125 * data has arrived. (ie: It gets handled by the transport processing
4126 * thread a second time)
4127 */
4128 if (cmd->data_direction == DMA_TO_DEVICE) {
4129 transport_add_tasks_to_state_queue(cmd);
4130 return transport_generic_write_pending(cmd);
4131 }
4132 /*
4133 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
4134 * to the execution queue.
4135 */
4136 transport_execute_tasks(cmd);
4137 return 0;
4138}
a1d8b49a 4139EXPORT_SYMBOL(transport_generic_new_cmd);
c66ac9db
NB
4140
4141/* transport_generic_process_write():
4142 *
4143 *
4144 */
4145void transport_generic_process_write(struct se_cmd *cmd)
4146{
c66ac9db
NB
4147 transport_execute_tasks(cmd);
4148}
4149EXPORT_SYMBOL(transport_generic_process_write);
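/*
 * Hedged sketch of the WRITE round trip described in
 * transport_generic_new_cmd(): the fabric's write_pending() callback
 * solicits the payload from the initiator, and once it has landed in
 * cmd->t_data_sg the fabric hands the command back through
 * transport_generic_process_write().  example_fabric_data_received() is an
 * illustrative name only.
 */
#if 0
static void example_fabric_data_received(struct se_cmd *cmd)
{
	/* All solicited WRITE data has been copied into cmd->t_data_sg */
	transport_generic_process_write(cmd);
}
#endif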
4150
07bde79a
NB
4151static int transport_write_pending_qf(struct se_cmd *cmd)
4152{
4153 return cmd->se_tfo->write_pending(cmd);
4154}
4155
c66ac9db
NB
4156/* transport_generic_write_pending():
4157 *
4158 *
4159 */
4160static int transport_generic_write_pending(struct se_cmd *cmd)
4161{
4162 unsigned long flags;
4163 int ret;
4164
a1d8b49a 4165 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 4166 cmd->t_state = TRANSPORT_WRITE_PENDING;
a1d8b49a 4167 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
07bde79a
NB
4168
4169 if (cmd->transport_qf_callback) {
4170 ret = cmd->transport_qf_callback(cmd);
4171 if (ret == -EAGAIN)
4172 goto queue_full;
4173 else if (ret < 0)
4174 return ret;
4175
4176 cmd->transport_qf_callback = NULL;
4177 return 0;
4178 }
05d1c7c0 4179
c66ac9db
NB
4180 /*
4181 * Clear the se_cmd for WRITE_PENDING status in order to set
a1d8b49a 4182 * cmd->t_transport_active=0 so that transport_generic_handle_data
c66ac9db 4183 * can be called from HW target mode interrupt code. This is safe
e3d6f909 4184 * to be called with transport_off=1 before the cmd->se_tfo->write_pending
c66ac9db
NB
4185 * because the se_cmd->se_lun pointer is not being cleared.
4186 */
4187 transport_cmd_check_stop(cmd, 1, 0);
4188
4189 /*
4190 * Call the fabric write_pending function here to let the
4191 * frontend know that WRITE buffers are ready.
4192 */
e3d6f909 4193 ret = cmd->se_tfo->write_pending(cmd);
07bde79a
NB
4194 if (ret == -EAGAIN)
4195 goto queue_full;
4196 else if (ret < 0)
c66ac9db
NB
4197 return ret;
4198
4199 return PYX_TRANSPORT_WRITE_PENDING;
07bde79a
NB
4200
4201queue_full:
6708bb27 4202 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
07bde79a
NB
4203 cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
4204 transport_handle_queue_full(cmd, cmd->se_dev,
4205 transport_write_pending_qf);
4206 return ret;
c66ac9db
NB
4207}
4208
2dbc43d2
CH
4209/**
4210 * transport_release_cmd - free a command
4211 * @cmd: command to free
4212 *
4213 * This routine unconditionally frees a command, and reference counting
4214 * or list removal must be done in the caller.
4215 */
35462975 4216void transport_release_cmd(struct se_cmd *cmd)
c66ac9db 4217{
e3d6f909 4218 BUG_ON(!cmd->se_tfo);
c66ac9db 4219
2dbc43d2
CH
4220 if (cmd->se_tmr_req)
4221 core_tmr_release_req(cmd->se_tmr_req);
4222 if (cmd->t_task_cdb != cmd->__t_task_cdb)
4223 kfree(cmd->t_task_cdb);
35462975 4224 cmd->se_tfo->release_cmd(cmd);
c66ac9db 4225}
35462975 4226EXPORT_SYMBOL(transport_release_cmd);
c66ac9db 4227
39c05f32 4228void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
c66ac9db 4229{
d14921d6
NB
4230 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
4231 if (wait_for_tasks && cmd->se_tmr_req)
4232 transport_wait_for_tasks(cmd);
4233
35462975 4234 transport_release_cmd(cmd);
d14921d6
NB
4235 } else {
4236 if (wait_for_tasks)
4237 transport_wait_for_tasks(cmd);
4238
c66ac9db
NB
4239 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
4240
82f1c8a4 4241 if (cmd->se_lun)
c66ac9db 4242 transport_lun_remove_cmd(cmd);
c66ac9db 4243
f4366772
NB
4244 transport_free_dev_tasks(cmd);
4245
39c05f32 4246 transport_put_cmd(cmd);
c66ac9db
NB
4247 }
4248}
4249EXPORT_SYMBOL(transport_generic_free_cmd);
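/*
 * Hedged usage sketch: a fabric tearing a command down outside its normal
 * completion path can ask the core to quiesce outstanding tasks and release
 * everything in one call.  example_fabric_release() is an illustrative name.
 */
#if 0
static void example_fabric_release(struct se_cmd *cmd)
{
	transport_generic_free_cmd(cmd, 1);	/* wait_for_tasks = 1 */
}
#endif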
4250
c66ac9db
NB
4251/* transport_lun_wait_for_tasks():
4252 *
4253 * Called from ConfigFS context to stop the passed struct se_cmd to allow
4254 * a struct se_lun to be successfully shut down.
4255 */
4256static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4257{
4258 unsigned long flags;
4259 int ret;
4260 /*
4261 * If the frontend has already requested this struct se_cmd to
4262 * be stopped, we can safely ignore this struct se_cmd.
4263 */
a1d8b49a
AG
4264 spin_lock_irqsave(&cmd->t_state_lock, flags);
4265 if (atomic_read(&cmd->t_transport_stop)) {
4266 atomic_set(&cmd->transport_lun_stop, 0);
6708bb27 4267 pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
e3d6f909 4268 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
a1d8b49a 4269 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 4270 transport_cmd_check_stop(cmd, 1, 0);
e3d6f909 4271 return -EPERM;
c66ac9db 4272 }
a1d8b49a
AG
4273 atomic_set(&cmd->transport_lun_fe_stop, 1);
4274 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 4275
5951146d 4276 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
c66ac9db
NB
4277
4278 ret = transport_stop_tasks_for_cmd(cmd);
4279
6708bb27
AG
4280 pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
4281 " %d\n", cmd, cmd->t_task_list_num, ret);
c66ac9db 4282 if (!ret) {
6708bb27 4283 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
e3d6f909 4284 cmd->se_tfo->get_task_tag(cmd));
a1d8b49a 4285 wait_for_completion(&cmd->transport_lun_stop_comp);
6708bb27 4286 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
e3d6f909 4287 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4288 }
5951146d 4289 transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
c66ac9db
NB
4290
4291 return 0;
4292}
4293
c66ac9db
NB
4294static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4295{
4296 struct se_cmd *cmd = NULL;
4297 unsigned long lun_flags, cmd_flags;
4298 /*
4299 * Do exception processing and return CHECK_CONDITION status to the
4300 * Initiator Port.
4301 */
4302 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5951146d
AG
4303 while (!list_empty(&lun->lun_cmd_list)) {
4304 cmd = list_first_entry(&lun->lun_cmd_list,
4305 struct se_cmd, se_lun_node);
4306 list_del(&cmd->se_lun_node);
4307
a1d8b49a 4308 atomic_set(&cmd->transport_lun_active, 0);
c66ac9db
NB
4309 /*
4310 * This will notify iscsi_target_transport.c:
4311 * transport_cmd_check_stop() that a LUN shutdown is in
4312 * progress for the iscsi_cmd_t.
4313 */
a1d8b49a 4314 spin_lock(&cmd->t_state_lock);
6708bb27 4315 pr_debug("SE_LUN[%d] - Setting cmd->transport"
c66ac9db 4316 "_lun_stop for ITT: 0x%08x\n",
e3d6f909
AG
4317 cmd->se_lun->unpacked_lun,
4318 cmd->se_tfo->get_task_tag(cmd));
a1d8b49a
AG
4319 atomic_set(&cmd->transport_lun_stop, 1);
4320 spin_unlock(&cmd->t_state_lock);
c66ac9db
NB
4321
4322 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4323
6708bb27
AG
4324 if (!cmd->se_lun) {
4325 pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
e3d6f909
AG
4326 cmd->se_tfo->get_task_tag(cmd),
4327 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
c66ac9db
NB
4328 BUG();
4329 }
4330 /*
4331 * If the Storage engine still owns the iscsi_cmd_t, determine
4332 * and/or stop its context.
4333 */
6708bb27 4334 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
e3d6f909
AG
4335 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
4336 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4337
e3d6f909 4338 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
c66ac9db
NB
4339 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4340 continue;
4341 }
4342
6708bb27 4343 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
c66ac9db 4344 "_wait_for_tasks(): SUCCESS\n",
e3d6f909
AG
4345 cmd->se_lun->unpacked_lun,
4346 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4347
a1d8b49a 4348 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
6708bb27 4349 if (!atomic_read(&cmd->transport_dev_active)) {
a1d8b49a 4350 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
c66ac9db
NB
4351 goto check_cond;
4352 }
a1d8b49a 4353 atomic_set(&cmd->transport_dev_active, 0);
c66ac9db 4354 transport_all_task_dev_remove_state(cmd);
a1d8b49a 4355 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
c66ac9db
NB
4356
4357 transport_free_dev_tasks(cmd);
4358 /*
4359 * The Storage engine stopped this struct se_cmd before it was
4360 * sent to the fabric frontend for delivery back to the
4361 * Initiator Node. Return this SCSI CDB back with a
4362 * CHECK_CONDITION status.
4363 */
4364check_cond:
4365 transport_send_check_condition_and_sense(cmd,
4366 TCM_NON_EXISTENT_LUN, 0);
4367 /*
4368 * If the fabric frontend is waiting for this iscsi_cmd_t to
4369 * be released, notify the waiting thread now that LU has
4370 * finished accessing it.
4371 */
a1d8b49a
AG
4372 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4373 if (atomic_read(&cmd->transport_lun_fe_stop)) {
6708bb27 4374 pr_debug("SE_LUN[%d] - Detected FE stop for"
c66ac9db
NB
4375 " struct se_cmd: %p ITT: 0x%08x\n",
4376 lun->unpacked_lun,
e3d6f909 4377 cmd, cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4378
a1d8b49a 4379 spin_unlock_irqrestore(&cmd->t_state_lock,
c66ac9db
NB
4380 cmd_flags);
4381 transport_cmd_check_stop(cmd, 1, 0);
a1d8b49a 4382 complete(&cmd->transport_lun_fe_stop_comp);
c66ac9db
NB
4383 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4384 continue;
4385 }
6708bb27 4386 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
e3d6f909 4387 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4388
a1d8b49a 4389 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
c66ac9db
NB
4390 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4391 }
4392 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4393}
4394
4395static int transport_clear_lun_thread(void *p)
4396{
4397 struct se_lun *lun = (struct se_lun *)p;
4398
4399 __transport_clear_lun_from_sessions(lun);
4400 complete(&lun->lun_shutdown_comp);
4401
4402 return 0;
4403}
4404
4405int transport_clear_lun_from_sessions(struct se_lun *lun)
4406{
4407 struct task_struct *kt;
4408
5951146d 4409 kt = kthread_run(transport_clear_lun_thread, lun,
c66ac9db
NB
4410 "tcm_cl_%u", lun->unpacked_lun);
4411 if (IS_ERR(kt)) {
6708bb27 4412 pr_err("Unable to start clear_lun thread\n");
e3d6f909 4413 return PTR_ERR(kt);
c66ac9db
NB
4414 }
4415 wait_for_completion(&lun->lun_shutdown_comp);
4416
4417 return 0;
4418}
4419
d14921d6
NB
4420/**
4421 * transport_wait_for_tasks - wait for completion to occur
4422 * @cmd: command to wait
c66ac9db 4423 *
d14921d6
NB
4424 * Called from frontend fabric context to wait for storage engine
4425 * to pause and/or release frontend generated struct se_cmd.
c66ac9db 4426 */
d14921d6 4427void transport_wait_for_tasks(struct se_cmd *cmd)
c66ac9db
NB
4428{
4429 unsigned long flags;
4430
a1d8b49a 4431 spin_lock_irqsave(&cmd->t_state_lock, flags);
d14921d6
NB
4432 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
4433 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4434 return;
4435 }
4436 /*
4437 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
4438 * has been set in transport_set_supported_SAM_opcode().
4439 */
4440 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
4441 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4442 return;
4443 }
c66ac9db
NB
4444 /*
4445 * If we are already stopped due to an external event (ie: LUN shutdown)
4446 * sleep until the connection can have the passed struct se_cmd back.
a1d8b49a 4447 * The cmd->transport_lun_fe_stop_comp will be completed by
c66ac9db
NB
4448 * transport_clear_lun_from_sessions() once the ConfigFS context caller
4449 * has completed its operation on the struct se_cmd.
4450 */
a1d8b49a 4451 if (atomic_read(&cmd->transport_lun_stop)) {
c66ac9db 4452
6708bb27 4453 pr_debug("wait_for_tasks: Stopping"
e3d6f909 4454 " wait_for_completion(&cmd->transport_lun_fe"
c66ac9db 4455 "_stop_comp); for ITT: 0x%08x\n",
e3d6f909 4456 cmd->se_tfo->get_task_tag(cmd));
c66ac9db
NB
4457 /*
4458 * There is a special case for WRITES where a FE exception +
4459 * LUN shutdown means ConfigFS context is still sleeping on
4460 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
4461 * We go ahead and up transport_lun_stop_comp just to be sure
4462 * here.
4463 */
a1d8b49a
AG
4464 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4465 complete(&cmd->transport_lun_stop_comp);
4466 wait_for_completion(&cmd->transport_lun_fe_stop_comp);
4467 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db
NB
4468
4469 transport_all_task_dev_remove_state(cmd);
4470 /*
4471 * At this point, the frontend who was the originator of this
4472 * struct se_cmd, now owns the structure and can be released through
4473 * normal means below.
4474 */
6708bb27 4475 pr_debug("wait_for_tasks: Stopped"
e3d6f909 4476 " wait_for_completion(&cmd->transport_lun_fe_"
c66ac9db 4477 "stop_comp); for ITT: 0x%08x\n",
e3d6f909 4478 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4479
a1d8b49a 4480 atomic_set(&cmd->transport_lun_stop, 0);
c66ac9db 4481 }
a1d8b49a 4482 if (!atomic_read(&cmd->t_transport_active) ||
d14921d6
NB
4483 atomic_read(&cmd->t_transport_aborted)) {
4484 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4485 return;
4486 }
c66ac9db 4487
a1d8b49a 4488 atomic_set(&cmd->t_transport_stop, 1);
c66ac9db 4489
6708bb27 4490 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
c66ac9db 4491 " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
e3d6f909
AG
4492 " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
4493 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
c66ac9db
NB
4494 cmd->deferred_t_state);
4495
a1d8b49a 4496 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 4497
5951146d 4498 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
c66ac9db 4499
a1d8b49a 4500 wait_for_completion(&cmd->t_transport_stop_comp);
c66ac9db 4501
a1d8b49a
AG
4502 spin_lock_irqsave(&cmd->t_state_lock, flags);
4503 atomic_set(&cmd->t_transport_active, 0);
4504 atomic_set(&cmd->t_transport_stop, 0);
c66ac9db 4505
6708bb27 4506 pr_debug("wait_for_tasks: Stopped wait_for_completion("
a1d8b49a 4507 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
e3d6f909 4508 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4509
d14921d6 4510 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db 4511}
d14921d6 4512EXPORT_SYMBOL(transport_wait_for_tasks);
c66ac9db
NB
4513
4514static int transport_get_sense_codes(
4515 struct se_cmd *cmd,
4516 u8 *asc,
4517 u8 *ascq)
4518{
4519 *asc = cmd->scsi_asc;
4520 *ascq = cmd->scsi_ascq;
4521
4522 return 0;
4523}
4524
4525static int transport_set_sense_codes(
4526 struct se_cmd *cmd,
4527 u8 asc,
4528 u8 ascq)
4529{
4530 cmd->scsi_asc = asc;
4531 cmd->scsi_ascq = ascq;
4532
4533 return 0;
4534}
4535
4536int transport_send_check_condition_and_sense(
4537 struct se_cmd *cmd,
4538 u8 reason,
4539 int from_transport)
4540{
4541 unsigned char *buffer = cmd->sense_buffer;
4542 unsigned long flags;
4543 int offset;
4544 u8 asc = 0, ascq = 0;
4545
a1d8b49a 4546 spin_lock_irqsave(&cmd->t_state_lock, flags);
c66ac9db 4547 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
a1d8b49a 4548 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
4549 return 0;
4550 }
4551 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
a1d8b49a 4552 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
c66ac9db
NB
4553
4554 if (!reason && from_transport)
4555 goto after_reason;
4556
4557 if (!from_transport)
4558 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
4559 /*
4560 * Data Segment and SenseLength of the fabric response PDU.
4561 *
4562 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
4563 * from include/scsi/scsi_cmnd.h
4564 */
e3d6f909 4565 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
c66ac9db
NB
4566 TRANSPORT_SENSE_BUFFER);
4567 /*
4568 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
4569 * SENSE KEY values from include/scsi/scsi.h
4570 */
4571 switch (reason) {
4572 case TCM_NON_EXISTENT_LUN:
eb39d340
NB
4573 /* CURRENT ERROR */
4574 buffer[offset] = 0x70;
4575 /* ILLEGAL REQUEST */
4576 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4577 /* LOGICAL UNIT NOT SUPPORTED */
4578 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
4579 break;
c66ac9db
NB
4580 case TCM_UNSUPPORTED_SCSI_OPCODE:
4581 case TCM_SECTOR_COUNT_TOO_MANY:
4582 /* CURRENT ERROR */
4583 buffer[offset] = 0x70;
4584 /* ILLEGAL REQUEST */
4585 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4586 /* INVALID COMMAND OPERATION CODE */
4587 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
4588 break;
4589 case TCM_UNKNOWN_MODE_PAGE:
4590 /* CURRENT ERROR */
4591 buffer[offset] = 0x70;
4592 /* ILLEGAL REQUEST */
4593 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4594 /* INVALID FIELD IN CDB */
4595 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4596 break;
4597 case TCM_CHECK_CONDITION_ABORT_CMD:
4598 /* CURRENT ERROR */
4599 buffer[offset] = 0x70;
4600 /* ABORTED COMMAND */
4601 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4602 /* BUS DEVICE RESET FUNCTION OCCURRED */
4603 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
4604 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
4605 break;
4606 case TCM_INCORRECT_AMOUNT_OF_DATA:
4607 /* CURRENT ERROR */
4608 buffer[offset] = 0x70;
4609 /* ABORTED COMMAND */
4610 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4611 /* WRITE ERROR */
4612 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4613 /* NOT ENOUGH UNSOLICITED DATA */
4614 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
4615 break;
4616 case TCM_INVALID_CDB_FIELD:
4617 /* CURRENT ERROR */
4618 buffer[offset] = 0x70;
4619 /* ABORTED COMMAND */
4620 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4621 /* INVALID FIELD IN CDB */
4622 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4623 break;
4624 case TCM_INVALID_PARAMETER_LIST:
4625 /* CURRENT ERROR */
4626 buffer[offset] = 0x70;
4627 /* ABORTED COMMAND */
4628 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4629 /* INVALID FIELD IN PARAMETER LIST */
4630 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
4631 break;
4632 case TCM_UNEXPECTED_UNSOLICITED_DATA:
4633 /* CURRENT ERROR */
4634 buffer[offset] = 0x70;
4635 /* ABORTED COMMAND */
4636 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4637 /* WRITE ERROR */
4638 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4639 /* UNEXPECTED_UNSOLICITED_DATA */
4640 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
4641 break;
4642 case TCM_SERVICE_CRC_ERROR:
4643 /* CURRENT ERROR */
4644 buffer[offset] = 0x70;
4645 /* ABORTED COMMAND */
4646 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4647 /* PROTOCOL SERVICE CRC ERROR */
4648 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
4649 /* N/A */
4650 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
4651 break;
4652 case TCM_SNACK_REJECTED:
4653 /* CURRENT ERROR */
4654 buffer[offset] = 0x70;
4655 /* ABORTED COMMAND */
4656 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4657 /* READ ERROR */
4658 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
4659 /* FAILED RETRANSMISSION REQUEST */
4660 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
4661 break;
4662 case TCM_WRITE_PROTECTED:
4663 /* CURRENT ERROR */
4664 buffer[offset] = 0x70;
4665 /* DATA PROTECT */
4666 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
4667 /* WRITE PROTECTED */
4668 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
4669 break;
4670 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
4671 /* CURRENT ERROR */
4672 buffer[offset] = 0x70;
4673 /* UNIT ATTENTION */
4674 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
4675 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
4676 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4677 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4678 break;
4679 case TCM_CHECK_CONDITION_NOT_READY:
4680 /* CURRENT ERROR */
4681 buffer[offset] = 0x70;
4682 /* Not Ready */
4683 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
4684 transport_get_sense_codes(cmd, &asc, &ascq);
4685 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4686 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4687 break;
4688 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
4689 default:
4690 /* CURRENT ERROR */
4691 buffer[offset] = 0x70;
4692 /* ILLEGAL REQUEST */
4693 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4694 /* LOGICAL UNIT COMMUNICATION FAILURE */
4695 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
4696 break;
4697 }
4698 /*
4699 * This code uses linux/include/scsi/scsi.h SAM status codes!
4700 */
4701 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
4702 /*
4703 * Automatically padded, this value is encoded in the fabric's
4704 * data_length response PDU containing the SCSI defined sense data.
4705 */
4706 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
4707
4708after_reason:
07bde79a 4709 return cmd->se_tfo->queue_status(cmd);
c66ac9db
NB
4710}
4711EXPORT_SYMBOL(transport_send_check_condition_and_sense);
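/*
 * Hedged usage sketch: emulation code that detects a bad CDB field reports
 * it to the initiator through the helper above rather than filling the
 * sense buffer by hand.  example_reject_cdb() is an illustrative name only.
 */
#if 0
static int example_reject_cdb(struct se_cmd *cmd)
{
	return transport_send_check_condition_and_sense(cmd,
			TCM_INVALID_CDB_FIELD, 0);
}
#endif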
4712
4713int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
4714{
4715 int ret = 0;
4716
a1d8b49a 4717 if (atomic_read(&cmd->t_transport_aborted) != 0) {
6708bb27 4718 if (!send_status ||
c66ac9db
NB
4719 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
4720 return 1;
4721#if 0
6708bb27 4722 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
c66ac9db 4723 " status for CDB: 0x%02x ITT: 0x%08x\n",
a1d8b49a 4724 cmd->t_task_cdb[0],
e3d6f909 4725 cmd->se_tfo->get_task_tag(cmd));
c66ac9db
NB
4726#endif
4727 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
e3d6f909 4728 cmd->se_tfo->queue_status(cmd);
c66ac9db
NB
4729 ret = 1;
4730 }
4731 return ret;
4732}
4733EXPORT_SYMBOL(transport_check_aborted_status);
4734
4735void transport_send_task_abort(struct se_cmd *cmd)
4736{
c252f003
NB
4737 unsigned long flags;
4738
4739 spin_lock_irqsave(&cmd->t_state_lock, flags);
4740 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4741 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4742 return;
4743 }
4744 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4745
c66ac9db
NB
4746 /*
4747 * If there are still expected incoming fabric WRITEs, we wait
4748 * until they have completed before sending a TASK_ABORTED
4749 * response. This response with TASK_ABORTED status will be
4750 * queued back to fabric module by transport_check_aborted_status().
4751 */
4752 if (cmd->data_direction == DMA_TO_DEVICE) {
e3d6f909 4753 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
a1d8b49a 4754 atomic_inc(&cmd->t_transport_aborted);
c66ac9db
NB
4755 smp_mb__after_atomic_inc();
4756 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4757 transport_new_cmd_failure(cmd);
4758 return;
4759 }
4760 }
4761 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4762#if 0
6708bb27 4763 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
a1d8b49a 4764 " ITT: 0x%08x\n", cmd->t_task_cdb[0],
e3d6f909 4765 cmd->se_tfo->get_task_tag(cmd));
c66ac9db 4766#endif
e3d6f909 4767 cmd->se_tfo->queue_status(cmd);
c66ac9db
NB
4768}
4769
4770/* transport_generic_do_tmr():
4771 *
4772 *
4773 */
4774int transport_generic_do_tmr(struct se_cmd *cmd)
4775{
5951146d 4776 struct se_device *dev = cmd->se_dev;
c66ac9db
NB
4777 struct se_tmr_req *tmr = cmd->se_tmr_req;
4778 int ret;
4779
4780 switch (tmr->function) {
5c6cd613 4781 case TMR_ABORT_TASK:
c66ac9db
NB
4782 tmr->response = TMR_FUNCTION_REJECTED;
4783 break;
5c6cd613
NB
4784 case TMR_ABORT_TASK_SET:
4785 case TMR_CLEAR_ACA:
4786 case TMR_CLEAR_TASK_SET:
c66ac9db
NB
4787 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
4788 break;
5c6cd613 4789 case TMR_LUN_RESET:
c66ac9db
NB
4790 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
4791 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
4792 TMR_FUNCTION_REJECTED;
4793 break;
5c6cd613 4794 case TMR_TARGET_WARM_RESET:
c66ac9db
NB
4795 tmr->response = TMR_FUNCTION_REJECTED;
4796 break;
5c6cd613 4797 case TMR_TARGET_COLD_RESET:
c66ac9db
NB
4798 tmr->response = TMR_FUNCTION_REJECTED;
4799 break;
c66ac9db 4800 default:
6708bb27 4801 pr_err("Unknown TMR function: 0x%02x.\n",
c66ac9db
NB
4802 tmr->function);
4803 tmr->response = TMR_FUNCTION_REJECTED;
4804 break;
4805 }
4806
4807 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
e3d6f909 4808 cmd->se_tfo->queue_tm_rsp(cmd);
c66ac9db
NB
4809
4810 transport_cmd_check_stop(cmd, 2, 0);
4811 return 0;
4812}
4813
c66ac9db
NB
4814/* transport_processing_thread():
4815 *
4816 *
4817 */
4818static int transport_processing_thread(void *param)
4819{
5951146d 4820 int ret;
c66ac9db
NB
4821 struct se_cmd *cmd;
4822 struct se_device *dev = (struct se_device *) param;
c66ac9db
NB
4823
4824 set_user_nice(current, -20);
4825
4826 while (!kthread_should_stop()) {
e3d6f909
AG
4827 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
4828 atomic_read(&dev->dev_queue_obj.queue_cnt) ||
c66ac9db
NB
4829 kthread_should_stop());
4830 if (ret < 0)
4831 goto out;
4832
c66ac9db
NB
4833get_cmd:
4834 __transport_execute_tasks(dev);
4835
5951146d
AG
4836 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
4837 if (!cmd)
c66ac9db
NB
4838 continue;
4839
5951146d 4840 switch (cmd->t_state) {
680b73c5
CH
4841 case TRANSPORT_NEW_CMD:
4842 BUG();
4843 break;
c66ac9db 4844 case TRANSPORT_NEW_CMD_MAP:
6708bb27
AG
4845 if (!cmd->se_tfo->new_cmd_map) {
4846 pr_err("cmd->se_tfo->new_cmd_map is"
c66ac9db
NB
4847 " NULL for TRANSPORT_NEW_CMD_MAP\n");
4848 BUG();
4849 }
e3d6f909 4850 ret = cmd->se_tfo->new_cmd_map(cmd);
c66ac9db
NB
4851 if (ret < 0) {
4852 cmd->transport_error_status = ret;
4853 transport_generic_request_failure(cmd, NULL,
4854 0, (cmd->data_direction !=
4855 DMA_TO_DEVICE));
4856 break;
4857 }
c66ac9db 4858 ret = transport_generic_new_cmd(cmd);
07bde79a
NB
4859 if (ret == -EAGAIN)
4860 break;
4861 else if (ret < 0) {
c66ac9db
NB
4862 cmd->transport_error_status = ret;
4863 transport_generic_request_failure(cmd, NULL,
4864 0, (cmd->data_direction !=
4865 DMA_TO_DEVICE));
4866 }
4867 break;
4868 case TRANSPORT_PROCESS_WRITE:
4869 transport_generic_process_write(cmd);
4870 break;
4871 case TRANSPORT_COMPLETE_OK:
4872 transport_stop_all_task_timers(cmd);
4873 transport_generic_complete_ok(cmd);
4874 break;
4875 case TRANSPORT_REMOVE:
e6a2573f 4876 transport_put_cmd(cmd);
c66ac9db 4877 break;
f4366772 4878 case TRANSPORT_FREE_CMD_INTR:
82f1c8a4 4879 transport_generic_free_cmd(cmd, 0);
f4366772 4880 break;
c66ac9db
NB
4881 case TRANSPORT_PROCESS_TMR:
4882 transport_generic_do_tmr(cmd);
4883 break;
4884 case TRANSPORT_COMPLETE_FAILURE:
4885 transport_generic_request_failure(cmd, NULL, 1, 1);
4886 break;
4887 case TRANSPORT_COMPLETE_TIMEOUT:
4888 transport_stop_all_task_timers(cmd);
4889 transport_generic_request_timeout(cmd);
4890 break;
07bde79a
NB
4891 case TRANSPORT_COMPLETE_QF_WP:
4892 transport_generic_write_pending(cmd);
4893 break;
c66ac9db 4894 default:
6708bb27 4895 pr_err("Unknown t_state: %d deferred_t_state:"
c66ac9db 4896 " %d for ITT: 0x%08x i_state: %d on SE LUN:"
5951146d 4897 " %u\n", cmd->t_state, cmd->deferred_t_state,
e3d6f909
AG
4898 cmd->se_tfo->get_task_tag(cmd),
4899 cmd->se_tfo->get_cmd_state(cmd),
4900 cmd->se_lun->unpacked_lun);
c66ac9db
NB
4901 BUG();
4902 }
4903
4904 goto get_cmd;
4905 }
4906
4907out:
ce8762f6
NB
4908 WARN_ON(!list_empty(&dev->state_task_list));
4909 WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
c66ac9db
NB
4910 dev->process_thread = NULL;
4911 return 0;
4912}