// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>
#include <linux/cleanup.h>

#include <net/checksum.h>

#include <linux/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20210520";

#define MY_NAME "scsi_debug"
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define OVERLAP_ATOMIC_COMMAND_ASC 0x0
#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
#define FILEMARK_DETECTED_ASCQ 0x1
#define EOP_EOM_DETECTED_ASCQ 0x2
#define BEGINNING_OF_P_M_DETECTED_ASCQ 0x4
#define EOD_DETECTED_ASCQ 0x5
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_READY_ASC 0x28
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TOO_MANY_IN_PARTITION_ASC 0x3b
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe
/* see drivers/scsi/sense_codes.h */

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_HOST_LOCK 0
#define DEF_LBPWS10 0
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_ATOMIC_WR 0
#define DEF_ATOMIC_WR_MAX_LENGTH 128
#define DEF_ATOMIC_WR_ALIGN 2
#define DEF_ATOMIC_WR_GRAN 2
#define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
#define DEF_ATOMIC_WR_MAX_BNDRY 128
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999
/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

/* Default parameters for tape drives */
#define TAPE_DEF_DENSITY 0x0
#define TAPE_BAD_DENSITY 0x65
#define TAPE_DEF_BLKSIZE 0
#define TAPE_MIN_BLKSIZE 512
#define TAPE_MAX_BLKSIZE 1048576
#define TAPE_MAX_PARTITIONS 2
#define TAPE_UNITS 10000
#define TAPE_PARTITION_1_UNITS 1000
/* The tape block data definitions */
#define TAPE_BLOCK_FM_FLAG ((u32)0x1 << 30)
#define TAPE_BLOCK_EOD_FLAG ((u32)0x2 << 30)
#define TAPE_BLOCK_MARK_MASK ((u32)0x3 << 30)
#define TAPE_BLOCK_SIZE_MASK (~TAPE_BLOCK_MARK_MASK)
#define TAPE_BLOCK_MARK(a) (a & TAPE_BLOCK_MARK_MASK)
#define TAPE_BLOCK_SIZE(a) (a & TAPE_BLOCK_SIZE_MASK)
#define IS_TAPE_BLOCK_FM(a) ((a & TAPE_BLOCK_FM_FLAG) != 0)
#define IS_TAPE_BLOCK_EOD(a) ((a & TAPE_BLOCK_EOD_FLAG) != 0)

struct tape_block {
	u32 fl_size;	/* flags in the top 2 bits, block size in the rest */
	unsigned char data[4];
};
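/*
 * Worked example of the encoding above: a block whose fl_size word equals
 * (TAPE_BLOCK_FM_FLAG | 512) yields TAPE_BLOCK_MARK() == 0x40000000 (a
 * filemark) and TAPE_BLOCK_SIZE() == 512; the top two bits carry the
 * filemark/EOD flags and the low 30 bits carry the block size in bytes.
 */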
/* Flags for sense data */
#define SENSE_FLAG_FILEMARK 0x80
#define SENSE_FLAG_EOM 0x40
#define SENSE_FLAG_ILI 0x20

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE 1
#define SDEBUG_OPT_MEDIUM_ERR 2
#define SDEBUG_OPT_TIMEOUT 4
#define SDEBUG_OPT_RECOVERED_ERR 8
#define SDEBUG_OPT_TRANSPORT_ERR 16
#define SDEBUG_OPT_DIF_ERR 32
#define SDEBUG_OPT_DIX_ERR 64
#define SDEBUG_OPT_MAC_TIMEOUT 128
#define SDEBUG_OPT_SHORT_TRANSFER 0x100
#define SDEBUG_OPT_Q_NOISE 0x200
#define SDEBUG_OPT_ALL_TSF 0x400	/* ignore */
#define SDEBUG_OPT_RARE_TSF 0x800
#define SDEBUG_OPT_N_WCE 0x1000
#define SDEBUG_OPT_RESET_NOISE 0x2000
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
#define SDEBUG_OPT_HOST_BUSY 0x8000
#define SDEBUG_OPT_CMD_ABORT 0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
#define SDEBUG_UA_BUS_RESET 2
#define SDEBUG_UA_MODE_CHANGED 3
#define SDEBUG_UA_CAPACITY_CHANGED 4
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
#define SDEBUG_UA_NOT_READY_TO_READY 8
#define SDEBUG_NUM_UAS 9

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */
/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
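/*
 * Note: with SDEBUG_CANQUEUE_WORDS == 3, SDEBUG_CANQUEUE works out to
 * 3 * 64 = 192 commands on a 64-bit build (3 * 32 = 96 on 32-bit), i.e.
 * a per-queue bitmap of in-flight commands that fits in three longs.
 */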
/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN 1		/* Data-in command (e.g. READ) */
#define F_D_OUT 2		/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE 4		/* WRITE SAME, NDOB bit */
#define F_D_UNKN 8
#define F_RL_WLUN_OK 0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA 0x20		/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR 0x40	/* for commands like INQUIRY */
#define F_SA_LOW 0x80		/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH 0x100		/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP 0x200		/* invalid opcode (not supported) */
#define F_FAKE_RW 0x400		/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS 0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY 0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY 0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
/* Device selection bit mask */
#define DS_ALL 0xffffffff
#define DS_SBC (1 << TYPE_DISK)
#define DS_SSC (1 << TYPE_TAPE)
#define DS_ZBC (1 << TYPE_ZBC)

#define DS_NO_SSC (DS_ALL & ~DS_SSC)
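/*
 * For example, an entry in the opcode tables below whose devsel is
 * DS_NO_SSC applies to every peripheral type except tape (SSC), while a
 * TYPE_DISK device created by this driver matches entries whose devsel
 * includes DS_SBC.
 */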
#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV = 0x1,
	ZBC_ZTYPE_SWR = 0x2,
	ZBC_ZTYPE_SWP = 0x3,
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP = 0x5,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER = 0x0,
	ZC1_EMPTY = 0x1,
	ZC2_IMPLICIT_OPEN = 0x2,
	ZC3_EXPLICIT_OPEN = 0x3,
	ZC4_CLOSED = 0x4,
	ZC6_READ_ONLY = 0xd,
	ZC5_FULL = 0xe,
	ZC7_OFFLINE = 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};
enum sdebug_err_type {
	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
					/* queuecmd return failed */
	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
					/* queuecmd return succeed but */
					/* with errors set in scsi_cmnd */
	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
					/* scsi_debug_abort() */
	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
					/* scsi_debug_device_reset() */
};
struct sdebug_err_inject {
	int type;
	struct list_head list;
	int cnt;
	unsigned char cmd;
	struct rcu_head rcu;

	union {
		/*
		 * For ERR_FAIL_QUEUE_CMD
		 */
		int queuecmd_ret;

		/*
		 * For ERR_FAIL_CMD
		 */
		struct {
			unsigned char host_byte;
			unsigned char driver_byte;
			unsigned char status_byte;
			unsigned char sense_key;
			unsigned char asc;
			unsigned char asq;
		};
	};
};
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	bool zoned;
	unsigned int zcap;	/* zone capacity, in sectors */
	unsigned int zsize;	/* zone size, in sectors */
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;

	/* For tape drives */
	unsigned int tape_blksize;
	unsigned int tape_density;
	unsigned char tape_partition;
	unsigned char tape_nbr_partitions;
	unsigned char tape_pending_nbr_partitions;
	unsigned int tape_pending_part_0_size;
	unsigned int tape_pending_part_1_size;
	unsigned char tape_dce;
	unsigned int tape_location[TAPE_MAX_PARTITIONS];
	unsigned int tape_eop[TAPE_MAX_PARTITIONS];
	struct tape_block *tape_blocks[TAPE_MAX_PARTITIONS];

	struct dentry *debugfs_entry;
	struct spinlock list_lock;
	struct list_head inject_err_list;
};
struct sdebug_target_info {
	bool reset_fail;
	struct dentry *debugfs_entry;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};
/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_data_lck;	/* for media data access on this store */
	rwlock_t macc_meta_lck;	/* for atomic media meta access on this store */
	rwlock_t macc_sector_lck;	/* per-sector media data access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep;	/* protection info */
	void *map_storep;	/* provisioning map */
};
#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

#define shost_to_sdebug_host(shost)	\
	dev_to_sdebug_host(shost->dma_dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
	int issuing_cpu;
	bool aborted;		/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_scsi_cmd {
	spinlock_t lock;
	struct sdebug_defer sd_dp;
};
static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;		/* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;	/* bumped when mq_poll returns > 0 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 devsel;		/* device type mask for this definition */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;	/* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
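/*
 * Example of how len_mask is read (a sketch based on the INQUIRY entry in
 * opcode_info_arr[] below): len_mask[0] == 6 says the cdb is 6 bytes long,
 * and len_mask[1..5] == {0xe3, 0xff, 0xff, 0xff, 0xc7} list, per cdb byte,
 * the bits a well-formed command may set; with the "strict" module
 * parameter enabled, a set bit outside the mask is reported back as
 * ILLEGAL REQUEST, INVALID FIELD IN CDB.
 */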
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_ATOMIC_WRITE_16 = 32,
	SDEB_I_READ_BLOCK_LIMITS = 33,
	SDEB_I_LOCATE = 34,
	SDEB_I_WRITE_FILEMARKS = 35,
	SDEB_I_SPACE = 36,
	SDEB_I_FORMAT_MEDIUM = 37,
	SDEB_I_ERASE = 38,
	SDEB_I_LAST_ELEM_P1 = 39,	/* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    SDEB_I_FORMAT_MEDIUM, SDEB_I_READ_BLOCK_LIMITS, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	    SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0,
	SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE,
	    0, SDEB_I_ERASE, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, SDEB_I_LOCATE, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0,
	SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
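/*
 * Command dispatch is two-level: opcode_ind_arr[] maps cdb[0] to an
 * SDEB_I_* index, which selects an entry in opcode_info_arr[] further
 * below. Entries flagged F_SA_LOW or F_SA_HIGH are then resolved by
 * service action, scanning the attached overflow array (arrp) for a
 * matching sa value.
 */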
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK.
 */
#define SDEG_RES_IMMED_MASK 0x40000000
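/*
 * For example, a resp_*() handler servicing a cdb with the IMMED bit set
 * can return (SDEG_RES_IMMED_MASK | completion status) so the command is
 * completed quickly instead of after the configured jdelay/ndelay.
 */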
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_stream_status(struct scsi_cmnd *scp,
				  struct sdebug_dev_info *devip);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_position(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_erase(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);
/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, DS_ALL, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, DS_ALL, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,	/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,	/* READ(6) disk */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x8, 0, DS_SSC, F_D_IN | FF_MEDIA_IO, resp_read_tape, NULL,	/* READ(6) tape */
	    {6, 0x03, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,	/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};
static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(6) disk */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xa, 0, DS_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_tape,	/* WRITE(6) tape */
	    NULL, {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,	/* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
	{0, 0x9e, 0x16, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
	     0, 0} },				/* GET STREAM STATUS */
};
static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },		/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} },	/* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};
static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
	{0, 0x34, 0, DS_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_read_position, NULL,
	    {10, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },	/* READ POSITION (10) */
};
static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, DS_NO_SSC, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* REPORT ZONES */
};
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_inquiry, NULL,	/* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, DS_ALL, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,	/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, DS_ALL, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, DS_ALL, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, DS_NO_SSC, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, DS_NO_SSC, F_D_IN, resp_readcap, NULL,	/* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO,	/* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
	    {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, DS_ALL, F_SSU_DELAY, resp_start_stop, NULL,	/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, DS_NO_SSC, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr,	/* SA_IN(16), READ CAPACITY(16) */
	    {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, DS_NO_SSC, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
		   0xff, 0xff, 0xff, 0xff, 0xc7} },	/* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, DS_ALL, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
	    maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
		0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL,	/* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0, DS_NO_SSC,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, DS_NO_SSC, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, DS_ALL, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, DS_ALL, F_D_OUT,
	    NULL, release_iarr,	/* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, DS_ALL, 0, NULL, NULL,	/* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, DS_SSC, 0, resp_rewind, NULL,
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, DS_NO_SSC, F_INV_OP | FF_RESPOND, NULL, NULL,	/* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL,	/* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, DS_NO_SSC, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
	     0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
						/* READ POSITION (10) */
/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr,	/* ZONE_OUT(16), OPEN ZONE) */
	    {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr,	/* ZONE_IN(16), REPORT ZONES) */
	    {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },

	{0, 0x9c, 0x0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
	    resp_atomic_write, NULL,		/* ATOMIC WRITE 16 */
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
	{0, 0x05, 0, DS_SSC, F_D_IN, resp_read_blklimits, NULL,	/* READ BLOCK LIMITS (6) */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x2b, 0, DS_SSC, F_D_UNKN, resp_locate, NULL,	/* LOCATE (10) */
	    {10, 0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x10, 0, DS_SSC, F_D_IN, resp_write_filemarks, NULL,	/* WRITE FILEMARKS (6) */
	    {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x11, 0, DS_SSC, F_D_IN, resp_space, NULL,	/* SPACE (6) */
	    {6, 0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x4, 0, DS_SSC, 0, resp_format_medium, NULL,	/* FORMAT MEDIUM (6) */
	    {6, 0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x19, 0, DS_SSC, F_D_IN, resp_erase, NULL,	/* ERASE (6) */
	    {6, 0x03, 0x33, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },

	{0xff, 0, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;	/* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS;	/* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE;	/* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR;
static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH;
static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN;
static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN;
static unsigned int sdebug_atomic_wr_max_length_bndry =
			DEF_ATOMIC_WR_MAX_LENGTH_BNDRY;
static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;

static enum {
	BLK_ZONED_NONE = 0,
	BLK_ZONED_HA = 1,
	BLK_ZONED_HM = 2,
} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues;		/* io_uring iopoll interface */

static atomic_long_t writes_by_group_number[64];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
static const struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name	= sdebug_proc_name,
	.bus	= &pseudo_lld_bus,
};
static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
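/*
 * These constants follow the SCSI mid-level's result encoding: the host
 * byte (DID_*) occupies bits 16..23 and the SCSI status byte bits 0..7,
 * which is why (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION combines a
 * transport-level verdict with a device-level status in a single int.
 */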
static struct dentry *sdebug_debugfs_root;
static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);
static u32 sdebug_get_devsel(struct scsi_device *sdp)
{
	unsigned char devtype = sdp->type;
	u32 devsel;

	if (devtype < 32)
		devsel = (1 << devtype);
	else
		devsel = DS_ALL;

	return devsel;
}
static void sdebug_err_free(struct rcu_head *head)
{
	struct sdebug_err_inject *inject =
		container_of(head, typeof(*inject), rcu);

	kfree(inject);
}
static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == new->type && err->cmd == new->cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
		}
	}

	list_add_tail_rcu(&new->list, &devip->inject_err_list);
	spin_unlock(&devip->list_lock);
}
static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;
	int type;
	unsigned char cmd;

	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
		kfree(buf);
		return -EINVAL;
	}

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type && err->cmd == cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
			spin_unlock(&devip->list_lock);
			kfree(buf);
			return count;
		}
	}
	spin_unlock(&devip->list_lock);

	kfree(buf);
	return -EINVAL;
}
static int sdebug_error_show(struct seq_file *m, void *p)
{
	struct scsi_device *sdev = (struct scsi_device *)m->private;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	seq_puts(m, "Type\tCount\tCommand\n");

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		switch (err->type) {
		case ERR_TMOUT_CMD:
		case ERR_ABORT_CMD_FAILED:
		case ERR_LUN_RESET_FAILED:
			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
				   err->cmd);
			break;

		case ERR_FAIL_QUEUE_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
				   err->cnt, err->cmd, err->queuecmd_ret);
			break;

		case ERR_FAIL_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				   err->type, err->cnt, err->cmd,
				   err->host_byte, err->driver_byte,
				   err->status_byte, err->sense_key,
				   err->asc, err->asq);
			break;
		}
	}
	rcu_read_unlock();

	return 0;
}
static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}
static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
				  size_t count, loff_t *ppos)
{
	char *buf;
	unsigned int inject_type;
	struct sdebug_err_inject *inject;
	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;

	buf = kzalloc(count + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, count)) {
		kfree(buf);
		return -EFAULT;
	}

	if (buf[0] == '-')
		return sdebug_err_remove(sdev, buf, count);

	if (sscanf(buf, "%d", &inject_type) != 1) {
		kfree(buf);
		return -EINVAL;
	}

	inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
	if (!inject) {
		kfree(buf);
		return -ENOMEM;
	}

	switch (inject_type) {
	case ERR_TMOUT_CMD:
	case ERR_ABORT_CMD_FAILED:
	case ERR_LUN_RESET_FAILED:
		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
			   &inject->cmd) != 3)
			goto out_error;
		break;

	case ERR_FAIL_QUEUE_CMD:
		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
			   &inject->cmd, &inject->queuecmd_ret) != 4)
			goto out_error;
		break;

	case ERR_FAIL_CMD:
		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
			   &inject->type, &inject->cnt, &inject->cmd,
			   &inject->host_byte, &inject->driver_byte,
			   &inject->status_byte, &inject->sense_key,
			   &inject->asc, &inject->asq) != 9)
			goto out_error;
		break;

	default:
		goto out_error;
	}

	kfree(buf);
	sdebug_err_add(sdev, inject);

	return count;

out_error:
	kfree(buf);
	kfree(inject);
	return -EINVAL;
}
static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};
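/*
 * Summary of the per-device debugfs "error" write syntax implemented above
 * (taken from the sscanf() format strings): "<type> <cnt> <cmd>" for
 * ERR_TMOUT_CMD (0), ERR_ABORT_CMD_FAILED (3) and ERR_LUN_RESET_FAILED (4);
 * "<type> <cnt> <cmd> <queuecmd_ret>" for ERR_FAIL_QUEUE_CMD (1); and
 * "<type> <cnt> <cmd> <host> <driver> <status> <sense_key> <asc> <asq>"
 * for ERR_FAIL_CMD (2). A leading '-' removes a previously added entry.
 */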
static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
{
	struct scsi_target *starget = (struct scsi_target *)m->private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip)
		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');

	return 0;
}

static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}
static ssize_t sdebug_target_reset_fail_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	int ret;
	struct scsi_target *starget =
		(struct scsi_target *)file->f_inode->i_private;
	struct sdebug_target_info *targetip =
		(struct sdebug_target_info *)starget->hostdata;

	if (targetip) {
		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
		return ret < 0 ? ret : count;
	}
	return -ENODEV;
}

static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};
static int sdebug_target_alloc(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
	if (!targetip)
		return -ENOMEM;

	async_synchronize_full_domain(&sdebug_async_domain);

	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
				sdebug_debugfs_root);

	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
				&sdebug_target_reset_fail_fops);

	starget->hostdata = targetip;

	return 0;
}
static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
{
	struct sdebug_target_info *targetip = data;

	debugfs_remove(targetip->debugfs_entry);
	kfree(targetip);
}

static void sdebug_target_destroy(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = (struct sdebug_target_info *)starget->hostdata;
	if (targetip) {
		starget->hostdata = NULL;
		async_schedule_domain(sdebug_tartget_cleanup_async, targetip,
				&sdebug_async_domain);
	}
}
/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static inline bool scsi_debug_atomic_write(void)
{
	return sdebug_fake_rw == 0 && sdebug_atomic_wr;
}
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		lsip = xa_load(per_store_ap, 0);	/* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
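/*
 * Note that the store index is reduced modulo sdebug_store_sectors, so a
 * small RAM store can back a larger advertised capacity (see the virtual_gb
 * module parameter): LBAs beyond the store size wrap onto earlier sectors.
 */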
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	mutex_unlock(&sdebug_host_list_mutex);
}
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;	/* sense-key specific valid (SKSV) */
	if (c_d)
		sks[0] |= 0x40;	/* error is in the cdb (C/D bit) */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* bit pointer valid (BPV) */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}
/* Sense data that has information fields for tapes */
static void mk_sense_info_tape(struct scsi_cmnd *scp, int key, int asc, int asq,
			       unsigned int information, unsigned char tape_flags)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, /* sdebug_dsense */ 0, key, asc, asq);
	/* only fixed format so far */

	scp->sense_buffer[0] |= 0x80;	/* valid */
	scp->sense_buffer[2] |= tape_flags;
	put_unaligned_be32(information, &scp->sense_buffer[3]);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}
static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16: /* suggest 16 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	mutex_unlock(&sdebug_host_list_mutex);
}
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp = devip->sdbg_host;
	struct sdebug_dev_info *dp;

	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
		if ((devip->sdbg_host == dp->sdbg_host) &&
		    (devip->target == dp->target)) {
			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
}
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		case SDEBUG_UA_NOT_READY_TO_READY:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_READY_ASC,
					0);
			if (sdebug_verbose)
				cp = "not ready to ready transition/media change";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
1678 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1679 * 'arr' or -1 if error.
1681 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1684 if (!scsi_bufflen(scp))
1686 if (scp->sc_data_direction != DMA_TO_DEVICE)
1689 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1693 static char sdebug_inq_vendor_id[9] = "Linux ";
1694 static char sdebug_inq_product_id[17] = "scsi_debug ";
1695 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1696 /* Use some locally assigned NAAs for SAS addresses. */
1697 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1698 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1699 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1701 /* Device identification VPD page. Returns number of bytes placed in arr */
1702 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1703 int target_dev_id, int dev_id_num,
1704 const char *dev_id_str, int dev_id_str_len,
1705 const uuid_t *lu_name)
1710 port_a = target_dev_id + 1;
1711 /* T10 vendor identifier field format (faked) */
1712 arr[0] = 0x2; /* ASCII */
1715 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1716 memcpy(&arr[12], sdebug_inq_product_id, 16);
1717 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1718 num = 8 + 16 + dev_id_str_len;
1721 if (dev_id_num >= 0) {
1722 if (sdebug_uuid_ctl) {
1723 /* Locally assigned UUID */
1724 arr[num++] = 0x1; /* binary (not necessarily sas) */
1725 arr[num++] = 0xa; /* PIV=0, lu, naa */
1728 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1730 memcpy(arr + num, lu_name, 16);
1733 /* NAA-3, Logical unit identifier (binary) */
1734 arr[num++] = 0x1; /* binary (not necessarily sas) */
1735 arr[num++] = 0x3; /* PIV=0, lu, naa */
1738 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1741 /* Target relative port number */
1742 arr[num++] = 0x61; /* proto=sas, binary */
1743 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1744 arr[num++] = 0x0; /* reserved */
1745 arr[num++] = 0x4; /* length */
1746 arr[num++] = 0x0; /* reserved */
1747 arr[num++] = 0x0; /* reserved */
1749 arr[num++] = 0x1; /* relative port A */
1751 /* NAA-3, Target port identifier */
1752 arr[num++] = 0x61; /* proto=sas, binary */
1753 arr[num++] = 0x93; /* piv=1, target port, naa */
1756 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1758 /* NAA-3, Target port group identifier */
1759 arr[num++] = 0x61; /* proto=sas, binary */
1760 arr[num++] = 0x95; /* piv=1, target port group id */
1765 put_unaligned_be16(port_group_id, arr + num);
1767 /* NAA-3, Target device identifier */
1768 arr[num++] = 0x61; /* proto=sas, binary */
1769 arr[num++] = 0xa3; /* piv=1, target device, naa */
1772 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1774 /* SCSI name string: Target device identifier */
1775 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1776 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1779 memcpy(arr + num, "naa.32222220", 12);
1781 snprintf(b, sizeof(b), "%08X", target_dev_id);
1782 memcpy(arr + num, b, 8);
1784 memset(arr + num, 0, 4);
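/*
 * For reference, every designation descriptor built above begins with the
 * same 4-byte header (SPC-4): byte 0 = protocol identifier (high nibble,
 * meaningful only when PIV=1) | code set (1=binary, 2=ASCII, 3=UTF-8);
 * byte 1 = PIV (bit 7) | association (bits 5:4: 0=lu, 1=target port,
 * 2=target device) | designator type (bits 3:0); byte 3 = designator
 * length. Thus the 0x61/0x93 pairs decode as SAS, binary, PIV=1,
 * target port association, NAA designator.
 */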
1789 static unsigned char vpd84_data[] = {
1790 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1791 0x22,0x22,0x22,0x0,0xbb,0x1,
1792 0x22,0x22,0x22,0x0,0xbb,0x2,
1795 /* Software interface identification VPD page */
1796 static int inquiry_vpd_84(unsigned char *arr)
1798 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1799 return sizeof(vpd84_data);
1802 /* Management network addresses VPD page */
1803 static int inquiry_vpd_85(unsigned char *arr)
1806 const char *na1 = "https://www.kernel.org/config";
1807 const char *na2 = "http://www.kernel.org/log";
1810 arr[num++] = 0x1; /* lu, storage config */
1811 arr[num++] = 0x0; /* reserved */
1816 plen = ((plen / 4) + 1) * 4;
1817 arr[num++] = plen; /* length, null terminated, padded */

1818 memcpy(arr + num, na1, olen);
1819 memset(arr + num + olen, 0, plen - olen);
1822 arr[num++] = 0x4; /* lu, logging */
1823 arr[num++] = 0x0; /* reserved */
1828 plen = ((plen / 4) + 1) * 4;
1829 arr[num++] = plen; /* length, null terminated, padded */
1830 memcpy(arr + num, na2, olen);
1831 memset(arr + num + olen, 0, plen - olen);
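/*
 * Padding example: na1 above is 29 characters, so counting the
 * terminating NUL plen starts at 30 and rounds up to
 * ((30 / 4) + 1) * 4 == 32 bytes, NUL terminated and zero padded to a
 * 4-byte boundary as SPC requires for these descriptors.
 */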
1837 /* SCSI ports VPD page */
1838 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1843 port_a = target_dev_id + 1;
1844 port_b = port_a + 1;
1845 arr[num++] = 0x0; /* reserved */
1846 arr[num++] = 0x0; /* reserved */
1848 arr[num++] = 0x1; /* relative port 1 (primary) */
1849 memset(arr + num, 0, 6);
1852 arr[num++] = 12; /* length tp descriptor */
1853 /* naa-5 target port identifier (A) */
1854 arr[num++] = 0x61; /* proto=sas, binary */
1855 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1856 arr[num++] = 0x0; /* reserved */
1857 arr[num++] = 0x8; /* length */
1858 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1860 arr[num++] = 0x0; /* reserved */
1861 arr[num++] = 0x0; /* reserved */
1863 arr[num++] = 0x2; /* relative port 2 (secondary) */
1864 memset(arr + num, 0, 6);
1867 arr[num++] = 12; /* length tp descriptor */
1868 /* naa-5 target port identifier (B) */
1869 arr[num++] = 0x61; /* proto=sas, binary */
1870 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1871 arr[num++] = 0x0; /* reserved */
1872 arr[num++] = 0x8; /* length */
1873 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1880 static unsigned char vpd89_data[] = {
1881 /* from 4th byte */ 0,0,0,0,
1882 'l','i','n','u','x',' ',' ',' ',
1883 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1885 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1887 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1888 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1889 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1890 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1892 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1894 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1896 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1897 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1898 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1899 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1900 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1901 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1902 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1903 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1904 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1905 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1906 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1907 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1908 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1909 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1910 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1911 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1912 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1913 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1914 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1915 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1916 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1917 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1918 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1919 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1920 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1921 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1924 /* ATA Information VPD page */
1925 static int inquiry_vpd_89(unsigned char *arr)
1927 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1928 return sizeof(vpd89_data);
1932 static unsigned char vpdb0_data[] = {
1933 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1934 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1935 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1936 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1939 /* Block limits VPD page (SBC-3) */
1940 static int inquiry_vpd_b0(unsigned char *arr)
1944 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1946 /* Optimal transfer length granularity */
1947 if (sdebug_opt_xferlen_exp != 0 &&
1948 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1949 gran = 1 << sdebug_opt_xferlen_exp;
1951 gran = 1 << sdebug_physblk_exp;
1952 put_unaligned_be16(gran, arr + 2);
1954 /* Maximum Transfer Length */
1955 if (sdebug_store_sectors > 0x400)
1956 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1958 /* Optimal Transfer Length */
1959 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1962 /* Maximum Unmap LBA Count */
1963 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1965 /* Maximum Unmap Block Descriptor Count */
1966 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1969 /* Unmap Granularity Alignment */
1970 if (sdebug_unmap_alignment) {
1971 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1972 arr[28] |= 0x80; /* UGAVALID */
1975 /* Optimal Unmap Granularity */
1976 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1978 /* Maximum WRITE SAME Length */
1979 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1981 if (sdebug_atomic_wr) {
1982 put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]);
1983 put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]);
1984 put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]);
1985 put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]);
1986 put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]);
1989 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
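/*
 * Note on offsets in inquiry_vpd_b0() above: arr points at byte 4 of the
 * Block Limits page (the 4-byte VPD header is filled in by the caller),
 * so e.g. the granularity written at arr + 2 lands at page bytes 6..7
 * and the Maximum Transfer Length at arr + 4 lands at page bytes 8..11,
 * matching the SBC-3 layout.
 */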
1992 /* Block device characteristics VPD page (SBC-3) */
1993 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1995 memset(arr, 0, 0x3c);
1997 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1999 arr[3] = 5; /* less than 1.8" */
2004 /* Logical block provisioning VPD page (SBC-4) */
2005 static int inquiry_vpd_b2(unsigned char *arr)
2007 memset(arr, 0, 0x4);
2008 arr[0] = 0; /* threshold exponent */
2015 if (sdebug_lbprz && scsi_debug_lbp())
2016 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
2017 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
2018 /* minimum_percentage=0; provisioning_type=0 (unknown) */
2019 /* threshold_percentage=0 */
2023 /* Zoned block device characteristics VPD page (ZBC mandatory) */
2024 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
2026 memset(arr, 0, 0x3c);
2027 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
2029 * Set Optimal number of open sequential write preferred zones and
2030 * Optimal number of non-sequentially written sequential write
2031 * preferred zones fields to 'not reported' (0xffffffff). Leave other
2032 * fields set to zero, apart from Max. number of open swrz_s field.
2034 put_unaligned_be32(0xffffffff, &arr[4]);
2035 put_unaligned_be32(0xffffffff, &arr[8]);
2036 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
2037 put_unaligned_be32(devip->max_open, &arr[12]);
2039 put_unaligned_be32(0xffffffff, &arr[12]);
2040 if (devip->zcap < devip->zsize) {
2041 arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
2042 put_unaligned_be64(devip->zsize, &arr[20]);
2049 #define SDEBUG_BLE_LEN_AFTER_B4 28 /* thus vpage 32 bytes long */
2051 enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
2053 /* Block limits extension VPD page (SBC-4) */
2054 static int inquiry_vpd_b7(unsigned char *arrb4)
2056 memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
2057 arrb4[1] = 1; /* Reduced stream control support (RSCS) */
2058 put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
2059 return SDEBUG_BLE_LEN_AFTER_B4;
2062 #define SDEBUG_LONG_INQ_SZ 96
2063 #define SDEBUG_MAX_INQ_ARR_SZ 584
2065 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2067 unsigned char pq_pdt;
2069 unsigned char *cmd = scp->cmnd;
2072 bool have_wlun, is_disk, is_zbc, is_disk_zbc, is_tape;
2074 alloc_len = get_unaligned_be16(cmd + 3);
2075 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
2077 return DID_REQUEUE << 16;
2078 if (scp->device->type >= 32) {
2079 is_disk = (sdebug_ptype == TYPE_DISK);
2080 is_tape = (sdebug_ptype == TYPE_TAPE);
2082 is_disk = (scp->device->type == TYPE_DISK);
2083 is_tape = (scp->device->type == TYPE_TAPE);
2085 is_zbc = devip->zoned;
2086 is_disk_zbc = (is_disk || is_zbc);
2087 have_wlun = scsi_is_wlun(scp->device->lun);
2089 pq_pdt = TYPE_WLUN; /* present, wlun */
2090 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
2091 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
2093 pq_pdt = ((scp->device->type >= 32 ?
2094 sdebug_ptype : scp->device->type) & 0x1f);
2096 if (0x2 & cmd[1]) { /* CMDDT bit set */
2097 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
2099 return check_condition_result;
2100 } else if (0x1 & cmd[1]) { /* EVPD bit set */
2101 int lu_id_num, port_group_id, target_dev_id;
2104 int host_no = devip->sdbg_host->shost->host_no;
2107 port_group_id = (((host_no + 1) & 0x7f) << 8) +
2108 (devip->channel & 0x7f);
2109 if (sdebug_vpd_use_hostno == 0)
2111 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
2112 (devip->target * 1000) + devip->lun);
2113 target_dev_id = ((host_no + 1) * 2000) +
2114 (devip->target * 1000) - 3;
2115 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
2116 if (0 == cmd[2]) { /* supported vital product data pages */
2118 arr[n++] = 0x0; /* this page */
2119 arr[n++] = 0x80; /* unit serial number */
2120 arr[n++] = 0x83; /* device identification */
2121 arr[n++] = 0x84; /* software interface ident. */
2122 arr[n++] = 0x85; /* management network addresses */
2123 arr[n++] = 0x86; /* extended inquiry */
2124 arr[n++] = 0x87; /* mode page policy */
2125 arr[n++] = 0x88; /* SCSI ports */
2126 if (is_disk_zbc) { /* SBC or ZBC */
2127 arr[n++] = 0x89; /* ATA information */
2128 arr[n++] = 0xb0; /* Block limits */
2129 arr[n++] = 0xb1; /* Block characteristics */
2131 arr[n++] = 0xb2; /* LB Provisioning */
2133 arr[n++] = 0xb6; /* ZB dev. char. */
2134 arr[n++] = 0xb7; /* Block limits extension */
2136 arr[3] = n - 4; /* number of supported VPD pages */
2137 } else if (0x80 == cmd[2]) { /* unit serial number */
2139 memcpy(&arr[4], lu_id_str, len);
2140 } else if (0x83 == cmd[2]) { /* device identification */
2141 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
2142 target_dev_id, lu_id_num,
2145 } else if (0x84 == cmd[2]) { /* Software interface ident. */
2146 arr[3] = inquiry_vpd_84(&arr[4]);
2147 } else if (0x85 == cmd[2]) { /* Management network addresses */
2148 arr[3] = inquiry_vpd_85(&arr[4]);
2149 } else if (0x86 == cmd[2]) { /* extended inquiry */
2150 arr[3] = 0x3c; /* number of following entries */
2151 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
2152 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
2153 else if (have_dif_prot)
2154 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
2156 arr[4] = 0x0; /* no protection stuff */
2158 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
2159 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
2162 } else if (0x87 == cmd[2]) { /* mode page policy */
2163 arr[3] = 0x8; /* number of following entries */
2164 arr[4] = 0x2; /* disconnect-reconnect mp */
2165 arr[6] = 0x80; /* mlus, shared */
2166 arr[8] = 0x18; /* protocol specific lu */
2167 arr[10] = 0x82; /* mlus, per initiator port */
2168 } else if (0x88 == cmd[2]) { /* SCSI Ports */
2169 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
2170 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
2171 n = inquiry_vpd_89(&arr[4]);
2172 put_unaligned_be16(n, arr + 2);
2173 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
2174 arr[3] = inquiry_vpd_b0(&arr[4]);
2175 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
2176 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
2177 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
2178 arr[3] = inquiry_vpd_b2(&arr[4]);
2179 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
2180 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
2181 } else if (cmd[2] == 0xb7) { /* block limits extension page */
2182 arr[3] = inquiry_vpd_b7(&arr[4]);
2184 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
2186 return check_condition_result;
2188 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2189 ret = fill_from_dev_buffer(scp, arr,
2190 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2194 /* drops through here for a standard inquiry */
2195 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
2196 arr[2] = sdebug_scsi_level;
2197 arr[3] = 2; /* response_data_format==2 */
2198 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2199 arr[5] = (int)have_dif_prot; /* PROTECT bit */
2200 if (sdebug_vpd_use_hostno == 0)
2201 arr[5] |= 0x10; /* claim: implicit TPGS */
2202 arr[6] = 0x10; /* claim: MultiP */
2203 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2204 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2205 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2206 memcpy(&arr[16], sdebug_inq_product_id, 16);
2207 memcpy(&arr[32], sdebug_inq_product_rev, 4);
2208 /* Use Vendor Specific area to place driver date in ASCII */
2209 memcpy(&arr[36], sdebug_version_date, 8);
2210 /* version descriptors (2 bytes each) follow */
2211 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
2212 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
2214 if (is_disk) { /* SBC-4 no version claimed */
2215 put_unaligned_be16(0x600, arr + n);
2217 } else if (is_tape) { /* SSC-4 rev 3 */
2218 put_unaligned_be16(0x525, arr + n);
2220 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
2221 put_unaligned_be16(0x624, arr + n);
2224 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
2225 ret = fill_from_dev_buffer(scp, arr,
2226 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2231 /* See resp_iec_m_pg() for how this data is manipulated */
2232 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2235 static int resp_requests(struct scsi_cmnd *scp,
2236 struct sdebug_dev_info *devip)
2238 unsigned char *cmd = scp->cmnd;
2239 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
2240 bool dsense = !!(cmd[1] & 1);
2241 u32 alloc_len = cmd[4];
2243 int stopped_state = atomic_read(&devip->stopped);
2245 memset(arr, 0, sizeof(arr));
2246 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
2250 arr[2] = LOGICAL_UNIT_NOT_READY;
2251 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2255 arr[2] = NOT_READY; /* NOT_READY in sense_key */
2256 arr[7] = 0xa; /* 18 byte sense buffer */
2257 arr[12] = LOGICAL_UNIT_NOT_READY;
2258 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2260 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2261 /* Information exceptions control mode page: TEST=1, MRIE=6 */
2264 arr[1] = 0x0; /* NO_SENSE in sense_key */
2265 arr[2] = THRESHOLD_EXCEEDED;
2266 arr[3] = 0xff; /* Failure prediction(false) */
2270 arr[2] = 0x0; /* NO_SENSE in sense_key */
2271 arr[7] = 0xa; /* 18 byte sense buffer */
2272 arr[12] = THRESHOLD_EXCEEDED;
2273 arr[13] = 0xff; /* Failure prediction(false) */
2275 } else { /* nothing to report */
2278 memset(arr, 0, len);
2281 memset(arr, 0, len);
2286 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
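/*
 * For reference, the two self-built sense formats above are: descriptor
 * format (DESC set in the CDB): byte 0 = 0x72 response code, byte 1 =
 * sense key, bytes 2/3 = ASC/ASCQ, 8 bytes used here; fixed format:
 * byte 0 = 0x70, byte 2 = sense key, byte 7 = additional length (0xa,
 * giving the 18-byte buffer), bytes 12/13 = ASC/ASCQ.
 */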
2289 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2291 unsigned char *cmd = scp->cmnd;
2292 int power_cond, want_stop, stopped_state;
2295 power_cond = (cmd[4] & 0xf0) >> 4;
2297 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
2298 return check_condition_result;
2300 want_stop = !(cmd[4] & 1);
2301 stopped_state = atomic_read(&devip->stopped);
2302 if (stopped_state == 2) {
2303 ktime_t now_ts = ktime_get_boottime();
2305 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
2306 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
2308 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
2309 /* tur_ms_to_ready timer expired */
2310 atomic_set(&devip->stopped, 0);
2314 if (stopped_state == 2) {
2316 stopped_state = 1; /* dummy up success */
2317 } else { /* Disallow tur_ms_to_ready delay to be overridden */
2318 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
2319 return check_condition_result;
2323 changing = (stopped_state != want_stop);
2325 atomic_xchg(&devip->stopped, want_stop);
2326 if (scp->device->type == TYPE_TAPE && !want_stop) {
2329 set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */
2330 for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
2331 devip->tape_location[i] = 0;
2332 devip->tape_partition = 0;
2334 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
2335 return SDEG_RES_IMMED_MASK;
2340 static sector_t get_sdebug_capacity(void)
2342 static const unsigned int gibibyte = 1073741824;
2344 if (sdebug_virtual_gb > 0)
2345 return (sector_t)sdebug_virtual_gb *
2346 (gibibyte / sdebug_sector_size);
2348 return sdebug_store_sectors;
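/*
 * e.g. virtual_gb=4 with the default 512-byte sector size reports
 * 4 * (1073741824 / 512) == 8388608 sectors; when that exceeds
 * sdebug_store_sectors the ramdisk backing store is effectively
 * reused (wrapped) for the higher LBAs.
 */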
2351 #define SDEBUG_READCAP_ARR_SZ 8
2352 static int resp_readcap(struct scsi_cmnd *scp,
2353 struct sdebug_dev_info *devip)
2355 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2358 /* following just in case virtual_gb changed */
2359 sdebug_capacity = get_sdebug_capacity();
2360 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2361 if (sdebug_capacity < 0xffffffff) {
2362 capac = (unsigned int)sdebug_capacity - 1;
2363 put_unaligned_be32(capac, arr + 0);
2365 put_unaligned_be32(0xffffffff, arr + 0);
2366 put_unaligned_be16(sdebug_sector_size, arr + 6);
2367 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2370 #define SDEBUG_READCAP16_ARR_SZ 32
2371 static int resp_readcap16(struct scsi_cmnd *scp,
2372 struct sdebug_dev_info *devip)
2374 unsigned char *cmd = scp->cmnd;
2375 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2378 alloc_len = get_unaligned_be32(cmd + 10);
2379 /* following just in case virtual_gb changed */
2380 sdebug_capacity = get_sdebug_capacity();
2381 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2382 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2383 put_unaligned_be32(sdebug_sector_size, arr + 8);
2384 arr[13] = sdebug_physblk_exp & 0xf;
2385 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2387 if (scsi_debug_lbp()) {
2388 arr[14] |= 0x80; /* LBPME */
2389 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2390 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2391 * in the wider field maps to 0 in this field.
2393 if (sdebug_lbprz & 1) /* precisely what the draft requires */
2398 * Since the scsi_debug READ CAPACITY implementation always reports the
2399 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2404 arr[15] = sdebug_lowest_aligned & 0xff;
2406 if (have_dif_prot) {
2407 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2408 arr[12] |= 1; /* PROT_EN */
2411 return fill_from_dev_buffer(scp, arr,
2412 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
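/*
 * Protection decode example: byte 12 above encodes P_TYPE and PROT_EN,
 * so DIF type 1 yields ((1 - 1) << 1) | 1 == 0x01, type 2 yields 0x03
 * and type 3 yields 0x05, per the SBC-3 READ CAPACITY (16) parameter
 * data definition.
 */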
2415 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2417 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2418 struct sdebug_dev_info *devip)
2420 unsigned char *cmd = scp->cmnd;
2422 int host_no = devip->sdbg_host->shost->host_no;
2423 int port_group_a, port_group_b, port_a, port_b;
2427 alen = get_unaligned_be32(cmd + 6);
2428 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2430 return DID_REQUEUE << 16;
2432 * EVPD page 0x88 states we have two ports, one
2433 * real and a fake port with no device connected.
2434 * So we create two port groups with one port each
2435 * and set the group with port B to unavailable.
2437 port_a = 0x1; /* relative port A */
2438 port_b = 0x2; /* relative port B */
2439 port_group_a = (((host_no + 1) & 0x7f) << 8) +
2440 (devip->channel & 0x7f);
2441 port_group_b = (((host_no + 1) & 0x7f) << 8) +
2442 (devip->channel & 0x7f) + 0x80;
2445 * The asymmetric access state is cycled according to the host_id.
2448 if (sdebug_vpd_use_hostno == 0) {
2449 arr[n++] = host_no % 3; /* Asymm access state */
2450 arr[n++] = 0x0F; /* claim: all states are supported */
2452 arr[n++] = 0x0; /* Active/Optimized path */
2453 arr[n++] = 0x01; /* only support active/optimized paths */
2455 put_unaligned_be16(port_group_a, arr + n);
2457 arr[n++] = 0; /* Reserved */
2458 arr[n++] = 0; /* Status code */
2459 arr[n++] = 0; /* Vendor unique */
2460 arr[n++] = 0x1; /* One port per group */
2461 arr[n++] = 0; /* Reserved */
2462 arr[n++] = 0; /* Reserved */
2463 put_unaligned_be16(port_a, arr + n);
2465 arr[n++] = 3; /* Port unavailable */
2466 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
2467 put_unaligned_be16(port_group_b, arr + n);
2469 arr[n++] = 0; /* Reserved */
2470 arr[n++] = 0; /* Status code */
2471 arr[n++] = 0; /* Vendor unique */
2472 arr[n++] = 0x1; /* One port per group */
2473 arr[n++] = 0; /* Reserved */
2474 arr[n++] = 0; /* Reserved */
2475 put_unaligned_be16(port_b, arr + n);
2479 put_unaligned_be32(rlen, arr + 0);
2482 * Return the smallest value of either
2483 * - The allocated length
2484 * - The constructed command length
2485 * - The maximum array size
2487 rlen = min(alen, n);
2488 ret = fill_from_dev_buffer(scp, arr,
2489 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
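/*
 * Packing example: with host_no == 0 and channel == 0 the expressions
 * above give port group A == 0x0100 and port group B == 0x0180, i.e.
 * ((host_no + 1) & 0x7f) in the high byte and bit 7 of the low byte
 * marking the fake B group.
 */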
2494 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2495 struct sdebug_dev_info *devip)
2498 u8 reporting_opts, req_opcode, sdeb_i, supp;
2500 u32 alloc_len, a_len;
2501 int k, offset, len, errsts, bump, na;
2502 const struct opcode_info_t *oip;
2503 const struct opcode_info_t *r_oip;
2505 u8 *cmd = scp->cmnd;
2506 u32 devsel = sdebug_get_devsel(scp->device);
2508 rctd = !!(cmd[2] & 0x80);
2509 reporting_opts = cmd[2] & 0x7;
2510 req_opcode = cmd[3];
2511 req_sa = get_unaligned_be16(cmd + 4);
2512 alloc_len = get_unaligned_be32(cmd + 6);
2513 if (alloc_len < 4 || alloc_len > 0xffff) {
2514 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2515 return check_condition_result;
2517 if (alloc_len > 8192)
2521 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2523 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2525 return check_condition_result;
2527 switch (reporting_opts) {
2528 case 0: /* all commands */
2529 bump = rctd ? 20 : 8;
2530 for (offset = 4, oip = opcode_info_arr;
2531 oip->num_attached != 0xff && offset < a_len; ++oip) {
2532 if (F_INV_OP & oip->flags)
2534 if ((devsel & oip->devsel) != 0) {
2535 arr[offset] = oip->opcode;
2536 put_unaligned_be16(oip->sa, arr + offset + 2);
2538 arr[offset + 5] |= 0x2;
2539 if (FF_SA & oip->flags)
2540 arr[offset + 5] |= 0x1;
2541 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2543 put_unaligned_be16(0xa, arr + offset + 8);
2546 na = oip->num_attached;
2548 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2549 if (F_INV_OP & oip->flags)
2551 if ((devsel & oip->devsel) == 0)
2553 arr[offset] = oip->opcode;
2554 put_unaligned_be16(oip->sa, arr + offset + 2);
2556 arr[offset + 5] |= 0x2;
2557 if (FF_SA & oip->flags)
2558 arr[offset + 5] |= 0x1;
2559 put_unaligned_be16(oip->len_mask[0],
2562 put_unaligned_be16(0xa,
2568 put_unaligned_be32(offset - 4, arr);
2570 case 1: /* one command: opcode only */
2571 case 2: /* one command: opcode plus service action */
2572 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2573 sdeb_i = opcode_ind_arr[req_opcode];
2574 oip = &opcode_info_arr[sdeb_i];
2575 if (F_INV_OP & oip->flags) {
2579 if (1 == reporting_opts) {
2580 if (FF_SA & oip->flags) {
2581 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2584 return check_condition_result;
2587 } else if (2 == reporting_opts &&
2588 0 == (FF_SA & oip->flags)) {
2589 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2590 kfree(arr);
2591 return check_condition_result;
2593 if (0 == (FF_SA & oip->flags) &&
2594 (devsel & oip->devsel) != 0 &&
2595 req_opcode == oip->opcode)
2597 else if (0 == (FF_SA & oip->flags)) {
2598 na = oip->num_attached;
2599 for (k = 0, oip = oip->arrp; k < na;
2601 if (req_opcode == oip->opcode &&
2602 (devsel & oip->devsel) != 0)
2605 supp = (k >= na) ? 1 : 3;
2606 } else if (req_sa != oip->sa) {
2607 na = oip->num_attached;
2608 for (k = 0, oip = oip->arrp; k < na;
2610 if (req_sa == oip->sa &&
2611 (devsel & oip->devsel) != 0)
2614 supp = (k >= na) ? 1 : 3;
2618 u = oip->len_mask[0];
2619 put_unaligned_be16(u, arr + 2);
2620 arr[4] = oip->opcode;
2621 for (k = 1; k < u; ++k)
2622 arr[4 + k] = (k < 16) ?
2623 oip->len_mask[k] : 0xff;
2628 arr[1] = (rctd ? 0x80 : 0) | supp;
2630 put_unaligned_be16(0xa, arr + offset);
2635 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2637 return check_condition_result;
2639 offset = (offset < a_len) ? offset : a_len;
2640 len = (offset < alloc_len) ? offset : alloc_len;
2641 errsts = fill_from_dev_buffer(scp, arr, len);
2646 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2647 struct sdebug_dev_info *devip)
2652 u8 *cmd = scp->cmnd;
2654 memset(arr, 0, sizeof(arr));
2655 repd = !!(cmd[2] & 0x80);
2656 alloc_len = get_unaligned_be32(cmd + 6);
2657 if (alloc_len < 4) {
2658 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2659 return check_condition_result;
2661 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2662 arr[1] = 0x1; /* ITNRS */
2669 len = (len < alloc_len) ? len : alloc_len;
2670 return fill_from_dev_buffer(scp, arr, len);
2673 /* <<Following mode page info copied from ST318451LW>> */
2675 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2676 { /* Read-Write Error Recovery page for mode_sense */
2677 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2680 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2682 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2683 return sizeof(err_recov_pg);
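/*
 * In these resp_*_pg() helpers pcontrol carries the MODE SENSE PC field:
 * 0 returns current values, 1 the changeable-values mask (all zero for
 * this page, nothing is changeable), 2 the defaults; PC=3 (saved values)
 * is rejected up front by resp_mode_sense() with SAVING_PARAMS_UNSUP.
 */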
2686 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2687 { /* Disconnect-Reconnect page for mode_sense */
2688 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2689 0, 0, 0, 0, 0, 0, 0, 0};
2691 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2693 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2694 return sizeof(disconnect_pg);
2697 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2698 { /* Format device page for mode_sense */
2699 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2700 0, 0, 0, 0, 0, 0, 0, 0,
2701 0, 0, 0, 0, 0x40, 0, 0, 0};
2703 memcpy(p, format_pg, sizeof(format_pg));
2704 put_unaligned_be16(sdebug_sectors_per, p + 10);
2705 put_unaligned_be16(sdebug_sector_size, p + 12);
2706 if (sdebug_removable)
2707 p[20] |= 0x20; /* should agree with INQUIRY */
2709 memset(p + 2, 0, sizeof(format_pg) - 2);
2710 return sizeof(format_pg);
2713 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2714 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2717 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2718 { /* Caching page for mode_sense */
2719 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2720 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2721 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2722 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2724 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2725 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2726 memcpy(p, caching_pg, sizeof(caching_pg));
2728 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2729 else if (2 == pcontrol)
2730 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2731 return sizeof(caching_pg);
2734 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2737 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2738 { /* Control mode page for mode_sense */
2739 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2741 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2745 ctrl_m_pg[2] |= 0x4;
2747 ctrl_m_pg[2] &= ~0x4;
2750 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2752 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2754 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2755 else if (2 == pcontrol)
2756 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2757 return sizeof(ctrl_m_pg);
2760 /* IO Advice Hints Grouping mode page */
2761 static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
2763 /* IO Advice Hints Grouping mode page */
2764 struct grouping_m_pg {
2765 u8 page_code; /* OR 0x40 when subpage_code > 0 */
2769 struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
2771 static const struct grouping_m_pg gr_m_pg = {
2772 .page_code = 0xa | 0x40,
2774 .page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
2785 BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
2786 16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
2787 memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
2788 if (1 == pcontrol) {
2789 /* There are no changeable values so clear from byte 4 on. */
2790 memset(p + 4, 0, sizeof(gr_m_pg) - 4);
2792 return sizeof(gr_m_pg);
2795 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2796 { /* Informational Exceptions control mode page for mode_sense */
2797 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2799 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2802 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2804 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2805 else if (2 == pcontrol)
2806 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2807 return sizeof(iec_m_pg);
2810 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2811 { /* SAS SSP mode page - short format for mode_sense */
2812 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2813 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2815 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2817 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2818 return sizeof(sas_sf_m_pg);
2822 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2824 { /* SAS phy control and discover mode page for mode_sense */
2825 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2826 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2827 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2828 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2829 0x2, 0, 0, 0, 0, 0, 0, 0,
2830 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2831 0, 0, 0, 0, 0, 0, 0, 0,
2832 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2833 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2834 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2835 0x3, 0, 0, 0, 0, 0, 0, 0,
2836 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2837 0, 0, 0, 0, 0, 0, 0, 0,
2841 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2842 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2843 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2844 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2845 port_a = target_dev_id + 1;
2846 port_b = port_a + 1;
2847 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2848 put_unaligned_be32(port_a, p + 20);
2849 put_unaligned_be32(port_b, p + 48 + 20);
2851 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2852 return sizeof(sas_pcd_m_pg);
2855 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2856 { /* SAS SSP shared protocol specific port mode subpage */
2857 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2858 0, 0, 0, 0, 0, 0, 0, 0,
2861 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2863 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2864 return sizeof(sas_sha_m_pg);
2867 static unsigned char partition_pg[] = {0x11, 12, 1, 0, 0x24, 3, 9, 0,
2868 0xff, 0xff, 0x00, 0x00};
2870 static int resp_partition_m_pg(unsigned char *p, int pcontrol, int target)
2871 { /* Partition page for mode_sense (tape) */
2872 memcpy(p, partition_pg, sizeof(partition_pg));
2874 memset(p + 2, 0, sizeof(partition_pg) - 2);
2875 return sizeof(partition_pg);
2878 static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
2879 unsigned char *new, int pg_len)
2881 int new_nbr, p0_size, p1_size;
2883 if ((new[4] & 0x80) != 0) { /* FDP */
2884 partition_pg[4] |= 0x80;
2885 devip->tape_pending_nbr_partitions = TAPE_MAX_PARTITIONS;
2886 devip->tape_pending_part_0_size = TAPE_UNITS - TAPE_PARTITION_1_UNITS;
2887 devip->tape_pending_part_1_size = TAPE_PARTITION_1_UNITS;
2889 new_nbr = new[3] + 1;
2890 if (new_nbr > TAPE_MAX_PARTITIONS)
2892 if ((new[4] & 0x40) != 0) { /* SDP */
2893 p1_size = TAPE_PARTITION_1_UNITS;
2894 p0_size = TAPE_UNITS - p1_size;
2897 } else if ((new[4] & 0x20) != 0) {
2899 p0_size = get_unaligned_be16(new + 8);
2900 p1_size = get_unaligned_be16(new + 10);
2901 if (p1_size == 0xFFFF)
2902 p1_size = TAPE_UNITS - p0_size;
2903 else if (p0_size == 0xFFFF)
2904 p0_size = TAPE_UNITS - p1_size;
2905 if (p0_size < 100 || p1_size < 100)
2908 p0_size = TAPE_UNITS;
2913 devip->tape_pending_nbr_partitions = new_nbr;
2914 devip->tape_pending_part_0_size = p0_size;
2915 devip->tape_pending_part_1_size = p1_size;
2916 partition_pg[3] = new_nbr;
2923 static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
2925 { /* Compression page for mode_sense (tape) */
2926 unsigned char compression_pg[] = {0x0f, 14, 0x40, 0, 0, 0, 0, 0,
2927 0, 0, 0, 0, 0, 0};
2929 memcpy(p, compression_pg, sizeof(compression_pg));
2933 memset(p + 2, 0, sizeof(compression_pg) - 2);
2934 return sizeof(compression_pg);
2937 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2938 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2940 static int resp_mode_sense(struct scsi_cmnd *scp,
2941 struct sdebug_dev_info *devip)
2943 int pcontrol, pcode, subpcode, bd_len;
2944 unsigned char dev_spec;
2945 u32 alloc_len, offset, len;
2947 int target = scp->device->id;
2949 unsigned char *arr __free(kfree);
2950 unsigned char *cmd = scp->cmnd;
2951 bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
2953 arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2956 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2957 pcontrol = (cmd[2] & 0xc0) >> 6;
2958 pcode = cmd[2] & 0x3f;
2960 msense_6 = (MODE_SENSE == cmd[0]);
2961 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2962 is_disk = (scp->device->type == TYPE_DISK);
2963 is_zbc = devip->zoned;
2964 is_tape = (scp->device->type == TYPE_TAPE);
2965 if ((is_disk || is_zbc || is_tape) && !dbd)
2966 bd_len = llbaa ? 16 : 8;
2969 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2970 if (0x3 == pcontrol) { /* Saving values not supported */
2971 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2972 return check_condition_result;
2974 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2975 (devip->target * 1000) - 3;
2976 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2977 if (is_disk || is_zbc) {
2978 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2990 arr[4] = 0x1; /* set LONGLBA bit */
2991 arr[7] = bd_len; /* assume 255 or less */
2995 if ((bd_len > 0) && (!sdebug_capacity))
2996 sdebug_capacity = get_sdebug_capacity();
2999 if (sdebug_capacity > 0xfffffffe)
3000 put_unaligned_be32(0xffffffff, ap + 0);
3002 put_unaligned_be32(sdebug_capacity, ap + 0);
3004 ap[0] = devip->tape_density;
3005 put_unaligned_be16(devip->tape_blksize, ap + 6);
3007 put_unaligned_be16(sdebug_sector_size, ap + 6);
3010 } else if (16 == bd_len) {
3012 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4);
3013 return check_condition_result;
3015 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
3016 put_unaligned_be32(sdebug_sector_size, ap + 12);
3021 goto only_bd; /* Only block descriptor requested */
3024 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
3025 * len += resp_*_pg(ap + len, pcontrol, target);
3028 case 0x1: /* Read-Write error recovery page, direct access */
3029 if (subpcode > 0x0 && subpcode < 0xff)
3031 len = resp_err_recov_pg(ap, pcontrol, target);
3034 case 0x2: /* Disconnect-Reconnect page, all devices */
3035 if (subpcode > 0x0 && subpcode < 0xff)
3037 len = resp_disconnect_pg(ap, pcontrol, target);
3040 case 0x3: /* Format device page, direct access */
3041 if (subpcode > 0x0 && subpcode < 0xff)
3044 len = resp_format_pg(ap, pcontrol, target);
3050 case 0x8: /* Caching page, direct access */
3051 if (subpcode > 0x0 && subpcode < 0xff)
3053 if (is_disk || is_zbc) {
3054 len = resp_caching_pg(ap, pcontrol, target);
3060 case 0xa: /* Control Mode page, all devices */
3063 len = resp_ctrl_m_pg(ap, pcontrol, target);
3066 len = resp_grouping_m_pg(ap, pcontrol, target);
3069 len = resp_ctrl_m_pg(ap, pcontrol, target);
3070 len += resp_grouping_m_pg(ap + len, pcontrol, target);
3077 case 0xf: /* Compression Mode Page (tape) */
3080 len = resp_compression_m_pg(ap, pcontrol, target, devip->tape_dce);
3083 case 0x11: /* Partition Mode Page (tape) */
3086 len = resp_partition_m_pg(ap, pcontrol, target);
3089 case 0x19: /* if spc==1 then sas phy, control+discover */
3090 if (subpcode > 0x2 && subpcode < 0xff)
3093 if ((0x0 == subpcode) || (0xff == subpcode))
3094 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
3095 if ((0x1 == subpcode) || (0xff == subpcode))
3096 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
3098 if ((0x2 == subpcode) || (0xff == subpcode))
3099 len += resp_sas_sha_m_spg(ap + len, pcontrol);
3102 case 0x1c: /* Informational Exceptions Mode page, all devices */
3103 if (subpcode > 0x0 && subpcode < 0xff)
3105 len = resp_iec_m_pg(ap, pcontrol, target);
3108 case 0x3f: /* Read all Mode pages */
3109 if (subpcode > 0x0 && subpcode < 0xff)
3111 len = resp_err_recov_pg(ap, pcontrol, target);
3112 len += resp_disconnect_pg(ap + len, pcontrol, target);
3114 len += resp_format_pg(ap + len, pcontrol, target);
3115 len += resp_caching_pg(ap + len, pcontrol, target);
3116 } else if (is_zbc) {
3117 len += resp_caching_pg(ap + len, pcontrol, target);
3119 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
3120 if (0xff == subpcode)
3121 len += resp_grouping_m_pg(ap + len, pcontrol, target);
3122 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
3123 if (0xff == subpcode) {
3124 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
3126 len += resp_sas_sha_m_spg(ap + len, pcontrol);
3128 len += resp_iec_m_pg(ap + len, pcontrol, target);
3136 arr[0] = offset - 1;
3138 put_unaligned_be16((offset - 2), arr + 0);
3139 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
3142 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3143 return check_condition_result;
3146 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3147 return check_condition_result;
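/*
 * Header recap for the two forms handled above: MODE SENSE(6) data
 * starts with a 4-byte header whose one-byte mode data length excludes
 * itself, hence arr[0] = offset - 1; MODE SENSE(10) uses an 8-byte
 * header with a two-byte length that likewise excludes itself, hence
 * the big-endian offset - 2.
 */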
3150 #define SDEBUG_MAX_MSELECT_SZ 512
3152 static int resp_mode_select(struct scsi_cmnd *scp,
3153 struct sdebug_dev_info *devip)
3155 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
3156 int param_len, res, mpage;
3157 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
3158 unsigned char *cmd = scp->cmnd;
3159 int mselect6 = (MODE_SELECT == cmd[0]);
3161 memset(arr, 0, sizeof(arr));
3164 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
3165 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
3166 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
3167 return check_condition_result;
3169 res = fetch_to_dev_buffer(scp, arr, param_len);
3171 return DID_ERROR << 16;
3172 else if (sdebug_verbose && (res < param_len))
3173 sdev_printk(KERN_INFO, scp->device,
3174 "%s: cdb indicated=%d, IO sent=%d bytes\n",
3175 __func__, param_len, res);
3176 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
3177 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
3178 off = (mselect6 ? 4 : 8);
3179 if (scp->device->type == TYPE_TAPE) {
3183 mk_sense_invalid_fld(scp, SDEB_IN_DATA,
3184 mselect6 ? 3 : 6, -1);
3185 return check_condition_result;
3187 if (arr[off] == TAPE_BAD_DENSITY) {
3188 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
3189 return check_condition_result;
3191 blksize = get_unaligned_be16(arr + off + 6);
3193 (blksize < TAPE_MIN_BLKSIZE ||
3194 blksize > TAPE_MAX_BLKSIZE ||
3195 (blksize % 4) != 0)) {
3196 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, -1);
3197 return check_condition_result;
3199 devip->tape_density = arr[off];
3200 devip->tape_blksize = blksize;
3204 return 0; /* No page written, just descriptors */
3206 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
3207 return check_condition_result;
3209 mpage = arr[off] & 0x3f;
3210 ps = !!(arr[off] & 0x80);
3212 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
3213 return check_condition_result;
3215 spf = !!(arr[off] & 0x40);
3216 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
3218 if ((pg_len + off) > param_len) {
3219 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3220 PARAMETER_LIST_LENGTH_ERR, 0);
3221 return check_condition_result;
3224 case 0x8: /* Caching Mode page */
3225 if (caching_pg[1] == arr[off + 1]) {
3226 memcpy(caching_pg + 2, arr + off + 2,
3227 sizeof(caching_pg) - 2);
3228 goto set_mode_changed_ua;
3231 case 0xa: /* Control Mode page */
3232 if (ctrl_m_pg[1] == arr[off + 1]) {
3233 memcpy(ctrl_m_pg + 2, arr + off + 2,
3234 sizeof(ctrl_m_pg) - 2);
3235 if (ctrl_m_pg[4] & 0x8)
3239 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
3240 goto set_mode_changed_ua;
3243 case 0xf: /* Compression mode page */
3244 if (scp->device->type != TYPE_TAPE)
3246 if ((arr[off + 2] & 0x40) != 0) {
3247 devip->tape_dce = (arr[off + 2] & 0x80) != 0;
3251 case 0x11: /* Medium Partition Mode Page (tape) */
3252 if (scp->device->type == TYPE_TAPE) {
3255 fld = process_medium_part_m_pg(devip, &arr[off], pg_len);
3258 mk_sense_invalid_fld(scp, SDEB_IN_DATA, fld, -1);
3259 return check_condition_result;
3262 case 0x1c: /* Informational Exceptions Mode page */
3263 if (iec_m_pg[1] == arr[off + 1]) {
3264 memcpy(iec_m_pg + 2, arr + off + 2,
3265 sizeof(iec_m_pg) - 2);
3266 goto set_mode_changed_ua;
3272 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
3273 return check_condition_result;
3274 set_mode_changed_ua:
3275 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
3279 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3280 return check_condition_result;
3283 static int resp_temp_l_pg(unsigned char *arr)
3285 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
3286 0x0, 0x1, 0x3, 0x2, 0x0, 65,
3289 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
3290 return sizeof(temp_l_pg);
3293 static int resp_ie_l_pg(unsigned char *arr)
3295 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
3298 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
3299 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
3300 arr[4] = THRESHOLD_EXCEEDED;
3303 return sizeof(ie_l_pg);
3306 static int resp_env_rep_l_spg(unsigned char *arr)
3308 unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
3309 0x0, 40, 72, 0xff, 45, 18, 0, 0,
3310 0x1, 0x0, 0x23, 0x8,
3311 0x0, 55, 72, 35, 55, 45, 0, 0,
3314 memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
3315 return sizeof(env_rep_l_spg);
3318 #define SDEBUG_MAX_LSENSE_SZ 512
3320 static int resp_log_sense(struct scsi_cmnd *scp,
3321 struct sdebug_dev_info *devip)
3323 int ppc, sp, pcode, subpcode;
3324 u32 alloc_len, len, n;
3325 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
3326 unsigned char *cmd = scp->cmnd;
3328 memset(arr, 0, sizeof(arr));
3332 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
3333 return check_condition_result;
3335 pcode = cmd[2] & 0x3f;
3336 subpcode = cmd[3] & 0xff;
3337 alloc_len = get_unaligned_be16(cmd + 7);
3339 if (0 == subpcode) {
3341 case 0x0: /* Supported log pages log page */
3343 arr[n++] = 0x0; /* this page */
3344 arr[n++] = 0xd; /* Temperature */
3345 arr[n++] = 0x2f; /* Informational exceptions */
3348 case 0xd: /* Temperature log page */
3349 arr[3] = resp_temp_l_pg(arr + 4);
3351 case 0x2f: /* Informational exceptions log page */
3352 arr[3] = resp_ie_l_pg(arr + 4);
3355 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3356 return check_condition_result;
3358 } else if (0xff == subpcode) {
3362 case 0x0: /* Supported log pages and subpages log page */
3365 arr[n++] = 0x0; /* 0,0 page */
3367 arr[n++] = 0xff; /* this page */
3369 arr[n++] = 0x0; /* Temperature */
3371 arr[n++] = 0x1; /* Environment reporting */
3373 arr[n++] = 0xff; /* all 0xd subpages */
3375 arr[n++] = 0x0; /* Informational exceptions */
3377 arr[n++] = 0xff; /* all 0x2f subpages */
3380 case 0xd: /* Temperature subpages */
3383 arr[n++] = 0x0; /* Temperature */
3385 arr[n++] = 0x1; /* Environment reporting */
3387 arr[n++] = 0xff; /* these subpages */
3390 case 0x2f: /* Informational exceptions subpages */
3393 arr[n++] = 0x0; /* Informational exceptions */
3395 arr[n++] = 0xff; /* these subpages */
3399 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3400 return check_condition_result;
3402 } else if (subpcode > 0) {
3405 if (pcode == 0xd && subpcode == 1)
3406 arr[3] = resp_env_rep_l_spg(arr + 4);
3408 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
3409 return check_condition_result;
3412 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
3413 return check_condition_result;
3415 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
3416 return fill_from_dev_buffer(scp, arr,
3417 min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
3420 enum {SDEBUG_READ_BLOCK_LIMITS_ARR_SZ = 6};
3421 static int resp_read_blklimits(struct scsi_cmnd *scp,
3422 struct sdebug_dev_info *devip)
3424 unsigned char arr[SDEBUG_READ_BLOCK_LIMITS_ARR_SZ];
3427 put_unaligned_be24(TAPE_MAX_BLKSIZE, arr + 1);
3428 put_unaligned_be16(TAPE_MIN_BLKSIZE, arr + 4);
3429 return fill_from_dev_buffer(scp, arr, SDEBUG_READ_BLOCK_LIMITS_ARR_SZ);
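/*
 * READ BLOCK LIMITS reply layout (SSC): byte 0 = granularity, bytes
 * 1..3 = maximum block length (TAPE_MAX_BLKSIZE), bytes 4..5 = minimum
 * block length (TAPE_MIN_BLKSIZE), matching the two put_unaligned
 * calls above.
 */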
3432 static int resp_locate(struct scsi_cmnd *scp,
3433 struct sdebug_dev_info *devip)
3435 unsigned char *cmd = scp->cmnd;
3436 unsigned int i, pos;
3437 struct tape_block *blp;
3440 if ((cmd[1] & 0x02) != 0) {
3441 if (cmd[8] >= devip->tape_nbr_partitions) {
3442 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
3443 return check_condition_result;
3445 devip->tape_partition = cmd[8];
3447 pos = get_unaligned_be32(cmd + 3);
3448 partition = devip->tape_partition;
3450 for (i = 0, blp = devip->tape_blocks[partition];
3451 i < pos && i < devip->tape_eop[partition]; i++, blp++)
3452 if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3455 devip->tape_location[partition] = i;
3456 mk_sense_buffer(scp, BLANK_CHECK, 0x05, 0);
3457 return check_condition_result;
3459 devip->tape_location[partition] = pos;
3464 static int resp_write_filemarks(struct scsi_cmnd *scp,
3465 struct sdebug_dev_info *devip)
3467 unsigned char *cmd = scp->cmnd;
3468 unsigned int i, count, pos;
3470 int partition = devip->tape_partition;
3472 if ((cmd[1] & 0xfe) != 0) { /* probably write setmarks, not in >= SCSI-3 */
3473 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
3474 return check_condition_result;
3476 count = get_unaligned_be24(cmd + 2);
3477 data = TAPE_BLOCK_FM_FLAG;
3478 for (i = 0, pos = devip->tape_location[partition]; i < count; i++, pos++) {
3479 if (pos >= devip->tape_eop[partition] - 1) { /* don't overwrite EOD */
3480 devip->tape_location[partition] = devip->tape_eop[partition] - 1;
3481 mk_sense_info_tape(scp, VOLUME_OVERFLOW, NO_ADDITIONAL_SENSE,
3482 EOP_EOM_DETECTED_ASCQ, count, SENSE_FLAG_EOM);
3483 return check_condition_result;
3485 (devip->tape_blocks[partition] + pos)->fl_size = data;
3487 (devip->tape_blocks[partition] + pos)->fl_size =
3488 TAPE_BLOCK_EOD_FLAG;
3489 devip->tape_location[partition] = pos;
3494 static int resp_space(struct scsi_cmnd *scp,
3495 struct sdebug_dev_info *devip)
3497 unsigned char *cmd = scp->cmnd, code;
3498 int i = 0, pos, count;
3499 struct tape_block *blp;
3500 int partition = devip->tape_partition;
3502 count = get_unaligned_be24(cmd + 2);
3503 if ((count & 0x800000) != 0) /* extend negative to 32-bit count */
3504 count |= 0xff000000;
3505 code = cmd[1] & 0x0f;
3507 pos = devip->tape_location[partition];
3508 if (code == 0) { /* blocks */
3512 for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
3516 else if (IS_TAPE_BLOCK_FM(blp->fl_size))
3523 } else if (count > 0) {
3524 for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
3525 i++, pos++, blp++) {
3526 if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3528 if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
3532 if (pos >= devip->tape_eop[partition])
3536 } else if (code == 1) { /* filemarks */
3542 for (i = 0, blp = devip->tape_blocks[partition] + pos;
3543 i < count && pos >= 0; i++, pos--, blp--) {
3544 for (pos--, blp-- ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
3545 pos >= 0; pos--, blp--)
3552 } else if (count > 0) {
3553 for (i = 0, blp = devip->tape_blocks[partition] + pos;
3554 i < count; i++, pos++, blp++) {
3555 for ( ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
3556 !IS_TAPE_BLOCK_EOD(blp->fl_size) &&
3557 pos < devip->tape_eop[partition];
3560 if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3562 if (pos >= devip->tape_eop[partition])
3566 } else if (code == 3) { /* EOD */
3567 for (blp = devip->tape_blocks[partition] + pos;
3568 !IS_TAPE_BLOCK_EOD(blp->fl_size) && pos < devip->tape_eop[partition];
3571 if (pos >= devip->tape_eop[partition])
3574 /* sequential filemarks not supported */
3575 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
3576 return check_condition_result;
3578 devip->tape_location[partition] = pos;
3582 devip->tape_location[partition] = pos;
3583 mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
3584 FILEMARK_DETECTED_ASCQ, count - i,
3585 SENSE_FLAG_FILEMARK);
3586 return check_condition_result;
3589 devip->tape_location[partition] = pos;
3590 mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
3591 EOD_DETECTED_ASCQ, count - i,
3593 return check_condition_result;
3596 devip->tape_location[partition] = 0;
3597 mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
3598 BEGINNING_OF_P_M_DETECTED_ASCQ, count - i,
3601 return check_condition_result;
3604 devip->tape_location[partition] = devip->tape_eop[partition] - 1;
3605 mk_sense_info_tape(scp, MEDIUM_ERROR, NO_ADDITIONAL_SENSE,
3606 EOP_EOM_DETECTED_ASCQ, (unsigned int)i,
3608 return check_condition_result;
3611 enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
3612 static int resp_read_position(struct scsi_cmnd *scp,
3613 struct sdebug_dev_info *devip)
3615 u8 *cmd = scp->cmnd;
3617 unsigned char arr[SDEBUG_READ_POSITION_ARR_SZ];
3620 all_length = get_unaligned_be16(cmd + 7);
3621 if ((cmd[1] & 0xfe) != 0 ||
3622 all_length != 0) { /* only short form */
3623 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
3624 all_length ? 7 : 1, 0);
3625 return check_condition_result;
3627 memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
3628 arr[1] = devip->tape_partition;
3629 pos = devip->tape_location[devip->tape_partition];
3630 put_unaligned_be32(pos, arr + 4);
3631 put_unaligned_be32(pos, arr + 8);
3632 return fill_from_dev_buffer(scp, arr, SDEBUG_READ_POSITION_ARR_SZ);
3635 static int resp_rewind(struct scsi_cmnd *scp,
3636 struct sdebug_dev_info *devip)
3638 devip->tape_location[devip->tape_partition] = 0;
3643 static int partition_tape(struct sdebug_dev_info *devip, int nbr_partitions,
3644 int part_0_size, int part_1_size)
3648 if (part_0_size + part_1_size > TAPE_UNITS)
3650 devip->tape_eop[0] = part_0_size;
3651 devip->tape_blocks[0]->fl_size = TAPE_BLOCK_EOD_FLAG;
3652 devip->tape_eop[1] = part_1_size;
3653 devip->tape_blocks[1] = devip->tape_blocks[0] +
3655 devip->tape_blocks[1]->fl_size = TAPE_BLOCK_EOD_FLAG;
3657 for (i = 0 ; i < TAPE_MAX_PARTITIONS; i++)
3658 devip->tape_location[i] = 0;
3660 devip->tape_nbr_partitions = nbr_partitions;
3661 devip->tape_partition = 0;
3663 partition_pg[3] = nbr_partitions - 1;
3664 put_unaligned_be16(devip->tape_eop[0], partition_pg + 8);
3665 put_unaligned_be16(devip->tape_eop[1], partition_pg + 10);
3667 return nbr_partitions;
3670 static int resp_format_medium(struct scsi_cmnd *scp,
3671 struct sdebug_dev_info *devip)
3674 unsigned char *cmd = scp->cmnd;
3677 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 2, -1);
3678 return check_condition_result;
3681 if (devip->tape_pending_nbr_partitions > 0) {
3682 res = partition_tape(devip,
3683 devip->tape_pending_nbr_partitions,
3684 devip->tape_pending_part_0_size,
3685 devip->tape_pending_part_1_size);
3687 res = partition_tape(devip, devip->tape_nbr_partitions,
3688 devip->tape_eop[0], devip->tape_eop[1]);
3690 res = partition_tape(devip, 1, TAPE_UNITS, 0);
3694 devip->tape_pending_nbr_partitions = -1;
3699 static int resp_erase(struct scsi_cmnd *scp,
3700 struct sdebug_dev_info *devip)
3702 int partition = devip->tape_partition;
3703 int pos = devip->tape_location[partition];
3704 struct tape_block *blp;
3706 blp = devip->tape_blocks[partition] + pos;
3707 blp->fl_size = TAPE_BLOCK_EOD_FLAG;
3712 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3714 return devip->nr_zones != 0;
3717 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
3718 unsigned long long lba)
3720 u32 zno = lba >> devip->zsize_shift;
3721 struct sdeb_zone_state *zsp;
3723 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
3724 return &devip->zstate[zno];
3727 * If the zone capacity is less than the zone size, adjust for gap
3730 zno = 2 * zno - devip->nr_conv_zones;
3731 WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
3732 zsp = &devip->zstate[zno];
3733 if (lba >= zsp->z_start + zsp->z_size)
3735 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
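/*
 * Mapping example for the adjustment above: when zcap < zsize each
 * sequential zone is backed by two zstate entries (the zone itself
 * followed by a gap zone) while conventional zones keep one, so with
 * nr_conv_zones == 1 a logical zone number of 3 lands at index
 * 2 * 3 - 1 == 5, and an lba beyond that zone's capacity advances to
 * the adjacent gap entry at index 6.
 */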
3739 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3741 return zsp->z_type == ZBC_ZTYPE_CNV;
3744 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3746 return zsp->z_type == ZBC_ZTYPE_GAP;
3749 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3751 return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3754 static void zbc_close_zone(struct sdebug_dev_info *devip,
3755 struct sdeb_zone_state *zsp)
3757 enum sdebug_z_cond zc;
3759 if (!zbc_zone_is_seq(zsp))
3760 return;
3762 zc = zsp->z_cond;
3763 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3764 return;
3766 if (zc == ZC2_IMPLICIT_OPEN)
3767 devip->nr_imp_open--;
3768 else
3769 devip->nr_exp_open--;
3771 if (zsp->z_wp == zsp->z_start) {
3772 zsp->z_cond = ZC1_EMPTY;
3773 } else {
3774 zsp->z_cond = ZC4_CLOSED;
3775 devip->nr_closed++;
3776 }
3779 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3781 struct sdeb_zone_state *zsp = &devip->zstate[0];
3784 for (i = 0; i < devip->nr_zones; i++, zsp++) {
3785 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3786 zbc_close_zone(devip, zsp);
3787 return;
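/*
 * Transition a sequential zone to the (explicitly or implicitly) open
 * condition. An explicit open of an implicitly open zone closes it
 * first; otherwise, when the max_open limit would be exceeded, one
 * implicitly open zone is closed to make room.
 */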
3792 static void zbc_open_zone(struct sdebug_dev_info *devip,
3793 struct sdeb_zone_state *zsp, bool explicit)
3795 enum sdebug_z_cond zc;
3797 if (!zbc_zone_is_seq(zsp))
3798 return;
3800 zc = zsp->z_cond;
3801 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
3802 (!explicit && zc == ZC2_IMPLICIT_OPEN))
3803 return;
3805 /* Close an implicit open zone if necessary */
3806 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
3807 zbc_close_zone(devip, zsp);
3808 else if (devip->max_open &&
3809 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
3810 zbc_close_imp_open_zone(devip);
3812 if (zsp->z_cond == ZC4_CLOSED)
3813 devip->nr_closed--;
3814 if (explicit) {
3815 zsp->z_cond = ZC3_EXPLICIT_OPEN;
3816 devip->nr_exp_open++;
3817 } else {
3818 zsp->z_cond = ZC2_IMPLICIT_OPEN;
3819 devip->nr_imp_open++;
3820 }
3823 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3824 struct sdeb_zone_state *zsp)
3826 switch (zsp->z_cond) {
3827 case ZC2_IMPLICIT_OPEN:
3828 devip->nr_imp_open--;
3829 break;
3830 case ZC3_EXPLICIT_OPEN:
3831 devip->nr_exp_open--;
3832 break;
3833 default:
3834 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3835 zsp->z_start, zsp->z_cond);
3836 break;
3837 }
3838 zsp->z_cond = ZC5_FULL;
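/*
 * Advance the write pointer of the zone containing lba by num blocks,
 * marking the zone full once the WP reaches the end of the zone. For
 * sequential-write-preferred zones, writing anywhere other than the
 * current WP turns the zone into a non-sequential resource.
 */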
3841 static void zbc_inc_wp(struct sdebug_dev_info *devip,
3842 unsigned long long lba, unsigned int num)
3844 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3845 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
3847 if (!zbc_zone_is_seq(zsp))
3848 return;
3850 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3851 zsp->z_wp += num;
3852 if (zsp->z_wp >= zend)
3853 zbc_set_zone_full(devip, zsp);
3858 if (lba != zsp->z_wp)
3859 zsp->z_non_seq_resource = true;
3865 } else if (end > zsp->z_wp) {
3871 if (zsp->z_wp >= zend)
3872 zbc_set_zone_full(devip, zsp);
3878 zend = zsp->z_start + zsp->z_size;
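/*
 * ZBC access gatekeeper: rejects reads that span zones of different
 * types and writes that touch gap zones, cross a sequential zone
 * boundary, target a full zone, or are not aligned to the write
 * pointer; a permitted write may implicitly open its zone.
 */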
3883 static int check_zbc_access_params(struct scsi_cmnd *scp,
3884 unsigned long long lba, unsigned int num, bool write)
3886 struct scsi_device *sdp = scp->device;
3887 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3888 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3889 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3892 /* For host-managed, reads cannot cross zone type boundaries */
3893 if (zsp->z_type != zsp_end->z_type) {
3894 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3897 return check_condition_result;
3902 /* Writing into a gap zone is not allowed */
3903 if (zbc_zone_is_gap(zsp)) {
3904 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
3905 ATTEMPT_ACCESS_GAP);
3906 return check_condition_result;
3909 /* No restrictions for writes within conventional zones */
3910 if (zbc_zone_is_conv(zsp)) {
3911 if (!zbc_zone_is_conv(zsp_end)) {
3912 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3914 WRITE_BOUNDARY_ASCQ);
3915 return check_condition_result;
3920 if (zsp->z_type == ZBC_ZTYPE_SWR) {
3921 /* Writes cannot cross sequential zone boundaries */
3922 if (zsp_end != zsp) {
3923 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3925 WRITE_BOUNDARY_ASCQ);
3926 return check_condition_result;
3928 /* Cannot write full zones */
3929 if (zsp->z_cond == ZC5_FULL) {
3930 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3931 INVALID_FIELD_IN_CDB, 0);
3932 return check_condition_result;
3934 /* Writes must be aligned to the zone WP */
3935 if (lba != zsp->z_wp) {
3936 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3938 UNALIGNED_WRITE_ASCQ);
3939 return check_condition_result;
3943 /* Handle implicit open of closed and empty zones */
3944 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
3945 if (devip->max_open &&
3946 devip->nr_exp_open >= devip->max_open) {
3947 mk_sense_buffer(scp, DATA_PROTECT,
3950 return check_condition_result;
3952 zbc_open_zone(devip, zsp, false);
3958 static inline int check_device_access_params(struct scsi_cmnd *scp,
3959 unsigned long long lba,
3960 unsigned int num, bool write)
3962 struct scsi_device *sdp = scp->device;
3963 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3965 if (lba + num > sdebug_capacity) {
3966 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3967 return check_condition_result;
3969 /* transfer length excessive (tie in to block limits VPD page) */
3970 if (num > sdebug_store_sectors) {
3971 /* needs work to find which cdb byte 'num' comes from */
3972 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3973 return check_condition_result;
3975 if (write && unlikely(sdebug_wp)) {
3976 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3977 return check_condition_result;
3979 if (sdebug_dev_is_zoned(devip))
3980 return check_zbc_access_params(scp, lba, num, write);
3986 * Note: if BUG_ON() fires it usually indicates a problem with the parser
3987 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3988 * that access any of the "stores" in struct sdeb_store_info should call this
3989 * function with bug_if_fake_rw set to true.
3991 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3992 bool bug_if_fake_rw)
3994 if (sdebug_fake_rw) {
3995 BUG_ON(bug_if_fake_rw); /* See note above */
3996 return NULL;
3997 }
3998 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
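/*
 * Lock helpers: when the sdebug_no_rwlock module parameter is set these
 * degrade to no-ops, otherwise they wrap the usual rwlock primitives.
 * The data/meta split lets metadata (zones, provisioning map) be locked
 * independently of the backing-store data.
 */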
4002 sdeb_read_lock(rwlock_t *lock)
4004 if (sdebug_no_rwlock)
4011 sdeb_read_unlock(rwlock_t *lock)
4013 if (sdebug_no_rwlock)
4020 sdeb_write_lock(rwlock_t *lock)
4022 if (sdebug_no_rwlock)
4029 sdeb_write_unlock(rwlock_t *lock)
4031 if (sdebug_no_rwlock)
4038 sdeb_data_read_lock(struct sdeb_store_info *sip)
4042 sdeb_read_lock(&sip->macc_data_lck);
4046 sdeb_data_read_unlock(struct sdeb_store_info *sip)
4050 sdeb_read_unlock(&sip->macc_data_lck);
4054 sdeb_data_write_lock(struct sdeb_store_info *sip)
4058 sdeb_write_lock(&sip->macc_data_lck);
4062 sdeb_data_write_unlock(struct sdeb_store_info *sip)
4066 sdeb_write_unlock(&sip->macc_data_lck);
4070 sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
4074 sdeb_read_lock(&sip->macc_sector_lck);
4078 sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
4082 sdeb_read_unlock(&sip->macc_sector_lck);
4086 sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
4090 sdeb_write_lock(&sip->macc_sector_lck);
4094 sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
4098 sdeb_write_unlock(&sip->macc_sector_lck);
4103 * We simplify the atomic model to allow only 1x atomic write and many non-
4104 * atomic reads or writes for all LBAs.
4106 * A RW lock has a similar behaviour:
4107 * Only 1x writer and many readers.
4109 * So use a RW lock for per-device read and write locking:
4110 * An atomic access grabs the lock as a writer and non-atomic grabs the lock
4111 * as a reader.
4112 */
4115 sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
4118 sdeb_data_write_lock(sip);
4120 sdeb_data_read_lock(sip);
4124 sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
4127 sdeb_data_write_unlock(sip);
4129 sdeb_data_read_unlock(sip);
4132 /* Allow many reads but only 1x write per sector */
4134 sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
4137 sdeb_data_sector_write_lock(sip);
4139 sdeb_data_sector_read_lock(sip);
4143 sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
4146 sdeb_data_sector_write_unlock(sip);
4148 sdeb_data_sector_read_unlock(sip);
4152 sdeb_meta_read_lock(struct sdeb_store_info *sip)
4154 if (sdebug_no_rwlock) {
4156 __acquire(&sip->macc_meta_lck);
4158 __acquire(&sdeb_fake_rw_lck);
4161 read_lock(&sip->macc_meta_lck);
4163 read_lock(&sdeb_fake_rw_lck);
4168 sdeb_meta_read_unlock(struct sdeb_store_info *sip)
4170 if (sdebug_no_rwlock) {
4172 __release(&sip->macc_meta_lck);
4174 __release(&sdeb_fake_rw_lck);
4177 read_unlock(&sip->macc_meta_lck);
4179 read_unlock(&sdeb_fake_rw_lck);
4184 sdeb_meta_write_lock(struct sdeb_store_info *sip)
4186 if (sdebug_no_rwlock) {
4188 __acquire(&sip->macc_meta_lck);
4190 __acquire(&sdeb_fake_rw_lck);
4193 write_lock(&sip->macc_meta_lck);
4195 write_lock(&sdeb_fake_rw_lck);
4200 sdeb_meta_write_unlock(struct sdeb_store_info *sip)
4202 if (sdebug_no_rwlock) {
4204 __release(&sip->macc_meta_lck);
4206 __release(&sdeb_fake_rw_lck);
4209 write_unlock(&sip->macc_meta_lck);
4211 write_unlock(&sdeb_fake_rw_lck);
4215 /* Returns number of bytes copied or -1 if error. */
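/*
 * The backing store wraps at sdebug_store_sectors: "block" below is the
 * LBA modulo the store size, and a transfer running past the end of the
 * store continues from sector 0.
 */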
4216 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
4217 u32 sg_skip, u64 lba, u32 num, u8 group_number,
4218 bool do_write, bool atomic)
4222 enum dma_data_direction dir;
4223 struct scsi_data_buffer *sdb = &scp->sdb;
4228 * Even though reads are inherently atomic (in this driver), we expect
4229 * the atomic flag only for writes.
4231 if (!do_write && atomic)
4232 return -1;
4234 if (do_write) {
4235 dir = DMA_TO_DEVICE;
4236 write_since_sync = true;
4237 } else {
4238 dir = DMA_FROM_DEVICE;
4239 }
4241 if (!sdb->length || !sip)
4242 return 0;
4243 if (scp->sc_data_direction != dir)
4244 return -1;
4246 if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
4247 atomic_long_inc(&writes_by_group_number[group_number]);
4251 block = do_div(lba, sdebug_store_sectors);
4253 /* Only allow 1x atomic write or multiple non-atomic writes at any given time */
4254 sdeb_data_lock(sip, atomic);
4255 for (i = 0; i < num; i++) {
4256 /* We shouldn't need to lock for atomic writes, but do it anyway */
4257 sdeb_data_sector_lock(sip, do_write);
4258 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
4259 fsp + (block * sdebug_sector_size),
4260 sdebug_sector_size, sg_skip, do_write);
4261 sdeb_data_sector_unlock(sip, do_write);
4263 if (ret != sdebug_sector_size)
4264 break;
4265 sg_skip += sdebug_sector_size;
4266 if (++block >= sdebug_store_sectors)
4267 block = 0;
4269 sdeb_data_unlock(sip, atomic);
4274 /* Returns number of bytes copied or -1 if error. */
4275 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
4277 struct scsi_data_buffer *sdb = &scp->sdb;
4281 if (scp->sc_data_direction != DMA_TO_DEVICE)
4283 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
4284 num * sdebug_sector_size, 0, true);
4287 /* If sip->storep+lba compares equal to arr(num), then copy top half of
4288 * arr into sip->storep+lba and return true. If comparison fails then
4289 * return false. */
4290 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
4291 const u8 *arr, bool compare_only)
4294 u64 block, rest = 0;
4295 u32 store_blks = sdebug_store_sectors;
4296 u32 lb_size = sdebug_sector_size;
4297 u8 *fsp = sip->storep;
4299 block = do_div(lba, store_blks);
4300 if (block + num > store_blks)
4301 rest = block + num - store_blks;
4303 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4304 if (!res)
4305 return res;
4306 if (rest)
4307 res = memcmp(fsp, arr + ((num - rest) * lb_size),
4308 rest * lb_size);
4309 if (!res)
4310 return res;
4311 if (compare_only)
4312 return true;
4313 arr += num * lb_size;
4314 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4315 if (rest)
4316 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
4320 static __be16 dif_compute_csum(const void *buf, int len)
4321 {
4322 __be16 csum;
4324 if (sdebug_guard)
4325 csum = (__force __be16)ip_compute_csum(buf, len);
4326 else
4327 csum = cpu_to_be16(crc_t10dif(buf, len));
4329 return csum;
4330 }
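/*
 * sdebug_guard (module parameter "guard") selects the guard-tag
 * algorithm: non-zero means IP checksum, zero means CRC-T10DIF.
 * dif_verify() below checks the guard tag and, for type 1 and type 2
 * protection, the reference tag as well.
 */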
4332 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
4333 sector_t sector, u32 ei_lba)
4335 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
4337 if (sdt->guard_tag != csum) {
4338 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
4339 (unsigned long)sector,
4340 be16_to_cpu(sdt->guard_tag),
4344 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
4345 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
4346 pr_err("REF check failed on sector %lu\n",
4347 (unsigned long)sector);
4350 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4351 be32_to_cpu(sdt->ref_tag) != ei_lba) {
4352 pr_err("REF check failed on sector %lu\n",
4353 (unsigned long)sector);
4359 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
4360 unsigned int sectors, bool read)
4364 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4365 scp->device->hostdata, true);
4366 struct t10_pi_tuple *dif_storep = sip->dif_storep;
4367 const void *dif_store_end = dif_storep + sdebug_store_sectors;
4368 struct sg_mapping_iter miter;
4370 /* Bytes of protection data to copy into sgl */
4371 resid = sectors * sizeof(*dif_storep);
4373 sg_miter_start(&miter, scsi_prot_sglist(scp),
4374 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
4375 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
4377 while (sg_miter_next(&miter) && resid > 0) {
4378 size_t len = min_t(size_t, miter.length, resid);
4379 void *start = dif_store(sip, sector);
4382 if (dif_store_end < start + len)
4383 rest = start + len - dif_store_end;
4388 memcpy(paddr, start, len - rest);
4390 memcpy(start, paddr, len - rest);
4394 memcpy(paddr + len - rest, dif_storep, rest);
4396 memcpy(dif_storep, paddr + len - rest, rest);
4399 sector += len / sizeof(*dif_storep);
4402 sg_miter_stop(&miter);
4405 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
4406 unsigned int sectors, u32 ei_lba)
4411 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
4412 scp->device->hostdata, true);
4413 struct t10_pi_tuple *sdt;
4415 for (i = 0; i < sectors; i++, ei_lba++) {
4416 sector = start_sec + i;
4417 sdt = dif_store(sip, sector);
4419 if (sdt->app_tag == cpu_to_be16(0xffff))
4423 * Because scsi_debug acts as both initiator and
4424 * target we proceed to verify the PI even if
4425 * RDPROTECT=3. This is done so the "initiator" knows
4426 * which type of error to return. Otherwise we would
4427 * have to iterate over the PI twice.
4429 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
4430 ret = dif_verify(sdt, lba2fake_store(sip, sector),
4439 dif_copy_prot(scp, start_sec, sectors, true);
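/*
 * READ(6) for the tape model: walks tape_blocks[] from the current
 * position, raising FILEMARK, BLANK CHECK (end-of-data) or EOM sense
 * as those markers are reached, and reporting block-length mismatches
 * with the residual count set.
 */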
4445 static int resp_read_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4447 u32 i, num, transfer, size;
4448 u8 *cmd = scp->cmnd;
4449 struct scsi_data_buffer *sdb = &scp->sdb;
4450 int partition = devip->tape_partition;
4451 u32 pos = devip->tape_location[partition];
4452 struct tape_block *blp;
4455 if (cmd[0] != READ_6) { /* Only Read(6) supported */
4456 mk_sense_invalid_opcode(scp);
4457 return illegal_condition_result;
4459 fixed = (cmd[1] & 0x1) != 0;
4460 sili = (cmd[1] & 0x2) != 0;
4461 if (fixed && sili) {
4462 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
4463 return check_condition_result;
4466 transfer = get_unaligned_be24(cmd + 2);
4469 size = devip->tape_blksize;
4471 if (transfer < TAPE_MIN_BLKSIZE ||
4472 transfer > TAPE_MAX_BLKSIZE) {
4473 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4474 return check_condition_result;
4480 for (i = 0, blp = devip->tape_blocks[partition] + pos;
4481 i < num && pos < devip->tape_eop[partition];
4482 i++, pos++, blp++) {
4483 devip->tape_location[partition] = pos + 1;
4484 if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
4485 mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4486 FILEMARK_DETECTED_ASCQ, fixed ? num - i : size,
4487 SENSE_FLAG_FILEMARK);
4488 scsi_set_resid(scp, (num - i) * size);
4489 return check_condition_result;
4492 if (IS_TAPE_BLOCK_EOD(blp->fl_size)) {
4493 mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
4494 EOD_DETECTED_ASCQ, fixed ? num - i : size,
4496 devip->tape_location[partition] = pos;
4497 scsi_set_resid(scp, (num - i) * size);
4498 return check_condition_result;
4500 sg_zero_buffer(sdb->table.sgl, sdb->table.nents,
4502 sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
4503 &(blp->data), 4, i * size, false);
4505 if (blp->fl_size != devip->tape_blksize) {
4506 scsi_set_resid(scp, (num - i) * size);
4507 mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4510 return check_condition_result;
4513 if (blp->fl_size != size) {
4514 if (blp->fl_size < size)
4515 scsi_set_resid(scp, size - blp->fl_size);
4517 mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4518 0, size - blp->fl_size,
4520 return check_condition_result;
4525 if (pos >= devip->tape_eop[partition]) {
4526 mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
4527 EOP_EOM_DETECTED_ASCQ, fixed ? num - i : size,
4529 devip->tape_location[partition] = pos - 1;
4530 return check_condition_result;
4532 devip->tape_location[partition] = pos;
4537 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4544 struct sdeb_store_info *sip = devip2sip(devip, true);
4545 u8 *cmd = scp->cmnd;
4546 bool meta_data_locked = false;
4551 lba = get_unaligned_be64(cmd + 2);
4552 num = get_unaligned_be32(cmd + 10);
4557 lba = get_unaligned_be32(cmd + 2);
4558 num = get_unaligned_be16(cmd + 7);
4563 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
4564 (u32)(cmd[1] & 0x1f) << 16;
4565 num = (0 == cmd[4]) ? 256 : cmd[4];
4570 lba = get_unaligned_be32(cmd + 2);
4571 num = get_unaligned_be32(cmd + 6);
4574 case XDWRITEREAD_10:
4576 lba = get_unaligned_be32(cmd + 2);
4577 num = get_unaligned_be16(cmd + 7);
4580 default: /* assume READ(32) */
4581 lba = get_unaligned_be64(cmd + 12);
4582 ei_lba = get_unaligned_be32(cmd + 20);
4583 num = get_unaligned_be32(cmd + 28);
4587 if (unlikely(have_dif_prot && check_prot)) {
4588 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4590 mk_sense_invalid_opcode(scp);
4591 return check_condition_result;
4593 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4594 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4595 (cmd[1] & 0xe0) == 0)
4596 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
4599 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
4600 atomic_read(&sdeb_inject_pending))) {
4602 atomic_set(&sdeb_inject_pending, 0);
4606 * When checking device access params, for reads we only check data
4607 * versus what is set at init time, so no need to lock.
4609 ret = check_device_access_params(scp, lba, num, false);
4612 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
4613 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
4614 ((lba + num) > sdebug_medium_error_start))) {
4615 /* claim unrecoverable read error */
4616 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
4617 /* set info field and valid bit for fixed descriptor */
4618 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
4619 scp->sense_buffer[0] |= 0x80; /* Valid bit */
4620 ret = (lba < OPT_MEDIUM_ERR_ADDR)
4621 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
4622 put_unaligned_be32(ret, scp->sense_buffer + 3);
4624 scsi_set_resid(scp, scsi_bufflen(scp));
4625 return check_condition_result;
4628 if (sdebug_dev_is_zoned(devip) ||
4629 (sdebug_dix && scsi_prot_sg_count(scp))) {
4630 sdeb_meta_read_lock(sip);
4631 meta_data_locked = true;
4635 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4636 switch (prot_verify_read(scp, lba, num, ei_lba)) {
4637 case 1: /* Guard tag error */
4638 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
4639 sdeb_meta_read_unlock(sip);
4640 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4641 return check_condition_result;
4642 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4643 sdeb_meta_read_unlock(sip);
4644 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4645 return illegal_condition_result;
4648 case 3: /* Reference tag error */
4649 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
4650 sdeb_meta_read_unlock(sip);
4651 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
4652 return check_condition_result;
4653 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
4654 sdeb_meta_read_unlock(sip);
4655 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
4656 return illegal_condition_result;
4662 ret = do_device_access(sip, scp, 0, lba, num, 0, false, false);
4663 if (meta_data_locked)
4664 sdeb_meta_read_unlock(sip);
4665 if (unlikely(ret == -1))
4666 return DID_ERROR << 16;
4668 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
4670 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
4671 atomic_read(&sdeb_inject_pending))) {
4672 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
4673 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
4674 atomic_set(&sdeb_inject_pending, 0);
4675 return check_condition_result;
4676 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
4677 /* Logical block guard check failed */
4678 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4679 atomic_set(&sdeb_inject_pending, 0);
4680 return illegal_condition_result;
4681 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
4682 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4683 atomic_set(&sdeb_inject_pending, 0);
4684 return illegal_condition_result;
4690 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
4691 unsigned int sectors, u32 ei_lba)
4694 struct t10_pi_tuple *sdt;
4696 sector_t sector = start_sec;
4699 struct sg_mapping_iter diter;
4700 struct sg_mapping_iter piter;
4702 BUG_ON(scsi_sg_count(SCpnt) == 0);
4703 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
4705 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
4706 scsi_prot_sg_count(SCpnt),
4707 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4708 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
4709 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
4711 /* For each protection page */
4712 while (sg_miter_next(&piter)) {
4714 if (WARN_ON(!sg_miter_next(&diter))) {
4719 for (ppage_offset = 0; ppage_offset < piter.length;
4720 ppage_offset += sizeof(struct t10_pi_tuple)) {
4721 /* If we're at the end of the current
4722 * data page advance to the next one
4724 if (dpage_offset >= diter.length) {
4725 if (WARN_ON(!sg_miter_next(&diter))) {
4732 sdt = piter.addr + ppage_offset;
4733 daddr = diter.addr + dpage_offset;
4735 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
4736 ret = dif_verify(sdt, daddr, sector, ei_lba);
4743 dpage_offset += sdebug_sector_size;
4745 diter.consumed = dpage_offset;
4746 sg_miter_stop(&diter);
4748 sg_miter_stop(&piter);
4750 dif_copy_prot(SCpnt, start_sec, sectors, false);
4757 sg_miter_stop(&diter);
4758 sg_miter_stop(&piter);
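/*
 * Logical block provisioning map helpers. The bitmap sip->map_storep
 * tracks which unmap granules are mapped; lba_to_map_index() and
 * map_index_to_lba() convert between LBAs and granule indexes,
 * honouring the configured alignment. Worked example (assumed
 * parameters): with sdebug_unmap_granularity = 8 and
 * sdebug_unmap_alignment = 0, LBAs 0-7 share index 0, LBA 17 maps to
 * index 2, and map_index_to_lba(2) returns LBA 16.
 */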
4762 static unsigned long lba_to_map_index(sector_t lba)
4764 if (sdebug_unmap_alignment)
4765 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
4766 sector_div(lba, sdebug_unmap_granularity);
4770 static sector_t map_index_to_lba(unsigned long index)
4772 sector_t lba = index * sdebug_unmap_granularity;
4774 if (sdebug_unmap_alignment)
4775 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
4779 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
4783 unsigned int mapped;
4784 unsigned long index;
4787 index = lba_to_map_index(lba);
4788 mapped = test_bit(index, sip->map_storep);
4791 next = find_next_zero_bit(sip->map_storep, map_size, index);
4793 next = find_next_bit(sip->map_storep, map_size, index);
4795 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
4800 static void map_region(struct sdeb_store_info *sip, sector_t lba,
4803 sector_t end = lba + len;
4806 unsigned long index = lba_to_map_index(lba);
4808 if (index < map_size)
4809 set_bit(index, sip->map_storep);
4811 lba = map_index_to_lba(index + 1);
4815 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
4818 sector_t end = lba + len;
4819 u8 *fsp = sip->storep;
4822 unsigned long index = lba_to_map_index(lba);
4824 if (lba == map_index_to_lba(index) &&
4825 lba + sdebug_unmap_granularity <= end &&
4827 clear_bit(index, sip->map_storep);
4828 if (sdebug_lbprz) { /* LBPRZ=1: zero-fill; LBPRZ=2: 0xff-fill */
4829 memset(fsp + lba * sdebug_sector_size,
4830 (sdebug_lbprz & 1) ? 0 : 0xff,
4831 sdebug_sector_size *
4832 sdebug_unmap_granularity);
4834 if (sip->dif_storep) {
4835 memset(sip->dif_storep + lba, 0xff,
4836 sizeof(*sip->dif_storep) *
4837 sdebug_unmap_granularity);
4840 lba = map_index_to_lba(index + 1);
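/*
 * WRITE(6) for the tape model: appends blocks at the current position,
 * terminates the written data with an EOD marker, and reports an
 * early-warning or VOLUME OVERFLOW condition as the position nears or
 * reaches the end of the partition.
 */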
4844 static int resp_write_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4846 u32 i, num, transfer, size, written = 0;
4847 u8 *cmd = scp->cmnd;
4848 struct scsi_data_buffer *sdb = &scp->sdb;
4849 int partition = devip->tape_partition;
4850 int pos = devip->tape_location[partition];
4851 struct tape_block *blp;
4854 if (cmd[0] != WRITE_6) { /* Only Write(6) supported */
4855 mk_sense_invalid_opcode(scp);
4856 return illegal_condition_result;
4859 fixed = (cmd[1] & 1) != 0;
4860 transfer = get_unaligned_be24(cmd + 2);
4863 size = devip->tape_blksize;
4865 if (transfer < TAPE_MIN_BLKSIZE ||
4866 transfer > TAPE_MAX_BLKSIZE) {
4867 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4868 return check_condition_result;
4874 scsi_set_resid(scp, num * transfer);
4875 for (i = 0, blp = devip->tape_blocks[partition] + pos, ew = false;
4876 i < num && pos < devip->tape_eop[partition] - 1; i++, pos++, blp++) {
4877 blp->fl_size = size;
4878 sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
4879 &(blp->data), 4, i * size, true);
4881 scsi_set_resid(scp, num * transfer - written);
4882 ew |= (pos == devip->tape_eop[partition] - TAPE_EW);
4885 devip->tape_location[partition] = pos;
4886 blp->fl_size = TAPE_BLOCK_EOD_FLAG;
4887 if (pos >= devip->tape_eop[partition] - 1) {
4888 mk_sense_info_tape(scp, VOLUME_OVERFLOW,
4889 NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
4890 fixed ? num - i : transfer,
4892 return check_condition_result;
4894 if (ew) { /* early warning */
4895 mk_sense_info_tape(scp, NO_SENSE,
4896 NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
4897 fixed ? num - i : transfer,
4899 return check_condition_result;
4905 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4913 struct sdeb_store_info *sip = devip2sip(devip, true);
4914 u8 *cmd = scp->cmnd;
4915 bool meta_data_locked = false;
4920 lba = get_unaligned_be64(cmd + 2);
4921 num = get_unaligned_be32(cmd + 10);
4922 group = cmd[14] & 0x3f;
4927 lba = get_unaligned_be32(cmd + 2);
4928 group = cmd[6] & 0x3f;
4929 num = get_unaligned_be16(cmd + 7);
4934 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
4935 (u32)(cmd[1] & 0x1f) << 16;
4936 num = (0 == cmd[4]) ? 256 : cmd[4];
4941 lba = get_unaligned_be32(cmd + 2);
4942 num = get_unaligned_be32(cmd + 6);
4943 group = cmd[6] & 0x3f;
4946 case 0x53: /* XDWRITEREAD(10) */
4948 lba = get_unaligned_be32(cmd + 2);
4949 group = cmd[6] & 0x1f;
4950 num = get_unaligned_be16(cmd + 7);
4953 default: /* assume WRITE(32) */
4954 group = cmd[6] & 0x3f;
4955 lba = get_unaligned_be64(cmd + 12);
4956 ei_lba = get_unaligned_be32(cmd + 20);
4957 num = get_unaligned_be32(cmd + 28);
4961 if (unlikely(have_dif_prot && check_prot)) {
4962 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4964 mk_sense_invalid_opcode(scp);
4965 return check_condition_result;
4967 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4968 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4969 (cmd[1] & 0xe0) == 0)
4970 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4974 if (sdebug_dev_is_zoned(devip) ||
4975 (sdebug_dix && scsi_prot_sg_count(scp)) ||
4977 sdeb_meta_write_lock(sip);
4978 meta_data_locked = true;
4981 ret = check_device_access_params(scp, lba, num, true);
4983 if (meta_data_locked)
4984 sdeb_meta_write_unlock(sip);
4989 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
4990 switch (prot_verify_write(scp, lba, num, ei_lba)) {
4991 case 1: /* Guard tag error */
4992 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
4993 sdeb_meta_write_unlock(sip);
4994 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
4995 return illegal_condition_result;
4996 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
4997 sdeb_meta_write_unlock(sip);
4998 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
4999 return check_condition_result;
5002 case 3: /* Reference tag error */
5003 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
5004 sdeb_meta_write_unlock(sip);
5005 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
5006 return illegal_condition_result;
5007 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
5008 sdeb_meta_write_unlock(sip);
5009 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
5010 return check_condition_result;
5016 ret = do_device_access(sip, scp, 0, lba, num, group, true, false);
5017 if (unlikely(scsi_debug_lbp()))
5018 map_region(sip, lba, num);
5020 /* If ZBC zone then bump its write pointer */
5021 if (sdebug_dev_is_zoned(devip))
5022 zbc_inc_wp(devip, lba, num);
5023 if (meta_data_locked)
5024 sdeb_meta_write_unlock(sip);
5026 if (unlikely(-1 == ret))
5027 return DID_ERROR << 16;
5028 else if (unlikely(sdebug_verbose &&
5029 (ret < (num * sdebug_sector_size))))
5030 sdev_printk(KERN_INFO, scp->device,
5031 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
5032 my_name, num * sdebug_sector_size, ret);
5034 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
5035 atomic_read(&sdeb_inject_pending))) {
5036 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
5037 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
5038 atomic_set(&sdeb_inject_pending, 0);
5039 return check_condition_result;
5040 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
5041 /* Logical block guard check failed */
5042 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
5043 atomic_set(&sdeb_inject_pending, 0);
5044 return illegal_condition_result;
5045 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
5046 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
5047 atomic_set(&sdeb_inject_pending, 0);
5048 return illegal_condition_result;
5055 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
5056 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
5058 static int resp_write_scat(struct scsi_cmnd *scp,
5059 struct sdebug_dev_info *devip)
5061 u8 *cmd = scp->cmnd;
5064 struct sdeb_store_info *sip = devip2sip(devip, true);
5066 u16 lbdof, num_lrd, k;
5067 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
5068 u32 lb_size = sdebug_sector_size;
5074 static const u32 lrd_size = 32; /* + parameter list header size */
5076 if (cmd[0] == VARIABLE_LENGTH_CMD) {
5078 group = cmd[6] & 0x3f;
5079 wrprotect = (cmd[10] >> 5) & 0x7;
5080 lbdof = get_unaligned_be16(cmd + 12);
5081 num_lrd = get_unaligned_be16(cmd + 16);
5082 bt_len = get_unaligned_be32(cmd + 28);
5083 } else { /* that leaves WRITE SCATTERED(16) */
5085 wrprotect = (cmd[2] >> 5) & 0x7;
5086 lbdof = get_unaligned_be16(cmd + 4);
5087 num_lrd = get_unaligned_be16(cmd + 8);
5088 bt_len = get_unaligned_be32(cmd + 10);
5089 group = cmd[14] & 0x3f;
5090 if (unlikely(have_dif_prot)) {
5091 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
5093 mk_sense_invalid_opcode(scp);
5094 return illegal_condition_result;
5096 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
5097 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
5099 sdev_printk(KERN_ERR, scp->device,
5100 "Unprotected WR to DIF device\n");
5103 if ((num_lrd == 0) || (bt_len == 0))
5104 return 0; /* T10 says these do-nothings are not errors */
5107 sdev_printk(KERN_INFO, scp->device,
5108 "%s: %s: LB Data Offset field bad\n",
5110 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5111 return illegal_condition_result;
5113 lbdof_blen = lbdof * lb_size;
5114 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
5116 sdev_printk(KERN_INFO, scp->device,
5117 "%s: %s: LBA range descriptors don't fit\n",
5119 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5120 return illegal_condition_result;
5122 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
5124 return SCSI_MLQUEUE_HOST_BUSY;
5126 sdev_printk(KERN_INFO, scp->device,
5127 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
5128 my_name, __func__, lbdof_blen);
5129 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
5131 ret = DID_ERROR << 16;
5135 /* Just keep it simple and always lock for now */
5136 sdeb_meta_write_lock(sip);
5137 sg_off = lbdof_blen;
5138 /* Spec says Buffer xfer Length field in number of LBs in dout */
5140 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
5141 lba = get_unaligned_be64(up + 0);
5142 num = get_unaligned_be32(up + 8);
5144 sdev_printk(KERN_INFO, scp->device,
5145 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
5146 my_name, __func__, k, lba, num, sg_off);
5149 ret = check_device_access_params(scp, lba, num, true);
5151 goto err_out_unlock;
5152 num_by = num * lb_size;
5153 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
5155 if ((cum_lb + num) > bt_len) {
5157 sdev_printk(KERN_INFO, scp->device,
5158 "%s: %s: sum of blocks > data provided\n",
5160 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
5162 ret = illegal_condition_result;
5163 goto err_out_unlock;
5167 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
5168 int prot_ret = prot_verify_write(scp, lba, num,
5172 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
5174 ret = illegal_condition_result;
5175 goto err_out_unlock;
5180 * Write ranges atomically to keep as close to pre-atomic
5181 * writes behaviour as possible.
5183 ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true);
5184 /* If ZBC zone then bump its write pointer */
5185 if (sdebug_dev_is_zoned(devip))
5186 zbc_inc_wp(devip, lba, num);
5187 if (unlikely(scsi_debug_lbp()))
5188 map_region(sip, lba, num);
5189 if (unlikely(-1 == ret)) {
5190 ret = DID_ERROR << 16;
5191 goto err_out_unlock;
5192 } else if (unlikely(sdebug_verbose && (ret < num_by)))
5193 sdev_printk(KERN_INFO, scp->device,
5194 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
5195 my_name, num_by, ret);
5197 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
5198 atomic_read(&sdeb_inject_pending))) {
5199 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
5200 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
5201 atomic_set(&sdeb_inject_pending, 0);
5202 ret = check_condition_result;
5203 goto err_out_unlock;
5204 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
5205 /* Logical block guard check failed */
5206 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
5207 atomic_set(&sdeb_inject_pending, 0);
5208 ret = illegal_condition_result;
5209 goto err_out_unlock;
5210 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
5211 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
5212 atomic_set(&sdeb_inject_pending, 0);
5213 ret = illegal_condition_result;
5214 goto err_out_unlock;
5222 sdeb_meta_write_unlock(sip);
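/*
 * WRITE SAME worker: writes one logical block (zeroed when NDOB is set,
 * otherwise fetched from the data-out buffer) and replicates it across
 * the remaining num - 1 blocks; with UNMAP set and logical block
 * provisioning enabled the range is deallocated instead.
 */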
5228 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
5229 u32 ei_lba, bool unmap, bool ndob)
5231 struct scsi_device *sdp = scp->device;
5232 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5233 unsigned long long i;
5235 u32 lb_size = sdebug_sector_size;
5237 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
5238 scp->device->hostdata, true);
5241 bool meta_data_locked = false;
5243 if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) {
5244 sdeb_meta_write_lock(sip);
5245 meta_data_locked = true;
5248 ret = check_device_access_params(scp, lba, num, true);
5252 if (unmap && scsi_debug_lbp()) {
5253 unmap_region(sip, lba, num);
5257 block = do_div(lbaa, sdebug_store_sectors);
5258 /* if ndob then zero 1 logical block, else fetch 1 logical block */
5260 fs1p = fsp + (block * lb_size);
5261 sdeb_data_write_lock(sip);
5263 memset(fs1p, 0, lb_size);
5266 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
5269 ret = DID_ERROR << 16;
5271 } else if (sdebug_verbose && !ndob && (ret < lb_size))
5272 sdev_printk(KERN_INFO, scp->device,
5273 "%s: %s: lb size=%u, IO sent=%d bytes\n",
5274 my_name, "write same", lb_size, ret);
5276 /* Copy first sector to remaining blocks */
5277 for (i = 1 ; i < num ; i++) {
5279 block = do_div(lbaa, sdebug_store_sectors);
5280 memmove(fsp + (block * lb_size), fs1p, lb_size);
5282 if (scsi_debug_lbp())
5283 map_region(sip, lba, num);
5284 /* If ZBC zone then bump its write pointer */
5285 if (sdebug_dev_is_zoned(devip))
5286 zbc_inc_wp(devip, lba, num);
5287 sdeb_data_write_unlock(sip);
5290 if (meta_data_locked)
5291 sdeb_meta_write_unlock(sip);
5295 static int resp_write_same_10(struct scsi_cmnd *scp,
5296 struct sdebug_dev_info *devip)
5298 u8 *cmd = scp->cmnd;
5305 if (sdebug_lbpws10 == 0) {
5306 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5307 return check_condition_result;
5311 lba = get_unaligned_be32(cmd + 2);
5312 num = get_unaligned_be16(cmd + 7);
5313 if (num > sdebug_write_same_length) {
5314 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
5315 return check_condition_result;
5317 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
5320 static int resp_write_same_16(struct scsi_cmnd *scp,
5321 struct sdebug_dev_info *devip)
5323 u8 *cmd = scp->cmnd;
5330 if (cmd[1] & 0x8) { /* UNMAP */
5331 if (sdebug_lbpws == 0) {
5332 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5333 return check_condition_result;
5337 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
5339 lba = get_unaligned_be64(cmd + 2);
5340 num = get_unaligned_be32(cmd + 10);
5341 if (num > sdebug_write_same_length) {
5342 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
5343 return check_condition_result;
5345 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
5348 /* Note the mode field is in the same position as the (lower) service action
5349 * field. For the Report supported operation codes command, SPC-4 suggests
5350 * each mode of this command should be reported separately; left for the future. */
5351 static int resp_write_buffer(struct scsi_cmnd *scp,
5352 struct sdebug_dev_info *devip)
5354 u8 *cmd = scp->cmnd;
5355 struct scsi_device *sdp = scp->device;
5356 struct sdebug_dev_info *dp;
5359 mode = cmd[1] & 0x1f;
5361 case 0x4: /* download microcode (MC) and activate (ACT) */
5362 /* set UAs on this device only */
5363 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5364 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
5366 case 0x5: /* download MC, save and ACT */
5367 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
5369 case 0x6: /* download MC with offsets and ACT */
5370 /* set UAs on most devices (LUs) in this target */
5371 list_for_each_entry(dp,
5372 &devip->sdbg_host->dev_info_list,
5374 if (dp->target == sdp->id) {
5375 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
5377 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
5381 case 0x7: /* download MC with offsets, save, and ACT */
5382 /* set UA on all devices (LUs) in this target */
5383 list_for_each_entry(dp,
5384 &devip->sdbg_host->dev_info_list,
5386 if (dp->target == sdp->id)
5387 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
5391 /* do nothing for this command for other mode values */
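/*
 * COMPARE AND WRITE: fetches 2 * num blocks from the data-out buffer
 * (verify data followed by write data), compares the first half with
 * the store and, only on a full match, writes the second half; any
 * mismatch returns a MISCOMPARE sense.
 */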
5397 static int resp_comp_write(struct scsi_cmnd *scp,
5398 struct sdebug_dev_info *devip)
5400 u8 *cmd = scp->cmnd;
5402 struct sdeb_store_info *sip = devip2sip(devip, true);
5405 u32 lb_size = sdebug_sector_size;
5410 lba = get_unaligned_be64(cmd + 2);
5411 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
5413 return 0; /* degenerate case, not an error */
5414 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
5416 mk_sense_invalid_opcode(scp);
5417 return check_condition_result;
5419 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
5420 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
5421 (cmd[1] & 0xe0) == 0)
5422 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
5424 ret = check_device_access_params(scp, lba, num, false);
5428 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
5430 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5432 return check_condition_result;
5435 ret = do_dout_fetch(scp, dnum, arr);
5437 retval = DID_ERROR << 16;
5439 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
5440 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
5441 "indicated=%u, IO sent=%d bytes\n", my_name,
5442 dnum * lb_size, ret);
5444 sdeb_data_write_lock(sip);
5445 sdeb_meta_write_lock(sip);
5446 if (!comp_write_worker(sip, lba, num, arr, false)) {
5447 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
5448 retval = check_condition_result;
5449 goto cleanup_unlock;
5452 /* Cover sip->map_storep (which map_region() sets) with the data lock */
5453 if (scsi_debug_lbp())
5454 map_region(sip, lba, num);
5456 sdeb_meta_write_unlock(sip);
5457 sdeb_data_write_unlock(sip);
5463 struct unmap_block_desc {
5469 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5472 struct unmap_block_desc *desc;
5473 struct sdeb_store_info *sip = devip2sip(devip, true);
5474 unsigned int i, payload_len, descriptors;
5477 if (!scsi_debug_lbp())
5478 return 0; /* fib and say it's done */
5479 payload_len = get_unaligned_be16(scp->cmnd + 7);
5480 BUG_ON(scsi_bufflen(scp) != payload_len);
5482 descriptors = (payload_len - 8) / 16;
5483 if (descriptors > sdebug_unmap_max_desc) {
5484 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
5485 return check_condition_result;
5488 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
5490 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5492 return check_condition_result;
5495 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
5497 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
5498 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
5500 desc = (void *)&buf[8];
5502 sdeb_meta_write_lock(sip);
5504 for (i = 0 ; i < descriptors ; i++) {
5505 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
5506 unsigned int num = get_unaligned_be32(&desc[i].blocks);
5508 ret = check_device_access_params(scp, lba, num, true);
5512 unmap_region(sip, lba, num);
5518 sdeb_meta_write_unlock(sip);
5524 #define SDEBUG_GET_LBA_STATUS_LEN 32
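/*
 * GET LBA STATUS: returns a single descriptor for the run of blocks,
 * starting at the given LBA, that share the same mapped/deallocated
 * state (or simply the remaining capacity when LBP is disabled).
 */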
5526 static int resp_get_lba_status(struct scsi_cmnd *scp,
5527 struct sdebug_dev_info *devip)
5529 u8 *cmd = scp->cmnd;
5531 u32 alloc_len, mapped, num;
5533 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
5535 lba = get_unaligned_be64(cmd + 2);
5536 alloc_len = get_unaligned_be32(cmd + 10);
5541 ret = check_device_access_params(scp, lba, 1, false);
5545 if (scsi_debug_lbp()) {
5546 struct sdeb_store_info *sip = devip2sip(devip, true);
5548 mapped = map_state(sip, lba, &num);
5551 /* following just in case virtual_gb changed */
5552 sdebug_capacity = get_sdebug_capacity();
5553 if (sdebug_capacity - lba <= 0xffffffff)
5554 num = sdebug_capacity - lba;
5559 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
5560 put_unaligned_be32(20, arr); /* Parameter Data Length */
5561 put_unaligned_be64(lba, arr + 8); /* LBA */
5562 put_unaligned_be32(num, arr + 16); /* Number of blocks */
5563 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
5565 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
5568 static int resp_get_stream_status(struct scsi_cmnd *scp,
5569 struct sdebug_dev_info *devip)
5571 u16 starting_stream_id, stream_id;
5572 const u8 *cmd = scp->cmnd;
5573 u32 alloc_len, offset;
5575 struct scsi_stream_status_header *h = (void *)arr;
5577 starting_stream_id = get_unaligned_be16(cmd + 4);
5578 alloc_len = get_unaligned_be32(cmd + 10);
5580 if (alloc_len < 8) {
5581 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
5582 return check_condition_result;
5585 if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
5586 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
5587 return check_condition_result;
5591 * The GET STREAM STATUS command only reports status information
5592 * about open streams. Treat the non-permanent stream as open.
5594 put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
5595 &h->number_of_open_streams);
5597 for (offset = 8, stream_id = starting_stream_id;
5598 offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
5599 stream_id < MAXIMUM_NUMBER_OF_STREAMS;
5600 offset += 8, stream_id++) {
5601 struct scsi_stream_status *stream_status = (void *)arr + offset;
5603 stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
5604 put_unaligned_be16(stream_id,
5605 &stream_status->stream_identifier);
5606 stream_status->rel_lifetime = stream_id + 1;
5608 put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
5610 return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
5613 static int resp_sync_cache(struct scsi_cmnd *scp,
5614 struct sdebug_dev_info *devip)
5619 u8 *cmd = scp->cmnd;
5621 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
5622 lba = get_unaligned_be32(cmd + 2);
5623 num_blocks = get_unaligned_be16(cmd + 7);
5624 } else { /* SYNCHRONIZE_CACHE(16) */
5625 lba = get_unaligned_be64(cmd + 2);
5626 num_blocks = get_unaligned_be32(cmd + 10);
5628 if (lba + num_blocks > sdebug_capacity) {
5629 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5630 return check_condition_result;
5632 if (!write_since_sync || (cmd[1] & 0x2))
5633 res = SDEG_RES_IMMED_MASK;
5634 else /* delay if write_since_sync and IMMED clear */
5635 write_since_sync = false;
5640 * Assuming the LBA+num_blocks is not out-of-range, this function will return
5641 * CONDITION MET if the specified blocks will fit (or have fitted) in the
5642 * cache, and a GOOD status otherwise. Model a disk with a big cache and
5643 * yield CONDITION MET. Actually tries to bring the range in main memory
5644 * into the cache associated with the CPU(s).
5646 * The pcode 0x34 is also used for READ POSITION by tape devices.
5648 static int resp_pre_fetch(struct scsi_cmnd *scp,
5649 struct sdebug_dev_info *devip)
5653 u64 block, rest = 0;
5655 u8 *cmd = scp->cmnd;
5656 struct sdeb_store_info *sip = devip2sip(devip, true);
5657 u8 *fsp = sip->storep;
5659 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
5660 lba = get_unaligned_be32(cmd + 2);
5661 nblks = get_unaligned_be16(cmd + 7);
5662 } else { /* PRE-FETCH(16) */
5663 lba = get_unaligned_be64(cmd + 2);
5664 nblks = get_unaligned_be32(cmd + 10);
5666 if (lba + nblks > sdebug_capacity) {
5667 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5668 return check_condition_result;
5672 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
5673 block = do_div(lba, sdebug_store_sectors);
5674 if (block + nblks > sdebug_store_sectors)
5675 rest = block + nblks - sdebug_store_sectors;
5677 /* Try to bring the PRE-FETCH range into CPU's cache */
5678 sdeb_data_read_lock(sip);
5679 prefetch_range(fsp + (sdebug_sector_size * block),
5680 (nblks - rest) * sdebug_sector_size);
5682 prefetch_range(fsp, rest * sdebug_sector_size);
5684 sdeb_data_read_unlock(sip);
5687 res = SDEG_RES_IMMED_MASK;
5688 return res | condition_met_result;
5691 #define RL_BUCKET_ELEMS 8
5693 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
5694 * (W-LUN), the normal Linux scanning logic does not associate it with a
5695 * device (e.g. /dev/sg7). The following magic will make that association:
5696 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
5697 * where <n> is a host number. If there are multiple targets in a host then
5698 * the above will associate a W-LUN to each target. To only get a W-LUN
5699 * for target 2, then use "echo '- 2 49409' > scan" .
5701 static int resp_report_luns(struct scsi_cmnd *scp,
5702 struct sdebug_dev_info *devip)
5704 unsigned char *cmd = scp->cmnd;
5705 unsigned int alloc_len;
5706 unsigned char select_report;
5708 struct scsi_lun *lun_p;
5709 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
5710 unsigned int lun_cnt; /* normal LUN count (max: 256) */
5711 unsigned int wlun_cnt; /* report luns W-LUN count */
5712 unsigned int tlun_cnt; /* total LUN count */
5713 unsigned int rlen; /* response length (in bytes) */
5715 unsigned int off_rsp = 0;
5716 const int sz_lun = sizeof(struct scsi_lun);
5718 clear_luns_changed_on_target(devip);
5720 select_report = cmd[2];
5721 alloc_len = get_unaligned_be32(cmd + 6);
5723 if (alloc_len < 4) {
5724 pr_err("alloc len too small %d\n", alloc_len);
5725 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
5726 return check_condition_result;
5729 switch (select_report) {
5730 case 0: /* all LUNs apart from W-LUNs */
5731 lun_cnt = sdebug_max_luns;
5734 case 1: /* only W-LUNs */
5738 case 2: /* all LUNs */
5739 lun_cnt = sdebug_max_luns;
5742 case 0x10: /* only administrative LUs */
5743 case 0x11: /* see SPC-5 */
5744 case 0x12: /* only subsidiary LUs owned by referenced LU */
5746 pr_debug("select report invalid %d\n", select_report);
5747 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
5748 return check_condition_result;
5751 if (sdebug_no_lun_0 && (lun_cnt > 0))
5754 tlun_cnt = lun_cnt + wlun_cnt;
5755 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
5756 scsi_set_resid(scp, scsi_bufflen(scp));
5757 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
5758 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
5760 /* loops rely on sizeof response header same as sizeof lun (both 8) */
5761 lun = sdebug_no_lun_0 ? 1 : 0;
5762 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
5763 memset(arr, 0, sizeof(arr));
5764 lun_p = (struct scsi_lun *)&arr[0];
5766 put_unaligned_be32(rlen, &arr[0]);
5770 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
5771 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
5773 int_to_scsilun(lun++, lun_p);
5774 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
5775 lun_p->scsi_lun[0] |= 0x40;
5777 if (j < RL_BUCKET_ELEMS)
5780 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
5786 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
5790 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
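/*
 * VERIFY(10/16): BYTCHK 0 claims success without reading the medium,
 * BYTCHK 1 compares the data-out buffer against the store, and
 * BYTCHK 3 sends a single block that is compared against every block
 * in the range.
 */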
5794 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5796 bool is_bytchk3 = false;
5799 u32 vnum, a_num, off;
5800 const u32 lb_size = sdebug_sector_size;
5803 u8 *cmd = scp->cmnd;
5804 struct sdeb_store_info *sip = devip2sip(devip, true);
5806 bytchk = (cmd[1] >> 1) & 0x3;
5808 return 0; /* always claim internal verify okay */
5809 } else if (bytchk == 2) {
5810 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
5811 return check_condition_result;
5812 } else if (bytchk == 3) {
5813 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
5817 lba = get_unaligned_be64(cmd + 2);
5818 vnum = get_unaligned_be32(cmd + 10);
5820 case VERIFY: /* is VERIFY(10) */
5821 lba = get_unaligned_be32(cmd + 2);
5822 vnum = get_unaligned_be16(cmd + 7);
5825 mk_sense_invalid_opcode(scp);
5826 return check_condition_result;
5829 return 0; /* not an error */
5830 a_num = is_bytchk3 ? 1 : vnum;
5831 /* Treat following check like one for read (i.e. no write) access */
5832 ret = check_device_access_params(scp, lba, a_num, false);
5836 arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
5838 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5840 return check_condition_result;
5842 /* Not changing store, so only need read access */
5843 sdeb_data_read_lock(sip);
5845 ret = do_dout_fetch(scp, a_num, arr);
5847 ret = DID_ERROR << 16;
5849 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
5850 sdev_printk(KERN_INFO, scp->device,
5851 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
5852 my_name, __func__, a_num * lb_size, ret);
5855 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
5856 memcpy(arr + off, arr, lb_size);
5859 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
5860 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
5861 ret = check_condition_result;
5865 sdeb_data_read_unlock(sip);
5870 #define RZONES_DESC_HD 64
5872 /* Report zones depending on start LBA and reporting options */
5873 static int resp_report_zones(struct scsi_cmnd *scp,
5874 struct sdebug_dev_info *devip)
5876 unsigned int rep_max_zones, nrz = 0;
5878 u32 alloc_len, rep_opts, rep_len;
5881 u8 *arr = NULL, *desc;
5882 u8 *cmd = scp->cmnd;
5883 struct sdeb_zone_state *zsp = NULL;
5884 struct sdeb_store_info *sip = devip2sip(devip, false);
5886 if (!sdebug_dev_is_zoned(devip)) {
5887 mk_sense_invalid_opcode(scp);
5888 return check_condition_result;
5890 zs_lba = get_unaligned_be64(cmd + 2);
5891 alloc_len = get_unaligned_be32(cmd + 10);
5893 return 0; /* not an error */
5894 rep_opts = cmd[14] & 0x3f;
5895 partial = cmd[14] & 0x80;
5897 if (zs_lba >= sdebug_capacity) {
5898 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5899 return check_condition_result;
5902 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
5904 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
5906 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5908 return check_condition_result;
5911 sdeb_meta_read_lock(sip);
5914 for (lba = zs_lba; lba < sdebug_capacity;
5915 lba = zsp->z_start + zsp->z_size) {
5916 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
5917 break;
5918 zsp = zbc_zone(devip, lba);
5925 if (zsp->z_cond != ZC1_EMPTY)
5929 /* Implicit open zones */
5930 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
5934 /* Explicit open zones */
5935 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
5940 if (zsp->z_cond != ZC4_CLOSED)
5945 if (zsp->z_cond != ZC5_FULL)
5952 * Read-only, offline, reset WP recommended are
5953 * not emulated: no zones to report;
5957 /* non-seq-resource set */
5958 if (!zsp->z_non_seq_resource)
5962 /* All zones except gap zones. */
5963 if (zbc_zone_is_gap(zsp))
5967 /* Not write pointer (conventional) zones */
5968 if (zbc_zone_is_seq(zsp))
5972 mk_sense_buffer(scp, ILLEGAL_REQUEST,
5973 INVALID_FIELD_IN_CDB, 0);
5974 ret = check_condition_result;
5978 if (nrz < rep_max_zones) {
5979 /* Fill zone descriptor */
5980 desc[0] = zsp->z_type;
5981 desc[1] = zsp->z_cond << 4;
5982 if (zsp->z_non_seq_resource)
5983 desc[1] |= 1 << 1;
5984 put_unaligned_be64((u64)zsp->z_size, desc + 8);
5985 put_unaligned_be64((u64)zsp->z_start, desc + 16);
5986 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
5990 if (partial && nrz >= rep_max_zones)
5997 /* Zone list length. */
5998 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
6000 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
6001 /* Zone starting LBA granularity. */
6002 if (devip->zcap < devip->zsize)
6003 put_unaligned_be64(devip->zsize, arr + 16);
6005 rep_len = (unsigned long)desc - (unsigned long)arr;
6006 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
6009 sdeb_meta_read_unlock(sip);
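/*
 * WRITE ATOMIC(16): validates the request against the
 * sdebug_atomic_wr_* alignment, granularity, boundary and length
 * limits, then performs the whole transfer as one atomic
 * do_device_access() call.
 */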
6014 static int resp_atomic_write(struct scsi_cmnd *scp,
6015 struct sdebug_dev_info *devip)
6017 struct sdeb_store_info *sip;
6018 u8 *cmd = scp->cmnd;
6023 if (!scsi_debug_atomic_write()) {
6024 mk_sense_invalid_opcode(scp);
6025 return check_condition_result;
6028 sip = devip2sip(devip, true);
6030 lba = get_unaligned_be64(cmd + 2);
6031 boundary = get_unaligned_be16(cmd + 10);
6032 len = get_unaligned_be16(cmd + 12);
6035 if (sdebug_atomic_wr_align &&
6036 do_div(lba_tmp, sdebug_atomic_wr_align)) {
6037 /* Does not meet alignment requirement */
6038 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6039 return check_condition_result;
6042 if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
6043 /* Does not meet granularity requirement */
6044 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6045 return check_condition_result;
6049 if (boundary > sdebug_atomic_wr_max_bndry) {
6050 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6051 return check_condition_result;
6054 if (len > sdebug_atomic_wr_max_length_bndry) {
6055 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6056 return check_condition_result;
6059 if (len > sdebug_atomic_wr_max_length) {
6060 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6061 return check_condition_result;
6065 ret = do_device_access(sip, scp, 0, lba, len, 0, true, true);
6066 if (unlikely(ret == -1))
6067 return DID_ERROR << 16;
6068 if (unlikely(ret != len * sdebug_sector_size))
6069 return DID_ERROR << 16;
6073 /* Logic transplanted from tcmu-runner, file_zbc.c */
6074 static void zbc_open_all(struct sdebug_dev_info *devip)
6076 struct sdeb_zone_state *zsp = &devip->zstate[0];
6079 for (i = 0; i < devip->nr_zones; i++, zsp++) {
6080 if (zsp->z_cond == ZC4_CLOSED)
6081 zbc_open_zone(devip, &devip->zstate[i], true);
6085 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
6089 enum sdebug_z_cond zc;
6090 u8 *cmd = scp->cmnd;
6091 struct sdeb_zone_state *zsp;
6092 bool all = cmd[14] & 0x01;
6093 struct sdeb_store_info *sip = devip2sip(devip, false);
6095 if (!sdebug_dev_is_zoned(devip)) {
6096 mk_sense_invalid_opcode(scp);
6097 return check_condition_result;
6099 sdeb_meta_write_lock(sip);
6102 /* Check if all closed zones can be open */
6103 if (devip->max_open &&
6104 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
6105 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
6107 res = check_condition_result;
6110 /* Open all closed zones */
6111 zbc_open_all(devip);
6115 /* Open the specified zone */
6116 z_id = get_unaligned_be64(cmd + 2);
6117 if (z_id >= sdebug_capacity) {
6118 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6119 res = check_condition_result;
6123 zsp = zbc_zone(devip, z_id);
6124 if (z_id != zsp->z_start) {
6125 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6126 res = check_condition_result;
6129 if (zbc_zone_is_conv(zsp)) {
6130 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6131 res = check_condition_result;
6136 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
6139 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
6140 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
6142 res = check_condition_result;
6146 zbc_open_zone(devip, zsp, true);
6148 sdeb_meta_write_unlock(sip);
6152 static void zbc_close_all(struct sdebug_dev_info *devip)
6156 for (i = 0; i < devip->nr_zones; i++)
6157 zbc_close_zone(devip, &devip->zstate[i]);
6160 static int resp_close_zone(struct scsi_cmnd *scp,
6161 struct sdebug_dev_info *devip)
6165 u8 *cmd = scp->cmnd;
6166 struct sdeb_zone_state *zsp;
6167 bool all = cmd[14] & 0x01;
6168 struct sdeb_store_info *sip = devip2sip(devip, false);
6170 if (!sdebug_dev_is_zoned(devip)) {
6171 mk_sense_invalid_opcode(scp);
6172 return check_condition_result;
6175 sdeb_meta_write_lock(sip);
6178 zbc_close_all(devip);
6182 /* Close specified zone */
6183 z_id = get_unaligned_be64(cmd + 2);
6184 if (z_id >= sdebug_capacity) {
6185 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6186 res = check_condition_result;
6190 zsp = zbc_zone(devip, z_id);
6191 if (z_id != zsp->z_start) {
6192 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6193 res = check_condition_result;
6196 if (zbc_zone_is_conv(zsp)) {
6197 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6198 res = check_condition_result;
6202 zbc_close_zone(devip, zsp);
6204 sdeb_meta_write_unlock(sip);
6208 static void zbc_finish_zone(struct sdebug_dev_info *devip,
6209 struct sdeb_zone_state *zsp, bool empty)
6211 enum sdebug_z_cond zc = zsp->z_cond;
6213 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
6214 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
6215 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
6216 zbc_close_zone(devip, zsp);
6217 if (zsp->z_cond == ZC4_CLOSED)
6218 devip->nr_closed--;
6219 zsp->z_wp = zsp->z_start + zsp->z_size;
6220 zsp->z_cond = ZC5_FULL;
6224 static void zbc_finish_all(struct sdebug_dev_info *devip)
6228 for (i = 0; i < devip->nr_zones; i++)
6229 zbc_finish_zone(devip, &devip->zstate[i], false);
6232 static int resp_finish_zone(struct scsi_cmnd *scp,
6233 struct sdebug_dev_info *devip)
6235 struct sdeb_zone_state *zsp;
6238 u8 *cmd = scp->cmnd;
6239 bool all = cmd[14] & 0x01;
6240 struct sdeb_store_info *sip = devip2sip(devip, false);
6242 if (!sdebug_dev_is_zoned(devip)) {
6243 mk_sense_invalid_opcode(scp);
6244 return check_condition_result;
6247 sdeb_meta_write_lock(sip);
6250 zbc_finish_all(devip);
6254 /* Finish the specified zone */
6255 z_id = get_unaligned_be64(cmd + 2);
6256 if (z_id >= sdebug_capacity) {
6257 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6258 res = check_condition_result;
6262 zsp = zbc_zone(devip, z_id);
6263 if (z_id != zsp->z_start) {
6264 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6265 res = check_condition_result;
6268 if (zbc_zone_is_conv(zsp)) {
6269 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6270 res = check_condition_result;
6274 zbc_finish_zone(devip, zsp, true);
6276 sdeb_meta_write_unlock(sip);
6280 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
6281 struct sdeb_zone_state *zsp)
6283 enum sdebug_z_cond zc;
6284 struct sdeb_store_info *sip = devip2sip(devip, false);
6286 if (!zbc_zone_is_seq(zsp))
6290 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
6291 zbc_close_zone(devip, zsp);
6293 if (zsp->z_cond == ZC4_CLOSED)
6294 devip->nr_closed--;
6296 if (zsp->z_wp > zsp->z_start)
6297 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
6298 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
6300 zsp->z_non_seq_resource = false;
6301 zsp->z_wp = zsp->z_start;
6302 zsp->z_cond = ZC1_EMPTY;
6305 static void zbc_rwp_all(struct sdebug_dev_info *devip)
6309 for (i = 0; i < devip->nr_zones; i++)
6310 zbc_rwp_zone(devip, &devip->zstate[i]);
6313 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
6315 struct sdeb_zone_state *zsp;
6318 u8 *cmd = scp->cmnd;
6319 bool all = cmd[14] & 0x01;
6320 struct sdeb_store_info *sip = devip2sip(devip, false);
6322 if (!sdebug_dev_is_zoned(devip)) {
6323 mk_sense_invalid_opcode(scp);
6324 return check_condition_result;
6327 sdeb_meta_write_lock(sip);
6334 z_id = get_unaligned_be64(cmd + 2);
6335 if (z_id >= sdebug_capacity) {
6336 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6337 res = check_condition_result;
6341 zsp = zbc_zone(devip, z_id);
6342 if (z_id != zsp->z_start) {
6343 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6344 res = check_condition_result;
6347 if (zbc_zone_is_conv(zsp)) {
6348 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6349 res = check_condition_result;
6353 zbc_rwp_zone(devip, zsp);
6355 sdeb_meta_write_unlock(sip);
6359 static u32 get_tag(struct scsi_cmnd *cmnd)
6361 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
6364 /* Queued (deferred) command completions converge here. */
6365 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
6367 struct sdebug_scsi_cmd *sdsc = container_of(sd_dp,
6368 typeof(*sdsc), sd_dp);
6369 struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1;
6370 unsigned long flags;
6373 if (sdebug_statistics) {
6374 atomic_inc(&sdebug_completions);
6375 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
6376 atomic_inc(&sdebug_miss_cpus);
6380 pr_err("scmd=NULL\n");
6384 spin_lock_irqsave(&sdsc->lock, flags);
6385 aborted = sd_dp->aborted;
6386 if (unlikely(aborted))
6387 sd_dp->aborted = false;
6389 spin_unlock_irqrestore(&sdsc->lock, flags);
6392 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
6393 blk_abort_request(scsi_cmd_to_rq(scp));
6397 scsi_done(scp); /* callback to mid level */
6400 /* When the high-resolution timer fires, this function is called. */
6401 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
6403 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
6405 sdebug_q_cmd_complete(sd_dp);
6406 return HRTIMER_NORESTART;
6409 /* When the work queue runs the deferred work, this function is called. */
6410 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
6412 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
6414 sdebug_q_cmd_complete(sd_dp);
6417 static bool got_shared_uuid;
6418 static uuid_t shared_uuid;
6420 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
6422 struct sdeb_zone_state *zsp;
6423 sector_t capacity = get_sdebug_capacity();
6424 sector_t conv_capacity;
6425 sector_t zstart = 0;
6429 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
6430 * a zone size allowing for at least 4 zones on the device. Otherwise,
6431 * use the specified zone size checking that at least 2 zones can be
6432 * created for the device.
6434 if (!sdeb_zbc_zone_size_mb) {
6435 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
6436 >> ilog2(sdebug_sector_size);
6437 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
6438 devip->zsize >>= 1;
6439 if (devip->zsize < 2) {
6440 pr_err("Device capacity too small\n");
6444 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
6445 pr_err("Zone size is not a power of 2\n");
6448 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
6449 >> ilog2(sdebug_sector_size);
6450 if (devip->zsize >= capacity) {
6451 pr_err("Zone size too large for device capacity\n");
6456 devip->zsize_shift = ilog2(devip->zsize);
6457 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
6459 if (sdeb_zbc_zone_cap_mb == 0) {
6460 devip->zcap = devip->zsize;
6462 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
6463 ilog2(sdebug_sector_size);
6464 if (devip->zcap > devip->zsize) {
6465 pr_err("Zone capacity too large\n");
6470 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
6471 if (conv_capacity >= capacity) {
6472 pr_err("Number of conventional zones too large\n");
6475 devip->nr_conv_zones = sdeb_zbc_nr_conv;
6476 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
6478 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
6480 /* Add gap zones if zone capacity is smaller than the zone size */
6481 if (devip->zcap < devip->zsize)
6482 devip->nr_zones += devip->nr_seq_zones;
6485 /* zbc_max_open_zones can be 0, meaning "not reported" */
6486 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
6487 devip->max_open = (devip->nr_zones - 1) / 2;
6489 devip->max_open = sdeb_zbc_max_open;
6492 devip->zstate = kcalloc(devip->nr_zones,
6493 sizeof(struct sdeb_zone_state), GFP_KERNEL);
6497 for (i = 0; i < devip->nr_zones; i++) {
6498 zsp = &devip->zstate[i];
6500 zsp->z_start = zstart;
6502 if (i < devip->nr_conv_zones) {
6503 zsp->z_type = ZBC_ZTYPE_CNV;
6504 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
6505 zsp->z_wp = (sector_t)-1;
6507 min_t(u64, devip->zsize, capacity - zstart);
6508 } else if ((zstart & (devip->zsize - 1)) == 0) {
6510 zsp->z_type = ZBC_ZTYPE_SWR;
6512 zsp->z_type = ZBC_ZTYPE_SWP;
6513 zsp->z_cond = ZC1_EMPTY;
6514 zsp->z_wp = zsp->z_start;
6516 min_t(u64, devip->zcap, capacity - zstart);
6518 zsp->z_type = ZBC_ZTYPE_GAP;
6519 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
6520 zsp->z_wp = (sector_t)-1;
6521 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
6525 WARN_ON_ONCE((int)zsp->z_size <= 0);
6526 zstart += zsp->z_size;
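/*
 * Example layout (a sketch; actual numbers depend on the module
 * parameters): with zone_size_mb=4, zone_cap_mb=3, zone_nr_conv=1 and
 * 512 byte sectors, zsize=8192 and zcap=6144 blocks, so zone 0 is
 * conventional and each following pair is a 6144 block sequential zone
 * and a 2048 block gap zone, because gap zones are inserted whenever
 * zcap < zsize (see above).
 */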
6532 static struct sdebug_dev_info *sdebug_device_create(
6533 struct sdebug_host_info *sdbg_host, gfp_t flags)
6535 struct sdebug_dev_info *devip;
6537 devip = kzalloc(sizeof(*devip), flags);
6539 if (sdebug_uuid_ctl == 1)
6540 uuid_gen(&devip->lu_name);
6541 else if (sdebug_uuid_ctl == 2) {
6542 if (got_shared_uuid)
6543 devip->lu_name = shared_uuid;
6545 uuid_gen(&shared_uuid);
6546 got_shared_uuid = true;
6547 devip->lu_name = shared_uuid;
6550 devip->sdbg_host = sdbg_host;
6551 if (sdeb_zbc_in_use) {
6552 devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
6553 if (sdebug_device_create_zones(devip)) {
6558 devip->zoned = false;
6560 if (sdebug_ptype == TYPE_TAPE) {
6561 devip->tape_density = TAPE_DEF_DENSITY;
6562 devip->tape_blksize = TAPE_DEF_BLKSIZE;
6564 devip->create_ts = ktime_get_boottime();
6565 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
6566 spin_lock_init(&devip->list_lock);
6567 INIT_LIST_HEAD(&devip->inject_err_list);
6568 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
6573 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
6575 struct sdebug_host_info *sdbg_host;
6576 struct sdebug_dev_info *open_devip = NULL;
6577 struct sdebug_dev_info *devip;
6579 sdbg_host = shost_to_sdebug_host(sdev->host);
6581 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6582 if ((devip->used) && (devip->channel == sdev->channel) &&
6583 (devip->target == sdev->id) &&
6584 (devip->lun == sdev->lun))
6587 if ((!devip->used) && (!open_devip))
6591 if (!open_devip) { /* try and make a new one */
6592 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
6594 pr_err("out of memory at line %d\n", __LINE__);
6599 open_devip->channel = sdev->channel;
6600 open_devip->target = sdev->id;
6601 open_devip->lun = sdev->lun;
6602 open_devip->sdbg_host = sdbg_host;
6603 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
6604 open_devip->used = true;
6608 static int scsi_debug_sdev_init(struct scsi_device *sdp)
6611 pr_info("sdev_init <%u %u %u %llu>\n",
6612 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6617 static int scsi_debug_sdev_configure(struct scsi_device *sdp,
6618 struct queue_limits *lim)
6620 struct sdebug_dev_info *devip =
6621 (struct sdebug_dev_info *)sdp->hostdata;
6622 struct dentry *dentry;
6625 pr_info("sdev_configure <%u %u %u %llu>\n",
6626 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6627 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
6628 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
6629 if (devip == NULL) {
6630 devip = find_build_dev_info(sdp);
6632 return 1; /* no resources, will be marked offline */
6634 if (sdebug_ptype == TYPE_TAPE) {
6635 if (!devip->tape_blocks[0]) {
6636 devip->tape_blocks[0] =
6637 kcalloc(TAPE_UNITS, sizeof(struct tape_block),
6639 if (!devip->tape_blocks[0])
6642 devip->tape_pending_nbr_partitions = -1;
6643 if (partition_tape(devip, 1, TAPE_UNITS, 0) < 0) {
6644 kfree(devip->tape_blocks[0]);
6645 devip->tape_blocks[0] = NULL;
6649 sdp->hostdata = devip;
6651 sdp->no_uld_attach = 1;
6652 config_cdb_len(sdp);
6654 if (sdebug_allow_restart)
6655 sdp->allow_restart = 1;
6657 devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
6658 sdebug_debugfs_root);
6659 if (IS_ERR_OR_NULL(devip->debugfs_entry))
6660 pr_info("%s: failed to create debugfs directory for device %s\n",
6661 __func__, dev_name(&sdp->sdev_gendev));
6663 dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
6664 &sdebug_error_fops);
6665 if (IS_ERR_OR_NULL(dentry))
6666 pr_info("%s: failed to create error file for device %s\n",
6667 __func__, dev_name(&sdp->sdev_gendev));
6672 static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
6674 struct sdebug_dev_info *devip =
6675 (struct sdebug_dev_info *)sdp->hostdata;
6676 struct sdebug_err_inject *err;
6679 pr_info("sdev_destroy <%u %u %u %llu>\n",
6680 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6685 spin_lock(&devip->list_lock);
6686 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6687 list_del_rcu(&err->list);
6688 call_rcu(&err->rcu, sdebug_err_free);
6690 spin_unlock(&devip->list_lock);
6692 debugfs_remove(devip->debugfs_entry);
6694 if (sdp->type == TYPE_TAPE) {
6695 kfree(devip->tape_blocks[0]);
6696 devip->tape_blocks[0] = NULL;
6699 /* make this slot available for re-use */
6700 devip->used = false;
6701 sdp->hostdata = NULL;
6704 /* Returns true if the command was cancelled or its callback is not running. */
6705 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
6707 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6708 struct sdebug_defer *sd_dp = &sdsc->sd_dp;
6709 enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t);
6711 lockdep_assert_held(&sdsc->lock);
6713 if (defer_t == SDEB_DEFER_HRT) {
6714 int res = hrtimer_try_to_cancel(&sd_dp->hrt);
6717 case -1: /* the hrtimer callback is currently executing */
6719 case 0: /* Not active, it must have already run */
6720 case 1: /* Was active, we've now cancelled */
6724 } else if (defer_t == SDEB_DEFER_WQ) {
6725 /* Cancel if pending */
6726 if (cancel_work(&sd_dp->ew.work))
6728 /* callback may be running, so return false */
6730 } else if (defer_t == SDEB_DEFER_POLL) {
6738 * Called from scsi_debug_abort() only, which is for timed-out cmd.
6740 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
6742 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6743 unsigned long flags;
6746 spin_lock_irqsave(&sdsc->lock, flags);
6747 res = scsi_debug_stop_cmnd(cmnd);
6748 spin_unlock_irqrestore(&sdsc->lock, flags);
6754 * All we can do is set the cmnd as internally aborted and wait for it to
6755 * finish. We cannot call scsi_done() since the normal completion path may do that.
6757 static bool sdebug_stop_cmnd(struct request *rq, void *data)
6759 scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
6764 /* Deletes (stops) timers or work queues of all queued commands */
6765 static void stop_all_queued(void)
6767 struct sdebug_host_info *sdhp;
6769 mutex_lock(&sdebug_host_list_mutex);
6770 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6771 struct Scsi_Host *shost = sdhp->shost;
6773 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
6775 mutex_unlock(&sdebug_host_list_mutex);
6778 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
6780 struct scsi_device *sdp = cmnd->device;
6781 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6782 struct sdebug_err_inject *err;
6783 unsigned char *cmd = cmnd->cmnd;
6790 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6791 if (err->type == ERR_ABORT_CMD_FAILED &&
6792 (err->cmd == cmd[0] || err->cmd == 0xff)) {
6806 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
6808 bool aborted = scsi_debug_abort_cmnd(SCpnt);
6809 u8 *cmd = SCpnt->cmnd;
6814 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6815 sdev_printk(KERN_INFO, SCpnt->device,
6816 "%s: command%s found\n", __func__,
6817 aborted ? "" : " not");
6820 if (sdebug_fail_abort(SCpnt)) {
6821 scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
6826 if (!aborted)
6832 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
6834 struct scsi_device *sdp = data;
6835 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
6837 if (scmd->device == sdp)
6838 scsi_debug_abort_cmnd(scmd);
6843 /* Deletes (stops) timers or work queues of all queued commands per sdev */
6844 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
6846 struct Scsi_Host *shost = sdp->host;
6848 blk_mq_tagset_busy_iter(&shost->tag_set,
6849 scsi_debug_stop_all_queued_iter, sdp);
6852 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
6854 struct scsi_device *sdp = cmnd->device;
6855 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6856 struct sdebug_err_inject *err;
6857 unsigned char *cmd = cmnd->cmnd;
6864 list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6865 if (err->type == ERR_LUN_RESET_FAILED &&
6866 (err->cmd == cmd[0] || err->cmd == 0xff)) {
6880 static void scsi_tape_reset_clear(struct sdebug_dev_info *devip)
6884 devip->tape_blksize = TAPE_DEF_BLKSIZE;
6885 devip->tape_density = TAPE_DEF_DENSITY;
6886 devip->tape_partition = 0;
6887 devip->tape_dce = 0;
6888 for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
6889 devip->tape_location[i] = 0;
6890 devip->tape_pending_nbr_partitions = -1;
6891 /* Don't reset partitioning? */
6894 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
6896 struct scsi_device *sdp = SCpnt->device;
6897 struct sdebug_dev_info *devip = sdp->hostdata;
6898 u8 *cmd = SCpnt->cmnd;
6903 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6904 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6906 scsi_debug_stop_all_queued(sdp);
6908 set_bit(SDEBUG_UA_POR, devip->uas_bm);
6909 if (SCpnt->device->type == TYPE_TAPE)
6910 scsi_tape_reset_clear(devip);
6913 if (sdebug_fail_lun_reset(SCpnt)) {
6914 scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
6921 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
6923 struct scsi_target *starget = scsi_target(cmnd->device);
6924 struct sdebug_target_info *targetip =
6925 (struct sdebug_target_info *)starget->hostdata;
6928 return targetip->reset_fail;
6933 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
6935 struct scsi_device *sdp = SCpnt->device;
6936 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6937 struct sdebug_dev_info *devip;
6938 u8 *cmd = SCpnt->cmnd;
6942 ++num_target_resets;
6943 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6944 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6946 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6947 if (devip->target == sdp->id) {
6948 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6949 if (SCpnt->device->type == TYPE_TAPE)
6950 scsi_tape_reset_clear(devip);
6955 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6956 sdev_printk(KERN_INFO, sdp,
6957 "%s: %d device(s) found in target\n", __func__, k);
6959 if (sdebug_fail_target_reset(SCpnt)) {
6960 scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
6968 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
6970 struct scsi_device *sdp = SCpnt->device;
6971 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6972 struct sdebug_dev_info *devip;
6977 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6978 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6980 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6981 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6982 if (SCpnt->device->type == TYPE_TAPE)
6983 scsi_tape_reset_clear(devip);
6987 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6988 sdev_printk(KERN_INFO, sdp,
6989 "%s: %d device(s) found in host\n", __func__, k);
6993 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
6995 struct sdebug_host_info *sdbg_host;
6996 struct sdebug_dev_info *devip;
7000 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
7001 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
7002 mutex_lock(&sdebug_host_list_mutex);
7003 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
7004 list_for_each_entry(devip, &sdbg_host->dev_info_list,
7006 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
7007 if (SCpnt->device->type == TYPE_TAPE)
7008 scsi_tape_reset_clear(devip);
7012 mutex_unlock(&sdebug_host_list_mutex);
7014 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
7015 sdev_printk(KERN_INFO, SCpnt->device,
7016 "%s: %d device(s) found\n", __func__, k);
7020 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
7022 struct msdos_partition *pp;
7023 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
7024 int sectors_per_part, num_sectors, k;
7025 int heads_by_sects, start_sec, end_sec;
7027 /* assume partition table already zeroed */
7028 if ((sdebug_num_parts < 1) || (store_size < 1048576))
7030 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
7031 sdebug_num_parts = SDEBUG_MAX_PARTS;
7032 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
7034 num_sectors = (int)get_sdebug_capacity();
7035 sectors_per_part = (num_sectors - sdebug_sectors_per)
7037 heads_by_sects = sdebug_heads * sdebug_sectors_per;
7038 starts[0] = sdebug_sectors_per;
7039 max_part_secs = sectors_per_part;
7040 for (k = 1; k < sdebug_num_parts; ++k) {
7041 starts[k] = ((k * sectors_per_part) / heads_by_sects)
7043 if (starts[k] - starts[k - 1] < max_part_secs)
7044 max_part_secs = starts[k] - starts[k - 1];
7046 starts[sdebug_num_parts] = num_sectors;
7047 starts[sdebug_num_parts + 1] = 0;
7049 ramp[510] = 0x55; /* magic partition markings */
7050 ramp[511] = 0xAA;
7051 pp = (struct msdos_partition *)(ramp + 0x1be);
7052 for (k = 0; starts[k + 1]; ++k, ++pp) {
7053 start_sec = starts[k];
7054 end_sec = starts[k] + max_part_secs - 1;
7057 pp->cyl = start_sec / heads_by_sects;
7058 pp->head = (start_sec - (pp->cyl * heads_by_sects))
7059 / sdebug_sectors_per;
7060 pp->sector = (start_sec % sdebug_sectors_per) + 1;
7062 pp->end_cyl = end_sec / heads_by_sects;
7063 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
7064 / sdebug_sectors_per;
7065 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
7067 pp->start_sect = cpu_to_le32(start_sec);
7068 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
7069 pp->sys_ind = 0x83; /* plain Linux partition */
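/*
 * CHS sanity check for the divisions above (a sketch with illustrative
 * geometry): with sdebug_heads=64 and sdebug_sectors_per=32,
 * heads_by_sects=2048, so start_sec=4095 maps to cyl=1, head=63,
 * sector=32.
 */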
7073 static void block_unblock_all_queues(bool block)
7075 struct sdebug_host_info *sdhp;
7077 lockdep_assert_held(&sdebug_host_list_mutex);
7079 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7080 struct Scsi_Host *shost = sdhp->shost;
7083 scsi_block_requests(shost);
7085 scsi_unblock_requests(shost);
7089 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
7090 * commands will be processed normally before triggers occur.
7092 static void tweak_cmnd_count(void)
7096 modulo = abs(sdebug_every_nth);
7100 mutex_lock(&sdebug_host_list_mutex);
7101 block_unblock_all_queues(true);
7102 count = atomic_read(&sdebug_cmnd_count);
7103 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
7104 block_unblock_all_queues(false);
7105 mutex_unlock(&sdebug_host_list_mutex);
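/*
 * e.g. with every_nth=100 and sdebug_cmnd_count at 437, the count is
 * rounded down to 400 above, so 99 more commands are processed normally
 * before the next every_nth trigger fires.
 */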
7108 static void clear_queue_stats(void)
7110 atomic_set(&sdebug_cmnd_count, 0);
7111 atomic_set(&sdebug_completions, 0);
7112 atomic_set(&sdebug_miss_cpus, 0);
7113 atomic_set(&sdebug_a_tsf, 0);
7116 static bool inject_on_this_cmd(void)
7118 if (sdebug_every_nth == 0)
7120 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
7123 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
7125 /* Complete the processing of the thread that queued a SCSI command to this
7126 * driver. It either completes the command by calling cmnd_done() or
7127 * schedules a hr timer or work queue then returns 0. Returns
7128 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
7130 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
7132 int (*pfp)(struct scsi_cmnd *,
7133 struct sdebug_dev_info *),
7134 int delta_jiff, int ndelay)
7136 struct request *rq = scsi_cmd_to_rq(cmnd);
7137 bool polled = rq->cmd_flags & REQ_POLLED;
7138 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
7139 unsigned long flags;
7140 u64 ns_from_boot = 0;
7141 struct scsi_device *sdp;
7142 struct sdebug_defer *sd_dp;
7144 if (unlikely(devip == NULL)) {
7145 if (scsi_result == 0)
7146 scsi_result = DID_NO_CONNECT << 16;
7147 goto respond_in_thread;
7151 if (delta_jiff == 0)
7152 goto respond_in_thread;
7155 if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
7156 (scsi_result == 0))) {
7157 int num_in_q = scsi_device_busy(sdp);
7158 int qdepth = cmnd->device->queue_depth;
7160 if ((num_in_q == qdepth) &&
7161 (atomic_inc_return(&sdebug_a_tsf) >=
7162 abs(sdebug_every_nth))) {
7163 atomic_set(&sdebug_a_tsf, 0);
7164 scsi_result = device_qfull_result;
7166 if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
7167 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
7168 __func__, num_in_q);
7172 sd_dp = &sdsc->sd_dp;
7174 if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
7175 ns_from_boot = ktime_get_boottime_ns();
7177 /* one of the resp_*() response functions is called here */
7178 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
7179 if (cmnd->result & SDEG_RES_IMMED_MASK) {
7180 cmnd->result &= ~SDEG_RES_IMMED_MASK;
7181 delta_jiff = ndelay = 0;
7183 if (cmnd->result == 0 && scsi_result != 0)
7184 cmnd->result = scsi_result;
7185 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
7186 if (atomic_read(&sdeb_inject_pending)) {
7187 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
7188 atomic_set(&sdeb_inject_pending, 0);
7189 cmnd->result = check_condition_result;
7193 if (unlikely(sdebug_verbose && cmnd->result))
7194 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
7195 __func__, cmnd->result);
7197 if (delta_jiff > 0 || ndelay > 0) {
7200 if (delta_jiff > 0) {
7201 u64 ns = jiffies_to_nsecs(delta_jiff);
7203 if (sdebug_random && ns < U32_MAX) {
7204 ns = get_random_u32_below((u32)ns);
7205 } else if (sdebug_random) {
7206 ns >>= 12; /* scale to 4 usec precision */
7207 if (ns < U32_MAX) /* over 4 hours max */
7208 ns = get_random_u32_below((u32)ns);
7211 kt = ns_to_ktime(ns);
7212 } else { /* ndelay has a 4.2 second max */
7213 kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
7215 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
7216 u64 d = ktime_get_boottime_ns() - ns_from_boot;
7218 if (kt <= d) { /* elapsed duration >= kt */
7219 /* call scsi_done() from this thread */
7223 /* otherwise reduce kt by elapsed time */
7227 if (sdebug_statistics)
7228 sd_dp->issuing_cpu = raw_smp_processor_id();
7230 spin_lock_irqsave(&sdsc->lock, flags);
7231 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
7232 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
7233 spin_unlock_irqrestore(&sdsc->lock, flags);
7235 /* schedule the invocation of scsi_done() for a later time */
7236 spin_lock_irqsave(&sdsc->lock, flags);
7237 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
7238 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
7240 * The completion handler will try to grab sdsc->lock,
7241 * so there is no chance that the completion handler
7242 * will call scsi_done() until we release the lock
7243 * here (so ok to keep referencing sdsc).
7245 spin_unlock_irqrestore(&sdsc->lock, flags);
7247 } else { /* jdelay < 0, use work queue */
7248 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
7249 atomic_read(&sdeb_inject_pending))) {
7250 sd_dp->aborted = true;
7251 atomic_set(&sdeb_inject_pending, 0);
7252 sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
7253 blk_mq_unique_tag_to_tag(get_tag(cmnd)));
7256 if (sdebug_statistics)
7257 sd_dp->issuing_cpu = raw_smp_processor_id();
7259 spin_lock_irqsave(&sdsc->lock, flags);
7260 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
7261 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
7262 spin_unlock_irqrestore(&sdsc->lock, flags);
7264 spin_lock_irqsave(&sdsc->lock, flags);
7265 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
7266 schedule_work(&sd_dp->ew.work);
7267 spin_unlock_irqrestore(&sdsc->lock, flags);
7273 respond_in_thread: /* call back to mid-layer using invocation thread */
7274 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
7275 cmnd->result &= ~SDEG_RES_IMMED_MASK;
7276 if (cmnd->result == 0 && scsi_result != 0)
7277 cmnd->result = scsi_result;
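/*
 * Summary of the dispatch above: delay==0 responds in the submitting
 * thread; delta_jiff>0 (jiffies) or ndelay>0 (nanoseconds) arms a
 * pinned hrtimer; jdelay<0 defers completion to the work queue; and
 * REQ_POLLED requests are marked SDEB_DEFER_POLL so that the mq poll
 * path completes them instead.
 */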
7282 /* Note: The following macros create attribute files in the
7283 /sys/module/scsi_debug/parameters directory. Unfortunately this
7284 driver is not notified of a change and so cannot trigger auxiliary actions
7285 as it can when the corresponding attribute in the
7286 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
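/* For example (paths as named in the note above; values illustrative),
 * 'echo 1 > /sys/module/scsi_debug/parameters/opts' changes sdebug_opts
 * only, while 'echo 1 > /sys/bus/pseudo/drivers/scsi_debug/opts' runs
 * opts_store() below, which also refreshes sdebug_verbose and
 * sdebug_any_injecting_opt.
 */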
7288 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
7289 module_param_named(ato, sdebug_ato, int, S_IRUGO);
7290 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
7291 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
7292 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
7293 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
7294 module_param_named(dif, sdebug_dif, int, S_IRUGO);
7295 module_param_named(dix, sdebug_dix, int, S_IRUGO);
7296 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
7297 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
7298 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
7299 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
7300 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
7301 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
7302 module_param_string(inq_product, sdebug_inq_product_id,
7303 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
7304 module_param_string(inq_rev, sdebug_inq_product_rev,
7305 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
7306 module_param_string(inq_vendor, sdebug_inq_vendor_id,
7307 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
7308 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
7309 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
7310 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
7311 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
7312 module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
7313 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
7314 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
7315 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
7316 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
7317 module_param_named(medium_error_count, sdebug_medium_error_count, int,
7319 module_param_named(medium_error_start, sdebug_medium_error_start, int,
7321 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
7322 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
7323 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
7324 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
7325 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
7326 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
7327 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
7328 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
7329 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
7330 module_param_named(per_host_store, sdebug_per_host_store, bool,
7332 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
7333 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
7334 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
7335 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
7336 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
7337 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
7338 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
7339 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
7340 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
7341 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
7342 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
7343 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
7344 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
7345 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
7346 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
7347 module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
7348 module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
7349 module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
7350 module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
7351 module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
7352 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
7353 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
7354 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
7356 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
7357 module_param_named(write_same_length, sdebug_write_same_length, int,
7359 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
7360 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
7361 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
7362 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
7363 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
7364 module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
7366 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
7367 MODULE_DESCRIPTION("SCSI debug adapter driver");
7368 MODULE_LICENSE("GPL");
7369 MODULE_VERSION(SDEBUG_VERSION);
7371 MODULE_PARM_DESC(add_host, "add n hosts; in sysfs a negative value removes host(s) (def=1)");
7372 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
7373 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
7374 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
7375 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
7376 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
7377 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
7378 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
7379 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
7380 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
7381 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
7382 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
7383 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
7384 MODULE_PARM_DESC(host_max_queue,
7385 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
7386 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
7387 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
7388 SDEBUG_VERSION "\")");
7389 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
7390 MODULE_PARM_DESC(lbprz,
7391 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
7392 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
7393 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
7394 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
7395 MODULE_PARM_DESC(atomic_wr, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)");
7396 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
7397 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1->flat address method");
7398 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
7399 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
7400 MODULE_PARM_DESC(medium_error_count, "count of consecutive sectors on which to return MEDIUM error");
7401 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
7402 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
7403 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
7404 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
7405 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
7406 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
7407 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
7408 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
7409 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
7410 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
7411 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
7412 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
7413 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
7414 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
7415 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
7416 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
7417 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
7418 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
7419 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
7420 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
7421 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
7422 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
7423 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
7424 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
7425 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
7426 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
7427 MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)");
7428 MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)");
7429 MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)");
7430 MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)");
7431 MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)");
7432 MODULE_PARM_DESC(uuid_ctl,
7433 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
7434 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
7435 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
7436 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
7437 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
7438 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
7439 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
7440 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
7441 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
7442 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
7443 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
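/*
 * A typical invocation (illustrative values only):
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 sector_size=4096
 * simulates two targets per host, each with four LUNs backed by a shared
 * 256 MiB ram store using 4096 byte logical blocks.
 */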
7445 #define SDEBUG_INFO_LEN 256
7446 static char sdebug_info[SDEBUG_INFO_LEN];
7448 static const char *scsi_debug_info(struct Scsi_Host *shp)
7452 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
7453 my_name, SDEBUG_VERSION, sdebug_version_date);
7454 if (k >= (SDEBUG_INFO_LEN - 1))
7456 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
7457 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
7458 sdebug_dev_size_mb, sdebug_opts, submit_queues,
7459 "statistics", (int)sdebug_statistics);
7463 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
7464 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
7469 int minLen = length > 15 ? 15 : length;
7471 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
7473 memcpy(arr, buffer, minLen);
7475 if (1 != sscanf(arr, "%d", &opts))
7478 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7479 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7480 if (sdebug_every_nth != 0)
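/* e.g. 'echo 2 > /proc/scsi/scsi_debug/0' (host id 0 assumed) enables the
 * medium error option; note that the sscanf("%d") above accepts decimal
 * values only.
 */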
7485 struct sdebug_submit_queue_data {
7491 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
7493 struct sdebug_submit_queue_data *data = opaque;
7494 u32 unique_tag = blk_mq_unique_tag(rq);
7495 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7496 u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
7497 int queue_num = data->queue_num;
7499 if (hwq != queue_num)
7502 /* Rely on iter'ing in ascending tag order */
7503 if (*data->first == -1)
7504 *data->first = *data->last = tag;
7511 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
7512 * same for each scsi_debug host (if more than one). Some of the counters
7513 * shown are not atomic, so they may be inaccurate on a busy system. */
7514 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
7516 struct sdebug_host_info *sdhp;
7519 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
7520 SDEBUG_VERSION, sdebug_version_date);
7521 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
7522 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
7523 sdebug_opts, sdebug_every_nth);
7524 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
7525 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
7526 sdebug_sector_size, "bytes");
7527 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
7528 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
7530 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
7531 num_dev_resets, num_target_resets, num_bus_resets,
7533 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
7534 dix_reads, dix_writes, dif_errors);
7535 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
7537 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
7538 atomic_read(&sdebug_cmnd_count),
7539 atomic_read(&sdebug_completions),
7540 "miss_cpus", atomic_read(&sdebug_miss_cpus),
7541 atomic_read(&sdebug_a_tsf),
7542 atomic_read(&sdeb_mq_poll_count));
7544 seq_printf(m, "submit_queues=%d\n", submit_queues);
7545 for (j = 0; j < submit_queues; ++j) {
7547 struct sdebug_submit_queue_data data = {
7552 seq_printf(m, " queue %d:\n", j);
7553 blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
7556 seq_printf(m, " BUSY: %s: %d,%d\n",
7557 "first,last bits", f, l);
7561 seq_printf(m, "this host_no=%d\n", host->host_no);
7562 if (!xa_empty(per_store_ap)) {
7565 unsigned long l_idx;
7566 struct sdeb_store_info *sip;
7568 seq_puts(m, "\nhost list:\n");
7570 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7572 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
7573 sdhp->shost->host_no, idx);
7576 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
7577 sdeb_most_recent_idx);
7579 xa_for_each(per_store_ap, l_idx, sip) {
7580 niu = xa_get_mark(per_store_ap, l_idx,
7581 SDEB_XA_NOT_IN_USE);
7583 seq_printf(m, " %d: idx=%d%s\n", j, idx,
7584 (niu ? " not_in_use" : ""));
7591 static ssize_t delay_show(struct device_driver *ddp, char *buf)
7593 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
7595 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
7596 * of delay is jiffies.
7598 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
7603 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
7605 if (sdebug_jdelay != jdelay) {
7606 struct sdebug_host_info *sdhp;
7608 mutex_lock(&sdebug_host_list_mutex);
7609 block_unblock_all_queues(true);
7611 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7612 struct Scsi_Host *shost = sdhp->shost;
7614 if (scsi_host_busy(shost)) {
7615 res = -EBUSY; /* queued commands */
7620 sdebug_jdelay = jdelay;
7623 block_unblock_all_queues(false);
7624 mutex_unlock(&sdebug_host_list_mutex);
7630 static DRIVER_ATTR_RW(delay);
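/* e.g. 'echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay' makes
 * responses immediate; the store above fails with -EBUSY while any
 * sdebug host still has queued commands.
 */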
7632 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
7634 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
7636 /* Returns -EBUSY if ndelay is being changed and commands are queued */
7637 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
7638 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
7643 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
7644 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
7646 if (sdebug_ndelay != ndelay) {
7647 struct sdebug_host_info *sdhp;
7649 mutex_lock(&sdebug_host_list_mutex);
7650 block_unblock_all_queues(true);
7652 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7653 struct Scsi_Host *shost = sdhp->shost;
7655 if (scsi_host_busy(shost)) {
7656 res = -EBUSY; /* queued commands */
7662 sdebug_ndelay = ndelay;
7663 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
7666 block_unblock_all_queues(false);
7667 mutex_unlock(&sdebug_host_list_mutex);
7673 static DRIVER_ATTR_RW(ndelay);
7675 static ssize_t opts_show(struct device_driver *ddp, char *buf)
7677 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
7680 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
7686 if (sscanf(buf, "%10s", work) == 1) {
7687 if (strncasecmp(work, "0x", 2) == 0) {
7688 if (kstrtoint(work + 2, 16, &opts) == 0)
7691 if (kstrtoint(work, 10, &opts) == 0)
7698 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7699 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7703 static DRIVER_ATTR_RW(opts);
7705 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
7707 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
7709 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
7714 /* Cannot change from or to TYPE_ZBC with sysfs */
7715 if (sdebug_ptype == TYPE_ZBC)
7718 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7726 static DRIVER_ATTR_RW(ptype);
7728 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
7730 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
7732 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
7737 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7743 static DRIVER_ATTR_RW(dsense);
7745 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
7747 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
7749 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
7754 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7755 bool want_store = (n == 0);
7756 struct sdebug_host_info *sdhp;
7759 sdebug_fake_rw = (sdebug_fake_rw > 0);
7760 if (sdebug_fake_rw == n)
7761 return count; /* not transitioning so do nothing */
7763 if (want_store) { /* 1 --> 0 transition, set up store */
7764 if (sdeb_first_idx < 0) {
7765 idx = sdebug_add_store();
7769 idx = sdeb_first_idx;
7770 xa_clear_mark(per_store_ap, idx,
7771 SDEB_XA_NOT_IN_USE);
7773 /* make all hosts use same store */
7774 list_for_each_entry(sdhp, &sdebug_host_list,
7776 if (sdhp->si_idx != idx) {
7777 xa_set_mark(per_store_ap, sdhp->si_idx,
7778 SDEB_XA_NOT_IN_USE);
7782 sdeb_most_recent_idx = idx;
7783 } else { /* 0 --> 1 transition is trigger for shrink */
7784 sdebug_erase_all_stores(true /* apart from first */);
7791 static DRIVER_ATTR_RW(fake_rw);
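/* e.g. after 'modprobe scsi_debug fake_rw=1', writing 0 to the fake_rw
 * attribute triggers the 1 --> 0 transition above: a store is allocated
 * (or the first one re-used) and every host is pointed at it.
 */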
7793 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
7795 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
7797 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
7802 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7803 sdebug_no_lun_0 = n;
7808 static DRIVER_ATTR_RW(no_lun_0);
7810 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
7812 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
7814 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
7819 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7820 sdebug_num_tgts = n;
7821 sdebug_max_tgts_luns();
7826 static DRIVER_ATTR_RW(num_tgts);
7828 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
7830 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
7832 static DRIVER_ATTR_RO(dev_size_mb);
7834 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
7836 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
7839 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
7844 if (kstrtobool(buf, &v))
7847 sdebug_per_host_store = v;
7850 static DRIVER_ATTR_RW(per_host_store);
7852 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
7854 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
7856 static DRIVER_ATTR_RO(num_parts);
7858 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
7860 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
7862 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
7868 if (sscanf(buf, "%10s", work) == 1) {
7869 if (strncasecmp(work, "0x", 2) == 0) {
7870 if (kstrtoint(work + 2, 16, &nth) == 0)
7871 goto every_nth_done;
7873 if (kstrtoint(work, 10, &nth) == 0)
7874 goto every_nth_done;
7880 sdebug_every_nth = nth;
7881 if (nth && !sdebug_statistics) {
7882 pr_info("every_nth needs statistics=1, set it\n");
7883 sdebug_statistics = true;
7888 static DRIVER_ATTR_RW(every_nth);
7890 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
7892 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
7894 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
7900 if (kstrtoint(buf, 0, &n))
7903 if (n > (int)SAM_LUN_AM_FLAT) {
7904 pr_warn("only LUN address methods 0 and 1 are supported\n");
7907 changed = ((int)sdebug_lun_am != n);
7909 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
7910 struct sdebug_host_info *sdhp;
7911 struct sdebug_dev_info *dp;
7913 mutex_lock(&sdebug_host_list_mutex);
7914 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7915 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
7916 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
7919 mutex_unlock(&sdebug_host_list_mutex);
7925 static DRIVER_ATTR_RW(lun_format);
7927 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
7929 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
7931 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
7937 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7939 pr_warn("max_luns can be no more than 256\n");
7942 changed = (sdebug_max_luns != n);
7943 sdebug_max_luns = n;
7944 sdebug_max_tgts_luns();
7945 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
7946 struct sdebug_host_info *sdhp;
7947 struct sdebug_dev_info *dp;
7949 mutex_lock(&sdebug_host_list_mutex);
7950 list_for_each_entry(sdhp, &sdebug_host_list,
7952 list_for_each_entry(dp, &sdhp->dev_info_list,
7954 set_bit(SDEBUG_UA_LUNS_CHANGED,
7958 mutex_unlock(&sdebug_host_list_mutex);
7964 static DRIVER_ATTR_RW(max_luns);
7966 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
7968 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
7970 /* N.B. max_queue can be changed while there are queued commands. In-flight
7971 * commands beyond the new max_queue will still be completed. */
7972 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
7977 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
7978 (n <= SDEBUG_CANQUEUE) &&
7979 (sdebug_host_max_queue == 0)) {
7980 mutex_lock(&sdebug_host_list_mutex);
7982 /* We may only change sdebug_max_queue when we have no shosts */
7983 if (list_empty(&sdebug_host_list))
7984 sdebug_max_queue = n;
7987 mutex_unlock(&sdebug_host_list_mutex);
7992 static DRIVER_ATTR_RW(max_queue);
7994 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
7996 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
7999 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
8001 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
8004 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
8008 if (kstrtobool(buf, &v))
8011 sdebug_no_rwlock = v;
8014 static DRIVER_ATTR_RW(no_rwlock);
8017 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
8018 * in range [0, sdebug_host_max_queue), we can't change it.
8020 static DRIVER_ATTR_RO(host_max_queue);
8022 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
8024 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
8026 static DRIVER_ATTR_RO(no_uld);
8028 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
8030 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
8032 static DRIVER_ATTR_RO(scsi_level);
8034 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
8036 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
8038 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
8044 /* Ignore capacity change for ZBC drives for now */
8045 if (sdeb_zbc_in_use)
8048 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8049 changed = (sdebug_virtual_gb != n);
8050 sdebug_virtual_gb = n;
8051 sdebug_capacity = get_sdebug_capacity();
8053 struct sdebug_host_info *sdhp;
8054 struct sdebug_dev_info *dp;
8056 mutex_lock(&sdebug_host_list_mutex);
8057 list_for_each_entry(sdhp, &sdebug_host_list,
8059 list_for_each_entry(dp, &sdhp->dev_info_list,
8061 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
8065 mutex_unlock(&sdebug_host_list_mutex);
8071 static DRIVER_ATTR_RW(virtual_gb);
8073 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
8075 /* shows the absolute number of hosts currently active */
8076 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
8079 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
8084 struct sdeb_store_info *sip;
8085 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
8088 if (sscanf(buf, "%d", &delta_hosts) != 1)
8090 if (delta_hosts > 0) {
8094 xa_for_each_marked(per_store_ap, idx, sip,
8095 SDEB_XA_NOT_IN_USE) {
8096 sdeb_most_recent_idx = (int)idx;
8100 if (found) /* re-use case */
8101 sdebug_add_host_helper((int)idx);
8103 sdebug_do_add_host(true);
8105 sdebug_do_add_host(false);
8107 } while (--delta_hosts);
8108 } else if (delta_hosts < 0) {
8110 sdebug_do_remove_host(false);
8111 } while (++delta_hosts);
8115 static DRIVER_ATTR_RW(add_host);
8117 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
8119 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
8121 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
8126 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8127 sdebug_vpd_use_hostno = n;
8132 static DRIVER_ATTR_RW(vpd_use_hostno);
8134 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
8136 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
8138 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
8143 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
8145 sdebug_statistics = true;
8147 clear_queue_stats();
8148 sdebug_statistics = false;
8154 static DRIVER_ATTR_RW(statistics);
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);

static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';

	return count;
}
static DRIVER_ATTR_RO(map);
static ssize_t random_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
}

static ssize_t random_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_random = v;
	return count;
}
static DRIVER_ATTR_RW(random);

static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
}
static ssize_t removable_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_removable = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(removable);

static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_host_lock = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(host_lock);

static ssize_t strict_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
}
static ssize_t strict_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_strict = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(strict);

static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);

static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
}

static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int ret, n;

	ret = kstrtoint(buf, 0, &n);
	if (ret)
		return ret;
	sdebug_cdb_len = n;
	all_config_cdb_len();
	return count;
}
static DRIVER_ATTR_RW(cdb_len);
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
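
/* Map a zbc= parameter string to a BLK_ZONED_* value using the alias
 * tables above; returns a negative errno if nothing matches. */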
static int sdeb_zbc_model_str(const char *cp)
{
	int res = sysfs_match_string(zbc_model_strs_a, cp);

	if (res < 0) {
		res = sysfs_match_string(zbc_model_strs_b, cp);
		if (res < 0) {
			res = sysfs_match_string(zbc_model_strs_c, cp);
			if (res < 0)
				res = -EINVAL;
		}
	}
	return res;
}
static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);

static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);
static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
{
	char *p = buf, *end = buf + PAGE_SIZE;
	int i;

	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
		p += scnprintf(p, end - p, "%d %ld\n", i,
			       atomic_long_read(&writes_by_group_number[i]));

	return p - buf;
}

static ssize_t group_number_stats_store(struct device_driver *ddp,
					const char *buf, size_t count)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
		atomic_long_set(&writes_by_group_number[i], 0);

	return count;
}
static DRIVER_ATTR_RW(group_number_stats);
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	&driver_attr_group_number_stats.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
static struct device *pseudo_primary;
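
/*
 * Module initialization: validate the module parameters, size the
 * ramdisk store(s) and fake geometry, register the pseudo bus and
 * driver, then add the initially requested host(s).
 */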
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;
	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0)
			return k;
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			return -EINVAL;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			return -EINVAL;
		}
	}

	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0)
			return idx;
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
		pr_info("%s: failed to create initial debugfs directory\n", __func__);

	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
	return ret;
}
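
/* Module unload: tear everything down in the reverse order of scsi_debug_init() */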
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	for (; k; k--)
		sdebug_do_remove_host(true);
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
	debugfs_remove(sdebug_debugfs_root);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
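
/* Release callback of the adapter pseudo device; frees the host info */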
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = dev_to_sdebug_host(dev);
	kfree(sdbg_host);
}
/* idx must be valid, if sip is NULL then it will be obtained using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}
/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
/*
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_data_lck);
	rwlock_init(&sip->macc_meta_lck);
	rwlock_init(&sip->macc_sector_lck);
	return (int)n_idx;
err:
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}
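
/*
 * Allocate a new sdebug_host_info, bind it to the store selected by
 * per_host_idx (the first store if negative), create its target
 * devices and register it as "adapter<N>" on the pseudo bus.
 */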
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	mutex_lock(&sdebug_host_list_mutex);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		mutex_lock(&sdebug_host_list_mutex);
		list_del(&sdbg_host->host_list);
		mutex_unlock(&sdebug_host_list_mutex);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}
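
/* Add one host, optionally backed by a freshly allocated store */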
static int sdebug_do_add_host(bool mk_new_store)
{
	int ph_idx = sdeb_most_recent_idx;

	if (mk_new_store) {
		ph_idx = sdebug_add_store();
		if (ph_idx < 0)
			return ph_idx;
	}
	return sdebug_add_host_helper(ph_idx);
}
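
/*
 * Remove the most recently added host. Unless called at module unload
 * (the_end), a store left without any user is only marked
 * SDEB_XA_NOT_IN_USE so a later add_host can re-use it.
 */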
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	mutex_lock(&sdebug_host_list_mutex);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
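
/* Called by the mid level to change a device's queue depth */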
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	struct sdebug_dev_info *devip = sdev->hostdata;

	if (!devip)
		return -ENODEV;

	mutex_lock(&sdebug_host_list_mutex);
	block_unblock_all_queues(true);

	if (qdepth > SDEBUG_CANQUEUE) {
		/* report the requested value before it is trimmed */
		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n",
			__func__, qdepth, SDEBUG_CANQUEUE);
		qdepth = SDEBUG_CANQUEUE;
	}
	if (qdepth < 1)
		qdepth = 1;
	if (qdepth != sdev->queue_depth)
		scsi_change_queue_depth(sdev, qdepth);

	block_unblock_all_queues(false);
	mutex_unlock(&sdebug_host_list_mutex);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);

	return sdev->queue_depth;
}
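
/* Returns true when, per the every_nth setting, this command should be
 * silently dropped to simulate a (medium access) timeout. */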
static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true; /* time out reads and writes */
	}
	return false;
}
/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
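
/* Distribute the hardware queues between the default and poll map types */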
static void sdebug_map_queues(struct Scsi_Host *shost)
{
	int i, qoff;

	if (shost->nr_hw_queues == 1)
		return;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = submit_queues - poll_queues;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = poll_queues;

		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		map->queue_offset = qoff;
		blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}
}
struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;
	int *num_entries;
};

/*
 * We don't handle aborted commands here, but it does not seem possible to have
 * aborted polled commands from schedule_resp()
 */
static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
{
	struct sdebug_blk_mq_poll_data *data = opaque;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
	struct sdebug_defer *sd_dp;
	u32 unique_tag = blk_mq_unique_tag(rq);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	unsigned long flags;
	int queue_num = data->queue_num;
	ktime_t time;

	/* We're only interested in one queue for this iteration */
	if (hwq != queue_num)
		return true;

	/* Subsequent checks would fail if this failed, but check anyway */
	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
		return true;

	time = ktime_get_boottime();

	spin_lock_irqsave(&sdsc->lock, flags);
	sd_dp = &sdsc->sd_dp;
	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	if (time < sd_dp->cmpl_ts) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}
	spin_unlock_irqrestore(&sdsc->lock, flags);

	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	scsi_done(cmd); /* callback to mid level */
	(*data->num_entries)++;
	return true;
}
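
/* mq_poll entry point: complete all deferred-poll commands on this
 * hardware queue whose completion time has been reached. */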
static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	int num_entries = 0;
	struct sdebug_blk_mq_poll_data data = {
		.queue_num = queue_num,
		.num_entries = &num_entries,
	};

	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
				&data);

	if (num_entries > 0)
		atomic_add(num_entries, &sdeb_mq_poll_count);
	return num_entries;
}
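
/*
 * The next three helpers act on the per-device error injection list
 * (devip->inject_err_list): simulated command timeouts, queuecommand
 * return-value failures, and failures completed with sense data.
 */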
static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_TMOUT_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			ret = !!err->cnt;
			if (err->cnt < 0)
				err->cnt++;

			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	return 0;
}

static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_FAIL_QUEUE_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			ret = err->cnt ? err->queuecmd_ret : 0;
			if (err->cnt < 0)
				err->cnt++;

			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	return 0;
}

static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
			   struct sdebug_err_inject *info)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;
	int result;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_FAIL_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			if (!err->cnt) {
				rcu_read_unlock();
				return 0;
			}

			ret = !!err->cnt;
			rcu_read_unlock();
			goto out_handle;
		}
	}
	rcu_read_unlock();

	return 0;

out_handle:
	if (err->cnt < 0)
		err->cnt++;
	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
	*info = *err;
	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);

	return ret;
}
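
/*
 * queuecommand entry point: decode the CDB via the opcode tables, apply
 * unit attention, not-ready and error-injection processing, then hand
 * the command to schedule_resp() with the chosen resp_* handler and delay.
 */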
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	u32 devsel = sdebug_get_devsel(scp->device);
	bool has_wlun_rl;
	bool inject_now;
	int ret = 0;
	struct sdebug_err_inject err;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}

	if (sdebug_timeout_cmd(scp)) {
		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
		return 0;
	}

	ret = sdebug_fail_queue_cmd(scp);
	if (ret) {
		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
			    opcode, ret);
		return ret;
	}

	if (sdebug_fail_cmd(scp, &ret, &err)) {
		scmd_printk(KERN_INFO, scp,
			"fail command 0x%x with hostbyte=0x%x, "
			"driverbyte=0x%x, statusbyte=0x%x, "
			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
			opcode, err.host_byte, err.driver_byte,
			err.status_byte, err.sense_key, err.asc, err.asq);
		return ret;
	}

	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa &&
				    (devsel & oip->devsel) != 0)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode &&
				    (devsel & oip->devsel) != 0)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
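
/* Per-command init: set up the lock, timer and work item used to defer completions */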
static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
	struct sdebug_defer *sd_dp = &sdsc->sd_dp;

	spin_lock_init(&sdsc->lock);
	hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL_PINNED);
	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);

	return 0;
}
static const struct scsi_host_template sdebug_driver_template = {
	.show_info = scsi_debug_show_info,
	.write_info = scsi_debug_write_info,
	.proc_name = sdebug_proc_name,
	.name = "SCSI DEBUG",
	.info = scsi_debug_info,
	.sdev_init = scsi_debug_sdev_init,
	.sdev_configure = scsi_debug_sdev_configure,
	.sdev_destroy = scsi_debug_sdev_destroy,
	.ioctl = scsi_debug_ioctl,
	.queuecommand = scsi_debug_queuecommand,
	.change_queue_depth = sdebug_change_qdepth,
	.map_queues = sdebug_map_queues,
	.mq_poll = sdebug_blk_mq_poll,
	.eh_abort_handler = scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue = SDEBUG_CANQUEUE,
	.this_id = 7,
	.sg_tablesize = SG_MAX_SEGMENTS,
	.cmd_per_lun = DEF_CMD_PER_LUN,
	.max_sectors = -1U,
	.max_segment_size = -1U,
	.module = THIS_MODULE,
	.skip_settle_delay = 1,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct sdebug_scsi_cmd),
	.init_cmd_priv = sdebug_init_cmd_priv,
	.target_alloc = sdebug_target_alloc,
	.target_destroy = sdebug_target_destroy,
};
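
/*
 * Bus probe: allocate and configure a Scsi_Host for this pseudo adapter
 * (queue counts, DIF/DIX protection, guard type), then scan it.
 */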
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = dev_to_sdebug_host(dev);

	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		return -ENODEV;
	}
	hpnt->can_queue = sdebug_max_queue;
	hpnt->cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		hpnt->dma_boundary = PAGE_SIZE - 1;

	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O queue
	 * left over for non-polled I/O.
	 * If condition not met, trim poll_queues to 1 (just for simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
	if (poll_queues)
		hpnt->nr_maps = 3;

	sdbg_host->shost = hpnt;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
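
/* Bus remove: unregister the Scsi_Host and free the per-device resources */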
static void sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = dev_to_sdebug_host(dev);

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
}

static const struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};