aef33d1e346ab9ebf914effc7f94af22cada99ba
[linux-2.6-block.git] / drivers / scsi / scsi_debug.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 #include <linux/debugfs.h>
45 #include <linux/async.h>
46 #include <linux/cleanup.h>
47
48 #include <net/checksum.h>
49
50 #include <linux/unaligned.h>
51
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsicam.h>
57 #include <scsi/scsi_eh.h>
58 #include <scsi/scsi_tcq.h>
59 #include <scsi/scsi_dbg.h>
60
61 #include "sd.h"
62 #include "scsi_logging.h"
63
64 /* make sure inq_product_rev string corresponds to this version */
65 #define SDEBUG_VERSION "0191"   /* format to fit INQUIRY revision field */
66 static const char *sdebug_version_date = "20210520";
67
68 #define MY_NAME "scsi_debug"
69
70 /* Additional Sense Code (ASC) */
71 #define NO_ADDITIONAL_SENSE 0x0
72 #define OVERLAP_ATOMIC_COMMAND_ASC 0x0
73 #define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
74 #define FILEMARK_DETECTED_ASCQ 0x1
75 #define EOP_EOM_DETECTED_ASCQ 0x2
76 #define BEGINNING_OF_P_M_DETECTED_ASCQ 0x4
77 #define EOD_DETECTED_ASCQ 0x5
78 #define LOGICAL_UNIT_NOT_READY 0x4
79 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
80 #define UNRECOVERED_READ_ERR 0x11
81 #define PARAMETER_LIST_LENGTH_ERR 0x1a
82 #define INVALID_OPCODE 0x20
83 #define LBA_OUT_OF_RANGE 0x21
84 #define INVALID_FIELD_IN_CDB 0x24
85 #define INVALID_FIELD_IN_PARAM_LIST 0x26
86 #define WRITE_PROTECTED 0x27
87 #define UA_READY_ASC 0x28
88 #define UA_RESET_ASC 0x29
89 #define UA_CHANGED_ASC 0x2a
90 #define TOO_MANY_IN_PARTITION_ASC 0x3b
91 #define TARGET_CHANGED_ASC 0x3f
92 #define LUNS_CHANGED_ASCQ 0x0e
93 #define INSUFF_RES_ASC 0x55
94 #define INSUFF_RES_ASCQ 0x3
95 #define POWER_ON_RESET_ASCQ 0x0
96 #define POWER_ON_OCCURRED_ASCQ 0x1
97 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
98 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
99 #define CAPACITY_CHANGED_ASCQ 0x9
100 #define SAVING_PARAMS_UNSUP 0x39
101 #define TRANSPORT_PROBLEM 0x4b
102 #define THRESHOLD_EXCEEDED 0x5d
103 #define LOW_POWER_COND_ON 0x5e
104 #define MISCOMPARE_VERIFY_ASC 0x1d
105 #define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
106 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
107 #define WRITE_ERROR_ASC 0xc
108 #define UNALIGNED_WRITE_ASCQ 0x4
109 #define WRITE_BOUNDARY_ASCQ 0x5
110 #define READ_INVDATA_ASCQ 0x6
111 #define READ_BOUNDARY_ASCQ 0x7
112 #define ATTEMPT_ACCESS_GAP 0x9
113 #define INSUFF_ZONE_ASCQ 0xe
114 /* see drivers/scsi/sense_codes.h */
115
116 /* Additional Sense Code Qualifier (ASCQ) */
117 #define ACK_NAK_TO 0x3
118
119 /* Default values for driver parameters */
120 #define DEF_NUM_HOST   1
121 #define DEF_NUM_TGTS   1
122 #define DEF_MAX_LUNS   1
123 /* With these defaults, this driver will make 1 host with 1 target
124  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
125  */
126 #define DEF_ATO 1
127 #define DEF_CDB_LEN 10
128 #define DEF_JDELAY   1          /* if > 0 unit is a jiffy */
129 #define DEF_DEV_SIZE_PRE_INIT   0
130 #define DEF_DEV_SIZE_MB   8
131 #define DEF_ZBC_DEV_SIZE_MB   128
132 #define DEF_DIF 0
133 #define DEF_DIX 0
134 #define DEF_PER_HOST_STORE false
135 #define DEF_D_SENSE   0
136 #define DEF_EVERY_NTH   0
137 #define DEF_FAKE_RW     0
138 #define DEF_GUARD 0
139 #define DEF_HOST_LOCK 0
140 #define DEF_LBPU 0
141 #define DEF_LBPWS 0
142 #define DEF_LBPWS10 0
143 #define DEF_LBPRZ 1
144 #define DEF_LOWEST_ALIGNED 0
145 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
146 #define DEF_NO_LUN_0   0
147 #define DEF_NUM_PARTS   0
148 #define DEF_OPTS   0
149 #define DEF_OPT_BLKS 1024
150 #define DEF_PHYSBLK_EXP 0
151 #define DEF_OPT_XFERLEN_EXP 0
152 #define DEF_PTYPE   TYPE_DISK
153 #define DEF_RANDOM false
154 #define DEF_REMOVABLE false
155 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
156 #define DEF_SECTOR_SIZE 512
157 #define DEF_UNMAP_ALIGNMENT 0
158 #define DEF_UNMAP_GRANULARITY 1
159 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
160 #define DEF_UNMAP_MAX_DESC 256
161 #define DEF_VIRTUAL_GB   0
162 #define DEF_VPD_USE_HOSTNO 1
163 #define DEF_WRITESAME_LENGTH 0xFFFF
164 #define DEF_ATOMIC_WR 0
165 #define DEF_ATOMIC_WR_MAX_LENGTH 128
166 #define DEF_ATOMIC_WR_ALIGN 2
167 #define DEF_ATOMIC_WR_GRAN 2
168 #define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
169 #define DEF_ATOMIC_WR_MAX_BNDRY 128
170 #define DEF_STRICT 0
171 #define DEF_STATISTICS false
172 #define DEF_SUBMIT_QUEUES 1
173 #define DEF_TUR_MS_TO_READY 0
174 #define DEF_UUID_CTL 0
175 #define JDELAY_OVERRIDDEN -9999
176
177 /* Default parameters for ZBC drives */
178 #define DEF_ZBC_ZONE_SIZE_MB    128
179 #define DEF_ZBC_MAX_OPEN_ZONES  8
180 #define DEF_ZBC_NR_CONV_ZONES   1
181
182 /* Default parameters for tape drives */
183 #define TAPE_DEF_DENSITY  0x0
184 #define TAPE_BAD_DENSITY  0x65
185 #define TAPE_DEF_BLKSIZE  0
186 #define TAPE_MIN_BLKSIZE  512
187 #define TAPE_MAX_BLKSIZE  1048576
188 #define TAPE_EW 20
189 #define TAPE_MAX_PARTITIONS 2
190 #define TAPE_UNITS 10000
191 #define TAPE_PARTITION_1_UNITS 1000
192
/*
 * The tape block data definitions.  The top two bits of a block's
 * fl_size word carry the filemark/EOD flags; the low 30 bits carry the
 * block size.  All macro arguments are parenthesized so that callers
 * may pass compound expressions (e.g. `x | y`) safely.
 */
#define TAPE_BLOCK_FM_FLAG   ((u32)0x1 << 30)
#define TAPE_BLOCK_EOD_FLAG  ((u32)0x2 << 30)
#define TAPE_BLOCK_MARK_MASK ((u32)0x3 << 30)
#define TAPE_BLOCK_SIZE_MASK (~TAPE_BLOCK_MARK_MASK)
#define TAPE_BLOCK_MARK(a) ((a) & TAPE_BLOCK_MARK_MASK)
#define TAPE_BLOCK_SIZE(a) ((a) & TAPE_BLOCK_SIZE_MASK)
#define IS_TAPE_BLOCK_FM(a)   (((a) & TAPE_BLOCK_FM_FLAG) != 0)
#define IS_TAPE_BLOCK_EOD(a)  (((a) & TAPE_BLOCK_EOD_FLAG) != 0)
202
/*
 * One block in the simulated tape store.  fl_size encodes the
 * filemark/EOD flags in its top two bits and the block size in the low
 * 30 bits (see the TAPE_BLOCK_* macros above).
 */
struct tape_block {
	u32 fl_size;		/* TAPE_BLOCK_MARK() bits + 30-bit size */
	unsigned char data[4];	/* block data; NOTE(review): declared as 4
				 * bytes here — confirm whether callers
				 * allocate additional trailing space */
};
207
208 /* Flags for sense data */
209 #define SENSE_FLAG_FILEMARK  0x80
210 #define SENSE_FLAG_EOM 0x40
211 #define SENSE_FLAG_ILI 0x20
212
213 #define SDEBUG_LUN_0_VAL 0
214
215 /* bit mask values for sdebug_opts */
216 #define SDEBUG_OPT_NOISE                1
217 #define SDEBUG_OPT_MEDIUM_ERR           2
218 #define SDEBUG_OPT_TIMEOUT              4
219 #define SDEBUG_OPT_RECOVERED_ERR        8
220 #define SDEBUG_OPT_TRANSPORT_ERR        16
221 #define SDEBUG_OPT_DIF_ERR              32
222 #define SDEBUG_OPT_DIX_ERR              64
223 #define SDEBUG_OPT_MAC_TIMEOUT          128
224 #define SDEBUG_OPT_SHORT_TRANSFER       0x100
225 #define SDEBUG_OPT_Q_NOISE              0x200
226 #define SDEBUG_OPT_ALL_TSF              0x400   /* ignore */
227 #define SDEBUG_OPT_RARE_TSF             0x800
228 #define SDEBUG_OPT_N_WCE                0x1000
229 #define SDEBUG_OPT_RESET_NOISE          0x2000
230 #define SDEBUG_OPT_NO_CDB_NOISE         0x4000
231 #define SDEBUG_OPT_HOST_BUSY            0x8000
232 #define SDEBUG_OPT_CMD_ABORT            0x10000
233 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
234                               SDEBUG_OPT_RESET_NOISE)
235 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
236                                   SDEBUG_OPT_TRANSPORT_ERR | \
237                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
238                                   SDEBUG_OPT_SHORT_TRANSFER | \
239                                   SDEBUG_OPT_HOST_BUSY | \
240                                   SDEBUG_OPT_CMD_ABORT)
241 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
242                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
243
244 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
245  * priority order. In the subset implemented here lower numbers have higher
246  * priority. The UA numbers should be a sequence starting from 0 with
247  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
248 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
249 #define SDEBUG_UA_POOCCUR 1     /* Power on occurred */
250 #define SDEBUG_UA_BUS_RESET 2
251 #define SDEBUG_UA_MODE_CHANGED 3
252 #define SDEBUG_UA_CAPACITY_CHANGED 4
253 #define SDEBUG_UA_LUNS_CHANGED 5
254 #define SDEBUG_UA_MICROCODE_CHANGED 6   /* simulate firmware change */
255 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
256 #define SDEBUG_UA_NOT_READY_TO_READY 8
257 #define SDEBUG_NUM_UAS 9
258
259 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
260  * sector on read commands: */
261 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
262 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
263
264 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
265  * (for response) per submit queue at one time. Can be reduced by max_queue
266  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
267  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
268  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
269  * but cannot exceed SDEBUG_CANQUEUE .
270  */
271 #define SDEBUG_CANQUEUE_WORDS  3        /* a WORD is bits in a long */
272 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
273 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
274
275 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
276 #define F_D_IN                  1       /* Data-in command (e.g. READ) */
277 #define F_D_OUT                 2       /* Data-out command (e.g. WRITE) */
278 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
279 #define F_D_UNKN                8
280 #define F_RL_WLUN_OK            0x10    /* allowed with REPORT LUNS W-LUN */
281 #define F_SKIP_UA               0x20    /* bypass UAs (e.g. INQUIRY command) */
282 #define F_DELAY_OVERR           0x40    /* for commands like INQUIRY */
283 #define F_SA_LOW                0x80    /* SA is in cdb byte 1, bits 4 to 0 */
284 #define F_SA_HIGH               0x100   /* SA is in cdb bytes 8 and 9 */
285 #define F_INV_OP                0x200   /* invalid opcode (not supported) */
286 #define F_FAKE_RW               0x400   /* bypass resp_*() when fake_rw set */
287 #define F_M_ACCESS              0x800   /* media access, reacts to SSU state */
288 #define F_SSU_DELAY             0x1000  /* SSU command delay (long-ish) */
289 #define F_SYNC_DELAY            0x2000  /* SYNCHRONIZE CACHE delay */
290
291 /* Useful combinations of the above flags */
292 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
293 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
294 #define FF_SA (F_SA_HIGH | F_SA_LOW)
295 #define F_LONG_DELAY            (F_SSU_DELAY | F_SYNC_DELAY)
296
297 /* Device selection bit mask */
298 #define DS_ALL     0xffffffff
299 #define DS_SBC     (1 << TYPE_DISK)
300 #define DS_SSC     (1 << TYPE_TAPE)
301 #define DS_ZBC     (1 << TYPE_ZBC)
302
303 #define DS_NO_SSC  (DS_ALL & ~DS_SSC)
304
305 #define SDEBUG_MAX_PARTS 4
306
307 #define SDEBUG_MAX_CMD_LEN 32
308
309 #define SDEB_XA_NOT_IN_USE XA_MARK_1
310
311 /* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,	/* conventional */
	ZBC_ZTYPE_SWR	= 0x2,	/* sequential write required */
	ZBC_ZTYPE_SWP	= 0x3,	/* sequential write preferred */
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,	/* gap zone (not readable/writable) */
};
319
320 /* enumeration names taken from table 26, zbcr05 */
/* Numeric values are the ZBC zone-condition codes (zbcr05 table 26) */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,	/* conventional/gap zones */
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};
331
struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;	/* "non-sequential write resources active" attribute */
	unsigned int z_size;		/* zone size */
	sector_t z_start;		/* first sector of the zone */
	sector_t z_wp;			/* zone write pointer */
};
340
/* Kinds of error injection selectable per device (see inject_err_list) */
enum sdebug_err_type {
	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
					/* queuecmd return failed */
	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
					/* queuecmd return succeed but */
					/* with errors set in scsi_cmnd */
	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
					/* scsi_debug_abort() */
	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
					/* scsi_debug_device_reset() */
};
353
/*
 * One error-injection rule attached to a device (kept on
 * sdebug_dev_info.inject_err_list; rcu head suggests RCU-deferred
 * freeing — confirm at the list-manipulation sites).
 */
struct sdebug_err_inject {
	int type;			/* enum sdebug_err_type */
	struct list_head list;		/* on sdebug_dev_info.inject_err_list */
	int cnt;			/* trigger count; NOTE(review): decrement/expiry policy not visible here */
	unsigned char cmd;		/* SCSI opcode the rule applies to */
	struct rcu_head rcu;

	union {
		/*
		 * For ERR_FAIL_QUEUE_CMD
		 */
		int queuecmd_ret;

		/*
		 * For ERR_FAIL_CMD
		 */
		struct {
			unsigned char host_byte;
			unsigned char driver_byte;
			unsigned char status_byte;
			unsigned char sense_key;
			unsigned char asc;
			unsigned char asq;
		};
	};
};
380
/*
 * Per logical-unit state for one simulated device.  Instances hang off
 * their host's dev_info_list and carry the generic, ZBC (zoned block)
 * and tape (SSC) state.
 */
struct sdebug_dev_info {
	struct list_head dev_list;	/* on sdbg_host->dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;			/* logical unit name UUID */
	struct sdebug_host_info *sdbg_host;	/* owning pseudo host */
	unsigned long uas_bm[1];	/* pending unit attentions; bit numbers are SDEBUG_UA_* */
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	bool zoned;
	unsigned int zcap;		/* zone capacity */
	unsigned int zsize;		/* zone size */
	unsigned int zsize_shift;	/* presumably log2(zsize) — confirm at init */
	unsigned int nr_zones;
	unsigned int nr_conv_zones;	/* conventional zones */
	unsigned int nr_seq_zones;	/* sequential-write zones */
	unsigned int nr_imp_open;	/* implicitly open zones */
	unsigned int nr_exp_open;	/* explicitly open zones */
	unsigned int nr_closed;
	unsigned int max_open;		/* limit on concurrently open zones */
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;	/* per-zone state array */

	/* For tapes */
	unsigned int tape_blksize;
	unsigned int tape_density;
	unsigned char tape_partition;	/* currently selected partition */
	unsigned char tape_nbr_partitions;
	unsigned char tape_pending_nbr_partitions;
	unsigned int tape_pending_part_0_size;
	unsigned int tape_pending_part_1_size;
	unsigned char tape_dce;
	unsigned int tape_location[TAPE_MAX_PARTITIONS];	/* per-partition position */
	unsigned int tape_eop[TAPE_MAX_PARTITIONS];		/* per-partition end-of-partition */
	struct tape_block *tape_blocks[TAPE_MAX_PARTITIONS];	/* per-partition block store */

	struct dentry *debugfs_entry;
	struct spinlock list_lock;	/* protects inject_err_list */
	struct list_head inject_err_list;	/* of struct sdebug_err_inject */
};
424
/* Per-target state (debugfs-driven fault injection) */
struct sdebug_target_info {
	bool reset_fail;	/* when set, simulate target reset failure — confirm against reset handler */
	struct dentry *debugfs_entry;
};
429
/* One simulated SCSI host plus the devices attached to it */
struct sdebug_host_info {
	struct list_head host_list;	/* on the driver-global host list */
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;	/* embedded; see dev_to_sdebug_host() */
	struct list_head dev_info_list;	/* of struct sdebug_dev_info */
};
437
438 /* There is an xarray of pointers to this struct's objects, one per host */
/* Backing RAM store; created/destroyed via sdebug_add_store() and
 * sdebug_erase_store() (declared further below). */
struct sdeb_store_info {
	rwlock_t macc_data_lck;	/* for media data access on this store */
	rwlock_t macc_meta_lck;	/* for atomic media meta access on this store */
	rwlock_t macc_sector_lck;	/* per-sector media data access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};
447
448 #define dev_to_sdebug_host(d)   \
449         container_of(d, struct sdebug_host_info, dev)
450
451 #define shost_to_sdebug_host(shost)     \
452         dev_to_sdebug_host(shost->dma_dev)
453
/* How a command's completion is deferred: not at all, via hrtimer,
 * via workqueue, or left for a poll pass (cf. sdeb_mq_poll_count). */
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
456
/* Deferred-completion bookkeeping, embedded in struct sdebug_scsi_cmd */
struct sdebug_defer {
	struct hrtimer hrt;	/* used for SDEB_DEFER_HRT deferrals */
	struct execute_work ew;	/* used for SDEB_DEFER_WQ deferrals */
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int issuing_cpu;	/* cpu the command was submitted on (cf. sdebug_miss_cpus) */
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};
465
/* Per-command private state — presumably the scsi_cmnd driver payload;
 * confirm at the queuecommand/completion sites. */
struct sdebug_scsi_cmd {
	spinlock_t   lock;		/* protects sd_dp */
	struct sdebug_defer sd_dp;	/* deferred completion state */
};
470
471 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
472 static atomic_t sdebug_completions;  /* count of deferred completions */
473 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
474 static atomic_t sdebug_a_tsf;        /* 'almost task set full' counter */
475 static atomic_t sdeb_inject_pending;
476 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
477
/*
 * Descriptor for one supported cdb: how to validate it, which device
 * types accept it (devsel) and which resp_*() handler services it.
 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 devsel;		/* device type mask for this definition */
	u32 flags;		/* OR-ed set of the F_* flags defined above */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);	/* response handler */
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
490
491 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* NOTE(review): ordering is expected to track opcode_info_arr[] (see the
 * overflow-array comment further below) — do not renumber casually. */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_ATOMIC_WRITE_16 = 32,
	SDEB_I_READ_BLOCK_LIMITS = 33,
	SDEB_I_LOCATE = 34,
	SDEB_I_WRITE_FILEMARKS = 35,
	SDEB_I_SPACE = 36,
	SDEB_I_FORMAT_MEDIUM = 37,
	SDEB_I_ERASE = 38,
	SDEB_I_LAST_ELEM_P1 = 39,	/* keep this last (previous + 1) */
};
534
535
/*
 * Indexed by cdb[0] (the SCSI opcode byte); yields the SDEB_I_* index.
 * 0 (SDEB_I_INVALID_OPCODE) marks opcodes this driver does not decode.
 */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    SDEB_I_FORMAT_MEDIUM, SDEB_I_READ_BLOCK_LIMITS, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0,
	    SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE,
	0, SDEB_I_ERASE, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, SDEB_I_LOCATE, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0,
	SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
581
582 /*
583  * The following "response" functions return the SCSI mid-level's 4 byte
584  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
585  * command completion, they can mask their return value with
586  * SDEG_RES_IMMED_MASK .
587  */
588 #define SDEG_RES_IMMED_MASK 0x40000000
589
590 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
591 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
592 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
593 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
594 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
595 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
596 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
597 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
598 static int resp_read_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
599 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
600 static int resp_write_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
601 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
602 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
603 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
604 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
605 static int resp_get_stream_status(struct scsi_cmnd *scp,
606                                   struct sdebug_dev_info *devip);
607 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
608 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
609 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
610 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
611 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
612 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
613 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
614 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
615 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
616 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
617 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
618 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
619 static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
620 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
621 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
622 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
623 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
624 static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *);
625 static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *);
626 static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *);
627 static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *);
628 static int resp_read_position(struct scsi_cmnd *, struct sdebug_dev_info *);
629 static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *);
630 static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *);
631 static int resp_erase(struct scsi_cmnd *, struct sdebug_dev_info *);
632
633 static int sdebug_do_add_host(bool mk_new_store);
634 static int sdebug_add_host_helper(int per_host_idx);
635 static void sdebug_do_remove_host(bool the_end);
636 static int sdebug_add_store(void);
637 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
638 static void sdebug_erase_all_stores(bool apart_from_first);
639
640 /*
641  * The following are overflow arrays for cdbs that "hit" the same index in
642  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
643  * should be placed in opcode_info_arr[], the others should be placed here.
644  */
645 static const struct opcode_info_t msense_iarr[] = {
646         {0, 0x1a, 0, DS_ALL, F_D_IN, NULL, NULL,
647             {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
648 };
649
650 static const struct opcode_info_t mselect_iarr[] = {
651         {0, 0x15, 0, DS_ALL, F_D_OUT, NULL, NULL,
652             {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
653 };
654
655 static const struct opcode_info_t read_iarr[] = {
656         {0, 0x28, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
657             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
658              0, 0, 0, 0} },
659         {0, 0x8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) disk */
660             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
661         {0, 0x8, 0, DS_SSC, F_D_IN | FF_MEDIA_IO, resp_read_tape, NULL, /* READ(6) tape */
662             {6,  0x03, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
663         {0, 0xa8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
664             {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
665              0xc7, 0, 0, 0, 0} },
666 };
667
668 static const struct opcode_info_t write_iarr[] = {
669         {0, 0x2a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
670             NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
671                    0, 0, 0, 0, 0, 0} },
672         {0, 0xa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) disk */
673             NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
674                    0, 0, 0} },
675         {0, 0xa, 0, DS_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_tape, /* WRITE(6) tape */
676             NULL, {6,  0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
677                    0, 0, 0} },
678         {0, 0xaa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
679             NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
680                    0xbf, 0xc7, 0, 0, 0, 0} },
681 };
682
683 static const struct opcode_info_t verify_iarr[] = {
684         {0, 0x2f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
685             NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
686                    0, 0, 0, 0, 0, 0} },
687 };
688
689 static const struct opcode_info_t sa_in_16_iarr[] = {
690         {0, 0x9e, 0x12, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
691             {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
692              0xff, 0xff, 0xff, 0, 0xc7} },      /* GET LBA STATUS(16) */
693         {0, 0x9e, 0x16, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
694             {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
695              0, 0} },   /* GET STREAM STATUS */
696 };
697
698 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
699         {0, 0x7f, 0xb, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
700             NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
701                    0, 0xff, 0xff, 0xff, 0xff} },        /* WRITE(32) */
702         {0, 0x7f, 0x11, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
703             NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
704                    0, 0xff, 0xff, 0x0, 0x0} },  /* WRITE SCATTERED(32) */
705 };
706
/* Service-action variants attached to the MAINTENANCE IN entry below. */
static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
715
/* CDB variant attached to the WRITE SAME(10) entry in opcode_info_arr. */
static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};
721
/* CDB variant attached to the RESERVE(10) entry in opcode_info_arr. */
static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
726
/* CDB variant attached to the RELEASE(10) entry in opcode_info_arr. */
static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
731
/* CDB variant attached to the SYNCHRONIZE CACHE(10) entry below. */
static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};
737
/* Variants attached to the PRE-FETCH(10) entry (0x34 doubles as tape
 * READ POSITION, hence the DS_SSC entry). */
static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
	{0, 0x34, 0, DS_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_read_position, NULL,
	    {10,  0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },				/* READ POSITION (10) */
};
746
/* Service-action variants attached to the ZONE OUT(16) / OPEN ZONE entry. */
static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};
758
/* Service-action variant attached to the ZONE IN(16) / REPORT ZONES entry.
 * NOTE(review): response function is NULL for this SA (0x6) — presumably
 * handled generically or rejected; confirm against the dispatch code. */
static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, DS_NO_SSC, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
764
765
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. Entries whose first member is non-zero
 * (an ARRAY_SIZE) link to one of the *_iarr arrays above containing CDB
 * variants sharing the same SDEB_I_* slot; the sentinel uses 0xff there. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, DS_ALL, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, DS_ALL, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, DS_ALL, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, DS_NO_SSC, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, DS_NO_SSC, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, DS_ALL, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, DS_NO_SSC, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, DS_NO_SSC, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, DS_ALL, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0, DS_NO_SSC,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, DS_NO_SSC, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, DS_ALL, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, DS_ALL, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, DS_ALL, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, DS_SSC, 0, resp_rewind, NULL,
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, DS_NO_SSC, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, DS_ALL, F_D_OUT, NULL, NULL,      /* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, DS_NO_SSC, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
						/* READ POSITION (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* 32 */
	{0, 0x9c, 0x0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
	    resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
		{16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
	{0, 0x05, 0, DS_SSC, F_D_IN, resp_read_blklimits, NULL,    /* READ BLOCK LIMITS (6) */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x2b, 0, DS_SSC, F_D_UNKN, resp_locate, NULL,	   /* LOCATE (10) */
	    {10,  0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x10, 0, DS_SSC, F_D_IN, resp_write_filemarks, NULL,   /* WRITE FILEMARKS (6) */
	    {6,  0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x11, 0, DS_SSC, F_D_IN, resp_space, NULL,	  /* SPACE (6) */
	    {6,  0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x4, 0, DS_SSC, 0, resp_format_medium, NULL,  /* FORMAT MEDIUM (6) */
	    {6,  0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x19, 0, DS_SSC, F_D_IN, resp_erase, NULL,	  /* ERASE (6) */
	    {6,  0x03, 0x33, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 39 */
/* sentinel */
	{0xff, 0, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
899
/* Driver-wide tunables; initialised from their DEF_* defaults (defined
 * earlier in this file, outside this chunk). */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
/* Logical block provisioning knobs (see scsi_debug_lbp() below). */
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
/* Atomic-write capability parameters (see scsi_debug_atomic_write()). */
static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR;
static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH;
static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN;
static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN;
static unsigned int sdebug_atomic_wr_max_length_bndry =
			DEF_ATOMIC_WR_MAX_LENGTH_BNDRY;
static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
/* Zoned block device model: none, host-aware (HA) or host-managed (HM). */
static enum {
	BLK_ZONED_NONE	= 0,
	BLK_ZONED_HA	= 1,
	BLK_ZONED_HM	= 2,
} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
970
/* SAM-5 LUN addressing methods selectable via the lun_am parameter. */
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

/* All simulated hosts, protected by the mutex below. */
static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

/* Per-store (backing memory) bookkeeping; stores live in this xarray. */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

/* Event/statistics counters. */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/

/* Per-group write counters, indexed by the CDB GROUP NUMBER field. */
static atomic_long_t writes_by_group_number[64];
1020
1021 static const struct bus_type pseudo_lld_bus;
1022
1023 static struct device_driver sdebug_driverfs_driver = {
1024         .name           = sdebug_proc_name,
1025         .bus            = &pseudo_lld_bus,
1026 };
1027
1028 static const int check_condition_result =
1029         SAM_STAT_CHECK_CONDITION;
1030
1031 static const int illegal_condition_result =
1032         (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
1033
1034 static const int device_qfull_result =
1035         (DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
1036
1037 static const int condition_met_result = SAM_STAT_CONDITION_MET;
1038
1039 static struct dentry *sdebug_debugfs_root;
1040 static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);
1041
1042 static u32 sdebug_get_devsel(struct scsi_device *sdp)
1043 {
1044         unsigned char devtype = sdp->type;
1045         u32 devsel;
1046
1047         if (devtype < 32)
1048                 devsel = (1 << devtype);
1049         else
1050                 devsel = DS_ALL;
1051
1052         return devsel;
1053 }
1054
1055 static void sdebug_err_free(struct rcu_head *head)
1056 {
1057         struct sdebug_err_inject *inject =
1058                 container_of(head, typeof(*inject), rcu);
1059
1060         kfree(inject);
1061 }
1062
1063 static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
1064 {
1065         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
1066         struct sdebug_err_inject *err;
1067
1068         spin_lock(&devip->list_lock);
1069         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
1070                 if (err->type == new->type && err->cmd == new->cmd) {
1071                         list_del_rcu(&err->list);
1072                         call_rcu(&err->rcu, sdebug_err_free);
1073                 }
1074         }
1075
1076         list_add_tail_rcu(&new->list, &devip->inject_err_list);
1077         spin_unlock(&devip->list_lock);
1078 }
1079
1080 static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
1081 {
1082         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
1083         struct sdebug_err_inject *err;
1084         int type;
1085         unsigned char cmd;
1086
1087         if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
1088                 kfree(buf);
1089                 return -EINVAL;
1090         }
1091
1092         spin_lock(&devip->list_lock);
1093         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
1094                 if (err->type == type && err->cmd == cmd) {
1095                         list_del_rcu(&err->list);
1096                         call_rcu(&err->rcu, sdebug_err_free);
1097                         spin_unlock(&devip->list_lock);
1098                         kfree(buf);
1099                         return count;
1100                 }
1101         }
1102         spin_unlock(&devip->list_lock);
1103
1104         kfree(buf);
1105         return -EINVAL;
1106 }
1107
1108 static int sdebug_error_show(struct seq_file *m, void *p)
1109 {
1110         struct scsi_device *sdev = (struct scsi_device *)m->private;
1111         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
1112         struct sdebug_err_inject *err;
1113
1114         seq_puts(m, "Type\tCount\tCommand\n");
1115
1116         rcu_read_lock();
1117         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
1118                 switch (err->type) {
1119                 case ERR_TMOUT_CMD:
1120                 case ERR_ABORT_CMD_FAILED:
1121                 case ERR_LUN_RESET_FAILED:
1122                         seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
1123                                 err->cmd);
1124                 break;
1125
1126                 case ERR_FAIL_QUEUE_CMD:
1127                         seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
1128                                 err->cnt, err->cmd, err->queuecmd_ret);
1129                 break;
1130
1131                 case ERR_FAIL_CMD:
1132                         seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1133                                 err->type, err->cnt, err->cmd,
1134                                 err->host_byte, err->driver_byte,
1135                                 err->status_byte, err->sense_key,
1136                                 err->asc, err->asq);
1137                 break;
1138                 }
1139         }
1140         rcu_read_unlock();
1141
1142         return 0;
1143 }
1144
1145 static int sdebug_error_open(struct inode *inode, struct file *file)
1146 {
1147         return single_open(file, sdebug_error_show, inode->i_private);
1148 }
1149
1150 static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
1151                 size_t count, loff_t *ppos)
1152 {
1153         char *buf;
1154         unsigned int inject_type;
1155         struct sdebug_err_inject *inject;
1156         struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
1157
1158         buf = kzalloc(count + 1, GFP_KERNEL);
1159         if (!buf)
1160                 return -ENOMEM;
1161
1162         if (copy_from_user(buf, ubuf, count)) {
1163                 kfree(buf);
1164                 return -EFAULT;
1165         }
1166
1167         if (buf[0] == '-')
1168                 return sdebug_err_remove(sdev, buf, count);
1169
1170         if (sscanf(buf, "%d", &inject_type) != 1) {
1171                 kfree(buf);
1172                 return -EINVAL;
1173         }
1174
1175         inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
1176         if (!inject) {
1177                 kfree(buf);
1178                 return -ENOMEM;
1179         }
1180
1181         switch (inject_type) {
1182         case ERR_TMOUT_CMD:
1183         case ERR_ABORT_CMD_FAILED:
1184         case ERR_LUN_RESET_FAILED:
1185                 if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
1186                            &inject->cmd) != 3)
1187                         goto out_error;
1188         break;
1189
1190         case ERR_FAIL_QUEUE_CMD:
1191                 if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
1192                            &inject->cmd, &inject->queuecmd_ret) != 4)
1193                         goto out_error;
1194         break;
1195
1196         case ERR_FAIL_CMD:
1197                 if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
1198                            &inject->type, &inject->cnt, &inject->cmd,
1199                            &inject->host_byte, &inject->driver_byte,
1200                            &inject->status_byte, &inject->sense_key,
1201                            &inject->asc, &inject->asq) != 9)
1202                         goto out_error;
1203         break;
1204
1205         default:
1206                 goto out_error;
1207         break;
1208         }
1209
1210         kfree(buf);
1211         sdebug_err_add(sdev, inject);
1212
1213         return count;
1214
1215 out_error:
1216         kfree(buf);
1217         kfree(inject);
1218         return -EINVAL;
1219 }
1220
/* debugfs file_operations for the per-device "error" injection file. */
static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};
1227
1228 static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
1229 {
1230         struct scsi_target *starget = (struct scsi_target *)m->private;
1231         struct sdebug_target_info *targetip =
1232                 (struct sdebug_target_info *)starget->hostdata;
1233
1234         if (targetip)
1235                 seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
1236
1237         return 0;
1238 }
1239
1240 static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
1241 {
1242         return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
1243 }
1244
1245 static ssize_t sdebug_target_reset_fail_write(struct file *file,
1246                 const char __user *ubuf, size_t count, loff_t *ppos)
1247 {
1248         int ret;
1249         struct scsi_target *starget =
1250                 (struct scsi_target *)file->f_inode->i_private;
1251         struct sdebug_target_info *targetip =
1252                 (struct sdebug_target_info *)starget->hostdata;
1253
1254         if (targetip) {
1255                 ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
1256                 return ret < 0 ? ret : count;
1257         }
1258         return -ENODEV;
1259 }
1260
/* debugfs file_operations for the per-target "fail_reset" control file. */
static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};
1267
1268 static int sdebug_target_alloc(struct scsi_target *starget)
1269 {
1270         struct sdebug_target_info *targetip;
1271
1272         targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
1273         if (!targetip)
1274                 return -ENOMEM;
1275
1276         async_synchronize_full_domain(&sdebug_async_domain);
1277
1278         targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
1279                                 sdebug_debugfs_root);
1280
1281         debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
1282                                 &sdebug_target_reset_fail_fops);
1283
1284         starget->hostdata = targetip;
1285
1286         return 0;
1287 }
1288
1289 static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
1290 {
1291         struct sdebug_target_info *targetip = data;
1292
1293         debugfs_remove(targetip->debugfs_entry);
1294         kfree(targetip);
1295 }
1296
1297 static void sdebug_target_destroy(struct scsi_target *starget)
1298 {
1299         struct sdebug_target_info *targetip;
1300
1301         targetip = (struct sdebug_target_info *)starget->hostdata;
1302         if (targetip) {
1303                 starget->hostdata = NULL;
1304                 async_schedule_domain(sdebug_tartget_cleanup_async, targetip,
1305                                 &sdebug_async_domain);
1306         }
1307 }
1308
1309 /* Only do the extra work involved in logical block provisioning if one or
1310  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
1311  * real reads and writes (i.e. not skipping them for speed).
1312  */
1313 static inline bool scsi_debug_lbp(void)
1314 {
1315         return 0 == sdebug_fake_rw &&
1316                 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
1317 }
1318
1319 static inline bool scsi_debug_atomic_write(void)
1320 {
1321         return sdebug_fake_rw == 0 && sdebug_atomic_wr;
1322 }
1323
1324 static void *lba2fake_store(struct sdeb_store_info *sip,
1325                             unsigned long long lba)
1326 {
1327         struct sdeb_store_info *lsip = sip;
1328
1329         lba = do_div(lba, sdebug_store_sectors);
1330         if (!sip || !sip->storep) {
1331                 WARN_ON_ONCE(true);
1332                 lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
1333         }
1334         return lsip->storep + lba * sdebug_sector_size;
1335 }
1336
1337 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
1338                                       sector_t sector)
1339 {
1340         sector = sector_div(sector, sdebug_store_sectors);
1341
1342         return sip->dif_storep + sector;
1343 }
1344
1345 static void sdebug_max_tgts_luns(void)
1346 {
1347         struct sdebug_host_info *sdbg_host;
1348         struct Scsi_Host *hpnt;
1349
1350         mutex_lock(&sdebug_host_list_mutex);
1351         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1352                 hpnt = sdbg_host->shost;
1353                 if ((hpnt->this_id >= 0) &&
1354                     (sdebug_num_tgts > hpnt->this_id))
1355                         hpnt->max_id = sdebug_num_tgts + 1;
1356                 else
1357                         hpnt->max_id = sdebug_num_tgts;
1358                 /* sdebug_max_luns; */
1359                 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
1360         }
1361         mutex_unlock(&sdebug_host_list_mutex);
1362 }
1363
/* Where the invalid field was found: in the data-out buffer or in the CDB. */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Build an ILLEGAL REQUEST sense with a sense-key-specific "field pointer"
 * identifying the offending byte (and optionally bit) in the CDB or the
 * parameter data, in either fixed or descriptor sense format.
 * Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key specific bytes (SPC FIELD POINTER) */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	/* CDB error vs parameter-list error selects the ASC */
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;			/* SKSV: field pointer is valid */
	if (c_d)
		sks[0] |= 0x40;		/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;		/* BPV: bit pointer is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		/* descriptor format: append a sense-key specific descriptor */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format offset */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
1406
1407 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1408 {
1409         if (!scp->sense_buffer) {
1410                 sdev_printk(KERN_ERR, scp->device,
1411                             "%s: sense_buffer is NULL\n", __func__);
1412                 return;
1413         }
1414         memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1415
1416         scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1417
1418         if (sdebug_verbose)
1419                 sdev_printk(KERN_INFO, scp->device,
1420                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1421                             my_name, key, asc, asq);
1422 }
1423
1424 /* Sense data that has information fields for tapes */
1425 static void mk_sense_info_tape(struct scsi_cmnd *scp, int key, int asc, int asq,
1426                         unsigned int information, unsigned char tape_flags)
1427 {
1428         if (!scp->sense_buffer) {
1429                 sdev_printk(KERN_ERR, scp->device,
1430                             "%s: sense_buffer is NULL\n", __func__);
1431                 return;
1432         }
1433         memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1434
1435         scsi_build_sense(scp, /* sdebug_dsense */ 0, key, asc, asq);
1436         /* only fixed format so far */
1437
1438         scp->sense_buffer[0] |= 0x80; /* valid */
1439         scp->sense_buffer[2] |= tape_flags;
1440         put_unaligned_be32(information, &scp->sense_buffer[3]);
1441
1442         if (sdebug_verbose)
1443                 sdev_printk(KERN_INFO, scp->device,
1444                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1445                             my_name, key, asc, asq);
1446 }
1447
/* Convenience wrapper: ILLEGAL REQUEST with INVALID COMMAND OPERATION CODE */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
1452
1453 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1454                             void __user *arg)
1455 {
1456         if (sdebug_verbose) {
1457                 if (0x1261 == cmd)
1458                         sdev_printk(KERN_INFO, dev,
1459                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
1460                 else if (0x5331 == cmd)
1461                         sdev_printk(KERN_INFO, dev,
1462                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1463                                     __func__);
1464                 else
1465                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1466                                     __func__, cmd);
1467         }
1468         return -EINVAL;
1469         /* return -ENOTTY; // correct return but upsets fdisk */
1470 }
1471
1472 static void config_cdb_len(struct scsi_device *sdev)
1473 {
1474         switch (sdebug_cdb_len) {
1475         case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1476                 sdev->use_10_for_rw = false;
1477                 sdev->use_16_for_rw = false;
1478                 sdev->use_10_for_ms = false;
1479                 break;
1480         case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1481                 sdev->use_10_for_rw = true;
1482                 sdev->use_16_for_rw = false;
1483                 sdev->use_10_for_ms = false;
1484                 break;
1485         case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1486                 sdev->use_10_for_rw = true;
1487                 sdev->use_16_for_rw = false;
1488                 sdev->use_10_for_ms = true;
1489                 break;
1490         case 16:
1491                 sdev->use_10_for_rw = false;
1492                 sdev->use_16_for_rw = true;
1493                 sdev->use_10_for_ms = true;
1494                 break;
1495         case 32: /* No knobs to suggest this so same as 16 for now */
1496                 sdev->use_10_for_rw = false;
1497                 sdev->use_16_for_rw = true;
1498                 sdev->use_10_for_ms = true;
1499                 break;
1500         default:
1501                 pr_warn("unexpected cdb_len=%d, force to 10\n",
1502                         sdebug_cdb_len);
1503                 sdev->use_10_for_rw = true;
1504                 sdev->use_16_for_rw = false;
1505                 sdev->use_10_for_ms = false;
1506                 sdebug_cdb_len = 10;
1507                 break;
1508         }
1509 }
1510
1511 static void all_config_cdb_len(void)
1512 {
1513         struct sdebug_host_info *sdbg_host;
1514         struct Scsi_Host *shost;
1515         struct scsi_device *sdev;
1516
1517         mutex_lock(&sdebug_host_list_mutex);
1518         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1519                 shost = sdbg_host->shost;
1520                 shost_for_each_device(sdev, shost) {
1521                         config_cdb_len(sdev);
1522                 }
1523         }
1524         mutex_unlock(&sdebug_host_list_mutex);
1525 }
1526
1527 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1528 {
1529         struct sdebug_host_info *sdhp = devip->sdbg_host;
1530         struct sdebug_dev_info *dp;
1531
1532         list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1533                 if ((devip->sdbg_host == dp->sdbg_host) &&
1534                     (devip->target == dp->target)) {
1535                         clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1536                 }
1537         }
1538 }
1539
1540 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1541 {
1542         int k;
1543
1544         k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1545         if (k != SDEBUG_NUM_UAS) {
1546                 const char *cp = NULL;
1547
1548                 switch (k) {
1549                 case SDEBUG_UA_POR:
1550                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1551                                         POWER_ON_RESET_ASCQ);
1552                         if (sdebug_verbose)
1553                                 cp = "power on reset";
1554                         break;
1555                 case SDEBUG_UA_POOCCUR:
1556                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1557                                         POWER_ON_OCCURRED_ASCQ);
1558                         if (sdebug_verbose)
1559                                 cp = "power on occurred";
1560                         break;
1561                 case SDEBUG_UA_BUS_RESET:
1562                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1563                                         BUS_RESET_ASCQ);
1564                         if (sdebug_verbose)
1565                                 cp = "bus reset";
1566                         break;
1567                 case SDEBUG_UA_MODE_CHANGED:
1568                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1569                                         MODE_CHANGED_ASCQ);
1570                         if (sdebug_verbose)
1571                                 cp = "mode parameters changed";
1572                         break;
1573                 case SDEBUG_UA_CAPACITY_CHANGED:
1574                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1575                                         CAPACITY_CHANGED_ASCQ);
1576                         if (sdebug_verbose)
1577                                 cp = "capacity data changed";
1578                         break;
1579                 case SDEBUG_UA_MICROCODE_CHANGED:
1580                         mk_sense_buffer(scp, UNIT_ATTENTION,
1581                                         TARGET_CHANGED_ASC,
1582                                         MICROCODE_CHANGED_ASCQ);
1583                         if (sdebug_verbose)
1584                                 cp = "microcode has been changed";
1585                         break;
1586                 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1587                         mk_sense_buffer(scp, UNIT_ATTENTION,
1588                                         TARGET_CHANGED_ASC,
1589                                         MICROCODE_CHANGED_WO_RESET_ASCQ);
1590                         if (sdebug_verbose)
1591                                 cp = "microcode has been changed without reset";
1592                         break;
1593                 case SDEBUG_UA_LUNS_CHANGED:
1594                         /*
1595                          * SPC-3 behavior is to report a UNIT ATTENTION with
1596                          * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1597                          * on the target, until a REPORT LUNS command is
1598                          * received.  SPC-4 behavior is to report it only once.
1599                          * NOTE:  sdebug_scsi_level does not use the same
1600                          * values as struct scsi_device->scsi_level.
1601                          */
1602                         if (sdebug_scsi_level >= 6)     /* SPC-4 and above */
1603                                 clear_luns_changed_on_target(devip);
1604                         mk_sense_buffer(scp, UNIT_ATTENTION,
1605                                         TARGET_CHANGED_ASC,
1606                                         LUNS_CHANGED_ASCQ);
1607                         if (sdebug_verbose)
1608                                 cp = "reported luns data has changed";
1609                         break;
1610                 case SDEBUG_UA_NOT_READY_TO_READY:
1611                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_READY_ASC,
1612                                         0);
1613                         if (sdebug_verbose)
1614                                 cp = "not ready to ready transition/media change";
1615                         break;
1616                 default:
1617                         pr_warn("unexpected unit attention code=%d\n", k);
1618                         if (sdebug_verbose)
1619                                 cp = "unknown";
1620                         break;
1621                 }
1622                 clear_bit(k, devip->uas_bm);
1623                 if (sdebug_verbose)
1624                         sdev_printk(KERN_INFO, scp->device,
1625                                    "%s reports: Unit attention: %s\n",
1626                                    my_name, cp);
1627                 return check_condition_result;
1628         }
1629         return 0;
1630 }
1631
1632 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1633 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1634                                 int arr_len)
1635 {
1636         int act_len;
1637         struct scsi_data_buffer *sdb = &scp->sdb;
1638
1639         if (!sdb->length)
1640                 return 0;
1641         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1642                 return DID_ERROR << 16;
1643
1644         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1645                                       arr, arr_len);
1646         scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1647
1648         return 0;
1649 }
1650
1651 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1652  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1653  * calls, not required to write in ascending offset order. Assumes resid
1654  * set to scsi_bufflen() prior to any calls.
1655  */
1656 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1657                                   int arr_len, unsigned int off_dst)
1658 {
1659         unsigned int act_len, n;
1660         struct scsi_data_buffer *sdb = &scp->sdb;
1661         off_t skip = off_dst;
1662
1663         if (sdb->length <= off_dst)
1664                 return 0;
1665         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1666                 return DID_ERROR << 16;
1667
1668         act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1669                                        arr, arr_len, skip);
1670         pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1671                  __func__, off_dst, scsi_bufflen(scp), act_len,
1672                  scsi_get_resid(scp));
1673         n = scsi_bufflen(scp) - (off_dst + act_len);
1674         scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1675         return 0;
1676 }
1677
1678 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1679  * 'arr' or -1 if error.
1680  */
1681 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1682                                int arr_len)
1683 {
1684         if (!scsi_bufflen(scp))
1685                 return 0;
1686         if (scp->sc_data_direction != DMA_TO_DEVICE)
1687                 return -1;
1688
1689         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1690 }
1691
1692
static char sdebug_inq_vendor_id[9] = "Linux   ";	/* 8 chars, space padded */
static char sdebug_inq_product_id[17] = "scsi_debug      ";	/* 16 chars */
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;	/* 4 chars */
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;	/* target ports/device ids */
static const u64 naa3_comp_b = 0x3333333000000000ULL;	/* logical unit ids */
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1700
/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length (excludes the 4 byte header) */
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	/* "naa." + the naa3_comp_a prefix, then the id in hex */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* NUL padding to 24 bytes */
	num += 4;
	return num;
}
1788
/* Canned payload for the Software interface identification VPD page (0x84):
 * three 6-byte identifiers, starting at byte 4 of the page. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	const int len = sizeof(vpd84_data);

	memcpy(arr, vpd84_data, len);
	return len;
}
1801
/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	static const struct {
		unsigned char assoc_svc;	/* association/service type */
		const char *url;
	} net_addr[] = {
		{ 0x1, "https://www.kernel.org/config" }, /* lu, storage config */
		{ 0x4, "http://www.kernel.org/log" },	 /* lu, logging */
	};
	int k, num = 0;

	for (k = 0; k < 2; k++) {
		int olen = strlen(net_addr[k].url);
		/* NUL terminated, padded to a multiple of 4 bytes */
		int plen = (olen + 1 + 3) & ~3;

		arr[num++] = net_addr[k].assoc_svc;
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = plen;	/* length, null terminated, padded */
		memcpy(arr + num, net_addr[k].url, olen);
		memset(arr + num + olen, 0, plen - olen);
		num += plen;
	}

	return num;
}
1836
1837 /* SCSI ports VPD page */
1838 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1839 {
1840         int num = 0;
1841         int port_a, port_b;
1842
1843         port_a = target_dev_id + 1;
1844         port_b = port_a + 1;
1845         arr[num++] = 0x0;       /* reserved */
1846         arr[num++] = 0x0;       /* reserved */
1847         arr[num++] = 0x0;
1848         arr[num++] = 0x1;       /* relative port 1 (primary) */
1849         memset(arr + num, 0, 6);
1850         num += 6;
1851         arr[num++] = 0x0;
1852         arr[num++] = 12;        /* length tp descriptor */
1853         /* naa-5 target port identifier (A) */
1854         arr[num++] = 0x61;      /* proto=sas, binary */
1855         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1856         arr[num++] = 0x0;       /* reserved */
1857         arr[num++] = 0x8;       /* length */
1858         put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1859         num += 8;
1860         arr[num++] = 0x0;       /* reserved */
1861         arr[num++] = 0x0;       /* reserved */
1862         arr[num++] = 0x0;
1863         arr[num++] = 0x2;       /* relative port 2 (secondary) */
1864         memset(arr + num, 0, 6);
1865         num += 6;
1866         arr[num++] = 0x0;
1867         arr[num++] = 12;        /* length tp descriptor */
1868         /* naa-5 target port identifier (B) */
1869         arr[num++] = 0x61;      /* proto=sas, binary */
1870         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1871         arr[num++] = 0x0;       /* reserved */
1872         arr[num++] = 0x8;       /* length */
1873         put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1874         num += 8;
1875
1876         return num;
1877 }
1878
1879
/* Canned payload for the ATA Information VPD page (0x89); bytes from
 * offset 4 onward (the 4 byte page header is added by the caller). */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1923
1924 /* ATA Information VPD page */
1925 static int inquiry_vpd_89(unsigned char *arr)
1926 {
1927         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1928         return sizeof(vpd89_data);
1929 }
1930
1931
/* Initial contents of the Block limits VPD page (0xb0); most fields are
 * overwritten by inquiry_vpd_b0() from the sdebug_* module parameters. */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1938
1939 /* Block limits VPD page (SBC-3) */
1940 static int inquiry_vpd_b0(unsigned char *arr)
1941 {
1942         unsigned int gran;
1943
1944         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1945
1946         /* Optimal transfer length granularity */
1947         if (sdebug_opt_xferlen_exp != 0 &&
1948             sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1949                 gran = 1 << sdebug_opt_xferlen_exp;
1950         else
1951                 gran = 1 << sdebug_physblk_exp;
1952         put_unaligned_be16(gran, arr + 2);
1953
1954         /* Maximum Transfer Length */
1955         if (sdebug_store_sectors > 0x400)
1956                 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1957
1958         /* Optimal Transfer Length */
1959         put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1960
1961         if (sdebug_lbpu) {
1962                 /* Maximum Unmap LBA Count */
1963                 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1964
1965                 /* Maximum Unmap Block Descriptor Count */
1966                 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1967         }
1968
1969         /* Unmap Granularity Alignment */
1970         if (sdebug_unmap_alignment) {
1971                 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1972                 arr[28] |= 0x80; /* UGAVALID */
1973         }
1974
1975         /* Optimal Unmap Granularity */
1976         put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1977
1978         /* Maximum WRITE SAME Length */
1979         put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1980
1981         if (sdebug_atomic_wr) {
1982                 put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]);
1983                 put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]);
1984                 put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]);
1985                 put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]);
1986                 put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]);
1987         }
1988
1989         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1990 }
1991
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);	/* all other fields are zero */
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[3] = 5;	/* less than 1.8" */

	return 0x3c;
}
2003
2004 /* Logical block provisioning VPD page (SBC-4) */
2005 static int inquiry_vpd_b2(unsigned char *arr)
2006 {
2007         memset(arr, 0, 0x4);
2008         arr[0] = 0;                     /* threshold exponent */
2009         if (sdebug_lbpu)
2010                 arr[1] = 1 << 7;
2011         if (sdebug_lbpws)
2012                 arr[1] |= 1 << 6;
2013         if (sdebug_lbpws10)
2014                 arr[1] |= 1 << 5;
2015         if (sdebug_lbprz && scsi_debug_lbp())
2016                 arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
2017         /* anc_sup=0; dp=0 (no provisioning group descriptor) */
2018         /* minimum_percentage=0; provisioning_type=0 (unknown) */
2019         /* threshold_percentage=0 */
2020         return 0x4;
2021 }
2022
2023 /* Zoned block device characteristics VPD page (ZBC mandatory) */
2024 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
2025 {
2026         memset(arr, 0, 0x3c);
2027         arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
2028         /*
2029          * Set Optimal number of open sequential write preferred zones and
2030          * Optimal number of non-sequentially written sequential write
2031          * preferred zones fields to 'not reported' (0xffffffff). Leave other
2032          * fields set to zero, apart from Max. number of open swrz_s field.
2033          */
2034         put_unaligned_be32(0xffffffff, &arr[4]);
2035         put_unaligned_be32(0xffffffff, &arr[8]);
2036         if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
2037                 put_unaligned_be32(devip->max_open, &arr[12]);
2038         else
2039                 put_unaligned_be32(0xffffffff, &arr[12]);
2040         if (devip->zcap < devip->zsize) {
2041                 arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
2042                 put_unaligned_be64(devip->zsize, &arr[20]);
2043         } else {
2044                 arr[19] = 0;
2045         }
2046         return 0x3c;
2047 }
2048
#define SDEBUG_BLE_LEN_AFTER_B4 28	/* thus vpage 32 bytes long */

enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };

/* Block limits extension VPD page (SBC-4) */
static int inquiry_vpd_b7(unsigned char *arrb4)
{
	const int len = SDEBUG_BLE_LEN_AFTER_B4;

	memset(arrb4, 0, len);
	arrb4[1] = 1; /* Reduced stream control support (RSCS) */
	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, arrb4 + 2);
	return len;
}
2061
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584	/* size of resp_inquiry()'s scratch buffer */
2064
2065 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2066 {
2067         unsigned char pq_pdt;
2068         unsigned char *arr;
2069         unsigned char *cmd = scp->cmnd;
2070         u32 alloc_len, n;
2071         int ret;
2072         bool have_wlun, is_disk, is_zbc, is_disk_zbc, is_tape;
2073
2074         alloc_len = get_unaligned_be16(cmd + 3);
2075         arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
2076         if (! arr)
2077                 return DID_REQUEUE << 16;
2078         if (scp->device->type >= 32) {
2079                 is_disk = (sdebug_ptype == TYPE_DISK);
2080                 is_tape = (sdebug_ptype == TYPE_TAPE);
2081         } else {
2082                 is_disk = (scp->device->type == TYPE_DISK);
2083                 is_tape = (scp->device->type == TYPE_TAPE);
2084         }
2085         is_zbc = devip->zoned;
2086         is_disk_zbc = (is_disk || is_zbc);
2087         have_wlun = scsi_is_wlun(scp->device->lun);
2088         if (have_wlun)
2089                 pq_pdt = TYPE_WLUN;     /* present, wlun */
2090         else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
2091                 pq_pdt = 0x7f;  /* not present, PQ=3, PDT=0x1f */
2092         else
2093                 pq_pdt = ((scp->device->type >= 32 ?
2094                                 sdebug_ptype : scp->device->type) & 0x1f);
2095         arr[0] = pq_pdt;
2096         if (0x2 & cmd[1]) {  /* CMDDT bit set */
2097                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
2098                 kfree(arr);
2099                 return check_condition_result;
2100         } else if (0x1 & cmd[1]) {  /* EVPD bit set */
2101                 int lu_id_num, port_group_id, target_dev_id;
2102                 u32 len;
2103                 char lu_id_str[6];
2104                 int host_no = devip->sdbg_host->shost->host_no;
2105
2106                 arr[1] = cmd[2];
2107                 port_group_id = (((host_no + 1) & 0x7f) << 8) +
2108                     (devip->channel & 0x7f);
2109                 if (sdebug_vpd_use_hostno == 0)
2110                         host_no = 0;
2111                 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
2112                             (devip->target * 1000) + devip->lun);
2113                 target_dev_id = ((host_no + 1) * 2000) +
2114                                  (devip->target * 1000) - 3;
2115                 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
2116                 if (0 == cmd[2]) { /* supported vital product data pages */
2117                         n = 4;
2118                         arr[n++] = 0x0;   /* this page */
2119                         arr[n++] = 0x80;  /* unit serial number */
2120                         arr[n++] = 0x83;  /* device identification */
2121                         arr[n++] = 0x84;  /* software interface ident. */
2122                         arr[n++] = 0x85;  /* management network addresses */
2123                         arr[n++] = 0x86;  /* extended inquiry */
2124                         arr[n++] = 0x87;  /* mode page policy */
2125                         arr[n++] = 0x88;  /* SCSI ports */
2126                         if (is_disk_zbc) {        /* SBC or ZBC */
2127                                 arr[n++] = 0x89;  /* ATA information */
2128                                 arr[n++] = 0xb0;  /* Block limits */
2129                                 arr[n++] = 0xb1;  /* Block characteristics */
2130                                 if (is_disk)
2131                                         arr[n++] = 0xb2;  /* LB Provisioning */
2132                                 if (is_zbc)
2133                                         arr[n++] = 0xb6;  /* ZB dev. char. */
2134                                 arr[n++] = 0xb7;  /* Block limits extension */
2135                         }
2136                         arr[3] = n - 4;   /* number of supported VPD pages */
2137                 } else if (0x80 == cmd[2]) { /* unit serial number */
2138                         arr[3] = len;
2139                         memcpy(&arr[4], lu_id_str, len);
2140                 } else if (0x83 == cmd[2]) { /* device identification */
2141                         arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
2142                                                 target_dev_id, lu_id_num,
2143                                                 lu_id_str, len,
2144                                                 &devip->lu_name);
2145                 } else if (0x84 == cmd[2]) { /* Software interface ident. */
2146                         arr[3] = inquiry_vpd_84(&arr[4]);
2147                 } else if (0x85 == cmd[2]) { /* Management network addresses */
2148                         arr[3] = inquiry_vpd_85(&arr[4]);
2149                 } else if (0x86 == cmd[2]) { /* extended inquiry */
2150                         arr[3] = 0x3c;  /* number of following entries */
2151                         if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
2152                                 arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
2153                         else if (have_dif_prot)
2154                                 arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
2155                         else
2156                                 arr[4] = 0x0;   /* no protection stuff */
2157                         /*
2158                          * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
2159                          * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
2160                          */
2161                         arr[5] = 0x17;
2162                 } else if (0x87 == cmd[2]) { /* mode page policy */
2163                         arr[3] = 0x8;   /* number of following entries */
2164                         arr[4] = 0x2;   /* disconnect-reconnect mp */
2165                         arr[6] = 0x80;  /* mlus, shared */
2166                         arr[8] = 0x18;   /* protocol specific lu */
2167                         arr[10] = 0x82;  /* mlus, per initiator port */
2168                 } else if (0x88 == cmd[2]) { /* SCSI Ports */
2169                         arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
2170                 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
2171                         n = inquiry_vpd_89(&arr[4]);
2172                         put_unaligned_be16(n, arr + 2);
2173                 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
2174                         arr[3] = inquiry_vpd_b0(&arr[4]);
2175                 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
2176                         arr[3] = inquiry_vpd_b1(devip, &arr[4]);
2177                 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
2178                         arr[3] = inquiry_vpd_b2(&arr[4]);
2179                 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
2180                         arr[3] = inquiry_vpd_b6(devip, &arr[4]);
2181                 } else if (cmd[2] == 0xb7) { /* block limits extension page */
2182                         arr[3] = inquiry_vpd_b7(&arr[4]);
2183                 } else {
2184                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
2185                         kfree(arr);
2186                         return check_condition_result;
2187                 }
2188                 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2189                 ret = fill_from_dev_buffer(scp, arr,
2190                             min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2191                 kfree(arr);
2192                 return ret;
2193         }
2194         /* drops through here for a standard inquiry */
2195         arr[1] = sdebug_removable ? 0x80 : 0;   /* Removable disk */
2196         arr[2] = sdebug_scsi_level;
2197         arr[3] = 2;    /* response_data_format==2 */
2198         arr[4] = SDEBUG_LONG_INQ_SZ - 5;
2199         arr[5] = (int)have_dif_prot;    /* PROTECT bit */
2200         if (sdebug_vpd_use_hostno == 0)
2201                 arr[5] |= 0x10; /* claim: implicit TPGS */
2202         arr[6] = 0x10; /* claim: MultiP */
2203         /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
2204         arr[7] = 0xa; /* claim: LINKED + CMDQUE */
2205         memcpy(&arr[8], sdebug_inq_vendor_id, 8);
2206         memcpy(&arr[16], sdebug_inq_product_id, 16);
2207         memcpy(&arr[32], sdebug_inq_product_rev, 4);
2208         /* Use Vendor Specific area to place driver date in ASCII hex */
2209         memcpy(&arr[36], sdebug_version_date, 8);
2210         /* version descriptors (2 bytes each) follow */
2211         put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
2212         put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
2213         n = 62;
2214         if (is_disk) {          /* SBC-4 no version claimed */
2215                 put_unaligned_be16(0x600, arr + n);
2216                 n += 2;
2217         } else if (is_tape) {   /* SSC-4 rev 3 */
2218                 put_unaligned_be16(0x525, arr + n);
2219                 n += 2;
2220         } else if (is_zbc) {    /* ZBC BSR INCITS 536 revision 05 */
2221                 put_unaligned_be16(0x624, arr + n);
2222                 n += 2;
2223         }
2224         put_unaligned_be16(0x2100, arr + n);    /* SPL-4 no version claimed */
2225         ret = fill_from_dev_buffer(scp, arr,
2226                             min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
2227         kfree(arr);
2228         return ret;
2229 }
2230
/* See resp_iec_m_pg() for how this data is manipulated */
/*
 * Informational Exceptions control mode page (0x1c), current values.
 * resp_requests() checks byte 2 bit 2 (TEST) and the low nibble of
 * byte 3 (MRIE) to decide whether to fake a THRESHOLD EXCEEDED report.
 */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
                                   0, 0, 0x0, 0x0};
2234
2235 static int resp_requests(struct scsi_cmnd *scp,
2236                          struct sdebug_dev_info *devip)
2237 {
2238         unsigned char *cmd = scp->cmnd;
2239         unsigned char arr[SCSI_SENSE_BUFFERSIZE];       /* assume >= 18 bytes */
2240         bool dsense = !!(cmd[1] & 1);
2241         u32 alloc_len = cmd[4];
2242         u32 len = 18;
2243         int stopped_state = atomic_read(&devip->stopped);
2244
2245         memset(arr, 0, sizeof(arr));
2246         if (stopped_state > 0) {        /* some "pollable" data [spc6r02: 5.12.2] */
2247                 if (dsense) {
2248                         arr[0] = 0x72;
2249                         arr[1] = NOT_READY;
2250                         arr[2] = LOGICAL_UNIT_NOT_READY;
2251                         arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2252                         len = 8;
2253                 } else {
2254                         arr[0] = 0x70;
2255                         arr[2] = NOT_READY;             /* NO_SENSE in sense_key */
2256                         arr[7] = 0xa;                   /* 18 byte sense buffer */
2257                         arr[12] = LOGICAL_UNIT_NOT_READY;
2258                         arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2259                 }
2260         } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2261                 /* Information exceptions control mode page: TEST=1, MRIE=6 */
2262                 if (dsense) {
2263                         arr[0] = 0x72;
2264                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
2265                         arr[2] = THRESHOLD_EXCEEDED;
2266                         arr[3] = 0xff;          /* Failure prediction(false) */
2267                         len = 8;
2268                 } else {
2269                         arr[0] = 0x70;
2270                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
2271                         arr[7] = 0xa;           /* 18 byte sense buffer */
2272                         arr[12] = THRESHOLD_EXCEEDED;
2273                         arr[13] = 0xff;         /* Failure prediction(false) */
2274                 }
2275         } else {        /* nothing to report */
2276                 if (dsense) {
2277                         len = 8;
2278                         memset(arr, 0, len);
2279                         arr[0] = 0x72;
2280                 } else {
2281                         memset(arr, 0, len);
2282                         arr[0] = 0x70;
2283                         arr[7] = 0xa;
2284                 }
2285         }
2286         return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2287 }
2288
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	/*
	 * Respond to START STOP UNIT.  Only POWER CONDITION 0 is accepted.
	 * devip->stopped == 2 means the unit is still within its
	 * tur_ms_to_ready "becoming ready" window after creation.
	 */
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);	/* START bit clear -> stop request */
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {	/* still "becoming ready" */
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	if (scp->device->type == TYPE_TAPE && !want_stop) {
		int i;

		/* starting a tape resets every partition's position to 0 */
		set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */
		for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
			devip->tape_location[i] = 0;
		devip->tape_partition = 0;
	}
	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
2339
2340 static sector_t get_sdebug_capacity(void)
2341 {
2342         static const unsigned int gibibyte = 1073741824;
2343
2344         if (sdebug_virtual_gb > 0)
2345                 return (sector_t)sdebug_virtual_gb *
2346                         (gibibyte / sdebug_sector_size);
2347         else
2348                 return sdebug_store_sectors;
2349 }
2350
2351 #define SDEBUG_READCAP_ARR_SZ 8
2352 static int resp_readcap(struct scsi_cmnd *scp,
2353                         struct sdebug_dev_info *devip)
2354 {
2355         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2356         unsigned int capac;
2357
2358         /* following just in case virtual_gb changed */
2359         sdebug_capacity = get_sdebug_capacity();
2360         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2361         if (sdebug_capacity < 0xffffffff) {
2362                 capac = (unsigned int)sdebug_capacity - 1;
2363                 put_unaligned_be32(capac, arr + 0);
2364         } else
2365                 put_unaligned_be32(0xffffffff, arr + 0);
2366         put_unaligned_be16(sdebug_sector_size, arr + 6);
2367         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2368 }
2369
2370 #define SDEBUG_READCAP16_ARR_SZ 32
2371 static int resp_readcap16(struct scsi_cmnd *scp,
2372                           struct sdebug_dev_info *devip)
2373 {
2374         unsigned char *cmd = scp->cmnd;
2375         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2376         u32 alloc_len;
2377
2378         alloc_len = get_unaligned_be32(cmd + 10);
2379         /* following just in case virtual_gb changed */
2380         sdebug_capacity = get_sdebug_capacity();
2381         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2382         put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2383         put_unaligned_be32(sdebug_sector_size, arr + 8);
2384         arr[13] = sdebug_physblk_exp & 0xf;
2385         arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2386
2387         if (scsi_debug_lbp()) {
2388                 arr[14] |= 0x80; /* LBPME */
2389                 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2390                  * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2391                  * in the wider field maps to 0 in this field.
2392                  */
2393                 if (sdebug_lbprz & 1)   /* precisely what the draft requires */
2394                         arr[14] |= 0x40;
2395         }
2396
2397         /*
2398          * Since the scsi_debug READ CAPACITY implementation always reports the
2399          * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2400          */
2401         if (devip->zoned)
2402                 arr[12] |= 1 << 4;
2403
2404         arr[15] = sdebug_lowest_aligned & 0xff;
2405
2406         if (have_dif_prot) {
2407                 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2408                 arr[12] |= 1; /* PROT_EN */
2409         }
2410
2411         return fill_from_dev_buffer(scp, arr,
2412                             min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2413 }
2414
2415 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
2416
2417 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
2418                               struct sdebug_dev_info *devip)
2419 {
2420         unsigned char *cmd = scp->cmnd;
2421         unsigned char *arr;
2422         int host_no = devip->sdbg_host->shost->host_no;
2423         int port_group_a, port_group_b, port_a, port_b;
2424         u32 alen, n, rlen;
2425         int ret;
2426
2427         alen = get_unaligned_be32(cmd + 6);
2428         arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
2429         if (! arr)
2430                 return DID_REQUEUE << 16;
2431         /*
2432          * EVPD page 0x88 states we have two ports, one
2433          * real and a fake port with no device connected.
2434          * So we create two port groups with one port each
2435          * and set the group with port B to unavailable.
2436          */
2437         port_a = 0x1; /* relative port A */
2438         port_b = 0x2; /* relative port B */
2439         port_group_a = (((host_no + 1) & 0x7f) << 8) +
2440                         (devip->channel & 0x7f);
2441         port_group_b = (((host_no + 1) & 0x7f) << 8) +
2442                         (devip->channel & 0x7f) + 0x80;
2443
2444         /*
2445          * The asymmetric access state is cycled according to the host_id.
2446          */
2447         n = 4;
2448         if (sdebug_vpd_use_hostno == 0) {
2449                 arr[n++] = host_no % 3; /* Asymm access state */
2450                 arr[n++] = 0x0F; /* claim: all states are supported */
2451         } else {
2452                 arr[n++] = 0x0; /* Active/Optimized path */
2453                 arr[n++] = 0x01; /* only support active/optimized paths */
2454         }
2455         put_unaligned_be16(port_group_a, arr + n);
2456         n += 2;
2457         arr[n++] = 0;    /* Reserved */
2458         arr[n++] = 0;    /* Status code */
2459         arr[n++] = 0;    /* Vendor unique */
2460         arr[n++] = 0x1;  /* One port per group */
2461         arr[n++] = 0;    /* Reserved */
2462         arr[n++] = 0;    /* Reserved */
2463         put_unaligned_be16(port_a, arr + n);
2464         n += 2;
2465         arr[n++] = 3;    /* Port unavailable */
2466         arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
2467         put_unaligned_be16(port_group_b, arr + n);
2468         n += 2;
2469         arr[n++] = 0;    /* Reserved */
2470         arr[n++] = 0;    /* Status code */
2471         arr[n++] = 0;    /* Vendor unique */
2472         arr[n++] = 0x1;  /* One port per group */
2473         arr[n++] = 0;    /* Reserved */
2474         arr[n++] = 0;    /* Reserved */
2475         put_unaligned_be16(port_b, arr + n);
2476         n += 2;
2477
2478         rlen = n - 4;
2479         put_unaligned_be32(rlen, arr + 0);
2480
2481         /*
2482          * Return the smallest value of either
2483          * - The allocated length
2484          * - The constructed command length
2485          * - The maximum array size
2486          */
2487         rlen = min(alen, n);
2488         ret = fill_from_dev_buffer(scp, arr,
2489                            min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
2490         kfree(arr);
2491         return ret;
2492 }
2493
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	/*
	 * Respond to REPORT SUPPORTED OPERATION CODES.  Depending on the
	 * REPORTING OPTIONS field (cmd[2] & 0x7), either list every command
	 * in opcode_info_arr, or describe a single opcode (optionally
	 * matched against the requested service action).
	 */
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	u32 devsel = sdebug_get_devsel(scp->device);

	rctd = !!(cmd[2] & 0x80);	/* include timeout descriptors */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;	/* cap the size of the response buffer */
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0: /* all commands */
		/* 8 bytes per descriptor, plus 12 when timeouts included */
		bump = rctd ? 20 : 8;
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			if ((devsel & oip->devsel) != 0) {
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;	/* timeout descriptor follows */
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;	/* service action valid */
				put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa, arr + offset + 8);
				offset += bump;
			}
			/* also report the attached (same-opcode) variants */
			na = oip->num_attached;
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				if ((devsel & oip->devsel) == 0)
					continue;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
				offset += bump;
			}
			oip = r_oip;	/* restore outer loop cursor */
		}
		put_unaligned_be32(offset - 4, arr);	/* command data length */
		break;
	case 1: /* one command: opcode only */
	case 2: /* one command: opcode plus service action */
	case 3: /* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* this opcode requires a service action */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
				(devsel & oip->devsel) != 0 &&
				req_opcode == oip->opcode)
				supp = 3;	/* supported */
			else if (0 == (FF_SA & oip->flags)) {
				/* search attached entries for the opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode &&
						(devsel & oip->devsel) != 0)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search attached entries for the service action */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa &&
						(devsel & oip->devsel) != 0)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* append the cdb usage bitmap for the opcode */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2645
2646 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2647                           struct sdebug_dev_info *devip)
2648 {
2649         bool repd;
2650         u32 alloc_len, len;
2651         u8 arr[16];
2652         u8 *cmd = scp->cmnd;
2653
2654         memset(arr, 0, sizeof(arr));
2655         repd = !!(cmd[2] & 0x80);
2656         alloc_len = get_unaligned_be32(cmd + 6);
2657         if (alloc_len < 4) {
2658                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2659                 return check_condition_result;
2660         }
2661         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
2662         arr[1] = 0x1;           /* ITNRS */
2663         if (repd) {
2664                 arr[3] = 0xc;
2665                 len = 16;
2666         } else
2667                 len = 4;
2668
2669         len = (len < alloc_len) ? len : alloc_len;
2670         return fill_from_dev_buffer(scp, arr, len);
2671 }
2672
2673 /* <<Following mode page info copied from ST318451LW>> */
2674
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	/* static const: no need to rebuild this template on the stack per call */
	static const unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
						     5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);	/* no changeable values */
	return sizeof(err_recov_pg);
}
2685
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect page for mode_sense */
	/* static const: no need to rebuild this template on the stack per call */
	static const unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
						      0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);	/* no changeable values */
	return sizeof(disconnect_pg);
}
2696
2697 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2698 {       /* Format device page for mode_sense */
2699         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2700                                      0, 0, 0, 0, 0, 0, 0, 0,
2701                                      0, 0, 0, 0, 0x40, 0, 0, 0};
2702
2703         memcpy(p, format_pg, sizeof(format_pg));
2704         put_unaligned_be16(sdebug_sectors_per, p + 10);
2705         put_unaligned_be16(sdebug_sector_size, p + 12);
2706         if (sdebug_removable)
2707                 p[20] |= 0x20; /* should agree with INQUIRY */
2708         if (1 == pcontrol)
2709                 memset(p + 2, 0, sizeof(format_pg) - 2);
2710         return sizeof(format_pg);
2711 }
2712
/*
 * Caching mode page (0x8), current values.  resp_caching_pg() clears the
 * WCE bit (byte 2, bit 2) here when SDEBUG_OPT_N_WCE is set in sdebug_opts.
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
                                     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
                                     0, 0, 0, 0};
2716
2717 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2718 {       /* Caching page for mode_sense */
2719         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2720                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2721         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2722                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2723
2724         if (SDEBUG_OPT_N_WCE & sdebug_opts)
2725                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
2726         memcpy(p, caching_pg, sizeof(caching_pg));
2727         if (1 == pcontrol)
2728                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2729         else if (2 == pcontrol)
2730                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2731         return sizeof(caching_pg);
2732 }
2733
/*
 * Control mode page (0xa), current values.  resp_ctrl_m_pg() updates the
 * D_SENSE bit (byte 2) from sdebug_dsense and may set ATO (byte 5) from
 * sdebug_ato before copying this out.
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
                                    0, 0, 0x2, 0x4b};
2736
2737 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2738 {       /* Control mode page for mode_sense */
2739         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2740                                         0, 0, 0, 0};
2741         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2742                                      0, 0, 0x2, 0x4b};
2743
2744         if (sdebug_dsense)
2745                 ctrl_m_pg[2] |= 0x4;
2746         else
2747                 ctrl_m_pg[2] &= ~0x4;
2748
2749         if (sdebug_ato)
2750                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2751
2752         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2753         if (1 == pcontrol)
2754                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2755         else if (2 == pcontrol)
2756                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2757         return sizeof(ctrl_m_pg);
2758 }
2759
2760 /* IO Advice Hints Grouping mode page */
static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
{
	/*
	 * IO Advice Hints Grouping mode page (page 0xa, subpage 5) for
	 * mode_sense: six stream-status descriptors, the first five with
	 * ST_ENBLE set.
	 */
	struct grouping_m_pg {
		u8 page_code;	/* OR 0x40 when subpage_code > 0 */
		u8 subpage_code;
		__be16 page_length;
		u8 reserved[12];
		struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
	};
	static const struct grouping_m_pg gr_m_pg = {
		.page_code = 0xa | 0x40,	/* SPF bit: subpage format */
		.subpage_code = 5,
		.page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
		.descr = {
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 0 },
		}
	};

	BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
		     16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
	memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
	if (1 == pcontrol) {
		/* There are no changeable values so clear from byte 4 on. */
		memset(p + 4, 0, sizeof(gr_m_pg) - 4);
	}
	return sizeof(gr_m_pg);
}
2794
2795 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2796 {       /* Informational Exceptions control mode page for mode_sense */
2797         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2798                                        0, 0, 0x0, 0x0};
2799         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2800                                       0, 0, 0x0, 0x0};
2801
2802         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2803         if (1 == pcontrol)
2804                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2805         else if (2 == pcontrol)
2806                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2807         return sizeof(iec_m_pg);
2808 }
2809
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{       /* SAS SSP mode page, short format, for MODE SENSE */
        static const unsigned char sas_sf_m_pg[] = {
                0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

        memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
        if (pcontrol == 1)      /* changeable values: none, zero past header */
                memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
        return sizeof(sas_sf_m_pg);
}
2820
2821
2822 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2823                               int target_dev_id)
2824 {       /* SAS phy control and discover mode page for mode_sense */
2825         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2826                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2827                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2828                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2829                     0x2, 0, 0, 0, 0, 0, 0, 0,
2830                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
2831                     0, 0, 0, 0, 0, 0, 0, 0,
2832                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2833                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2834                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
2835                     0x3, 0, 0, 0, 0, 0, 0, 0,
2836                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
2837                     0, 0, 0, 0, 0, 0, 0, 0,
2838                 };
2839         int port_a, port_b;
2840
2841         put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2842         put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2843         put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2844         put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2845         port_a = target_dev_id + 1;
2846         port_b = port_a + 1;
2847         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2848         put_unaligned_be32(port_a, p + 20);
2849         put_unaligned_be32(port_b, p + 48 + 20);
2850         if (1 == pcontrol)
2851                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2852         return sizeof(sas_pcd_m_pg);
2853 }
2854
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{       /* SAS SSP shared protocol specific port mode subpage */
        static const unsigned char sas_sha_m_pg[] = {
                0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
                0, 0, 0, 0, 0, 0, 0, 0,
        };

        memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
        if (pcontrol == 1)      /* changeable values: none, zero past byte 4 */
                memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
        return sizeof(sas_sha_m_pg);
}
2866
/*
 * Medium Partition mode page (0x11) current values (tape).
 * process_medium_part_m_pg() updates byte 3 (partition count written
 * there) and sets bit 7 of byte 4 when FDP is requested.
 * NOTE(review): bytes 8-11 look like the two partition size fields
 * (0xffff = "rest of medium") — confirm against SSC before relying.
 */
static unsigned char partition_pg[] = {0x11, 12, 1, 0, 0x24, 3, 9, 0,
        0xff, 0xff, 0x00, 0x00};
2869
2870 static int resp_partition_m_pg(unsigned char *p, int pcontrol, int target)
2871 {       /* Partition page for mode_sense (tape) */
2872         memcpy(p, partition_pg, sizeof(partition_pg));
2873         if (pcontrol == 1)
2874                 memset(p + 2, 0, sizeof(partition_pg) - 2);
2875         return sizeof(partition_pg);
2876 }
2877
2878 static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
2879                                 unsigned char *new, int pg_len)
2880 {
2881         int new_nbr, p0_size, p1_size;
2882
2883         if ((new[4] & 0x80) != 0) { /* FDP */
2884                 partition_pg[4] |= 0x80;
2885                 devip->tape_pending_nbr_partitions = TAPE_MAX_PARTITIONS;
2886                 devip->tape_pending_part_0_size = TAPE_UNITS - TAPE_PARTITION_1_UNITS;
2887                 devip->tape_pending_part_1_size = TAPE_PARTITION_1_UNITS;
2888         } else {
2889                 new_nbr = new[3] + 1;
2890                 if (new_nbr > TAPE_MAX_PARTITIONS)
2891                         return 3;
2892                 if ((new[4] & 0x40) != 0) { /* SDP */
2893                         p1_size = TAPE_PARTITION_1_UNITS;
2894                         p0_size = TAPE_UNITS - p1_size;
2895                         if (p0_size < 100)
2896                                 return 4;
2897                 } else if ((new[4] & 0x20) != 0) {
2898                         if (new_nbr > 1) {
2899                                 p0_size = get_unaligned_be16(new + 8);
2900                                 p1_size = get_unaligned_be16(new + 10);
2901                                 if (p1_size == 0xFFFF)
2902                                         p1_size = TAPE_UNITS - p0_size;
2903                                 else if (p0_size == 0xFFFF)
2904                                         p0_size = TAPE_UNITS - p1_size;
2905                                 if (p0_size < 100 || p1_size < 100)
2906                                         return 8;
2907                         } else {
2908                                 p0_size = TAPE_UNITS;
2909                                 p1_size = 0;
2910                         }
2911                 } else
2912                         return 6;
2913                 devip->tape_pending_nbr_partitions = new_nbr;
2914                 devip->tape_pending_part_0_size = p0_size;
2915                 devip->tape_pending_part_1_size = p1_size;
2916                 partition_pg[3] = new_nbr;
2917                 devip->tape_pending_nbr_partitions = new_nbr;
2918         }
2919
2920         return 0;
2921 }
2922
static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
        unsigned char dce)
{       /* Data Compression mode page for MODE SENSE (tape) */
        static const unsigned char compression_pg[] = {
                0x0f, 14, 0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

        memcpy(p, compression_pg, sizeof(compression_pg));
        if (dce)
                p[2] |= 0x80;   /* reflect current DCE setting */
        if (pcontrol == 1)      /* changeable values: zeros past the header */
                memset(p + 2, 0, sizeof(compression_pg) - 2);
        return sizeof(compression_pg);
}
2936
2937 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2938 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2939
/*
 * Respond to MODE SENSE(6) and MODE SENSE(10).  Builds the mode
 * parameter header, an optional block descriptor (short form, or long
 * form for MODE SENSE(10) with LLBAA), then the requested mode page(s),
 * and copies up to the allocation length back to the host.
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
                           struct sdebug_dev_info *devip)
{
        int pcontrol, pcode, subpcode, bd_len;
        unsigned char dev_spec;
        u32 alloc_len, offset, len;
        int target_dev_id;
        int target = scp->device->id;
        unsigned char *ap;
        unsigned char *arr __free(kfree);       /* freed automatically on return */
        unsigned char *cmd = scp->cmnd;
        bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;

        arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
        if (!arr)
                return -ENOMEM;
        dbd = !!(cmd[1] & 0x8);         /* disable block descriptors */
        pcontrol = (cmd[2] & 0xc0) >> 6;
        pcode = cmd[2] & 0x3f;
        subpcode = cmd[3];
        msense_6 = (MODE_SENSE == cmd[0]);
        llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
        is_disk = (scp->device->type == TYPE_DISK);
        is_zbc = devip->zoned;
        is_tape = (scp->device->type == TYPE_TAPE);
        if ((is_disk || is_zbc || is_tape) && !dbd)
                bd_len = llbaa ? 16 : 8;
        else
                bd_len = 0;
        alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
        if (0x3 == pcontrol) {  /* Saving values not supported */
                mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
                return check_condition_result;
        }
        target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
                        (devip->target * 1000) - 3;
        /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
        if (is_disk || is_zbc) {
                dev_spec = 0x10;        /* =0x90 if WP=1 implies read-only */
                if (sdebug_wp)
                        dev_spec |= 0x80;
        } else
                dev_spec = 0x0;
        /* mode parameter header: 4 bytes for MODE SENSE(6), 8 for (10) */
        if (msense_6) {
                arr[2] = dev_spec;
                arr[3] = bd_len;
                offset = 4;
        } else {
                arr[3] = dev_spec;
                if (16 == bd_len)
                        arr[4] = 0x1;   /* set LONGLBA bit */
                arr[7] = bd_len;        /* assume 255 or less */
                offset = 8;
        }
        ap = arr + offset;      /* start of the block descriptor area */
        if ((bd_len > 0) && (!sdebug_capacity))
                sdebug_capacity = get_sdebug_capacity();

        if (8 == bd_len) {
                if (sdebug_capacity > 0xfffffffe)
                        put_unaligned_be32(0xffffffff, ap + 0);
                else
                        put_unaligned_be32(sdebug_capacity, ap + 0);
                if (is_tape) {
                        /* tape: density code replaces the capacity MSB;
                         * bytes 1-3 keep the low 3 bytes of the count */
                        ap[0] = devip->tape_density;
                        put_unaligned_be16(devip->tape_blksize, ap + 6);
                } else
                        put_unaligned_be16(sdebug_sector_size, ap + 6);
                offset += bd_len;
                ap = arr + offset;
        } else if (16 == bd_len) {
                /* long (LLBAA) descriptor is not valid for tape */
                if (is_tape) {
                        mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4);
                        return check_condition_result;
                }
                put_unaligned_be64((u64)sdebug_capacity, ap + 0);
                put_unaligned_be32(sdebug_sector_size, ap + 12);
                offset += bd_len;
                ap = arr + offset;
        }
        if (cmd[2] == 0)
                goto only_bd; /* Only block descriptor requested */

        /*
         * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
         *        len += resp_*_pg(ap + len, pcontrol, target);
         */
        switch (pcode) {
        case 0x1:       /* Read-Write error recovery page, direct access */
                if (subpcode > 0x0 && subpcode < 0xff)
                        goto bad_subpcode;
                len = resp_err_recov_pg(ap, pcontrol, target);
                offset += len;
                break;
        case 0x2:       /* Disconnect-Reconnect page, all devices */
                if (subpcode > 0x0 && subpcode < 0xff)
                        goto bad_subpcode;
                len = resp_disconnect_pg(ap, pcontrol, target);
                offset += len;
                break;
        case 0x3:       /* Format device page, direct access */
                if (subpcode > 0x0 && subpcode < 0xff)
                        goto bad_subpcode;
                if (is_disk) {
                        len = resp_format_pg(ap, pcontrol, target);
                        offset += len;
                } else {
                        goto bad_pcode;
                }
                break;
        case 0x8:       /* Caching page, direct access */
                if (subpcode > 0x0 && subpcode < 0xff)
                        goto bad_subpcode;
                if (is_disk || is_zbc) {
                        len = resp_caching_pg(ap, pcontrol, target);
                        offset += len;
                } else {
                        goto bad_pcode;
                }
                break;
        case 0xa:       /* Control Mode page, all devices */
                switch (subpcode) {
                case 0:
                        len = resp_ctrl_m_pg(ap, pcontrol, target);
                        break;
                case 0x05:
                        len = resp_grouping_m_pg(ap, pcontrol, target);
                        break;
                case 0xff:
                        len = resp_ctrl_m_pg(ap, pcontrol, target);
                        len += resp_grouping_m_pg(ap + len, pcontrol, target);
                        break;
                default:
                        goto bad_subpcode;
                }
                offset += len;
                break;
        case 0xf:       /* Compression Mode Page (tape) */
                if (!is_tape)
                        goto bad_pcode;
                len = resp_compression_m_pg(ap, pcontrol, target, devip->tape_dce);
                offset += len;
                break;
        case 0x11:      /* Partition Mode Page (tape) */
                if (!is_tape)
                        goto bad_pcode;
                len = resp_partition_m_pg(ap, pcontrol, target);
                offset += len;
                break;
        case 0x19:      /* if spc==1 then sas phy, control+discover */
                if (subpcode > 0x2 && subpcode < 0xff)
                        goto bad_subpcode;
                len = 0;
                if ((0x0 == subpcode) || (0xff == subpcode))
                        len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
                if ((0x1 == subpcode) || (0xff == subpcode))
                        len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
                                                  target_dev_id);
                if ((0x2 == subpcode) || (0xff == subpcode))
                        len += resp_sas_sha_m_spg(ap + len, pcontrol);
                offset += len;
                break;
        case 0x1c:      /* Informational Exceptions Mode page, all devices */
                if (subpcode > 0x0 && subpcode < 0xff)
                        goto bad_subpcode;
                len = resp_iec_m_pg(ap, pcontrol, target);
                offset += len;
                break;
        case 0x3f:      /* Read all Mode pages */
                if (subpcode > 0x0 && subpcode < 0xff)
                        goto bad_subpcode;
                len = resp_err_recov_pg(ap, pcontrol, target);
                len += resp_disconnect_pg(ap + len, pcontrol, target);
                if (is_disk) {
                        len += resp_format_pg(ap + len, pcontrol, target);
                        len += resp_caching_pg(ap + len, pcontrol, target);
                } else if (is_zbc) {
                        len += resp_caching_pg(ap + len, pcontrol, target);
                }
                len += resp_ctrl_m_pg(ap + len, pcontrol, target);
                if (0xff == subpcode)
                        len += resp_grouping_m_pg(ap + len, pcontrol, target);
                len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
                if (0xff == subpcode) {
                        len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
                                                  target_dev_id);
                        len += resp_sas_sha_m_spg(ap + len, pcontrol);
                }
                len += resp_iec_m_pg(ap + len, pcontrol, target);
                offset += len;
                break;
        default:
                goto bad_pcode;
        }
only_bd:        /* write mode data length now that the total size is known */
        if (msense_6)
                arr[0] = offset - 1;
        else
                put_unaligned_be16((offset - 2), arr + 0);
        return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));

bad_pcode:
        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
        return check_condition_result;

bad_subpcode:
        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
        return check_condition_result;
}
3149
3150 #define SDEBUG_MAX_MSELECT_SZ 512
3151
/*
 * Respond to MODE SELECT(6) and MODE SELECT(10).  Fetches the parameter
 * list, validates the header and block descriptor (tape: density code
 * and block size), then applies the single supplied mode page to the
 * module-global current values.  Caching, control and IEC page changes
 * raise a MODE PARAMETERS CHANGED unit attention.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
                            struct sdebug_dev_info *devip)
{
        int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
        int param_len, res, mpage;
        unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
        unsigned char *cmd = scp->cmnd;
        int mselect6 = (MODE_SELECT == cmd[0]);

        memset(arr, 0, sizeof(arr));
        pf = cmd[1] & 0x10;     /* PF (page format) must be set */
        sp = cmd[1] & 0x1;      /* SP (save pages) not supported */
        param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
        if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
                return check_condition_result;
        }
        res = fetch_to_dev_buffer(scp, arr, param_len);
        if (-1 == res)
                return DID_ERROR << 16;
        else if (sdebug_verbose && (res < param_len))
                sdev_printk(KERN_INFO, scp->device,
                            "%s: cdb indicated=%d, IO sent=%d bytes\n",
                            __func__, param_len, res);
        md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
        bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
        off = (mselect6 ? 4 : 8);
        if (scp->device->type == TYPE_TAPE) {
                int blksize;

                if (bd_len != 8) {      /* tape needs one short descriptor */
                        mk_sense_invalid_fld(scp, SDEB_IN_DATA,
                                        mselect6 ? 3 : 6, -1);
                        return check_condition_result;
                }
                if (arr[off] == TAPE_BAD_DENSITY) {
                        mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
                        return check_condition_result;
                }
                blksize = get_unaligned_be16(arr + off + 6);
                /* 0 means variable block size; else bounds + multiple of 4 */
                if (blksize != 0 &&
                        (blksize < TAPE_MIN_BLKSIZE ||
                                blksize > TAPE_MAX_BLKSIZE ||
                                (blksize % 4) != 0)) {
                        mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, -1);
                        return check_condition_result;
                }
                devip->tape_density = arr[off];
                devip->tape_blksize = blksize;
        }
        off += bd_len;
        if (off >= res)
                return 0; /* No page written, just descriptors */
        /* mode data length is reserved (zero) in MODE SELECT data */
        if (md_len > 2) {
                mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
                return check_condition_result;
        }
        mpage = arr[off] & 0x3f;
        ps = !!(arr[off] & 0x80);       /* PS bit must be clear */
        if (ps) {
                mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
                return check_condition_result;
        }
        spf = !!(arr[off] & 0x40);      /* subpage format? */
        pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
                       (arr[off + 1] + 2);
        if ((pg_len + off) > param_len) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST,
                                PARAMETER_LIST_LENGTH_ERR, 0);
                return check_condition_result;
        }
        switch (mpage) {
        case 0x8:      /* Caching Mode page */
                if (caching_pg[1] == arr[off + 1]) {
                        memcpy(caching_pg + 2, arr + off + 2,
                               sizeof(caching_pg) - 2);
                        goto set_mode_changed_ua;
                }
                break;
        case 0xa:      /* Control Mode page */
                if (ctrl_m_pg[1] == arr[off + 1]) {
                        memcpy(ctrl_m_pg + 2, arr + off + 2,
                               sizeof(ctrl_m_pg) - 2);
                        /* propagate SWP and D_SENSE into module state */
                        if (ctrl_m_pg[4] & 0x8)
                                sdebug_wp = true;
                        else
                                sdebug_wp = false;
                        sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
                        goto set_mode_changed_ua;
                }
                break;
        case 0xf:       /* Compression mode page */
                if (scp->device->type != TYPE_TAPE)
                        goto bad_pcode;
                if ((arr[off + 2] & 0x40) != 0) {
                        devip->tape_dce = (arr[off + 2] & 0x80) != 0;
                        return 0;
                }
                break;
        case 0x11:      /* Medium Partition Mode Page (tape) */
                if (scp->device->type == TYPE_TAPE) {
                        int fld;

                        fld = process_medium_part_m_pg(devip, &arr[off], pg_len);
                        if (fld == 0)
                                return 0;
                        mk_sense_invalid_fld(scp, SDEB_IN_DATA, fld, -1);
                        return check_condition_result;
                }
                break;
        case 0x1c:      /* Informational Exceptions Mode page */
                if (iec_m_pg[1] == arr[off + 1]) {
                        memcpy(iec_m_pg + 2, arr + off + 2,
                               sizeof(iec_m_pg) - 2);
                        goto set_mode_changed_ua;
                }
                break;
        default:
                break;
        }
        mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
        return check_condition_result;
set_mode_changed_ua:
        set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
        return 0;

bad_pcode:
        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
        return check_condition_result;
}
3282
static int resp_temp_l_pg(unsigned char *arr)
{       /* Temperature log page body: two fixed log parameters */
        static const unsigned char temp_l_pg[] = {
                0x0, 0x0, 0x3, 0x2, 0x0, 38,    /* parameter 0x0000, value 38 */
                0x0, 0x1, 0x3, 0x2, 0x0, 65,    /* parameter 0x0001, value 65 */
        };

        memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
        return sizeof(temp_l_pg);
}
3292
3293 static int resp_ie_l_pg(unsigned char *arr)
3294 {
3295         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
3296                 };
3297
3298         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
3299         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
3300                 arr[4] = THRESHOLD_EXCEEDED;
3301                 arr[5] = 0xff;
3302         }
3303         return sizeof(ie_l_pg);
3304 }
3305
static int resp_env_rep_l_spg(unsigned char *arr)
{       /* Environmental reporting log subpage body: two descriptors */
        static const unsigned char env_rep_l_spg[] = {
                0x0, 0x0, 0x23, 0x8,
                0x0, 40, 72, 0xff, 45, 18, 0, 0,
                0x1, 0x0, 0x23, 0x8,
                0x0, 55, 72, 35, 55, 45, 0, 0,
        };

        memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
        return sizeof(env_rep_l_spg);
}
3317
3318 #define SDEBUG_MAX_LSENSE_SZ 512
3319
/*
 * Respond to LOG SENSE.  Supports the supported-pages page (0x0),
 * Temperature (0xd, subpages 0x0/0x1) and Informational Exceptions
 * (0x2f) pages, in both plain (subpage 0) and subpage (0xff list,
 * explicit subpage) forms.
 */
static int resp_log_sense(struct scsi_cmnd *scp,
                          struct sdebug_dev_info *devip)
{
        int ppc, sp, pcode, subpcode;
        u32 alloc_len, len, n;
        unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
        unsigned char *cmd = scp->cmnd;

        memset(arr, 0, sizeof(arr));
        ppc = cmd[1] & 0x2;     /* parameter pointer control: unsupported */
        sp = cmd[1] & 0x1;      /* save parameters: unsupported */
        if (ppc || sp) {
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
                return check_condition_result;
        }
        pcode = cmd[2] & 0x3f;
        subpcode = cmd[3] & 0xff;
        alloc_len = get_unaligned_be16(cmd + 7);
        arr[0] = pcode;
        if (0 == subpcode) {
                switch (pcode) {
                case 0x0:       /* Supported log pages log page */
                        n = 4;
                        arr[n++] = 0x0;         /* this page */
                        arr[n++] = 0xd;         /* Temperature */
                        arr[n++] = 0x2f;        /* Informational exceptions */
                        arr[3] = n - 4;
                        break;
                case 0xd:       /* Temperature log page */
                        arr[3] = resp_temp_l_pg(arr + 4);
                        break;
                case 0x2f:      /* Informational exceptions log page */
                        arr[3] = resp_ie_l_pg(arr + 4);
                        break;
                default:
                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
                        return check_condition_result;
                }
        } else if (0xff == subpcode) {
                arr[0] |= 0x40;
                arr[1] = subpcode;
                switch (pcode) {
                case 0x0:       /* Supported log pages and subpages log page */
                        n = 4;
                        arr[n++] = 0x0;
                        arr[n++] = 0x0;         /* 0,0 page */
                        arr[n++] = 0x0;
                        arr[n++] = 0xff;        /* this page */
                        arr[n++] = 0xd;
                        arr[n++] = 0x0;         /* Temperature */
                        arr[n++] = 0xd;
                        arr[n++] = 0x1;         /* Environment reporting */
                        arr[n++] = 0xd;
                        arr[n++] = 0xff;        /* all 0xd subpages */
                        arr[n++] = 0x2f;
                        arr[n++] = 0x0; /* Informational exceptions */
                        arr[n++] = 0x2f;
                        arr[n++] = 0xff;        /* all 0x2f subpages */
                        arr[3] = n - 4;
                        break;
                case 0xd:       /* Temperature subpages */
                        n = 4;
                        arr[n++] = 0xd;
                        arr[n++] = 0x0;         /* Temperature */
                        arr[n++] = 0xd;
                        arr[n++] = 0x1;         /* Environment reporting */
                        arr[n++] = 0xd;
                        arr[n++] = 0xff;        /* these subpages */
                        arr[3] = n - 4;
                        break;
                case 0x2f:      /* Informational exceptions subpages */
                        n = 4;
                        arr[n++] = 0x2f;
                        arr[n++] = 0x0;         /* Informational exceptions */
                        arr[n++] = 0x2f;
                        arr[n++] = 0xff;        /* these subpages */
                        arr[3] = n - 4;
                        break;
                default:
                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
                        return check_condition_result;
                }
        } else if (subpcode > 0) {
                arr[0] |= 0x40;
                arr[1] = subpcode;
                if (pcode == 0xd && subpcode == 1)
                        arr[3] = resp_env_rep_l_spg(arr + 4);
                else {
                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
                        return check_condition_result;
                }
        } else {
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
                return check_condition_result;
        }
        len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
        /*
         * NOTE(review): the clamp below uses SDEBUG_MAX_INQ_ARR_SZ rather
         * than sizeof(arr)/SDEBUG_MAX_LSENSE_SZ — looks historical; confirm
         * the two bounds agree before changing.
         */
        return fill_from_dev_buffer(scp, arr,
                    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}
3419
enum {SDEBUG_READ_BLOCK_LIMITS_ARR_SZ = 6};
/*
 * READ BLOCK LIMITS (tape): byte 0 granularity, bytes 1-3 maximum block
 * length, bytes 4-5 minimum block length.
 */
static int resp_read_blklimits(struct scsi_cmnd *scp,
                        struct sdebug_dev_info *devip)
{
        unsigned char arr[SDEBUG_READ_BLOCK_LIMITS_ARR_SZ];

        arr[0] = 4;     /* granularity field — presumably 2^4; TODO confirm vs SSC */
        put_unaligned_be24(TAPE_MAX_BLKSIZE, arr + 1);
        put_unaligned_be16(TAPE_MIN_BLKSIZE, arr + 4);
        return fill_from_dev_buffer(scp, arr, SDEBUG_READ_BLOCK_LIMITS_ARR_SZ);
}
3431
3432 static int resp_locate(struct scsi_cmnd *scp,
3433                 struct sdebug_dev_info *devip)
3434 {
3435         unsigned char *cmd = scp->cmnd;
3436         unsigned int i, pos;
3437         struct tape_block *blp;
3438         int partition;
3439
3440         if ((cmd[1] & 0x02) != 0) {
3441                 if (cmd[8] >= devip->tape_nbr_partitions) {
3442                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
3443                         return check_condition_result;
3444                 }
3445                 devip->tape_partition = cmd[8];
3446         }
3447         pos = get_unaligned_be32(cmd + 3);
3448         partition = devip->tape_partition;
3449
3450         for (i = 0, blp = devip->tape_blocks[partition];
3451              i < pos && i < devip->tape_eop[partition]; i++, blp++)
3452                 if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3453                         break;
3454         if (i < pos) {
3455                 devip->tape_location[partition] = i;
3456                 mk_sense_buffer(scp, BLANK_CHECK, 0x05, 0);
3457                 return check_condition_result;
3458         }
3459         devip->tape_location[partition] = pos;
3460
3461         return 0;
3462 }
3463
/*
 * WRITE FILEMARKS for the tape emulation: write 'count' filemark blocks
 * at the current position and terminate the partition with an EOD block.
 * Fails with VOLUME OVERFLOW if the partition runs out of room.
 */
static int resp_write_filemarks(struct scsi_cmnd *scp,
		struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int i, count, pos;
	u32 data;
	int partition = devip->tape_partition;

	if ((cmd[1] & 0xfe) != 0) { /* probably write setmarks, not in >= SCSI-3 */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		return check_condition_result;
	}
	count = get_unaligned_be24(cmd + 2);	/* number of filemarks */
	data = TAPE_BLOCK_FM_FLAG;
	for (i = 0, pos = devip->tape_location[partition]; i < count; i++, pos++) {
		if (pos >= devip->tape_eop[partition] - 1) { /* don't overwrite EOD */
			devip->tape_location[partition] = devip->tape_eop[partition] - 1;
			mk_sense_info_tape(scp, VOLUME_OVERFLOW, NO_ADDITIONAL_SENSE,
					EOP_EOM_DETECTED_ASCQ, count, SENSE_FLAG_EOM);
			return check_condition_result;
		}
		(devip->tape_blocks[partition] + pos)->fl_size = data;
	}
	/* Mark new end-of-data right after the last filemark written */
	(devip->tape_blocks[partition] + pos)->fl_size =
		TAPE_BLOCK_EOD_FLAG;
	devip->tape_location[partition] = pos;

	return 0;
}
3493
3494 static int resp_space(struct scsi_cmnd *scp,
3495                 struct sdebug_dev_info *devip)
3496 {
3497         unsigned char *cmd = scp->cmnd, code;
3498         int i = 0, pos, count;
3499         struct tape_block *blp;
3500         int partition = devip->tape_partition;
3501
3502         count = get_unaligned_be24(cmd + 2);
3503         if ((count & 0x800000) != 0) /* extend negative to 32-bit count */
3504                 count |= 0xff000000;
3505         code = cmd[1] & 0x0f;
3506
3507         pos = devip->tape_location[partition];
3508         if (code == 0) { /* blocks */
3509                 if (count < 0) {
3510                         count = (-count);
3511                         pos -= 1;
3512                         for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
3513                              i++) {
3514                                 if (pos < 0)
3515                                         goto is_bop;
3516                                 else if (IS_TAPE_BLOCK_FM(blp->fl_size))
3517                                         goto is_fm;
3518                                 if (i > 0) {
3519                                         pos--;
3520                                         blp--;
3521                                 }
3522                         }
3523                 } else if (count > 0) {
3524                         for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
3525                              i++, pos++, blp++) {
3526                                 if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3527                                         goto is_eod;
3528                                 if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
3529                                         pos += 1;
3530                                         goto is_fm;
3531                                 }
3532                                 if (pos >= devip->tape_eop[partition])
3533                                         goto is_eop;
3534                         }
3535                 }
3536         } else if (code == 1) { /* filemarks */
3537                 if (count < 0) {
3538                         count = (-count);
3539                         if (pos == 0)
3540                                 goto is_bop;
3541                         else {
3542                                 for (i = 0, blp = devip->tape_blocks[partition] + pos;
3543                                      i < count && pos >= 0; i++, pos--, blp--) {
3544                                         for (pos--, blp-- ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
3545                                                      pos >= 0; pos--, blp--)
3546                                                 ; /* empty */
3547                                         if (pos < 0)
3548                                                 goto is_bop;
3549                                 }
3550                         }
3551                         pos += 1;
3552                 } else if (count > 0) {
3553                         for (i = 0, blp = devip->tape_blocks[partition] + pos;
3554                              i < count; i++, pos++, blp++) {
3555                                 for ( ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
3556                                               !IS_TAPE_BLOCK_EOD(blp->fl_size) &&
3557                                               pos < devip->tape_eop[partition];
3558                                       pos++, blp++)
3559                                         ; /* empty */
3560                                 if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3561                                         goto is_eod;
3562                                 if (pos >= devip->tape_eop[partition])
3563                                         goto is_eop;
3564                         }
3565                 }
3566         } else if (code == 3) { /* EOD */
3567                 for (blp = devip->tape_blocks[partition] + pos;
3568                      !IS_TAPE_BLOCK_EOD(blp->fl_size) && pos < devip->tape_eop[partition];
3569                      pos++, blp++)
3570                         ; /* empty */
3571                 if (pos >= devip->tape_eop[partition])
3572                         goto is_eop;
3573         } else {
3574                 /* sequential filemarks not supported */
3575                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
3576                 return check_condition_result;
3577         }
3578         devip->tape_location[partition] = pos;
3579         return 0;
3580
3581 is_fm:
3582         devip->tape_location[partition] = pos;
3583         mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
3584                         FILEMARK_DETECTED_ASCQ, count - i,
3585                         SENSE_FLAG_FILEMARK);
3586         return check_condition_result;
3587
3588 is_eod:
3589         devip->tape_location[partition] = pos;
3590         mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
3591                         EOD_DETECTED_ASCQ, count - i,
3592                         0);
3593         return check_condition_result;
3594
3595 is_bop:
3596         devip->tape_location[partition] = 0;
3597         mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
3598                         BEGINNING_OF_P_M_DETECTED_ASCQ, count - i,
3599                         SENSE_FLAG_EOM);
3600         devip->tape_location[partition] = 0;
3601         return check_condition_result;
3602
3603 is_eop:
3604         devip->tape_location[partition] = devip->tape_eop[partition] - 1;
3605         mk_sense_info_tape(scp, MEDIUM_ERROR, NO_ADDITIONAL_SENSE,
3606                         EOP_EOM_DETECTED_ASCQ, (unsigned int)i,
3607                         SENSE_FLAG_EOM);
3608         return check_condition_result;
3609 }
3610
3611 enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
3612 static int resp_read_position(struct scsi_cmnd *scp,
3613                         struct sdebug_dev_info *devip)
3614 {
3615         u8 *cmd = scp->cmnd;
3616         int all_length;
3617         unsigned char arr[20];
3618         unsigned int pos;
3619
3620         all_length = get_unaligned_be16(cmd + 7);
3621         if ((cmd[1] & 0xfe) != 0 ||
3622                 all_length != 0) { /* only short form */
3623                 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
3624                                 all_length ? 7 : 1, 0);
3625                 return check_condition_result;
3626         }
3627         memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
3628         arr[1] = devip->tape_partition;
3629         pos = devip->tape_location[devip->tape_partition];
3630         put_unaligned_be32(pos, arr + 4);
3631         put_unaligned_be32(pos, arr + 8);
3632         return fill_from_dev_buffer(scp, arr, SDEBUG_READ_POSITION_ARR_SZ);
3633 }
3634
3635 static int resp_rewind(struct scsi_cmnd *scp,
3636                 struct sdebug_dev_info *devip)
3637 {
3638         devip->tape_location[devip->tape_partition] = 0;
3639
3640         return 0;
3641 }
3642
/*
 * (Re)partition the emulated tape into nbr_partitions parts of the given
 * sizes (in TAPE_UNITS). Resets all positions, writes an EOD block at the
 * start of each partition, and updates the global partition mode page
 * (partition_pg) to reflect the new layout. Returns nbr_partitions on
 * success, -1 if the requested sizes exceed the medium.
 * NOTE(review): assumes at most two partitions are configured here; sizes
 * for partitions beyond index 1 are not handled.
 */
static int partition_tape(struct sdebug_dev_info *devip, int nbr_partitions,
			int part_0_size, int part_1_size)
{
	int i;

	if (part_0_size + part_1_size > TAPE_UNITS)
		return -1;
	devip->tape_eop[0] = part_0_size;
	devip->tape_blocks[0]->fl_size = TAPE_BLOCK_EOD_FLAG;
	devip->tape_eop[1] = part_1_size;
	/* partition 1 storage starts right after partition 0 */
	devip->tape_blocks[1] = devip->tape_blocks[0] +
			devip->tape_eop[0];
	devip->tape_blocks[1]->fl_size = TAPE_BLOCK_EOD_FLAG;

	for (i = 0 ; i < TAPE_MAX_PARTITIONS; i++)
		devip->tape_location[i] = 0;

	devip->tape_nbr_partitions = nbr_partitions;
	devip->tape_partition = 0;

	/* mode page stores "additional partitions", hence minus one */
	partition_pg[3] = nbr_partitions - 1;
	put_unaligned_be16(devip->tape_eop[0], partition_pg + 8);
	put_unaligned_be16(devip->tape_eop[1], partition_pg + 10);

	return nbr_partitions;
}
3669
3670 static int resp_format_medium(struct scsi_cmnd *scp,
3671                         struct sdebug_dev_info *devip)
3672 {
3673         int res = 0;
3674         unsigned char *cmd = scp->cmnd;
3675
3676         if (cmd[2] > 2) {
3677                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 2, -1);
3678                 return check_condition_result;
3679         }
3680         if (cmd[2] != 0) {
3681                 if (devip->tape_pending_nbr_partitions > 0) {
3682                         res = partition_tape(devip,
3683                                         devip->tape_pending_nbr_partitions,
3684                                         devip->tape_pending_part_0_size,
3685                                         devip->tape_pending_part_1_size);
3686                 } else
3687                         res = partition_tape(devip, devip->tape_nbr_partitions,
3688                                         devip->tape_eop[0], devip->tape_eop[1]);
3689         } else
3690                 res = partition_tape(devip, 1, TAPE_UNITS, 0);
3691         if (res < 0)
3692                 return -EINVAL;
3693
3694         devip->tape_pending_nbr_partitions = -1;
3695
3696         return 0;
3697 }
3698
3699 static int resp_erase(struct scsi_cmnd *scp,
3700                 struct sdebug_dev_info *devip)
3701 {
3702         int partition = devip->tape_partition;
3703         int pos = devip->tape_location[partition];
3704         struct tape_block *blp;
3705
3706         blp = devip->tape_blocks[partition] + pos;
3707         blp->fl_size = TAPE_BLOCK_EOD_FLAG;
3708
3709         return 0;
3710 }
3711
/* True when the device was configured with zones, i.e. emulates ZBC. */
static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
	return devip->nr_zones != 0;
}
3716
/*
 * Map an LBA to its zone state descriptor. When zone capacity equals zone
 * size the mapping is a simple shift; otherwise the zstate array
 * interleaves a gap zone after each sequential zone and the index must be
 * adjusted accordingly.
 */
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{
	u32 zno = lba >> devip->zsize_shift;
	struct sdeb_zone_state *zsp;

	/* Fast path: no gap zones, or LBA within the conventional zones */
	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
		return &devip->zstate[zno];

	/*
	 * If the zone capacity is less than the zone size, adjust for gap
	 * zones.
	 */
	zno = 2 * zno - devip->nr_conv_zones;
	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
	zsp = &devip->zstate[zno];
	/* LBA may fall in the gap zone that follows the sequential zone */
	if (lba >= zsp->z_start + zsp->z_size)
		zsp++;
	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
	return zsp;
}
3738
/* True for conventional (non write-pointer) zones. */
static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_CNV;
}
3743
/* True for gap zones (unmapped LBA ranges between sequential zones). */
static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_GAP;
}
3748
3749 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
3750 {
3751         return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
3752 }
3753
/*
 * Transition an open sequential zone to CLOSED (or back to EMPTY when
 * nothing was written), updating the per-device open/closed zone counters.
 * No-op for non-sequential zones and zones that are not open.
 */
static void zbc_close_zone(struct sdebug_dev_info *devip,
			   struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
		return;

	if (zc == ZC2_IMPLICIT_OPEN)
		devip->nr_imp_open--;
	else
		devip->nr_exp_open--;

	/* A zone with the write pointer still at its start becomes empty */
	if (zsp->z_wp == zsp->z_start) {
		zsp->z_cond = ZC1_EMPTY;
	} else {
		zsp->z_cond = ZC4_CLOSED;
		devip->nr_closed++;
	}
}
3778
3779 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3780 {
3781         struct sdeb_zone_state *zsp = &devip->zstate[0];
3782         unsigned int i;
3783
3784         for (i = 0; i < devip->nr_zones; i++, zsp++) {
3785                 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3786                         zbc_close_zone(devip, zsp);
3787                         return;
3788                 }
3789         }
3790 }
3791
/*
 * Open a sequential zone, either explicitly (OPEN ZONE command) or
 * implicitly (a write to a closed/empty zone), keeping the open-zone
 * counters within the device's max_open limit by closing an implicitly
 * open zone when needed.
 */
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	/* Already in the requested open state: nothing to do */
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}
3822
/*
 * Transition a zone to the FULL condition, dropping it from the
 * appropriate open-zone counter. Warns (once) if called on a zone that is
 * not currently open, since only open zones should be filled.
 */
static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
				     struct sdeb_zone_state *zsp)
{
	switch (zsp->z_cond) {
	case ZC2_IMPLICIT_OPEN:
		devip->nr_imp_open--;
		break;
	case ZC3_EXPLICIT_OPEN:
		devip->nr_exp_open--;
		break;
	default:
		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
			  zsp->z_start, zsp->z_cond);
		break;
	}
	zsp->z_cond = ZC5_FULL;
}
3840
/*
 * Advance zone write pointer(s) after a successful write of 'num' blocks
 * starting at 'lba'. Sequential-write-required zones advance directly;
 * other sequential zone types may be written out of order, so the loop
 * tracks non-sequential use and handles writes spilling into following
 * zones. Zones whose write pointer reaches the end become FULL.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (!zbc_zone_is_seq(zsp))
		return;

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* SWR writes were validated to start at the WP: just advance */
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
		return;
	}

	while (num) {
		/* Writing away from the WP marks non-sequential resource use */
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			/* write crosses the zone boundary: clamp to zone end */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			/* write entirely below current WP: no WP movement */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
		if (num) {
			/* remaining blocks fall into the next zone */
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
3882
/*
 * Validate a read or write of 'num' blocks at 'lba' against ZBC zone
 * rules; implicitly open the target zone for writes to closed/empty
 * zones. Returns 0 when the access is allowed, otherwise
 * check_condition_result with sense data set on scp.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp->z_type != zsp_end->z_type) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* Writing into a gap zone is not allowed */
	if (zbc_zone_is_gap(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				ATTEMPT_ACCESS_GAP);
		return check_condition_result;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			/* all open-zone resources held by explicit opens */
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
3957
/*
 * Common range/permission checks for medium access commands: capacity,
 * transfer-length, software write protect, and (for zoned devices) the
 * ZBC zone rules. Returns 0 or check_condition_result with sense set.
 */
static inline int check_device_access_params
			(struct scsi_cmnd *scp, unsigned long long lba,
			 unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;

	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	if (write && unlikely(sdebug_wp)) {
		/* module-level write protect is enabled */
		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
		return check_condition_result;
	}
	if (sdebug_dev_is_zoned(devip))
		return check_zbc_access_params(scp, lba, num, write);

	return 0;
}
3984
3985 /*
3986  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3987  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3988  * that access any of the "stores" in struct sdeb_store_info should call this
3989  * function with bug_if_fake_rw set to true.
3990  */
3991 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3992                                                 bool bug_if_fake_rw)
3993 {
3994         if (sdebug_fake_rw) {
3995                 BUG_ON(bug_if_fake_rw); /* See note above */
3996                 return NULL;
3997         }
3998         return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3999 }
4000
/*
 * Basic rwlock wrappers. When the no_rwlock module parameter is set the
 * real locks are skipped; __acquire()/__release() keep sparse's lock
 * context tracking balanced in that configuration.
 */
static inline void
sdeb_read_lock(rwlock_t *lock)
{
	if (sdebug_no_rwlock)
		__acquire(lock);
	else
		read_lock(lock);
}

static inline void
sdeb_read_unlock(rwlock_t *lock)
{
	if (sdebug_no_rwlock)
		__release(lock);
	else
		read_unlock(lock);
}

static inline void
sdeb_write_lock(rwlock_t *lock)
{
	if (sdebug_no_rwlock)
		__acquire(lock);
	else
		write_lock(lock);
}

static inline void
sdeb_write_unlock(rwlock_t *lock)
{
	if (sdebug_no_rwlock)
		__release(lock);
	else
		write_unlock(lock);
}
4036
/*
 * Per-store data and per-sector lock wrappers. Each asserts that a real
 * store is present (sip != NULL) before taking the corresponding rwlock.
 */
static inline void
sdeb_data_read_lock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_read_lock(&sip->macc_data_lck);
}

static inline void
sdeb_data_read_unlock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_read_unlock(&sip->macc_data_lck);
}

static inline void
sdeb_data_write_lock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_write_lock(&sip->macc_data_lck);
}

static inline void
sdeb_data_write_unlock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_write_unlock(&sip->macc_data_lck);
}

static inline void
sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_read_lock(&sip->macc_sector_lck);
}

static inline void
sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_read_unlock(&sip->macc_sector_lck);
}

static inline void
sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_write_lock(&sip->macc_sector_lck);
}

static inline void
sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_write_unlock(&sip->macc_sector_lck);
}
4100
/*
 * Atomic locking:
 * We simplify the atomic model to allow only 1x atomic write and many non-
 * atomic reads or writes for all LBAs.
 *
 * A RW lock has a similar behaviour:
 * Only 1x writer and many readers.
 *
 * So use a RW lock for per-device read and write locking:
 * An atomic access grabs the lock as a writer and non-atomic grabs the lock
 * as a reader.
 */
4113
/* Atomic writes take the data lock exclusively; everything else shares it. */
static inline void
sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
{
	if (atomic)
		sdeb_data_write_lock(sip);
	else
		sdeb_data_read_lock(sip);
}

static inline void
sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
{
	if (atomic)
		sdeb_data_write_unlock(sip);
	else
		sdeb_data_read_unlock(sip);
}

/* Allow many reads but only 1x write per sector */
static inline void
sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
{
	if (do_write)
		sdeb_data_sector_write_lock(sip);
	else
		sdeb_data_sector_read_lock(sip);
}

static inline void
sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
{
	if (do_write)
		sdeb_data_sector_write_unlock(sip);
	else
		sdeb_data_sector_read_unlock(sip);
}
4150
/*
 * Meta-data lock wrappers. Unlike the data locks, these tolerate a NULL
 * store (fake_rw mode) by falling back to the global sdeb_fake_rw_lck.
 */
static inline void
sdeb_meta_read_lock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__acquire(&sip->macc_meta_lck);
		else
			__acquire(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			read_lock(&sip->macc_meta_lck);
		else
			read_lock(&sdeb_fake_rw_lck);
	}
}

static inline void
sdeb_meta_read_unlock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__release(&sip->macc_meta_lck);
		else
			__release(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			read_unlock(&sip->macc_meta_lck);
		else
			read_unlock(&sdeb_fake_rw_lck);
	}
}

static inline void
sdeb_meta_write_lock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__acquire(&sip->macc_meta_lck);
		else
			__acquire(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			write_lock(&sip->macc_meta_lck);
		else
			write_lock(&sdeb_fake_rw_lck);
	}
}

static inline void
sdeb_meta_write_unlock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__release(&sip->macc_meta_lck);
		else
			__release(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			write_unlock(&sip->macc_meta_lck);
		else
			write_unlock(&sdeb_fake_rw_lck);
	}
}
4214
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, u8 group_number,
			    bool do_write, bool atomic)
{
	int ret;
	u64 block;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;
	int i, total = 0;

	/*
	 * Even though reads are inherently atomic (in this driver), we expect
	 * the atomic flag only for writes.
	 */
	if (!do_write && atomic)
		return -1;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;

	/* per-group write statistics for STREAM control testing */
	if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
		atomic_long_inc(&writes_by_group_number[group_number]);

	fsp = sip->storep;

	/* block becomes the store-relative sector; may wrap in the loop */
	block = do_div(lba, sdebug_store_sectors);

	/* Only allow 1x atomic write or multiple non-atomic writes at any given time */
	sdeb_data_lock(sip, atomic);
	for (i = 0; i < num; i++) {
		/* We shouldn't need to lock for atomic writes, but do it anyway */
		sdeb_data_sector_lock(sip, do_write);
		ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   sdebug_sector_size, sg_skip, do_write);
		sdeb_data_sector_unlock(sip, do_write);
		total += ret;
		if (ret != sdebug_sector_size)
			break;	/* short copy: stop and report partial total */
		sg_skip += sdebug_sector_size;
		if (++block >= sdebug_store_sectors)
			block = 0;	/* store smaller than medium: wrap */
	}
	sdeb_data_unlock(sip, atomic);

	return total;
}
4273
4274 /* Returns number of bytes copied or -1 if error. */
4275 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
4276 {
4277         struct scsi_data_buffer *sdb = &scp->sdb;
4278
4279         if (!sdb->length)
4280                 return 0;
4281         if (scp->sc_data_direction != DMA_TO_DEVICE)
4282                 return -1;
4283         return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
4284                               num * sdebug_sector_size, 0, true);
4285 }
4286
4287 /* If sip->storep+lba compares equal to arr(num), then copy top half of
4288  * arr into sip->storep+lba and return true. If comparison fails then
4289  * return false. */
4290 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
4291                               const u8 *arr, bool compare_only)
4292 {
4293         bool res;
4294         u64 block, rest = 0;
4295         u32 store_blks = sdebug_store_sectors;
4296         u32 lb_size = sdebug_sector_size;
4297         u8 *fsp = sip->storep;
4298
4299         block = do_div(lba, store_blks);
4300         if (block + num > store_blks)
4301                 rest = block + num - store_blks;
4302
4303         res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4304         if (!res)
4305                 return res;
4306         if (rest)
4307                 res = memcmp(fsp, arr + ((num - rest) * lb_size),
4308                              rest * lb_size);
4309         if (!res)
4310                 return res;
4311         if (compare_only)
4312                 return true;
4313         arr += num * lb_size;
4314         memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4315         if (rest)
4316                 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
4317         return res;
4318 }
4319
4320 static __be16 dif_compute_csum(const void *buf, int len)
4321 {
4322         __be16 csum;
4323
4324         if (sdebug_guard)
4325                 csum = (__force __be16)ip_compute_csum(buf, len);
4326         else
4327                 csum = cpu_to_be16(crc_t10dif(buf, len));
4328
4329         return csum;
4330 }
4331
/*
 * Verify one protection-information tuple against the corresponding data
 * block. Returns 0 on success, 0x01 for a guard (checksum) mismatch or
 * 0x03 for a reference-tag mismatch; callers map these onto the matching
 * sense-data qualifiers.
 */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* Type 1: ref tag must equal the low 32 bits of the LBA */
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* Type 2: ref tag must equal the expected initial LBA (ei_lba) */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
4358
/*
 * Copy protection information for @sectors blocks starting at @sector
 * between the command's protection scatterlist and the dif_storep shadow
 * store. @read true copies store -> sgl (READ), false copies sgl -> store
 * (WRITE).
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		/* The shadow store is circular: split a copy that would
		 * run past its end and continue from the beginning. */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped-around remainder at the store's start */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
4404
/*
 * Verify the stored PI tuples for a read of @sectors blocks starting at
 * @start_sec, then copy the PI out to the command's protection sgl.
 * Returns 0 on success or the dif_verify() code (0x01 guard error,
 * 0x03 reference-tag error) of the first failing sector.
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	int ret = 0;
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		/* An all-ones app tag means "do not check this block" */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		/*
		 * Because scsi_debug acts as both initiator and
		 * target we proceed to verify the PI even if
		 * RDPROTECT=3. This is done so the "initiator" knows
		 * which type of error to return. Otherwise we would
		 * have to iterate over the PI twice.
		 */
		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
			ret = dif_verify(sdt, lba2fake_store(sip, sector),
					 sector, ei_lba);
			if (ret) {
				dif_errors++;
				break;
			}
		}
	}

	/* Hand the PI to the initiator even when verification failed */
	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return ret;
}
4444
/*
 * READ(6) for the simulated tape device. In fixed mode (FIXED=1) the
 * transfer length counts blocks of the current block size; in variable
 * mode it is the byte count of a single block. Filemarks, end-of-data,
 * end-of-partition and incorrect-length conditions are reported via
 * tape-specific sense data; the tape position is updated as blocks are
 * consumed.
 */
static int resp_read_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u32 i, num, transfer, size;
	u8 *cmd = scp->cmnd;
	struct scsi_data_buffer *sdb = &scp->sdb;
	int partition = devip->tape_partition;
	u32 pos = devip->tape_location[partition];
	struct tape_block *blp;
	bool fixed, sili;

	if (cmd[0] != READ_6) { /* Only Read(6) supported */
		mk_sense_invalid_opcode(scp);
		return illegal_condition_result;
	}
	fixed = (cmd[1] & 0x1) != 0;
	sili = (cmd[1] & 0x2) != 0;
	/* SILI is only meaningful in variable-block mode */
	if (fixed && sili) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		return check_condition_result;
	}

	transfer = get_unaligned_be24(cmd + 2);
	if (fixed) {
		num = transfer;
		size = devip->tape_blksize;
	} else {
		if (transfer < TAPE_MIN_BLKSIZE ||
			transfer > TAPE_MAX_BLKSIZE) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			return check_condition_result;
		}
		num = 1;
		size = transfer;
	}

	for (i = 0, blp = devip->tape_blocks[partition] + pos;
	     i < num && pos < devip->tape_eop[partition];
	     i++, pos++, blp++) {
		devip->tape_location[partition] = pos + 1;
		if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
			mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
					FILEMARK_DETECTED_ASCQ, fixed ? num - i : size,
					SENSE_FLAG_FILEMARK);
			scsi_set_resid(scp, (num - i) * size);
			return check_condition_result;
		}
		/* Assume no REW */
		if (IS_TAPE_BLOCK_EOD(blp->fl_size)) {
			mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
					EOD_DETECTED_ASCQ, fixed ? num - i : size,
					0);
			devip->tape_location[partition] = pos;
			scsi_set_resid(scp, (num - i) * size);
			return check_condition_result;
		}
		/* Zero the block area; only 4 bytes of data are stored */
		sg_zero_buffer(sdb->table.sgl, sdb->table.nents,
			size, i * size);
		sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			&(blp->data), 4, i * size, false);
		if (fixed) {
			/* Block length mismatch -> ILI in fixed mode */
			if (blp->fl_size != devip->tape_blksize) {
				scsi_set_resid(scp, (num - i) * size);
				mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
						0, num - i,
						SENSE_FLAG_ILI);
				return check_condition_result;
			}
		} else {
			/* Variable mode: SILI suppresses ILI reporting */
			if (blp->fl_size != size) {
				if (blp->fl_size < size)
					scsi_set_resid(scp, size - blp->fl_size);
				if (!sili) {
					mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
							0, size - blp->fl_size,
							SENSE_FLAG_ILI);
					return check_condition_result;
				}
			}
		}
	}
	if (pos >= devip->tape_eop[partition]) {
		mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
				EOP_EOM_DETECTED_ASCQ, fixed ? num - i : size,
				SENSE_FLAG_EOM);
		devip->tape_location[partition] = pos - 1;
		return check_condition_result;
	}
	devip->tape_location[partition] = pos;

	return 0;
}
4536
/*
 * Respond to the READ family of commands (READ(6)/(10)/(12)/(16)/(32)
 * and XDWRITEREAD(10)): decode LBA and transfer length, apply configured
 * error injections, verify DIF/DIX protection when enabled, and copy the
 * data from the backing store into the command's scatterlist.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;
	bool meta_data_locked = false;

	/* Decode starting LBA and number of blocks per CDB variant */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* Type 2 forbids RDPROTECT in these 10/12/16-byte CDBs */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* Injected short transfer: read only half the requested blocks */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	/*
	 * When checking device access params, for reads we only check data
	 * versus what is set at init time, so no need to lock.
	 */
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Injected medium error when the read overlaps the error range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	/* Zone state and PI shadow store are read under the meta lock */
	if (sdebug_dev_is_zoned(devip) ||
	    (sdebug_dix && scsi_prot_sg_count(scp)))  {
		sdeb_meta_read_lock(sip);
		meta_data_locked = true;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_read(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, 0, false, false);
	if (meta_data_locked)
		sdeb_meta_read_unlock(sip);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* Optional injected errors reported after a successful read */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
4689
/*
 * Verify the PI tuples supplied with a protected write against the data
 * being written, walking the data and protection scatterlists in lock
 * step, then store the PI into dif_storep. Returns 0 on success or the
 * dif_verify() code (0x01 guard, 0x03 ref tag) of the first failure.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			/* WRPROTECT == 3 means do not check the PI */
			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
				ret = dif_verify(sdt, daddr, sector, ei_lba);
				if (ret)
					goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* All tuples verified: persist the PI into the shadow store */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
4761
4762 static unsigned long lba_to_map_index(sector_t lba)
4763 {
4764         if (sdebug_unmap_alignment)
4765                 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
4766         sector_div(lba, sdebug_unmap_granularity);
4767         return lba;
4768 }
4769
4770 static sector_t map_index_to_lba(unsigned long index)
4771 {
4772         sector_t lba = index * sdebug_unmap_granularity;
4773
4774         if (sdebug_unmap_alignment)
4775                 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
4776         return lba;
4777 }
4778
4779 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
4780                               unsigned int *num)
4781 {
4782         sector_t end;
4783         unsigned int mapped;
4784         unsigned long index;
4785         unsigned long next;
4786
4787         index = lba_to_map_index(lba);
4788         mapped = test_bit(index, sip->map_storep);
4789
4790         if (mapped)
4791                 next = find_next_zero_bit(sip->map_storep, map_size, index);
4792         else
4793                 next = find_next_bit(sip->map_storep, map_size, index);
4794
4795         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
4796         *num = end - lba;
4797         return mapped;
4798 }
4799
4800 static void map_region(struct sdeb_store_info *sip, sector_t lba,
4801                        unsigned int len)
4802 {
4803         sector_t end = lba + len;
4804
4805         while (lba < end) {
4806                 unsigned long index = lba_to_map_index(lba);
4807
4808                 if (index < map_size)
4809                         set_bit(index, sip->map_storep);
4810
4811                 lba = map_index_to_lba(index + 1);
4812         }
4813 }
4814
/*
 * Clear provisioning map bits fully covered by [lba, lba + len) and
 * scrub the backing store for those blocks. Map entries only partially
 * covered by the range are left mapped.
 */
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only unmap when a whole granularity-sized unit fits */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				/* invalidate PI with an all-ones fill */
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
4843
/*
 * WRITE(6) for the simulated tape device. Writes @num blocks of @size
 * bytes (fixed mode) or one variable-sized block, then terminates the
 * written area with an end-of-data marker. Early-warning and
 * end-of-partition conditions are reported via tape-specific sense data.
 */
static int resp_write_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u32 i, num, transfer, size, written = 0;
	u8 *cmd = scp->cmnd;
	struct scsi_data_buffer *sdb = &scp->sdb;
	int partition = devip->tape_partition;
	int pos = devip->tape_location[partition];
	struct tape_block *blp;
	bool fixed, ew;

	if (cmd[0] != WRITE_6) { /* Only Write(6) supported */
		mk_sense_invalid_opcode(scp);
		return illegal_condition_result;
	}

	fixed = (cmd[1] & 1) != 0;
	transfer = get_unaligned_be24(cmd + 2);
	if (fixed) {
		num = transfer;
		size = devip->tape_blksize;
	} else {
		if (transfer < TAPE_MIN_BLKSIZE ||
			transfer > TAPE_MAX_BLKSIZE) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			return check_condition_result;
		}
		num = 1;
		size = transfer;
	}

	/* NOTE(review): in fixed mode num == transfer, so this initial
	 * resid is num * transfer rather than num * size — confirm that
	 * is intended. */
	scsi_set_resid(scp, num * transfer);
	for (i = 0, blp = devip->tape_blocks[partition] + pos, ew = false;
	     i < num && pos < devip->tape_eop[partition] - 1; i++, pos++, blp++) {
		blp->fl_size = size;
		/* only 4 bytes of each block's data are retained */
		sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			&(blp->data), 4, i * size, true);
		written += size;
		scsi_set_resid(scp, num * transfer - written);
		ew |= (pos == devip->tape_eop[partition] - TAPE_EW);
	}

	devip->tape_location[partition] = pos;
	/* terminate the written area with end-of-data */
	blp->fl_size = TAPE_BLOCK_EOD_FLAG;
	if (pos >= devip->tape_eop[partition] - 1) {
		mk_sense_info_tape(scp, VOLUME_OVERFLOW,
				NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
				fixed ? num - i : transfer,
				SENSE_FLAG_EOM);
		return check_condition_result;
	}
	if (ew) { /* early warning */
		mk_sense_info_tape(scp, NO_SENSE,
				NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
				fixed ? num - i : transfer,
				SENSE_FLAG_EOM);
		return check_condition_result;
	}

	return 0;
}
4904
/*
 * Respond to the WRITE family of commands (WRITE(6)/(10)/(12)/(16)/(32)
 * and XDWRITEREAD(10)): decode LBA/length/group number, verify DIF/DIX
 * protection when enabled, copy the data into the backing store, update
 * the provisioning map and any zone write pointer, and apply configured
 * error injections.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u8 group = 0;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;
	bool meta_data_locked = false;

	/* Decode LBA, transfer length and group number per CDB variant */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		group = cmd[14] & 0x3f;
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		group = cmd[6] & 0x3f;
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		group = cmd[6] & 0x3f;
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		group = cmd[6] & 0x1f;
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		group = cmd[6] & 0x3f;
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* Type 2 forbids WRPROTECT in these 10/12/16-byte CDBs */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* Zoned, DIX and thin-provisioning metadata are updated under
	 * the meta write lock */
	if (sdebug_dev_is_zoned(devip) ||
	    (sdebug_dix && scsi_prot_sg_count(scp)) ||
	    scsi_debug_lbp())  {
		sdeb_meta_write_lock(sip);
		meta_data_locked = true;
	}

	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		if (meta_data_locked)
			sdeb_meta_write_unlock(sip);
		return ret;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_write(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_meta_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_meta_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_meta_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_meta_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, group, true, false);
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);

	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	if (meta_data_locked)
		sdeb_meta_write_unlock(sip);

	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* Optional injected errors reported after a successful write */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
5053
5054 /*
5055  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
5056  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
5057  */
5058 static int resp_write_scat(struct scsi_cmnd *scp,
5059                            struct sdebug_dev_info *devip)
5060 {
5061         u8 *cmd = scp->cmnd;
5062         u8 *lrdp = NULL;
5063         u8 *up;
5064         struct sdeb_store_info *sip = devip2sip(devip, true);
5065         u8 wrprotect;
5066         u16 lbdof, num_lrd, k;
5067         u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
5068         u32 lb_size = sdebug_sector_size;
5069         u32 ei_lba;
5070         u64 lba;
5071         u8 group;
5072         int ret, res;
5073         bool is_16;
5074         static const u32 lrd_size = 32; /* + parameter list header size */
5075
5076         if (cmd[0] == VARIABLE_LENGTH_CMD) {
5077                 is_16 = false;
5078                 group = cmd[6] & 0x3f;
5079                 wrprotect = (cmd[10] >> 5) & 0x7;
5080                 lbdof = get_unaligned_be16(cmd + 12);
5081                 num_lrd = get_unaligned_be16(cmd + 16);
5082                 bt_len = get_unaligned_be32(cmd + 28);
5083         } else {        /* that leaves WRITE SCATTERED(16) */
5084                 is_16 = true;
5085                 wrprotect = (cmd[2] >> 5) & 0x7;
5086                 lbdof = get_unaligned_be16(cmd + 4);
5087                 num_lrd = get_unaligned_be16(cmd + 8);
5088                 bt_len = get_unaligned_be32(cmd + 10);
5089                 group = cmd[14] & 0x3f;
5090                 if (unlikely(have_dif_prot)) {
5091                         if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
5092                             wrprotect) {
5093                                 mk_sense_invalid_opcode(scp);
5094                                 return illegal_condition_result;
5095                         }
5096                         if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
5097                              sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
5098                              wrprotect == 0)
5099                                 sdev_printk(KERN_ERR, scp->device,
5100                                             "Unprotected WR to DIF device\n");
5101                 }
5102         }
5103         if ((num_lrd == 0) || (bt_len == 0))
5104                 return 0;       /* T10 says these do-nothings are not errors */
5105         if (lbdof == 0) {
5106                 if (sdebug_verbose)
5107                         sdev_printk(KERN_INFO, scp->device,
5108                                 "%s: %s: LB Data Offset field bad\n",
5109                                 my_name, __func__);
5110                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5111                 return illegal_condition_result;
5112         }
5113         lbdof_blen = lbdof * lb_size;
5114         if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
5115                 if (sdebug_verbose)
5116                         sdev_printk(KERN_INFO, scp->device,
5117                                 "%s: %s: LBA range descriptors don't fit\n",
5118                                 my_name, __func__);
5119                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
5120                 return illegal_condition_result;
5121         }
5122         lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
5123         if (lrdp == NULL)
5124                 return SCSI_MLQUEUE_HOST_BUSY;
5125         if (sdebug_verbose)
5126                 sdev_printk(KERN_INFO, scp->device,
5127                         "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
5128                         my_name, __func__, lbdof_blen);
5129         res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
5130         if (res == -1) {
5131                 ret = DID_ERROR << 16;
5132                 goto err_out;
5133         }
5134
5135         /* Just keep it simple and always lock for now */
5136         sdeb_meta_write_lock(sip);
5137         sg_off = lbdof_blen;
5138         /* Spec says Buffer xfer Length field in number of LBs in dout */
5139         cum_lb = 0;
5140         for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
5141                 lba = get_unaligned_be64(up + 0);
5142                 num = get_unaligned_be32(up + 8);
5143                 if (sdebug_verbose)
5144                         sdev_printk(KERN_INFO, scp->device,
5145                                 "%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
5146                                 my_name, __func__, k, lba, num, sg_off);
5147                 if (num == 0)
5148                         continue;
5149                 ret = check_device_access_params(scp, lba, num, true);
5150                 if (ret)
5151                         goto err_out_unlock;
5152                 num_by = num * lb_size;
5153                 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
5154
5155                 if ((cum_lb + num) > bt_len) {
5156                         if (sdebug_verbose)
5157                                 sdev_printk(KERN_INFO, scp->device,
5158                                     "%s: %s: sum of blocks > data provided\n",
5159                                     my_name, __func__);
5160                         mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
5161                                         0);
5162                         ret = illegal_condition_result;
5163                         goto err_out_unlock;
5164                 }
5165
5166                 /* DIX + T10 DIF */
5167                 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
5168                         int prot_ret = prot_verify_write(scp, lba, num,
5169                                                          ei_lba);
5170
5171                         if (prot_ret) {
5172                                 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
5173                                                 prot_ret);
5174                                 ret = illegal_condition_result;
5175                                 goto err_out_unlock;
5176                         }
5177                 }
5178
5179                 /*
5180                  * Write ranges atomically to keep as close to pre-atomic
5181                  * writes behaviour as possible.
5182                  */
5183                 ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true);
5184                 /* If ZBC zone then bump its write pointer */
5185                 if (sdebug_dev_is_zoned(devip))
5186                         zbc_inc_wp(devip, lba, num);
5187                 if (unlikely(scsi_debug_lbp()))
5188                         map_region(sip, lba, num);
5189                 if (unlikely(-1 == ret)) {
5190                         ret = DID_ERROR << 16;
5191                         goto err_out_unlock;
5192                 } else if (unlikely(sdebug_verbose && (ret < num_by)))
5193                         sdev_printk(KERN_INFO, scp->device,
5194                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
5195                             my_name, num_by, ret);
5196
5197                 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
5198                              atomic_read(&sdeb_inject_pending))) {
5199                         if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
5200                                 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
5201                                 atomic_set(&sdeb_inject_pending, 0);
5202                                 ret = check_condition_result;
5203                                 goto err_out_unlock;
5204                         } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
5205                                 /* Logical block guard check failed */
5206                                 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
5207                                 atomic_set(&sdeb_inject_pending, 0);
5208                                 ret = illegal_condition_result;
5209                                 goto err_out_unlock;
5210                         } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
5211                                 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
5212                                 atomic_set(&sdeb_inject_pending, 0);
5213                                 ret = illegal_condition_result;
5214                                 goto err_out_unlock;
5215                         }
5216                 }
5217                 sg_off += num_by;
5218                 cum_lb += num;
5219         }
5220         ret = 0;
5221 err_out_unlock:
5222         sdeb_meta_write_unlock(sip);
5223 err_out:
5224         kfree(lrdp);
5225         return ret;
5226 }
5227
5228 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
5229                            u32 ei_lba, bool unmap, bool ndob)
5230 {
5231         struct scsi_device *sdp = scp->device;
5232         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5233         unsigned long long i;
5234         u64 block, lbaa;
5235         u32 lb_size = sdebug_sector_size;
5236         int ret;
5237         struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
5238                                                 scp->device->hostdata, true);
5239         u8 *fs1p;
5240         u8 *fsp;
5241         bool meta_data_locked = false;
5242
5243         if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) {
5244                 sdeb_meta_write_lock(sip);
5245                 meta_data_locked = true;
5246         }
5247
5248         ret = check_device_access_params(scp, lba, num, true);
5249         if (ret)
5250                 goto out;
5251
5252         if (unmap && scsi_debug_lbp()) {
5253                 unmap_region(sip, lba, num);
5254                 goto out;
5255         }
5256         lbaa = lba;
5257         block = do_div(lbaa, sdebug_store_sectors);
5258         /* if ndob then zero 1 logical block, else fetch 1 logical block */
5259         fsp = sip->storep;
5260         fs1p = fsp + (block * lb_size);
5261         sdeb_data_write_lock(sip);
5262         if (ndob) {
5263                 memset(fs1p, 0, lb_size);
5264                 ret = 0;
5265         } else
5266                 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
5267
5268         if (-1 == ret) {
5269                 ret = DID_ERROR << 16;
5270                 goto out;
5271         } else if (sdebug_verbose && !ndob && (ret < lb_size))
5272                 sdev_printk(KERN_INFO, scp->device,
5273                             "%s: %s: lb size=%u, IO sent=%d bytes\n",
5274                             my_name, "write same", lb_size, ret);
5275
5276         /* Copy first sector to remaining blocks */
5277         for (i = 1 ; i < num ; i++) {
5278                 lbaa = lba + i;
5279                 block = do_div(lbaa, sdebug_store_sectors);
5280                 memmove(fsp + (block * lb_size), fs1p, lb_size);
5281         }
5282         if (scsi_debug_lbp())
5283                 map_region(sip, lba, num);
5284         /* If ZBC zone then bump its write pointer */
5285         if (sdebug_dev_is_zoned(devip))
5286                 zbc_inc_wp(devip, lba, num);
5287         sdeb_data_write_unlock(sip);
5288         ret = 0;
5289 out:
5290         if (meta_data_locked)
5291                 sdeb_meta_write_unlock(sip);
5292         return ret;
5293 }
5294
5295 static int resp_write_same_10(struct scsi_cmnd *scp,
5296                               struct sdebug_dev_info *devip)
5297 {
5298         u8 *cmd = scp->cmnd;
5299         u32 lba;
5300         u16 num;
5301         u32 ei_lba = 0;
5302         bool unmap = false;
5303
5304         if (cmd[1] & 0x8) {
5305                 if (sdebug_lbpws10 == 0) {
5306                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5307                         return check_condition_result;
5308                 } else
5309                         unmap = true;
5310         }
5311         lba = get_unaligned_be32(cmd + 2);
5312         num = get_unaligned_be16(cmd + 7);
5313         if (num > sdebug_write_same_length) {
5314                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
5315                 return check_condition_result;
5316         }
5317         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
5318 }
5319
5320 static int resp_write_same_16(struct scsi_cmnd *scp,
5321                               struct sdebug_dev_info *devip)
5322 {
5323         u8 *cmd = scp->cmnd;
5324         u64 lba;
5325         u32 num;
5326         u32 ei_lba = 0;
5327         bool unmap = false;
5328         bool ndob = false;
5329
5330         if (cmd[1] & 0x8) {     /* UNMAP */
5331                 if (sdebug_lbpws == 0) {
5332                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5333                         return check_condition_result;
5334                 } else
5335                         unmap = true;
5336         }
5337         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
5338                 ndob = true;
5339         lba = get_unaligned_be64(cmd + 2);
5340         num = get_unaligned_be32(cmd + 10);
5341         if (num > sdebug_write_same_length) {
5342                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
5343                 return check_condition_result;
5344         }
5345         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
5346 }
5347
5348 /* Note the mode field is in the same position as the (lower) service action
5349  * field. For the Report supported operation codes command, SPC-4 suggests
5350  * each mode of this command should be reported separately; for future. */
5351 static int resp_write_buffer(struct scsi_cmnd *scp,
5352                              struct sdebug_dev_info *devip)
5353 {
5354         u8 *cmd = scp->cmnd;
5355         struct scsi_device *sdp = scp->device;
5356         struct sdebug_dev_info *dp;
5357         u8 mode;
5358
5359         mode = cmd[1] & 0x1f;
5360         switch (mode) {
5361         case 0x4:       /* download microcode (MC) and activate (ACT) */
5362                 /* set UAs on this device only */
5363                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5364                 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
5365                 break;
5366         case 0x5:       /* download MC, save and ACT */
5367                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
5368                 break;
5369         case 0x6:       /* download MC with offsets and ACT */
5370                 /* set UAs on most devices (LUs) in this target */
5371                 list_for_each_entry(dp,
5372                                     &devip->sdbg_host->dev_info_list,
5373                                     dev_list)
5374                         if (dp->target == sdp->id) {
5375                                 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
5376                                 if (devip != dp)
5377                                         set_bit(SDEBUG_UA_MICROCODE_CHANGED,
5378                                                 dp->uas_bm);
5379                         }
5380                 break;
5381         case 0x7:       /* download MC with offsets, save, and ACT */
5382                 /* set UA on all devices (LUs) in this target */
5383                 list_for_each_entry(dp,
5384                                     &devip->sdbg_host->dev_info_list,
5385                                     dev_list)
5386                         if (dp->target == sdp->id)
5387                                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
5388                                         dp->uas_bm);
5389                 break;
5390         default:
5391                 /* do nothing for this command for other mode values */
5392                 break;
5393         }
5394         return 0;
5395 }
5396
/*
 * COMPARE AND WRITE (0x89). The data-out buffer carries 2 * num logical
 * blocks: the compare (verify) blocks followed by the write blocks. If the
 * compare fails, MISCOMPARE sense is returned and the store is untouched.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
                           struct sdebug_dev_info *devip)
{
        u8 *cmd = scp->cmnd;
        u8 *arr;
        struct sdeb_store_info *sip = devip2sip(devip, true);
        u64 lba;
        u32 dnum;
        u32 lb_size = sdebug_sector_size;
        u8 num;
        int ret;
        int retval = 0;

        lba = get_unaligned_be64(cmd + 2);
        num = cmd[13];          /* 1 to a maximum of 255 logical blocks */
        if (0 == num)
                return 0;       /* degenerate case, not an error */
        if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
            (cmd[1] & 0xe0)) {
                /* WRPROTECT bits set: not supported with type 2 protection */
                mk_sense_invalid_opcode(scp);
                return check_condition_result;
        }
        if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
             sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
            (cmd[1] & 0xe0) == 0)
                sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
                            "to DIF device\n");
        ret = check_device_access_params(scp, lba, num, false);
        if (ret)
                return ret;
        dnum = 2 * num;         /* compare blocks followed by write blocks */
        arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
        if (NULL == arr) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
                                INSUFF_RES_ASCQ);
                return check_condition_result;
        }

        ret = do_dout_fetch(scp, dnum, arr);
        if (ret == -1) {
                retval = DID_ERROR << 16;
                goto cleanup_free;
        } else if (sdebug_verbose && (ret < (dnum * lb_size)))
                sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
                            "indicated=%u, IO sent=%d bytes\n", my_name,
                            dnum * lb_size, ret);

        /* take both locks so compare-then-write appears atomic to others */
        sdeb_data_write_lock(sip);
        sdeb_meta_write_lock(sip);
        if (!comp_write_worker(sip, lba, num, arr, false)) {
                mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
                retval = check_condition_result;
                goto cleanup_unlock;
        }

        /* Cover sip->map_storep (which map_region() sets) with data lock */
        if (scsi_debug_lbp())
                map_region(sip, lba, num);
cleanup_unlock:
        sdeb_meta_write_unlock(sip);
        sdeb_data_write_unlock(sip);
cleanup_free:
        kfree(arr);
        return retval;
}
5462
/* On-the-wire layout of one UNMAP block descriptor (fields big-endian) */
struct unmap_block_desc {
        __be64  lba;            /* first LBA to deallocate */
        __be32  blocks;         /* number of logical blocks */
        __be32  __reserved;
};
5468
5469 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
5470 {
5471         unsigned char *buf;
5472         struct unmap_block_desc *desc;
5473         struct sdeb_store_info *sip = devip2sip(devip, true);
5474         unsigned int i, payload_len, descriptors;
5475         int ret;
5476
5477         if (!scsi_debug_lbp())
5478                 return 0;       /* fib and say its done */
5479         payload_len = get_unaligned_be16(scp->cmnd + 7);
5480         BUG_ON(scsi_bufflen(scp) != payload_len);
5481
5482         descriptors = (payload_len - 8) / 16;
5483         if (descriptors > sdebug_unmap_max_desc) {
5484                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
5485                 return check_condition_result;
5486         }
5487
5488         buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
5489         if (!buf) {
5490                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5491                                 INSUFF_RES_ASCQ);
5492                 return check_condition_result;
5493         }
5494
5495         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
5496
5497         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
5498         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
5499
5500         desc = (void *)&buf[8];
5501
5502         sdeb_meta_write_lock(sip);
5503
5504         for (i = 0 ; i < descriptors ; i++) {
5505                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
5506                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
5507
5508                 ret = check_device_access_params(scp, lba, num, true);
5509                 if (ret)
5510                         goto out;
5511
5512                 unmap_region(sip, lba, num);
5513         }
5514
5515         ret = 0;
5516
5517 out:
5518         sdeb_meta_write_unlock(sip);
5519         kfree(buf);
5520
5521         return ret;
5522 }
5523
5524 #define SDEBUG_GET_LBA_STATUS_LEN 32
5525
5526 static int resp_get_lba_status(struct scsi_cmnd *scp,
5527                                struct sdebug_dev_info *devip)
5528 {
5529         u8 *cmd = scp->cmnd;
5530         u64 lba;
5531         u32 alloc_len, mapped, num;
5532         int ret;
5533         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
5534
5535         lba = get_unaligned_be64(cmd + 2);
5536         alloc_len = get_unaligned_be32(cmd + 10);
5537
5538         if (alloc_len < 24)
5539                 return 0;
5540
5541         ret = check_device_access_params(scp, lba, 1, false);
5542         if (ret)
5543                 return ret;
5544
5545         if (scsi_debug_lbp()) {
5546                 struct sdeb_store_info *sip = devip2sip(devip, true);
5547
5548                 mapped = map_state(sip, lba, &num);
5549         } else {
5550                 mapped = 1;
5551                 /* following just in case virtual_gb changed */
5552                 sdebug_capacity = get_sdebug_capacity();
5553                 if (sdebug_capacity - lba <= 0xffffffff)
5554                         num = sdebug_capacity - lba;
5555                 else
5556                         num = 0xffffffff;
5557         }
5558
5559         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
5560         put_unaligned_be32(20, arr);            /* Parameter Data Length */
5561         put_unaligned_be64(lba, arr + 8);       /* LBA */
5562         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
5563         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
5564
5565         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
5566 }
5567
5568 static int resp_get_stream_status(struct scsi_cmnd *scp,
5569                                   struct sdebug_dev_info *devip)
5570 {
5571         u16 starting_stream_id, stream_id;
5572         const u8 *cmd = scp->cmnd;
5573         u32 alloc_len, offset;
5574         u8 arr[256] = {};
5575         struct scsi_stream_status_header *h = (void *)arr;
5576
5577         starting_stream_id = get_unaligned_be16(cmd + 4);
5578         alloc_len = get_unaligned_be32(cmd + 10);
5579
5580         if (alloc_len < 8) {
5581                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
5582                 return check_condition_result;
5583         }
5584
5585         if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
5586                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
5587                 return check_condition_result;
5588         }
5589
5590         /*
5591          * The GET STREAM STATUS command only reports status information
5592          * about open streams. Treat the non-permanent stream as open.
5593          */
5594         put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
5595                            &h->number_of_open_streams);
5596
5597         for (offset = 8, stream_id = starting_stream_id;
5598              offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
5599                      stream_id < MAXIMUM_NUMBER_OF_STREAMS;
5600              offset += 8, stream_id++) {
5601                 struct scsi_stream_status *stream_status = (void *)arr + offset;
5602
5603                 stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
5604                 put_unaligned_be16(stream_id,
5605                                    &stream_status->stream_identifier);
5606                 stream_status->rel_lifetime = stream_id + 1;
5607         }
5608         put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
5609
5610         return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
5611 }
5612
5613 static int resp_sync_cache(struct scsi_cmnd *scp,
5614                            struct sdebug_dev_info *devip)
5615 {
5616         int res = 0;
5617         u64 lba;
5618         u32 num_blocks;
5619         u8 *cmd = scp->cmnd;
5620
5621         if (cmd[0] == SYNCHRONIZE_CACHE) {      /* 10 byte cdb */
5622                 lba = get_unaligned_be32(cmd + 2);
5623                 num_blocks = get_unaligned_be16(cmd + 7);
5624         } else {                                /* SYNCHRONIZE_CACHE(16) */
5625                 lba = get_unaligned_be64(cmd + 2);
5626                 num_blocks = get_unaligned_be32(cmd + 10);
5627         }
5628         if (lba + num_blocks > sdebug_capacity) {
5629                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5630                 return check_condition_result;
5631         }
5632         if (!write_since_sync || (cmd[1] & 0x2))
5633                 res = SDEG_RES_IMMED_MASK;
5634         else            /* delay if write_since_sync and IMMED clear */
5635                 write_since_sync = false;
5636         return res;
5637 }
5638
5639 /*
5640  * Assuming the LBA+num_blocks is not out-of-range, this function will return
5641  * CONDITION MET if the specified blocks will/have fitted in the cache, and
5642  * a GOOD status otherwise. Model a disk with a big cache and yield
5643  * CONDITION MET. Actually tries to bring range in main memory into the
5644  * cache associated with the CPU(s).
5645  *
5646  * The pcode 0x34 is also used for READ POSITION by tape devices.
5647  */
5648 static int resp_pre_fetch(struct scsi_cmnd *scp,
5649                           struct sdebug_dev_info *devip)
5650 {
5651         int res = 0;
5652         u64 lba;
5653         u64 block, rest = 0;
5654         u32 nblks;
5655         u8 *cmd = scp->cmnd;
5656         struct sdeb_store_info *sip = devip2sip(devip, true);
5657         u8 *fsp = sip->storep;
5658
5659         if (cmd[0] == PRE_FETCH) {      /* 10 byte cdb */
5660                 lba = get_unaligned_be32(cmd + 2);
5661                 nblks = get_unaligned_be16(cmd + 7);
5662         } else {                        /* PRE-FETCH(16) */
5663                 lba = get_unaligned_be64(cmd + 2);
5664                 nblks = get_unaligned_be32(cmd + 10);
5665         }
5666         if (lba + nblks > sdebug_capacity) {
5667                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5668                 return check_condition_result;
5669         }
5670         if (!fsp)
5671                 goto fini;
5672         /* PRE-FETCH spec says nothing about LBP or PI so skip them */
5673         block = do_div(lba, sdebug_store_sectors);
5674         if (block + nblks > sdebug_store_sectors)
5675                 rest = block + nblks - sdebug_store_sectors;
5676
5677         /* Try to bring the PRE-FETCH range into CPU's cache */
5678         sdeb_data_read_lock(sip);
5679         prefetch_range(fsp + (sdebug_sector_size * block),
5680                        (nblks - rest) * sdebug_sector_size);
5681         if (rest)
5682                 prefetch_range(fsp, rest * sdebug_sector_size);
5683
5684         sdeb_data_read_unlock(sip);
5685 fini:
5686         if (cmd[1] & 0x2)
5687                 res = SDEG_RES_IMMED_MASK;
5688         return res | condition_met_result;
5689 }
5690
#define RL_BUCKET_ELEMS 8

/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
 * (W-LUN), the normal Linux scanning logic does not associate it with a
 * device (e.g. /dev/sg7). The following magic will make that association:
 *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
 * where <n> is a host number. If there are multiple targets in a host then
 * the above will associate a W-LUN to each target. To only get a W-LUN
 * for target 2, then use "echo '- 2 49409' > scan" .
 *
 * The response is emitted in RL_BUCKET_ELEMS-sized buckets via
 * p_fill_from_dev_buffer(); bucket 0's first slot holds the 8 byte
 * REPORT LUNS header instead of a LUN.
 */
static int resp_report_luns(struct scsi_cmnd *scp,
                            struct sdebug_dev_info *devip)
{
        unsigned char *cmd = scp->cmnd;
        unsigned int alloc_len;
        unsigned char select_report;
        u64 lun;
        struct scsi_lun *lun_p;
        u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
        unsigned int lun_cnt;   /* normal LUN count (max: 256) */
        unsigned int wlun_cnt;  /* report luns W-LUN count */
        unsigned int tlun_cnt;  /* total LUN count */
        unsigned int rlen;      /* response length (in bytes) */
        int k, j, n, res;
        unsigned int off_rsp = 0;
        const int sz_lun = sizeof(struct scsi_lun);

        clear_luns_changed_on_target(devip);

        select_report = cmd[2];
        alloc_len = get_unaligned_be32(cmd + 6);

        if (alloc_len < 4) {
                pr_err("alloc len too small %d\n", alloc_len);
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
                return check_condition_result;
        }

        switch (select_report) {
        case 0:         /* all LUNs apart from W-LUNs */
                lun_cnt = sdebug_max_luns;
                wlun_cnt = 0;
                break;
        case 1:         /* only W-LUNs */
                lun_cnt = 0;
                wlun_cnt = 1;
                break;
        case 2:         /* all LUNs */
                lun_cnt = sdebug_max_luns;
                wlun_cnt = 1;
                break;
        case 0x10:      /* only administrative LUs */
        case 0x11:      /* see SPC-5 */
        case 0x12:      /* only subsidiary LUs owned by referenced LU */
        default:
                pr_debug("select report invalid %d\n", select_report);
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
                return check_condition_result;
        }

        if (sdebug_no_lun_0 && (lun_cnt > 0))
                --lun_cnt;

        tlun_cnt = lun_cnt + wlun_cnt;
        rlen = tlun_cnt * sz_lun;       /* excluding 8 byte header */
        scsi_set_resid(scp, scsi_bufflen(scp));
        pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
                 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

        /* loops rely on sizeof response header same as sizeof lun (both 8) */
        lun = sdebug_no_lun_0 ? 1 : 0;
        for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
                memset(arr, 0, sizeof(arr));
                lun_p = (struct scsi_lun *)&arr[0];
                if (k == 0) {
                        /* first bucket: slot 0 is the header, start LUNs at 1 */
                        put_unaligned_be32(rlen, &arr[0]);
                        ++lun_p;
                        j = 1;
                }
                for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
                        if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
                                break;
                        int_to_scsilun(lun++, lun_p);
                        if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
                                lun_p->scsi_lun[0] |= 0x40;
                }
                /* partial bucket: exit with lun_p/j live for the tail below */
                if (j < RL_BUCKET_ELEMS)
                        break;
                n = j * sz_lun;
                res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
                if (res)
                        return res;
                off_rsp += n;
        }
        if (wlun_cnt) {
                /* append the REPORT LUNS W-LUN after the last normal LUN */
                int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
                ++j;
        }
        if (j > 0)
                res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
        return res;
}
5793
/*
 * Respond to the SCSI VERIFY(10) and VERIFY(16) commands.
 *
 * BYTCHK (cmd[1] bits 2:1) selects the mode:
 *   0 - medium verification only: report success without checking
 *   1 - compare the data-out buffer to the store, block for block
 *   2 - reserved: rejected with ILLEGAL REQUEST
 *   3 - a single block is sent and compared against every LBA in range
 *
 * Returns 0 on success (data verified equal), a check condition on
 * miscompare/invalid CDB/allocation failure, or DID_ERROR << 16 when
 * fetching the data-out buffer fails.
 */
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	/* Decode starting LBA and block count from the CDB variant */
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if (vnum == 0)
		return 0;	/* not an error */
	/* BYTCHK==3 transfers only one block from the initiator */
	a_num = is_bytchk3 ? 1 : vnum;
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	/* Buffer must hold the full compare range even for BYTCHK==3 */
	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	sdeb_data_read_lock(sip);

	/* Pull the initiator's data-out buffer into arr */
	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	/* BYTCHK==3: replicate the single received block across the range */
	if (is_bytchk3) {
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	/* Compare arr against the backing store; mismatch -> MISCOMPARE */
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	sdeb_data_read_unlock(sip);
	kfree(arr);
	return ret;
}
5869
5870 #define RZONES_DESC_HD 64
5871
5872 /* Report zones depending on start LBA and reporting options */
5873 static int resp_report_zones(struct scsi_cmnd *scp,
5874                              struct sdebug_dev_info *devip)
5875 {
5876         unsigned int rep_max_zones, nrz = 0;
5877         int ret = 0;
5878         u32 alloc_len, rep_opts, rep_len;
5879         bool partial;
5880         u64 lba, zs_lba;
5881         u8 *arr = NULL, *desc;
5882         u8 *cmd = scp->cmnd;
5883         struct sdeb_zone_state *zsp = NULL;
5884         struct sdeb_store_info *sip = devip2sip(devip, false);
5885
5886         if (!sdebug_dev_is_zoned(devip)) {
5887                 mk_sense_invalid_opcode(scp);
5888                 return check_condition_result;
5889         }
5890         zs_lba = get_unaligned_be64(cmd + 2);
5891         alloc_len = get_unaligned_be32(cmd + 10);
5892         if (alloc_len == 0)
5893                 return 0;       /* not an error */
5894         rep_opts = cmd[14] & 0x3f;
5895         partial = cmd[14] & 0x80;
5896
5897         if (zs_lba >= sdebug_capacity) {
5898                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5899                 return check_condition_result;
5900         }
5901
5902         rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
5903
5904         arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
5905         if (!arr) {
5906                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5907                                 INSUFF_RES_ASCQ);
5908                 return check_condition_result;
5909         }
5910
5911         sdeb_meta_read_lock(sip);
5912
5913         desc = arr + 64;
5914         for (lba = zs_lba; lba < sdebug_capacity;
5915              lba = zsp->z_start + zsp->z_size) {
5916                 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
5917                         break;
5918                 zsp = zbc_zone(devip, lba);
5919                 switch (rep_opts) {
5920                 case 0x00:
5921                         /* All zones */
5922                         break;
5923                 case 0x01:
5924                         /* Empty zones */
5925                         if (zsp->z_cond != ZC1_EMPTY)
5926                                 continue;
5927                         break;
5928                 case 0x02:
5929                         /* Implicit open zones */
5930                         if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
5931                                 continue;
5932                         break;
5933                 case 0x03:
5934                         /* Explicit open zones */
5935                         if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
5936                                 continue;
5937                         break;
5938                 case 0x04:
5939                         /* Closed zones */
5940                         if (zsp->z_cond != ZC4_CLOSED)
5941                                 continue;
5942                         break;
5943                 case 0x05:
5944                         /* Full zones */
5945                         if (zsp->z_cond != ZC5_FULL)
5946                                 continue;
5947                         break;
5948                 case 0x06:
5949                 case 0x07:
5950                 case 0x10:
5951                         /*
5952                          * Read-only, offline, reset WP recommended are
5953                          * not emulated: no zones to report;
5954                          */
5955                         continue;
5956                 case 0x11:
5957                         /* non-seq-resource set */
5958                         if (!zsp->z_non_seq_resource)
5959                                 continue;
5960                         break;
5961                 case 0x3e:
5962                         /* All zones except gap zones. */
5963                         if (zbc_zone_is_gap(zsp))
5964                                 continue;
5965                         break;
5966                 case 0x3f:
5967                         /* Not write pointer (conventional) zones */
5968                         if (zbc_zone_is_seq(zsp))
5969                                 continue;
5970                         break;
5971                 default:
5972                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
5973                                         INVALID_FIELD_IN_CDB, 0);
5974                         ret = check_condition_result;
5975                         goto fini;
5976                 }
5977
5978                 if (nrz < rep_max_zones) {
5979                         /* Fill zone descriptor */
5980                         desc[0] = zsp->z_type;
5981                         desc[1] = zsp->z_cond << 4;
5982                         if (zsp->z_non_seq_resource)
5983                                 desc[1] |= 1 << 1;
5984                         put_unaligned_be64((u64)zsp->z_size, desc + 8);
5985                         put_unaligned_be64((u64)zsp->z_start, desc + 16);
5986                         put_unaligned_be64((u64)zsp->z_wp, desc + 24);
5987                         desc += 64;
5988                 }
5989
5990                 if (partial && nrz >= rep_max_zones)
5991                         break;
5992
5993                 nrz++;
5994         }
5995
5996         /* Report header */
5997         /* Zone list length. */
5998         put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
5999         /* Maximum LBA */
6000         put_unaligned_be64(sdebug_capacity - 1, arr + 8);
6001         /* Zone starting LBA granularity. */
6002         if (devip->zcap < devip->zsize)
6003                 put_unaligned_be64(devip->zsize, arr + 16);
6004
6005         rep_len = (unsigned long)desc - (unsigned long)arr;
6006         ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
6007
6008 fini:
6009         sdeb_meta_read_unlock(sip);
6010         kfree(arr);
6011         return ret;
6012 }
6013
6014 static int resp_atomic_write(struct scsi_cmnd *scp,
6015                              struct sdebug_dev_info *devip)
6016 {
6017         struct sdeb_store_info *sip;
6018         u8 *cmd = scp->cmnd;
6019         u16 boundary, len;
6020         u64 lba, lba_tmp;
6021         int ret;
6022
6023         if (!scsi_debug_atomic_write()) {
6024                 mk_sense_invalid_opcode(scp);
6025                 return check_condition_result;
6026         }
6027
6028         sip = devip2sip(devip, true);
6029
6030         lba = get_unaligned_be64(cmd + 2);
6031         boundary = get_unaligned_be16(cmd + 10);
6032         len = get_unaligned_be16(cmd + 12);
6033
6034         lba_tmp = lba;
6035         if (sdebug_atomic_wr_align &&
6036             do_div(lba_tmp, sdebug_atomic_wr_align)) {
6037                 /* Does not meet alignment requirement */
6038                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6039                 return check_condition_result;
6040         }
6041
6042         if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
6043                 /* Does not meet alignment requirement */
6044                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6045                 return check_condition_result;
6046         }
6047
6048         if (boundary > 0) {
6049                 if (boundary > sdebug_atomic_wr_max_bndry) {
6050                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6051                         return check_condition_result;
6052                 }
6053
6054                 if (len > sdebug_atomic_wr_max_length_bndry) {
6055                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6056                         return check_condition_result;
6057                 }
6058         } else {
6059                 if (len > sdebug_atomic_wr_max_length) {
6060                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
6061                         return check_condition_result;
6062                 }
6063         }
6064
6065         ret = do_device_access(sip, scp, 0, lba, len, 0, true, true);
6066         if (unlikely(ret == -1))
6067                 return DID_ERROR << 16;
6068         if (unlikely(ret != len * sdebug_sector_size))
6069                 return DID_ERROR << 16;
6070         return 0;
6071 }
6072
6073 /* Logic transplanted from tcmu-runner, file_zbc.c */
6074 static void zbc_open_all(struct sdebug_dev_info *devip)
6075 {
6076         struct sdeb_zone_state *zsp = &devip->zstate[0];
6077         unsigned int i;
6078
6079         for (i = 0; i < devip->nr_zones; i++, zsp++) {
6080                 if (zsp->z_cond == ZC4_CLOSED)
6081                         zbc_open_zone(devip, &devip->zstate[i], true);
6082         }
6083 }
6084
/*
 * Respond to the OPEN ZONE command: explicitly open either all closed
 * zones (ALL bit set in cmd[14]) or the single zone whose start LBA is
 * given in the CDB. Runs under the meta-data write lock.
 */
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	/* OPEN ZONE is only valid for ZBC devices */
	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	sdeb_meta_write_lock(sip);

	if (all) {
		/* Check if all closed zones can be open */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* The given LBA must be the start LBA of a zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* Conventional zones cannot be opened */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zc = zsp->z_cond;
	/* Opening an already explicitly-open or full zone is a no-op */
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	/* Enforce the maximum number of open zones, when limited */
	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	zbc_open_zone(devip, zsp, true);
fini:
	sdeb_meta_write_unlock(sip);
	return res;
}
6151
6152 static void zbc_close_all(struct sdebug_dev_info *devip)
6153 {
6154         unsigned int i;
6155
6156         for (i = 0; i < devip->nr_zones; i++)
6157                 zbc_close_zone(devip, &devip->zstate[i]);
6158 }
6159
6160 static int resp_close_zone(struct scsi_cmnd *scp,
6161                            struct sdebug_dev_info *devip)
6162 {
6163         int res = 0;
6164         u64 z_id;
6165         u8 *cmd = scp->cmnd;
6166         struct sdeb_zone_state *zsp;
6167         bool all = cmd[14] & 0x01;
6168         struct sdeb_store_info *sip = devip2sip(devip, false);
6169
6170         if (!sdebug_dev_is_zoned(devip)) {
6171                 mk_sense_invalid_opcode(scp);
6172                 return check_condition_result;
6173         }
6174
6175         sdeb_meta_write_lock(sip);
6176
6177         if (all) {
6178                 zbc_close_all(devip);
6179                 goto fini;
6180         }
6181
6182         /* Close specified zone */
6183         z_id = get_unaligned_be64(cmd + 2);
6184         if (z_id >= sdebug_capacity) {
6185                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6186                 res = check_condition_result;
6187                 goto fini;
6188         }
6189
6190         zsp = zbc_zone(devip, z_id);
6191         if (z_id != zsp->z_start) {
6192                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6193                 res = check_condition_result;
6194                 goto fini;
6195         }
6196         if (zbc_zone_is_conv(zsp)) {
6197                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6198                 res = check_condition_result;
6199                 goto fini;
6200         }
6201
6202         zbc_close_zone(devip, zsp);
6203 fini:
6204         sdeb_meta_write_unlock(sip);
6205         return res;
6206 }
6207
/*
 * Transition a zone to the FULL condition (FINISH ZONE). @empty selects
 * whether an EMPTY zone may also be finished (true when finishing one
 * explicitly named zone, false when finishing all zones).
 */
static void zbc_finish_zone(struct sdebug_dev_info *devip,
			    struct sdeb_zone_state *zsp, bool empty)
{
	enum sdebug_z_cond zc = zsp->z_cond;

	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
		/* Open zones are closed first, which may change z_cond... */
		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
			zbc_close_zone(devip, zsp);
		/* ...so re-read z_cond to keep the closed-zone count right */
		if (zsp->z_cond == ZC4_CLOSED)
			devip->nr_closed--;
		/* Full zone: write pointer sits at the end of the zone */
		zsp->z_wp = zsp->z_start + zsp->z_size;
		zsp->z_cond = ZC5_FULL;
	}
}
6223
6224 static void zbc_finish_all(struct sdebug_dev_info *devip)
6225 {
6226         unsigned int i;
6227
6228         for (i = 0; i < devip->nr_zones; i++)
6229                 zbc_finish_zone(devip, &devip->zstate[i], false);
6230 }
6231
6232 static int resp_finish_zone(struct scsi_cmnd *scp,
6233                             struct sdebug_dev_info *devip)
6234 {
6235         struct sdeb_zone_state *zsp;
6236         int res = 0;
6237         u64 z_id;
6238         u8 *cmd = scp->cmnd;
6239         bool all = cmd[14] & 0x01;
6240         struct sdeb_store_info *sip = devip2sip(devip, false);
6241
6242         if (!sdebug_dev_is_zoned(devip)) {
6243                 mk_sense_invalid_opcode(scp);
6244                 return check_condition_result;
6245         }
6246
6247         sdeb_meta_write_lock(sip);
6248
6249         if (all) {
6250                 zbc_finish_all(devip);
6251                 goto fini;
6252         }
6253
6254         /* Finish the specified zone */
6255         z_id = get_unaligned_be64(cmd + 2);
6256         if (z_id >= sdebug_capacity) {
6257                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
6258                 res = check_condition_result;
6259                 goto fini;
6260         }
6261
6262         zsp = zbc_zone(devip, z_id);
6263         if (z_id != zsp->z_start) {
6264                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6265                 res = check_condition_result;
6266                 goto fini;
6267         }
6268         if (zbc_zone_is_conv(zsp)) {
6269                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
6270                 res = check_condition_result;
6271                 goto fini;
6272         }
6273
6274         zbc_finish_zone(devip, zsp, true);
6275 fini:
6276         sdeb_meta_write_unlock(sip);
6277         return res;
6278 }
6279
/*
 * Reset a zone's write pointer (RESET WRITE POINTER): return the zone
 * to the EMPTY condition and zero the data it held in the backing
 * store. Non write-pointer (conventional) zones are left untouched.
 */
static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!zbc_zone_is_seq(zsp))
		return;

	/* Close the zone first if it is open, then fix the closed count */
	zc = zsp->z_cond;
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	/* Zero the written part of the zone in the backing store */
	if (zsp->z_wp > zsp->z_start)
		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);

	zsp->z_non_seq_resource = false;
	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}
6304
6305 static void zbc_rwp_all(struct sdebug_dev_info *devip)
6306 {
6307         unsigned int i;
6308
6309         for (i = 0; i < devip->nr_zones; i++)
6310                 zbc_rwp_zone(devip, &devip->zstate[i]);
6311 }
6312
/*
 * Respond to the RESET WRITE POINTER command: reset either all zones
 * (ALL bit set in cmd[14]) or the single zone whose start LBA is given
 * in the CDB. Runs under the meta-data write lock.
 */
static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	/* RESET WRITE POINTER is only valid for ZBC devices */
	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_meta_write_lock(sip);

	if (all) {
		zbc_rwp_all(devip);
		goto fini;
	}

	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* The given LBA must be the start LBA of a zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* Conventional zones have no write pointer to reset */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_rwp_zone(devip, zsp);
fini:
	sdeb_meta_write_unlock(sip);
	return res;
}
6358
6359 static u32 get_tag(struct scsi_cmnd *cmnd)
6360 {
6361         return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
6362 }
6363
/*
 * Queued (deferred) command completions converge here. Called from the
 * hrtimer and workqueue completion paths; recovers the owning command,
 * updates statistics, and either completes the command via scsi_done()
 * or kicks off error handling for an aborted command.
 */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	struct sdebug_scsi_cmd *sdsc = container_of(sd_dp,
					typeof(*sdsc), sd_dp);
	/* sdsc sits directly after the scsi_cmnd, hence the -1 step back */
	struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1;
	unsigned long flags;
	bool aborted;

	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* Count completions on a CPU other than the issuing one */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	if (!scp) {
		pr_err("scmd=NULL\n");
		return;
	}

	/* Sample and clear the aborted flag under the per-command lock */
	spin_lock_irqsave(&sdsc->lock, flags);
	aborted = sd_dp->aborted;
	if (unlikely(aborted))
		sd_dp->aborted = false;

	spin_unlock_irqrestore(&sdsc->lock, flags);

	if (aborted) {
		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
		blk_abort_request(scsi_cmd_to_rq(scp));
		return;
	}

	scsi_done(scp); /* callback to mid level */
}
6399
6400 /* When high resolution timer goes off this function is called. */
6401 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
6402 {
6403         struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
6404                                                   hrt);
6405         sdebug_q_cmd_complete(sd_dp);
6406         return HRTIMER_NORESTART;
6407 }
6408
6409 /* When work queue schedules work, it calls this function. */
6410 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
6411 {
6412         struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
6413                                                   ew.work);
6414         sdebug_q_cmd_complete(sd_dp);
6415 }
6416
/* For sdebug_uuid_ctl == 2: one UUID generated once, shared by all LUs */
static bool got_shared_uuid;
static uuid_t shared_uuid;
6419
6420 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
6421 {
6422         struct sdeb_zone_state *zsp;
6423         sector_t capacity = get_sdebug_capacity();
6424         sector_t conv_capacity;
6425         sector_t zstart = 0;
6426         unsigned int i;
6427
6428         /*
6429          * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
6430          * a zone size allowing for at least 4 zones on the device. Otherwise,
6431          * use the specified zone size checking that at least 2 zones can be
6432          * created for the device.
6433          */
6434         if (!sdeb_zbc_zone_size_mb) {
6435                 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
6436                         >> ilog2(sdebug_sector_size);
6437                 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
6438                         devip->zsize >>= 1;
6439                 if (devip->zsize < 2) {
6440                         pr_err("Device capacity too small\n");
6441                         return -EINVAL;
6442                 }
6443         } else {
6444                 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
6445                         pr_err("Zone size is not a power of 2\n");
6446                         return -EINVAL;
6447                 }
6448                 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
6449                         >> ilog2(sdebug_sector_size);
6450                 if (devip->zsize >= capacity) {
6451                         pr_err("Zone size too large for device capacity\n");
6452                         return -EINVAL;
6453                 }
6454         }
6455
6456         devip->zsize_shift = ilog2(devip->zsize);
6457         devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
6458
6459         if (sdeb_zbc_zone_cap_mb == 0) {
6460                 devip->zcap = devip->zsize;
6461         } else {
6462                 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
6463                               ilog2(sdebug_sector_size);
6464                 if (devip->zcap > devip->zsize) {
6465                         pr_err("Zone capacity too large\n");
6466                         return -EINVAL;
6467                 }
6468         }
6469
6470         conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
6471         if (conv_capacity >= capacity) {
6472                 pr_err("Number of conventional zones too large\n");
6473                 return -EINVAL;
6474         }
6475         devip->nr_conv_zones = sdeb_zbc_nr_conv;
6476         devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
6477                               devip->zsize_shift;
6478         devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
6479
6480         /* Add gap zones if zone capacity is smaller than the zone size */
6481         if (devip->zcap < devip->zsize)
6482                 devip->nr_zones += devip->nr_seq_zones;
6483
6484         if (devip->zoned) {
6485                 /* zbc_max_open_zones can be 0, meaning "not reported" */
6486                 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
6487                         devip->max_open = (devip->nr_zones - 1) / 2;
6488                 else
6489                         devip->max_open = sdeb_zbc_max_open;
6490         }
6491
6492         devip->zstate = kcalloc(devip->nr_zones,
6493                                 sizeof(struct sdeb_zone_state), GFP_KERNEL);
6494         if (!devip->zstate)
6495                 return -ENOMEM;
6496
6497         for (i = 0; i < devip->nr_zones; i++) {
6498                 zsp = &devip->zstate[i];
6499
6500                 zsp->z_start = zstart;
6501
6502                 if (i < devip->nr_conv_zones) {
6503                         zsp->z_type = ZBC_ZTYPE_CNV;
6504                         zsp->z_cond = ZBC_NOT_WRITE_POINTER;
6505                         zsp->z_wp = (sector_t)-1;
6506                         zsp->z_size =
6507                                 min_t(u64, devip->zsize, capacity - zstart);
6508                 } else if ((zstart & (devip->zsize - 1)) == 0) {
6509                         if (devip->zoned)
6510                                 zsp->z_type = ZBC_ZTYPE_SWR;
6511                         else
6512                                 zsp->z_type = ZBC_ZTYPE_SWP;
6513                         zsp->z_cond = ZC1_EMPTY;
6514                         zsp->z_wp = zsp->z_start;
6515                         zsp->z_size =
6516                                 min_t(u64, devip->zcap, capacity - zstart);
6517                 } else {
6518                         zsp->z_type = ZBC_ZTYPE_GAP;
6519                         zsp->z_cond = ZBC_NOT_WRITE_POINTER;
6520                         zsp->z_wp = (sector_t)-1;
6521                         zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
6522                                             capacity - zstart);
6523                 }
6524
6525                 WARN_ON_ONCE((int)zsp->z_size <= 0);
6526                 zstart += zsp->z_size;
6527         }
6528
6529         return 0;
6530 }
6531
/*
 * Allocate and initialize a new sdebug device and add it to the host's
 * dev_info_list. Returns NULL on allocation or zone-setup failure.
 */
static struct sdebug_dev_info *sdebug_device_create(
			struct sdebug_host_info *sdbg_host, gfp_t flags)
{
	struct sdebug_dev_info *devip;

	devip = kzalloc(sizeof(*devip), flags);
	if (devip) {
		/* uuid_ctl: 1 -> fresh UUID per LU, 2 -> one shared UUID */
		if (sdebug_uuid_ctl == 1)
			uuid_gen(&devip->lu_name);
		else if (sdebug_uuid_ctl == 2) {
			if (got_shared_uuid)
				devip->lu_name = shared_uuid;
			else {
				uuid_gen(&shared_uuid);
				got_shared_uuid = true;
				devip->lu_name = shared_uuid;
			}
		}
		devip->sdbg_host = sdbg_host;
		if (sdeb_zbc_in_use) {
			devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
			if (sdebug_device_create_zones(devip)) {
				kfree(devip);
				return NULL;
			}
		} else {
			devip->zoned = false;
		}
		/* Tape devices start with default density and block size */
		if (sdebug_ptype == TYPE_TAPE) {
			devip->tape_density = TAPE_DEF_DENSITY;
			devip->tape_blksize = TAPE_DEF_BLKSIZE;
		}
		devip->create_ts = ktime_get_boottime();
		/* stopped set to 2 when a TUR ready-delay is configured —
		 * presumably "becoming ready"; confirm against TUR handler */
		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
		spin_lock_init(&devip->list_lock);
		INIT_LIST_HEAD(&devip->inject_err_list);
		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
	}
	return devip;
}
6572
6573 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
6574 {
6575         struct sdebug_host_info *sdbg_host;
6576         struct sdebug_dev_info *open_devip = NULL;
6577         struct sdebug_dev_info *devip;
6578
6579         sdbg_host = shost_to_sdebug_host(sdev->host);
6580
6581         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6582                 if ((devip->used) && (devip->channel == sdev->channel) &&
6583                     (devip->target == sdev->id) &&
6584                     (devip->lun == sdev->lun))
6585                         return devip;
6586                 else {
6587                         if ((!devip->used) && (!open_devip))
6588                                 open_devip = devip;
6589                 }
6590         }
6591         if (!open_devip) { /* try and make a new one */
6592                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
6593                 if (!open_devip) {
6594                         pr_err("out of memory at line %d\n", __LINE__);
6595                         return NULL;
6596                 }
6597         }
6598
6599         open_devip->channel = sdev->channel;
6600         open_devip->target = sdev->id;
6601         open_devip->lun = sdev->lun;
6602         open_devip->sdbg_host = sdbg_host;
6603         set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
6604         open_devip->used = true;
6605         return open_devip;
6606 }
6607
6608 static int scsi_debug_sdev_init(struct scsi_device *sdp)
6609 {
6610         if (sdebug_verbose)
6611                 pr_info("sdev_init <%u %u %u %llu>\n",
6612                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6613
6614         return 0;
6615 }
6616
/*
 * Midlayer sdev_configure hook: attach (or build) the per-device
 * sdebug_dev_info, set up tape backing storage when emulating a tape,
 * and create the per-device debugfs entries.  Returns 0 on success,
 * 1 on resource failure (midlayer then marks the device offline).
 */
static int scsi_debug_sdev_configure(struct scsi_device *sdp,
				     struct queue_limits *lim)
{
	struct sdebug_dev_info *devip =
			(struct sdebug_dev_info *)sdp->hostdata;
	struct dentry *dentry;

	if (sdebug_verbose)
		pr_info("sdev_configure <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
	if (devip == NULL) {
		devip = find_build_dev_info(sdp);
		if (devip == NULL)
			return 1;  /* no resources, will be marked offline */
	}
	if (sdebug_ptype == TYPE_TAPE) {
		/* lazily allocate the backing store for the first partition */
		if (!devip->tape_blocks[0]) {
			devip->tape_blocks[0] =
				kcalloc(TAPE_UNITS, sizeof(struct tape_block),
					GFP_KERNEL);
			if (!devip->tape_blocks[0])
				return 1;
		}
		devip->tape_pending_nbr_partitions = -1;
		/* single partition spanning all TAPE_UNITS blocks */
		if (partition_tape(devip, 1, TAPE_UNITS, 0) < 0) {
			kfree(devip->tape_blocks[0]);
			devip->tape_blocks[0] = NULL;
			return 1;
		}
	}
	sdp->hostdata = devip;
	if (sdebug_no_uld)
		sdp->no_uld_attach = 1;
	config_cdb_len(sdp);

	if (sdebug_allow_restart)
		sdp->allow_restart = 1;

	/* debugfs failures are logged but not fatal to configuration */
	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
				sdebug_debugfs_root);
	if (IS_ERR_OR_NULL(devip->debugfs_entry))
		pr_info("%s: failed to create debugfs directory for device %s\n",
			__func__, dev_name(&sdp->sdev_gendev));

	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
				&sdebug_error_fops);
	if (IS_ERR_OR_NULL(dentry))
		pr_info("%s: failed to create error file for device %s\n",
			__func__, dev_name(&sdp->sdev_gendev));

	return 0;
}
6671
/*
 * Midlayer sdev_destroy hook: drain the error-injection list, remove
 * the device's debugfs directory, free tape storage, and mark the
 * sdebug_dev_info slot as reusable.
 */
static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
		(struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;

	if (sdebug_verbose)
		pr_info("sdev_destroy <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);

	if (!devip)
		return;

	/* unlink under the list lock; defer frees until readers are done */
	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		list_del_rcu(&err->list);
		call_rcu(&err->rcu, sdebug_err_free);
	}
	spin_unlock(&devip->list_lock);

	debugfs_remove(devip->debugfs_entry);

	if (sdp->type == TYPE_TAPE) {
		kfree(devip->tape_blocks[0]);
		devip->tape_blocks[0] = NULL;
	}

	/* make this slot available for re-use */
	devip->used = false;
	sdp->hostdata = NULL;
}
6703
6704 /* Returns true if cancelled or not running callback. */
6705 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
6706 {
6707         struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6708         struct sdebug_defer *sd_dp = &sdsc->sd_dp;
6709         enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t);
6710
6711         lockdep_assert_held(&sdsc->lock);
6712
6713         if (defer_t == SDEB_DEFER_HRT) {
6714                 int res = hrtimer_try_to_cancel(&sd_dp->hrt);
6715
6716                 switch (res) {
6717                 case -1: /* -1 It's executing the CB */
6718                         return false;
6719                 case 0: /* Not active, it must have already run */
6720                 case 1: /* Was active, we've now cancelled */
6721                 default:
6722                         return true;
6723                 }
6724         } else if (defer_t == SDEB_DEFER_WQ) {
6725                 /* Cancel if pending */
6726                 if (cancel_work(&sd_dp->ew.work))
6727                         return true;
6728                 /* callback may be running, so return false */
6729                 return false;
6730         } else if (defer_t == SDEB_DEFER_POLL) {
6731                 return true;
6732         }
6733
6734         return false;
6735 }
6736
6737 /*
6738  * Called from scsi_debug_abort() only, which is for timed-out cmd.
6739  */
6740 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
6741 {
6742         struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6743         unsigned long flags;
6744         bool res;
6745
6746         spin_lock_irqsave(&sdsc->lock, flags);
6747         res = scsi_debug_stop_cmnd(cmnd);
6748         spin_unlock_irqrestore(&sdsc->lock, flags);
6749
6750         return res;
6751 }
6752
6753 /*
6754  * All we can do is set the cmnd as internally aborted and wait for it to
6755  * finish. We cannot call scsi_done() as normal completion path may do that.
6756  */
6757 static bool sdebug_stop_cmnd(struct request *rq, void *data)
6758 {
6759         scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
6760
6761         return true;
6762 }
6763
6764 /* Deletes (stops) timers or work queues of all queued commands */
6765 static void stop_all_queued(void)
6766 {
6767         struct sdebug_host_info *sdhp;
6768
6769         mutex_lock(&sdebug_host_list_mutex);
6770         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6771                 struct Scsi_Host *shost = sdhp->shost;
6772
6773                 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
6774         }
6775         mutex_unlock(&sdebug_host_list_mutex);
6776 }
6777
6778 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
6779 {
6780         struct scsi_device *sdp = cmnd->device;
6781         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6782         struct sdebug_err_inject *err;
6783         unsigned char *cmd = cmnd->cmnd;
6784         int ret = 0;
6785
6786         if (devip == NULL)
6787                 return 0;
6788
6789         rcu_read_lock();
6790         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6791                 if (err->type == ERR_ABORT_CMD_FAILED &&
6792                     (err->cmd == cmd[0] || err->cmd == 0xff)) {
6793                         ret = !!err->cnt;
6794                         if (err->cnt < 0)
6795                                 err->cnt++;
6796
6797                         rcu_read_unlock();
6798                         return ret;
6799                 }
6800         }
6801         rcu_read_unlock();
6802
6803         return 0;
6804 }
6805
6806 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
6807 {
6808         bool aborted = scsi_debug_abort_cmnd(SCpnt);
6809         u8 *cmd = SCpnt->cmnd;
6810         u8 opcode = cmd[0];
6811
6812         ++num_aborts;
6813
6814         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6815                 sdev_printk(KERN_INFO, SCpnt->device,
6816                             "%s: command%s found\n", __func__,
6817                             aborted ? "" : " not");
6818
6819
6820         if (sdebug_fail_abort(SCpnt)) {
6821                 scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
6822                             opcode);
6823                 return FAILED;
6824         }
6825
6826         if (aborted == false)
6827                 return FAILED;
6828
6829         return SUCCESS;
6830 }
6831
6832 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
6833 {
6834         struct scsi_device *sdp = data;
6835         struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
6836
6837         if (scmd->device == sdp)
6838                 scsi_debug_abort_cmnd(scmd);
6839
6840         return true;
6841 }
6842
6843 /* Deletes (stops) timers or work queues of all queued commands per sdev */
6844 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
6845 {
6846         struct Scsi_Host *shost = sdp->host;
6847
6848         blk_mq_tagset_busy_iter(&shost->tag_set,
6849                                 scsi_debug_stop_all_queued_iter, sdp);
6850 }
6851
6852 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
6853 {
6854         struct scsi_device *sdp = cmnd->device;
6855         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6856         struct sdebug_err_inject *err;
6857         unsigned char *cmd = cmnd->cmnd;
6858         int ret = 0;
6859
6860         if (devip == NULL)
6861                 return 0;
6862
6863         rcu_read_lock();
6864         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6865                 if (err->type == ERR_LUN_RESET_FAILED &&
6866                     (err->cmd == cmd[0] || err->cmd == 0xff)) {
6867                         ret = !!err->cnt;
6868                         if (err->cnt < 0)
6869                                 err->cnt++;
6870
6871                         rcu_read_unlock();
6872                         return ret;
6873                 }
6874         }
6875         rcu_read_unlock();
6876
6877         return 0;
6878 }
6879
6880 static void scsi_tape_reset_clear(struct sdebug_dev_info *devip)
6881 {
6882         int i;
6883
6884         devip->tape_blksize = TAPE_DEF_BLKSIZE;
6885         devip->tape_density = TAPE_DEF_DENSITY;
6886         devip->tape_partition = 0;
6887         devip->tape_dce = 0;
6888         for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
6889                 devip->tape_location[i] = 0;
6890         devip->tape_pending_nbr_partitions = -1;
6891         /* Don't reset partitioning? */
6892 }
6893
6894 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
6895 {
6896         struct scsi_device *sdp = SCpnt->device;
6897         struct sdebug_dev_info *devip = sdp->hostdata;
6898         u8 *cmd = SCpnt->cmnd;
6899         u8 opcode = cmd[0];
6900
6901         ++num_dev_resets;
6902
6903         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6904                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6905
6906         scsi_debug_stop_all_queued(sdp);
6907         if (devip) {
6908                 set_bit(SDEBUG_UA_POR, devip->uas_bm);
6909                 if (SCpnt->device->type == TYPE_TAPE)
6910                         scsi_tape_reset_clear(devip);
6911         }
6912
6913         if (sdebug_fail_lun_reset(SCpnt)) {
6914                 scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
6915                 return FAILED;
6916         }
6917
6918         return SUCCESS;
6919 }
6920
6921 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
6922 {
6923         struct scsi_target *starget = scsi_target(cmnd->device);
6924         struct sdebug_target_info *targetip =
6925                 (struct sdebug_target_info *)starget->hostdata;
6926
6927         if (targetip)
6928                 return targetip->reset_fail;
6929
6930         return 0;
6931 }
6932
6933 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
6934 {
6935         struct scsi_device *sdp = SCpnt->device;
6936         struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6937         struct sdebug_dev_info *devip;
6938         u8 *cmd = SCpnt->cmnd;
6939         u8 opcode = cmd[0];
6940         int k = 0;
6941
6942         ++num_target_resets;
6943         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6944                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6945
6946         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6947                 if (devip->target == sdp->id) {
6948                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6949                         if (SCpnt->device->type == TYPE_TAPE)
6950                                 scsi_tape_reset_clear(devip);
6951                         ++k;
6952                 }
6953         }
6954
6955         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6956                 sdev_printk(KERN_INFO, sdp,
6957                             "%s: %d device(s) found in target\n", __func__, k);
6958
6959         if (sdebug_fail_target_reset(SCpnt)) {
6960                 scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
6961                             opcode);
6962                 return FAILED;
6963         }
6964
6965         return SUCCESS;
6966 }
6967
6968 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
6969 {
6970         struct scsi_device *sdp = SCpnt->device;
6971         struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
6972         struct sdebug_dev_info *devip;
6973         int k = 0;
6974
6975         ++num_bus_resets;
6976
6977         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6978                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
6979
6980         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6981                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
6982                 if (SCpnt->device->type == TYPE_TAPE)
6983                         scsi_tape_reset_clear(devip);
6984                 ++k;
6985         }
6986
6987         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
6988                 sdev_printk(KERN_INFO, sdp,
6989                             "%s: %d device(s) found in host\n", __func__, k);
6990         return SUCCESS;
6991 }
6992
6993 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
6994 {
6995         struct sdebug_host_info *sdbg_host;
6996         struct sdebug_dev_info *devip;
6997         int k = 0;
6998
6999         ++num_host_resets;
7000         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
7001                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
7002         mutex_lock(&sdebug_host_list_mutex);
7003         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
7004                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
7005                                     dev_list) {
7006                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
7007                         if (SCpnt->device->type == TYPE_TAPE)
7008                                 scsi_tape_reset_clear(devip);
7009                         ++k;
7010                 }
7011         }
7012         mutex_unlock(&sdebug_host_list_mutex);
7013         stop_all_queued();
7014         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
7015                 sdev_printk(KERN_INFO, SCpnt->device,
7016                             "%s: %d device(s) found\n", __func__, k);
7017         return SUCCESS;
7018 }
7019
/*
 * Write an MSDOS (MBR) partition table into the first sector of the ram
 * disk image at @ramp, dividing the capacity into sdebug_num_parts
 * equal, cylinder-aligned partitions.  No-op for stores under 1 MiB or
 * when no partitions are requested.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	/* first track is reserved for the partition table itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	max_part_secs = sectors_per_part;
	/* round each start down to a cylinder boundary; track the
	 * smallest resulting gap so all partitions get the same size */
	for (k = 1; k < sdebug_num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel ends loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;

		/* convert linear sector numbers to legacy CHS fields */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
7072
7073 static void block_unblock_all_queues(bool block)
7074 {
7075         struct sdebug_host_info *sdhp;
7076
7077         lockdep_assert_held(&sdebug_host_list_mutex);
7078
7079         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7080                 struct Scsi_Host *shost = sdhp->shost;
7081
7082                 if (block)
7083                         scsi_block_requests(shost);
7084                 else
7085                         scsi_unblock_requests(shost);
7086         }
7087 }
7088
7089 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
7090  * commands will be processed normally before triggers occur.
7091  */
7092 static void tweak_cmnd_count(void)
7093 {
7094         int count, modulo;
7095
7096         modulo = abs(sdebug_every_nth);
7097         if (modulo < 2)
7098                 return;
7099
7100         mutex_lock(&sdebug_host_list_mutex);
7101         block_unblock_all_queues(true);
7102         count = atomic_read(&sdebug_cmnd_count);
7103         atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
7104         block_unblock_all_queues(false);
7105         mutex_unlock(&sdebug_host_list_mutex);
7106 }
7107
/* Reset the driver-wide command/completion statistics counters. */
static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}
7115
7116 static bool inject_on_this_cmd(void)
7117 {
7118         if (sdebug_every_nth == 0)
7119                 return false;
7120         return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
7121 }
7122
7123 #define INCLUSIVE_TIMING_MAX_NS 1000000         /* 1 millisecond */
7124
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 *
 * @scsi_result: result to use when @pfp yields 0 (or devip is NULL)
 * @pfp: per-opcode response function producing cmnd->result
 * @delta_jiff: >0 delay in jiffies via hrtimer; 0 respond inline;
 *              <0 defer to a work queue
 * @ndelay: delay in nanoseconds (used when delta_jiff is not positive)
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	struct request *rq = scsi_cmd_to_rq(cmnd);
	bool polled = rq->cmd_flags & REQ_POLLED;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	unsigned long flags;
	u64 ns_from_boot = 0;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	/* no device info: fail the command from this thread */
	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (delta_jiff == 0)
		goto respond_in_thread;


	/* SDEBUG_OPT_RARE_TSF: occasionally inject TASK SET FULL when the
	 * queue is exactly at its depth */
	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
		     (scsi_result == 0))) {
		int num_in_q = scsi_device_busy(sdp);
		int qdepth = cmnd->device->queue_depth;

		if ((num_in_q == qdepth) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			scsi_result = device_qfull_result;

			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
					    __func__, num_in_q);
		}
	}

	sd_dp = &sdsc->sd_dp;

	/* timestamp needed for poll completion and short-ndelay accounting */
	if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* response function requested immediate completion */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			/* sdebug_random: pick a delay in [0, ns) */
			if (sdebug_random && ns < U32_MAX) {
				ns = get_random_u32_below((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = get_random_u32_below((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* call scsi_done() from this thread */
					scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			/* completion will be picked up by mq_poll */
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			/* schedule the invocation of scsi_done() for a later time */
			spin_lock_irqsave(&sdsc->lock, flags);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
			/*
			 * The completion handler will try to grab sqcp->lock,
			 * so there is no chance that the completion handler
			 * will call scsi_done() until we release the lock
			 * here (so ok to keep referencing sdsc).
			 */
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	} else {	/* jdelay < 0, use work queue */
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			sd_dp->aborted = true;
			atomic_set(&sdeb_inject_pending, 0);
			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
		}

		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			spin_lock_irqsave(&sdsc->lock, flags);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
			schedule_work(&sd_dp->ew.work);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	}

	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	scsi_done(cmnd);
	return 0;
}
7281
7282 /* Note: The following macros create attribute files in the
7283    /sys/module/scsi_debug/parameters directory. Unfortunately this
7284    driver is unaware of a change and cannot trigger auxiliary actions
7285    as it can when the corresponding attribute in the
7286    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
7287  */
/*
 * Module parameters. The first argument is the name exposed under
 * /sys/module/scsi_debug/parameters/; the second is the backing sdebug_*
 * variable. S_IRUGO (0444) parameters are fixed after module load;
 * S_IRUGO | S_IWUSR (0644) ones may be changed at run time by root.
 */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
/* INQUIRY strings are fixed-size buffers; module_param_string() bounds them. */
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(poll_queues, poll_queues, int, S_IRUGO);
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
7365
7366 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
7367 MODULE_DESCRIPTION("SCSI debug adapter driver");
7368 MODULE_LICENSE("GPL");
7369 MODULE_VERSION(SDEBUG_VERSION);
7370
7371 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
7372 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
7373 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
7374 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
7375 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
7376 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
7377 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
7378 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
7379 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
7380 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
7381 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
7382 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
7383 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
7384 MODULE_PARM_DESC(host_max_queue,
7385                  "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
7386 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
7387 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
7388                  SDEBUG_VERSION "\")");
7389 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
7390 MODULE_PARM_DESC(lbprz,
7391                  "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
7392 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
7393 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
7394 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
7395 MODULE_PARM_DESC(atomic_write, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)");
7396 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
7397 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
7398 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
7399 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
7400 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
7401 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
7402 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
7403 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
7404 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
7405 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
7406 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
7407 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
7408 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
7409 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
7410 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
7411 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
7412 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
7413 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
7414 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
7415 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
7416 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
7417 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
7418 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
7419 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
7420 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
7421 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
7422 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
7423 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
7424 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
7425 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
7426 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
7427 MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)");
7428 MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)");
7429 MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)");
7430 MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)");
7431 MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)");
7432 MODULE_PARM_DESC(uuid_ctl,
7433                  "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
7434 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
7435 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
7436 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
7437 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
7438 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
7439 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
7440 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
7441 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
7442 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
7443 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
7444
7445 #define SDEBUG_INFO_LEN 256
7446 static char sdebug_info[SDEBUG_INFO_LEN];
7447
7448 static const char *scsi_debug_info(struct Scsi_Host *shp)
7449 {
7450         int k;
7451
7452         k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
7453                       my_name, SDEBUG_VERSION, sdebug_version_date);
7454         if (k >= (SDEBUG_INFO_LEN - 1))
7455                 return sdebug_info;
7456         scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
7457                   "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
7458                   sdebug_dev_size_mb, sdebug_opts, submit_queues,
7459                   "statistics", (int)sdebug_statistics);
7460         return sdebug_info;
7461 }
7462
7463 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
7464 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
7465                                  int length)
7466 {
7467         char arr[16];
7468         int opts;
7469         int minLen = length > 15 ? 15 : length;
7470
7471         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
7472                 return -EACCES;
7473         memcpy(arr, buffer, minLen);
7474         arr[minLen] = '\0';
7475         if (1 != sscanf(arr, "%d", &opts))
7476                 return -EINVAL;
7477         sdebug_opts = opts;
7478         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7479         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7480         if (sdebug_every_nth != 0)
7481                 tweak_cmnd_count();
7482         return length;
7483 }
7484
/*
 * Accumulator passed to sdebug_submit_queue_iter() while walking busy
 * requests of one hardware queue: records the lowest and highest busy
 * tag observed on queue_num.
 */
struct sdebug_submit_queue_data {
	int *first;	/* in/out: lowest busy tag; caller initializes to -1 */
	int *last;	/* in/out: highest busy tag seen so far */
	int queue_num;	/* hw queue index being inspected */
};
7490
7491 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
7492 {
7493         struct sdebug_submit_queue_data *data = opaque;
7494         u32 unique_tag = blk_mq_unique_tag(rq);
7495         u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7496         u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
7497         int queue_num = data->queue_num;
7498
7499         if (hwq != queue_num)
7500                 return true;
7501
7502         /* Rely on iter'ing in ascending tag order */
7503         if (*data->first == -1)
7504                 *data->first = *data->last = tag;
7505         else
7506                 *data->last = tag;
7507
7508         return true;
7509 }
7510
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct sdebug_host_info *sdhp;
	int j;

	/* Driver-wide configuration and counters. */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf),
		   atomic_read(&sdeb_mq_poll_count));

	/* Per hw queue: first/last busy tag, gathered by sdebug_submit_queue_iter(). */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0; j < submit_queues; ++j) {
		int f = -1, l = -1;	/* -1 means "no busy tag seen" */
		struct sdebug_submit_queue_data data = {
			.queue_num = j,
			.first = &f,
			.last = &l,
		};
		seq_printf(m, "  queue %d:\n", j);
		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
					&data);
		if (f >= 0) {
			seq_printf(m, "    BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	if (!xa_empty(per_store_ap)) {
		bool niu;	/* entry carries the "not in use" xarray mark */
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		/* Map each host to its backing store index ... */
		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		/* ... then dump the per-store xarray itself. */
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
7590
7591 static ssize_t delay_show(struct device_driver *ddp, char *buf)
7592 {
7593         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
7594 }
7595 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
7596  * of delay is jiffies.
7597  */
7598 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
7599                            size_t count)
7600 {
7601         int jdelay, res;
7602
7603         if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
7604                 res = count;
7605                 if (sdebug_jdelay != jdelay) {
7606                         struct sdebug_host_info *sdhp;
7607
7608                         mutex_lock(&sdebug_host_list_mutex);
7609                         block_unblock_all_queues(true);
7610
7611                         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7612                                 struct Scsi_Host *shost = sdhp->shost;
7613
7614                                 if (scsi_host_busy(shost)) {
7615                                         res = -EBUSY;   /* queued commands */
7616                                         break;
7617                                 }
7618                         }
7619                         if (res > 0) {
7620                                 sdebug_jdelay = jdelay;
7621                                 sdebug_ndelay = 0;
7622                         }
7623                         block_unblock_all_queues(false);
7624                         mutex_unlock(&sdebug_host_list_mutex);
7625                 }
7626                 return res;
7627         }
7628         return -EINVAL;
7629 }
7630 static DRIVER_ATTR_RW(delay);
7631
7632 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
7633 {
7634         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
7635 }
7636 /* Returns -EBUSY if ndelay is being changed and commands are queued */
7637 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
7638 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
7639                             size_t count)
7640 {
7641         int ndelay, res;
7642
7643         if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
7644             (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
7645                 res = count;
7646                 if (sdebug_ndelay != ndelay) {
7647                         struct sdebug_host_info *sdhp;
7648
7649                         mutex_lock(&sdebug_host_list_mutex);
7650                         block_unblock_all_queues(true);
7651
7652                         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7653                                 struct Scsi_Host *shost = sdhp->shost;
7654
7655                                 if (scsi_host_busy(shost)) {
7656                                         res = -EBUSY;   /* queued commands */
7657                                         break;
7658                                 }
7659                         }
7660
7661                         if (res > 0) {
7662                                 sdebug_ndelay = ndelay;
7663                                 sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
7664                                                         : DEF_JDELAY;
7665                         }
7666                         block_unblock_all_queues(false);
7667                         mutex_unlock(&sdebug_host_list_mutex);
7668                 }
7669                 return res;
7670         }
7671         return -EINVAL;
7672 }
7673 static DRIVER_ATTR_RW(ndelay);
7674
7675 static ssize_t opts_show(struct device_driver *ddp, char *buf)
7676 {
7677         return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
7678 }
7679
7680 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
7681                           size_t count)
7682 {
7683         int opts;
7684         char work[20];
7685
7686         if (sscanf(buf, "%10s", work) == 1) {
7687                 if (strncasecmp(work, "0x", 2) == 0) {
7688                         if (kstrtoint(work + 2, 16, &opts) == 0)
7689                                 goto opts_done;
7690                 } else {
7691                         if (kstrtoint(work, 10, &opts) == 0)
7692                                 goto opts_done;
7693                 }
7694         }
7695         return -EINVAL;
7696 opts_done:
7697         sdebug_opts = opts;
7698         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7699         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7700         tweak_cmnd_count();
7701         return count;
7702 }
7703 static DRIVER_ATTR_RW(opts);
7704
7705 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
7706 {
7707         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
7708 }
7709 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
7710                            size_t count)
7711 {
7712         int n;
7713
7714         /* Cannot change from or to TYPE_ZBC with sysfs */
7715         if (sdebug_ptype == TYPE_ZBC)
7716                 return -EINVAL;
7717
7718         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7719                 if (n == TYPE_ZBC)
7720                         return -EINVAL;
7721                 sdebug_ptype = n;
7722                 return count;
7723         }
7724         return -EINVAL;
7725 }
7726 static DRIVER_ATTR_RW(ptype);
7727
7728 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
7729 {
7730         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
7731 }
7732 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
7733                             size_t count)
7734 {
7735         int n;
7736
7737         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7738                 sdebug_dsense = n;
7739                 return count;
7740         }
7741         return -EINVAL;
7742 }
7743 static DRIVER_ATTR_RW(dsense);
7744
7745 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
7746 {
7747         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
7748 }
/*
 * Sysfs store for 'fake_rw'. Transitioning 1 --> 0 (re-enable real data
 * transfers) sets up a single shared backing store and points every host
 * at it; transitioning 0 --> 1 releases all stores apart from the first.
 * A write that does not change the (normalized 0/1) value is a no-op.
 */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		/* normalize both values to 0/1 before comparing */
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* reuse the first store; drop its idle mark */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					/* old store becomes idle */
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
7792
7793 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
7794 {
7795         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
7796 }
7797 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
7798                               size_t count)
7799 {
7800         int n;
7801
7802         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7803                 sdebug_no_lun_0 = n;
7804                 return count;
7805         }
7806         return -EINVAL;
7807 }
7808 static DRIVER_ATTR_RW(no_lun_0);
7809
7810 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
7811 {
7812         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
7813 }
7814 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
7815                               size_t count)
7816 {
7817         int n;
7818
7819         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7820                 sdebug_num_tgts = n;
7821                 sdebug_max_tgts_luns();
7822                 return count;
7823         }
7824         return -EINVAL;
7825 }
7826 static DRIVER_ATTR_RW(num_tgts);
7827
7828 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
7829 {
7830         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
7831 }
7832 static DRIVER_ATTR_RO(dev_size_mb);
7833
7834 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
7835 {
7836         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
7837 }
7838
7839 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
7840                                     size_t count)
7841 {
7842         bool v;
7843
7844         if (kstrtobool(buf, &v))
7845                 return -EINVAL;
7846
7847         sdebug_per_host_store = v;
7848         return count;
7849 }
7850 static DRIVER_ATTR_RW(per_host_store);
7851
7852 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
7853 {
7854         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
7855 }
7856 static DRIVER_ATTR_RO(num_parts);
7857
7858 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
7859 {
7860         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
7861 }
7862 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
7863                                size_t count)
7864 {
7865         int nth;
7866         char work[20];
7867
7868         if (sscanf(buf, "%10s", work) == 1) {
7869                 if (strncasecmp(work, "0x", 2) == 0) {
7870                         if (kstrtoint(work + 2, 16, &nth) == 0)
7871                                 goto every_nth_done;
7872                 } else {
7873                         if (kstrtoint(work, 10, &nth) == 0)
7874                                 goto every_nth_done;
7875                 }
7876         }
7877         return -EINVAL;
7878
7879 every_nth_done:
7880         sdebug_every_nth = nth;
7881         if (nth && !sdebug_statistics) {
7882                 pr_info("every_nth needs statistics=1, set it\n");
7883                 sdebug_statistics = true;
7884         }
7885         tweak_cmnd_count();
7886         return count;
7887 }
7888 static DRIVER_ATTR_RW(every_nth);
7889
7890 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
7891 {
7892         return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
7893 }
7894 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
7895                                 size_t count)
7896 {
7897         int n;
7898         bool changed;
7899
7900         if (kstrtoint(buf, 0, &n))
7901                 return -EINVAL;
7902         if (n >= 0) {
7903                 if (n > (int)SAM_LUN_AM_FLAT) {
7904                         pr_warn("only LUN address methods 0 and 1 are supported\n");
7905                         return -EINVAL;
7906                 }
7907                 changed = ((int)sdebug_lun_am != n);
7908                 sdebug_lun_am = n;
7909                 if (changed && sdebug_scsi_level >= 5) {        /* >= SPC-3 */
7910                         struct sdebug_host_info *sdhp;
7911                         struct sdebug_dev_info *dp;
7912
7913                         mutex_lock(&sdebug_host_list_mutex);
7914                         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7915                                 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
7916                                         set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
7917                                 }
7918                         }
7919                         mutex_unlock(&sdebug_host_list_mutex);
7920                 }
7921                 return count;
7922         }
7923         return -EINVAL;
7924 }
7925 static DRIVER_ATTR_RW(lun_format);
7926
7927 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
7928 {
7929         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
7930 }
7931 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
7932                               size_t count)
7933 {
7934         int n;
7935         bool changed;
7936
7937         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7938                 if (n > 256) {
7939                         pr_warn("max_luns can be no more than 256\n");
7940                         return -EINVAL;
7941                 }
7942                 changed = (sdebug_max_luns != n);
7943                 sdebug_max_luns = n;
7944                 sdebug_max_tgts_luns();
7945                 if (changed && (sdebug_scsi_level >= 5)) {      /* >= SPC-3 */
7946                         struct sdebug_host_info *sdhp;
7947                         struct sdebug_dev_info *dp;
7948
7949                         mutex_lock(&sdebug_host_list_mutex);
7950                         list_for_each_entry(sdhp, &sdebug_host_list,
7951                                             host_list) {
7952                                 list_for_each_entry(dp, &sdhp->dev_info_list,
7953                                                     dev_list) {
7954                                         set_bit(SDEBUG_UA_LUNS_CHANGED,
7955                                                 dp->uas_bm);
7956                                 }
7957                         }
7958                         mutex_unlock(&sdebug_host_list_mutex);
7959                 }
7960                 return count;
7961         }
7962         return -EINVAL;
7963 }
7964 static DRIVER_ATTR_RW(max_luns);
7965
7966 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
7967 {
7968         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
7969 }
7970 /* N.B. max_queue can be changed while there are queued commands. In flight
7971  * commands beyond the new max_queue will be completed. */
7972 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
7973                                size_t count)
7974 {
7975         int n;
7976
7977         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
7978             (n <= SDEBUG_CANQUEUE) &&
7979             (sdebug_host_max_queue == 0)) {
7980                 mutex_lock(&sdebug_host_list_mutex);
7981
7982                 /* We may only change sdebug_max_queue when we have no shosts */
7983                 if (list_empty(&sdebug_host_list))
7984                         sdebug_max_queue = n;
7985                 else
7986                         count = -EBUSY;
7987                 mutex_unlock(&sdebug_host_list_mutex);
7988                 return count;
7989         }
7990         return -EINVAL;
7991 }
7992 static DRIVER_ATTR_RW(max_queue);
7993
7994 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
7995 {
7996         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
7997 }
7998
7999 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
8000 {
8001         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
8002 }
8003
8004 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
8005 {
8006         bool v;
8007
8008         if (kstrtobool(buf, &v))
8009                 return -EINVAL;
8010
8011         sdebug_no_rwlock = v;
8012         return count;
8013 }
8014 static DRIVER_ATTR_RW(no_rwlock);
8015
8016 /*
8017  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
8018  * in range [0, sdebug_host_max_queue), we can't change it.
8019  */
8020 static DRIVER_ATTR_RO(host_max_queue);
8021
8022 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
8023 {
8024         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
8025 }
8026 static DRIVER_ATTR_RO(no_uld);
8027
8028 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
8029 {
8030         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
8031 }
8032 static DRIVER_ATTR_RO(scsi_level);
8033
8034 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
8035 {
8036         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
8037 }
8038 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
8039                                 size_t count)
8040 {
8041         int n;
8042         bool changed;
8043
8044         /* Ignore capacity change for ZBC drives for now */
8045         if (sdeb_zbc_in_use)
8046                 return -ENOTSUPP;
8047
8048         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8049                 changed = (sdebug_virtual_gb != n);
8050                 sdebug_virtual_gb = n;
8051                 sdebug_capacity = get_sdebug_capacity();
8052                 if (changed) {
8053                         struct sdebug_host_info *sdhp;
8054                         struct sdebug_dev_info *dp;
8055
8056                         mutex_lock(&sdebug_host_list_mutex);
8057                         list_for_each_entry(sdhp, &sdebug_host_list,
8058                                             host_list) {
8059                                 list_for_each_entry(dp, &sdhp->dev_info_list,
8060                                                     dev_list) {
8061                                         set_bit(SDEBUG_UA_CAPACITY_CHANGED,
8062                                                 dp->uas_bm);
8063                                 }
8064                         }
8065                         mutex_unlock(&sdebug_host_list_mutex);
8066                 }
8067                 return count;
8068         }
8069         return -EINVAL;
8070 }
8071 static DRIVER_ATTR_RW(virtual_gb);
8072
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/*
 * Writing a positive number adds that many pseudo hosts; a negative number
 * removes that many. When per-host stores are wanted (fake_rw off and
 * per_host_store set), a store marked not-in-use is re-used before a new
 * one is created.
 */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* scan for a parked (not-in-use) store */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
8116
8117 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
8118 {
8119         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
8120 }
8121 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
8122                                     size_t count)
8123 {
8124         int n;
8125
8126         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8127                 sdebug_vpd_use_hostno = n;
8128                 return count;
8129         }
8130         return -EINVAL;
8131 }
8132 static DRIVER_ATTR_RW(vpd_use_hostno);
8133
8134 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
8135 {
8136         return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
8137 }
8138 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
8139                                 size_t count)
8140 {
8141         int n;
8142
8143         if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
8144                 if (n > 0)
8145                         sdebug_statistics = true;
8146                 else {
8147                         clear_queue_stats();
8148                         sdebug_statistics = false;
8149                 }
8150                 return count;
8151         }
8152         return -EINVAL;
8153 }
8154 static DRIVER_ATTR_RW(statistics);
8155
8156 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
8157 {
8158         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
8159 }
8160 static DRIVER_ATTR_RO(sector_size);
8161
8162 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
8163 {
8164         return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
8165 }
8166 static DRIVER_ATTR_RO(submit_queues);
8167
8168 static ssize_t dix_show(struct device_driver *ddp, char *buf)
8169 {
8170         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
8171 }
8172 static DRIVER_ATTR_RO(dix);
8173
8174 static ssize_t dif_show(struct device_driver *ddp, char *buf)
8175 {
8176         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
8177 }
8178 static DRIVER_ATTR_RO(dif);
8179
8180 static ssize_t guard_show(struct device_driver *ddp, char *buf)
8181 {
8182         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
8183 }
8184 static DRIVER_ATTR_RO(guard);
8185
8186 static ssize_t ato_show(struct device_driver *ddp, char *buf)
8187 {
8188         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
8189 }
8190 static DRIVER_ATTR_RO(ato);
8191
/*
 * Show which blocks of store 0 are provisioned, as a printable bitmap
 * range list ("%*pbl"). Without logical block provisioning the whole
 * sector range is reported as mapped.
 */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			/* PAGE_SIZE - 1 leaves room for the "\n" + NUL below */
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
8213
8214 static ssize_t random_show(struct device_driver *ddp, char *buf)
8215 {
8216         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
8217 }
8218
8219 static ssize_t random_store(struct device_driver *ddp, const char *buf,
8220                             size_t count)
8221 {
8222         bool v;
8223
8224         if (kstrtobool(buf, &v))
8225                 return -EINVAL;
8226
8227         sdebug_random = v;
8228         return count;
8229 }
8230 static DRIVER_ATTR_RW(random);
8231
8232 static ssize_t removable_show(struct device_driver *ddp, char *buf)
8233 {
8234         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
8235 }
8236 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
8237                                size_t count)
8238 {
8239         int n;
8240
8241         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8242                 sdebug_removable = (n > 0);
8243                 return count;
8244         }
8245         return -EINVAL;
8246 }
8247 static DRIVER_ATTR_RW(removable);
8248
8249 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
8250 {
8251         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
8252 }
8253 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
8254 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
8255                                size_t count)
8256 {
8257         int n;
8258
8259         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8260                 sdebug_host_lock = (n > 0);
8261                 return count;
8262         }
8263         return -EINVAL;
8264 }
8265 static DRIVER_ATTR_RW(host_lock);
8266
8267 static ssize_t strict_show(struct device_driver *ddp, char *buf)
8268 {
8269         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
8270 }
8271 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
8272                             size_t count)
8273 {
8274         int n;
8275
8276         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8277                 sdebug_strict = (n > 0);
8278                 return count;
8279         }
8280         return -EINVAL;
8281 }
8282 static DRIVER_ATTR_RW(strict);
8283
8284 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
8285 {
8286         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
8287 }
8288 static DRIVER_ATTR_RO(uuid_ctl);
8289
8290 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
8291 {
8292         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
8293 }
8294 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
8295                              size_t count)
8296 {
8297         int ret, n;
8298
8299         ret = kstrtoint(buf, 0, &n);
8300         if (ret)
8301                 return ret;
8302         sdebug_cdb_len = n;
8303         all_config_cdb_len();
8304         return count;
8305 }
8306 static DRIVER_ATTR_RW(cdb_len);
8307
/*
 * Accepted spellings for the zbc= parameter. All three tables are indexed
 * by the BLK_ZONED_* enum and tried in turn by sdeb_zbc_model_str().
 */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
8325
8326 static int sdeb_zbc_model_str(const char *cp)
8327 {
8328         int res = sysfs_match_string(zbc_model_strs_a, cp);
8329
8330         if (res < 0) {
8331                 res = sysfs_match_string(zbc_model_strs_b, cp);
8332                 if (res < 0) {
8333                         res = sysfs_match_string(zbc_model_strs_c, cp);
8334                         if (res < 0)
8335                                 return -EINVAL;
8336                 }
8337         }
8338         return res;
8339 }
8340
8341 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
8342 {
8343         return scnprintf(buf, PAGE_SIZE, "%s\n",
8344                          zbc_model_strs_a[sdeb_zbc_model]);
8345 }
8346 static DRIVER_ATTR_RO(zbc);
8347
8348 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
8349 {
8350         return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
8351 }
8352 static DRIVER_ATTR_RO(tur_ms_to_ready);
8353
8354 static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
8355 {
8356         char *p = buf, *end = buf + PAGE_SIZE;
8357         int i;
8358
8359         for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
8360                 p += scnprintf(p, end - p, "%d %ld\n", i,
8361                                atomic_long_read(&writes_by_group_number[i]));
8362
8363         return p - buf;
8364 }
8365
8366 static ssize_t group_number_stats_store(struct device_driver *ddp,
8367                                         const char *buf, size_t count)
8368 {
8369         int i;
8370
8371         for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
8372                 atomic_long_set(&writes_by_group_number[i], 0);
8373
8374         return count;
8375 }
8376 static DRIVER_ATTR_RW(group_number_stats);
8377
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */

/* NULL-terminated table consumed via ATTRIBUTE_GROUPS() below. */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	&driver_attr_group_number_stats.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
8428
/* parent device for every pseudo adapter registered below */
static struct device *pseudo_primary;

/*
 * Module init: validate module parameters, size the ram disk and fake
 * geometry, optionally create the first backing store, register the
 * pseudo bus and driver, then build the initial set of hosts.
 */
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	/* --- parameter validation; nothing allocated before this passes --- */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	/* more than 256 LUNs needs the flat addressing method */
	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0)
			return k;
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			return -EINVAL;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	/* --- derive ram disk size and capacity --- */
	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			return -EINVAL;
		}
	}

	/* --- create the first backing store unless fake_rw is active --- */
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0)
			return idx;
	}

	/* --- register root device, pseudo bus and driver, in that order --- */
	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
		pr_info("%s: failed to create initial debugfs directory\n", __func__);

	/* first host re-uses the store created above; the rest may add more */
	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
	return ret;
}
8665
8666 static void __exit scsi_debug_exit(void)
8667 {
8668         int k = sdebug_num_hosts;
8669
8670         for (; k; k--)
8671                 sdebug_do_remove_host(true);
8672         driver_unregister(&sdebug_driverfs_driver);
8673         bus_unregister(&pseudo_lld_bus);
8674         root_device_unregister(pseudo_primary);
8675
8676         sdebug_erase_all_stores(false);
8677         xa_destroy(per_store_ap);
8678         debugfs_remove(sdebug_debugfs_root);
8679 }
8680
/* NOTE: device_initcall is used rather than module_init; for a modular
 * build the two are equivalent. */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
8683
/* .release callback for a pseudo adapter device: free its host info. */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(dev_to_sdebug_host(dev));
}
8691
8692 /* idx must be valid, if sip is NULL then it will be obtained using idx */
8693 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
8694 {
8695         if (idx < 0)
8696                 return;
8697         if (!sip) {
8698                 if (xa_empty(per_store_ap))
8699                         return;
8700                 sip = xa_load(per_store_ap, idx);
8701                 if (!sip)
8702                         return;
8703         }
8704         vfree(sip->map_storep);
8705         vfree(sip->dif_storep);
8706         vfree(sip->storep);
8707         xa_erase(per_store_ap, idx);
8708         kfree(sip);
8709 }
8710
/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			/* keep the first store found; erase all the rest */
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	/* still true only if the caller wanted to keep the first store but
	 * the xarray was empty; fall back to the very first index */
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
8726
8727 /*
8728  * Returns store xarray new element index (idx) if >=0 else negated errno.
8729  * Limit the number of stores to 65536.
8730  */
8731 static int sdebug_add_store(void)
8732 {
8733         int res;
8734         u32 n_idx;
8735         unsigned long iflags;
8736         unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
8737         struct sdeb_store_info *sip = NULL;
8738         struct xa_limit xal = { .max = 1 << 16, .min = 0 };
8739
8740         sip = kzalloc(sizeof(*sip), GFP_KERNEL);
8741         if (!sip)
8742                 return -ENOMEM;
8743
8744         xa_lock_irqsave(per_store_ap, iflags);
8745         res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
8746         if (unlikely(res < 0)) {
8747                 xa_unlock_irqrestore(per_store_ap, iflags);
8748                 kfree(sip);
8749                 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
8750                 return res;
8751         }
8752         sdeb_most_recent_idx = n_idx;
8753         if (sdeb_first_idx < 0)
8754                 sdeb_first_idx = n_idx;
8755         xa_unlock_irqrestore(per_store_ap, iflags);
8756
8757         res = -ENOMEM;
8758         sip->storep = vzalloc(sz);
8759         if (!sip->storep) {
8760                 pr_err("user data oom\n");
8761                 goto err;
8762         }
8763         if (sdebug_num_parts > 0)
8764                 sdebug_build_parts(sip->storep, sz);
8765
8766         /* DIF/DIX: what T10 calls Protection Information (PI) */
8767         if (sdebug_dix) {
8768                 int dif_size;
8769
8770                 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
8771                 sip->dif_storep = vmalloc(dif_size);
8772
8773                 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
8774                         sip->dif_storep);
8775
8776                 if (!sip->dif_storep) {
8777                         pr_err("DIX oom\n");
8778                         goto err;
8779                 }
8780                 memset(sip->dif_storep, 0xff, dif_size);
8781         }
8782         /* Logical Block Provisioning */
8783         if (scsi_debug_lbp()) {
8784                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
8785                 sip->map_storep = vmalloc(array_size(sizeof(long),
8786                                                      BITS_TO_LONGS(map_size)));
8787
8788                 pr_info("%lu provisioning blocks\n", map_size);
8789
8790                 if (!sip->map_storep) {
8791                         pr_err("LBP map oom\n");
8792                         goto err;
8793                 }
8794
8795                 bitmap_zero(sip->map_storep, map_size);
8796
8797                 /* Map first 1KB for partition table */
8798                 if (sdebug_num_parts)
8799                         map_region(sip, 0, 2);
8800         }
8801
8802         rwlock_init(&sip->macc_data_lck);
8803         rwlock_init(&sip->macc_meta_lck);
8804         rwlock_init(&sip->macc_sector_lck);
8805         return (int)n_idx;
8806 err:
8807         sdebug_erase_store((int)n_idx, sip);
8808         pr_warn("%s: failed, errno=%d\n", __func__, -res);
8809         return res;
8810 }
8811
/*
 * Create one pseudo host bound to store index per_host_idx (or to the first
 * store when negative), populate its target/LUN device list and register it
 * with the driver model. Returns 0 on success else a negated errno.
 */
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	/* claim the store: drop its not-in-use mark if currently set */
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	mutex_lock(&sdebug_host_list_mutex);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		mutex_lock(&sdebug_host_list_mutex);
		list_del(&sdbg_host->host_list);
		mutex_unlock(&sdebug_host_list_mutex);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	/* once .release is set the device must be put, not kfree'd directly */
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}
8870
8871 static int sdebug_do_add_host(bool mk_new_store)
8872 {
8873         int ph_idx = sdeb_most_recent_idx;
8874
8875         if (mk_new_store) {
8876                 ph_idx = sdebug_add_store();
8877                 if (ph_idx < 0)
8878                         return ph_idx;
8879         }
8880         return sdebug_add_host_helper(ph_idx);
8881 }
8882
/*
 * Remove the most recently added sdebug pseudo host.
 *
 * @the_end: true during module unload; the per-store NOT_IN_USE
 *	bookkeeping is then skipped because every store is about to be
 *	erased anyway.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	mutex_lock(&sdebug_host_list_mutex);
	if (!list_empty(&sdebug_host_list)) {
		/* Pick the last (most recently added) host on the list. */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* Is any other host still sharing this backing store? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			/* No other user: mark the store as re-usable. */
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
8922
8923 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
8924 {
8925         struct sdebug_dev_info *devip = sdev->hostdata;
8926
8927         if (!devip)
8928                 return  -ENODEV;
8929
8930         mutex_lock(&sdebug_host_list_mutex);
8931         block_unblock_all_queues(true);
8932
8933         if (qdepth > SDEBUG_CANQUEUE) {
8934                 qdepth = SDEBUG_CANQUEUE;
8935                 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
8936                         qdepth, SDEBUG_CANQUEUE);
8937         }
8938         if (qdepth < 1)
8939                 qdepth = 1;
8940         if (qdepth != sdev->queue_depth)
8941                 scsi_change_queue_depth(sdev, qdepth);
8942
8943         block_unblock_all_queues(false);
8944         mutex_unlock(&sdebug_host_list_mutex);
8945
8946         if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
8947                 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
8948
8949         return sdev->queue_depth;
8950 }
8951
8952 static bool fake_timeout(struct scsi_cmnd *scp)
8953 {
8954         if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
8955                 if (sdebug_every_nth < -1)
8956                         sdebug_every_nth = -1;
8957                 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
8958                         return true; /* ignore command causing timeout */
8959                 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
8960                          scsi_medium_access_command(scp))
8961                         return true; /* time out reads and writes */
8962         }
8963         return false;
8964 }
8965
/*
 * Response to TUR or media access command when device stopped.
 *
 * stopped state 2 means the device is simulating "becoming ready" after
 * power-on, governed by the sdeb_tur_ms_to_ready module parameter; once
 * that interval has elapsed the state is cleared and 0 is returned.  Any
 * other stopped state reports that an initializing command is required.
 * Non-zero returns are check_condition_result with sense data set.
 */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			/* Report the remaining time (clamped to the full interval). */
			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
9008
9009 static void sdebug_map_queues(struct Scsi_Host *shost)
9010 {
9011         int i, qoff;
9012
9013         if (shost->nr_hw_queues == 1)
9014                 return;
9015
9016         for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
9017                 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
9018
9019                 map->nr_queues  = 0;
9020
9021                 if (i == HCTX_TYPE_DEFAULT)
9022                         map->nr_queues = submit_queues - poll_queues;
9023                 else if (i == HCTX_TYPE_POLL)
9024                         map->nr_queues = poll_queues;
9025
9026                 if (!map->nr_queues) {
9027                         BUG_ON(i == HCTX_TYPE_DEFAULT);
9028                         continue;
9029                 }
9030
9031                 map->queue_offset = qoff;
9032                 blk_mq_map_queues(map);
9033
9034                 qoff += map->nr_queues;
9035         }
9036 }
9037
/* Per-invocation context handed to sdebug_blk_mq_poll_iter(). */
struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;	/* hw queue being polled */
	int *num_entries;	/* incremented per completed command */
};
9042
9043 /*
9044  * We don't handle aborted commands here, but it does not seem possible to have
9045  * aborted polled commands from schedule_resp()
9046  */
9047 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
9048 {
9049         struct sdebug_blk_mq_poll_data *data = opaque;
9050         struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
9051         struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
9052         struct sdebug_defer *sd_dp;
9053         u32 unique_tag = blk_mq_unique_tag(rq);
9054         u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
9055         unsigned long flags;
9056         int queue_num = data->queue_num;
9057         ktime_t time;
9058
9059         /* We're only interested in one queue for this iteration */
9060         if (hwq != queue_num)
9061                 return true;
9062
9063         /* Subsequent checks would fail if this failed, but check anyway */
9064         if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
9065                 return true;
9066
9067         time = ktime_get_boottime();
9068
9069         spin_lock_irqsave(&sdsc->lock, flags);
9070         sd_dp = &sdsc->sd_dp;
9071         if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
9072                 spin_unlock_irqrestore(&sdsc->lock, flags);
9073                 return true;
9074         }
9075
9076         if (time < sd_dp->cmpl_ts) {
9077                 spin_unlock_irqrestore(&sdsc->lock, flags);
9078                 return true;
9079         }
9080         spin_unlock_irqrestore(&sdsc->lock, flags);
9081
9082         if (sdebug_statistics) {
9083                 atomic_inc(&sdebug_completions);
9084                 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
9085                         atomic_inc(&sdebug_miss_cpus);
9086         }
9087
9088         scsi_done(cmd); /* callback to mid level */
9089         (*data->num_entries)++;
9090         return true;
9091 }
9092
9093 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
9094 {
9095         int num_entries = 0;
9096         struct sdebug_blk_mq_poll_data data = {
9097                 .queue_num = queue_num,
9098                 .num_entries = &num_entries,
9099         };
9100
9101         blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
9102                                 &data);
9103
9104         if (num_entries > 0)
9105                 atomic_add(num_entries, &sdeb_mq_poll_count);
9106         return num_entries;
9107 }
9108
9109 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
9110 {
9111         struct scsi_device *sdp = cmnd->device;
9112         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9113         struct sdebug_err_inject *err;
9114         unsigned char *cmd = cmnd->cmnd;
9115         int ret = 0;
9116
9117         if (devip == NULL)
9118                 return 0;
9119
9120         rcu_read_lock();
9121         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9122                 if (err->type == ERR_TMOUT_CMD &&
9123                     (err->cmd == cmd[0] || err->cmd == 0xff)) {
9124                         ret = !!err->cnt;
9125                         if (err->cnt < 0)
9126                                 err->cnt++;
9127
9128                         rcu_read_unlock();
9129                         return ret;
9130                 }
9131         }
9132         rcu_read_unlock();
9133
9134         return 0;
9135 }
9136
9137 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
9138 {
9139         struct scsi_device *sdp = cmnd->device;
9140         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9141         struct sdebug_err_inject *err;
9142         unsigned char *cmd = cmnd->cmnd;
9143         int ret = 0;
9144
9145         if (devip == NULL)
9146                 return 0;
9147
9148         rcu_read_lock();
9149         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9150                 if (err->type == ERR_FAIL_QUEUE_CMD &&
9151                     (err->cmd == cmd[0] || err->cmd == 0xff)) {
9152                         ret = err->cnt ? err->queuecmd_ret : 0;
9153                         if (err->cnt < 0)
9154                                 err->cnt++;
9155
9156                         rcu_read_unlock();
9157                         return ret;
9158                 }
9159         }
9160         rcu_read_unlock();
9161
9162         return 0;
9163 }
9164
/*
 * Check the device's error-injection list for a matching ERR_FAIL_CMD
 * entry.  When one fires, build the injected sense/status, schedule the
 * response, copy the injection record to @info and store the
 * queuecommand return value in @retval.  Returns non-zero if the caller
 * should return *retval instead of processing the command normally.
 *
 * NOTE(review): @err is dereferenced at out_handle after
 * rcu_read_unlock(); this assumes list entries are not freed while a
 * command is in flight — confirm against the injection removal path.
 */
static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
			   struct sdebug_err_inject *info)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;
	int result;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_FAIL_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			/* An exhausted (cnt == 0) injection no longer fires. */
			if (!err->cnt) {
				rcu_read_unlock();
				return 0;
			}

			ret = !!err->cnt;
			rcu_read_unlock();
			goto out_handle;
		}
	}
	rcu_read_unlock();

	return 0;

out_handle:
	/* Negative counts are "fire N times": tick toward zero. */
	if (err->cnt < 0)
		err->cnt++;
	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
	*info = *err;
	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);

	return ret;
}
9206
/*
 * queuecommand host template callback: decode the CDB via the opcode
 * tables, apply error injection, unit-attention and not-ready
 * processing, then schedule the matching resp_* handler with the
 * configured delay.  Returns 0 or a SCSI_MLQUEUE_* busy indication.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	u32 devsel = sdebug_get_devsel(scp->device);
	bool has_wlun_rl;
	bool inject_now;
	int ret = 0;
	struct sdebug_err_inject err;

	scsi_set_resid(scp, 0);
	/* every_nth-style injection needs the statistics counters. */
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	/* Optionally trace the raw CDB bytes (up to 32) per command. */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}

	/* Injected timeout: drop the command without completing it. */
	if (sdebug_timeout_cmd(scp)) {
		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
		return 0;
	}

	/* Injected queuecommand failure (e.g. host busy). */
	ret = sdebug_fail_queue_cmd(scp);
	if (ret) {
		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
				opcode, ret);
		return ret;
	}

	/* Injected command failure with specific sense/status bytes. */
	if (sdebug_fail_cmd(scp, &ret, &err)) {
		scmd_printk(KERN_INFO, scp,
			"fail command 0x%x with hostbyte=0x%x, "
			"driverbyte=0x%x, statusbyte=0x%x, "
			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
			opcode, err.host_byte, err.driver_byte,
			err.status_byte, err.sense_key, err.asc, err.asq);
		return ret;
	}

	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	/*
	 * Resolve the handler.  When an opcode has attached variants
	 * (num_attached > 0), match by service action and/or device
	 * selector across the attached array.
	 */
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa &&
					(devsel & oip->devsel) != 0)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode &&
					(devsel & oip->devsel) != 0)
					break;
			}
		}
		if (k > na) {
			/* No variant matched: invalid SA field or opcode. */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		/* Report the highest set bit of the first offending byte. */
		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* Pending unit attentions preempt most commands. */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
9401
9402 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
9403 {
9404         struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
9405         struct sdebug_defer *sd_dp = &sdsc->sd_dp;
9406
9407         spin_lock_init(&sdsc->lock);
9408         hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
9409                       HRTIMER_MODE_REL_PINNED);
9410         INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
9411
9412         return 0;
9413 }
9414
/* Host template for the pseudo adapter; see callbacks defined above. */
static const struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.sdev_init =		scsi_debug_sdev_init,
	.sdev_configure =	scsi_debug_sdev_configure,
	.sdev_destroy =		scsi_debug_sdev_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.skip_settle_delay =	1,
	.track_queue_depth =	1,
	.cmd_size = sizeof(struct sdebug_scsi_cmd),
	.init_cmd_priv = sdebug_init_cmd_priv,
	.target_alloc =		sdebug_target_alloc,
	.target_destroy =	sdebug_target_destroy,
};
9448
/*
 * Bus probe callback: allocate a Scsi_Host for one pseudo adapter,
 * apply the module parameters (queue layout, polling, protection
 * information), then add and scan the host.  Returns 0 or -ENODEV.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = dev_to_sdebug_host(dev);

	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	hpnt->can_queue = sdebug_max_queue;
	hpnt->cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		hpnt->dma_boundary = PAGE_SIZE - 1;

	/* More submit queues than CPUs is pointless: trim. */
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			 my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O queue
	 * left over for non-polled I/O.
	 * If condition not met, trim poll_queues to 1 (just for simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
	/* Three maps: default, read (unused) and poll. */
	if (poll_queues)
		hpnt->nr_maps = 3;

	sdbg_host->shost = hpnt;
	/* Leave room so this_id (7) does not shadow a target id. */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* Translate the dif/dix module parameters to SHOST_* capabilities. */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
9573
9574 static void sdebug_driver_remove(struct device *dev)
9575 {
9576         struct sdebug_host_info *sdbg_host;
9577         struct sdebug_dev_info *sdbg_devinfo, *tmp;
9578
9579         sdbg_host = dev_to_sdebug_host(dev);
9580
9581         scsi_remove_host(sdbg_host->shost);
9582
9583         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
9584                                  dev_list) {
9585                 list_del(&sdbg_devinfo->dev_list);
9586                 kfree(sdbg_devinfo->zstate);
9587                 kfree(sdbg_devinfo);
9588         }
9589
9590         scsi_host_put(sdbg_host->shost);
9591 }
9592
/* Pseudo bus that adapters attach to; probe/remove create and tear
 * down the Scsi_Host for each registered sdebug adapter device. */
static const struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};