1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4 * Copyright (C) 1992 Eric Youngdale
5 * Simulate a host adapter with 2 disks attached. Do a lot of checking
6 * to make sure that we are not getting blocks mixed up, and PANIC if
7 * anything out of the ordinary is seen.
8 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
10 * Copyright (C) 2001 - 2021 Douglas Gilbert
12 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
45 #include <net/checksum.h>
47 #include <asm/unaligned.h>
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
59 #include "scsi_logging.h"
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191" /* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
65 #define MY_NAME "scsi_debug"
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
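/* For example, the mk_sense_*() helpers further down pair a sense key with
 * one of the values above: mk_sense_buffer(scp, ILLEGAL_REQUEST,
 * INVALID_OPCODE, 0) yields the fixed-format sense triple [0x5,0x20,0x0]. */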
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST 1
109 #define DEF_NUM_TGTS 1
110 #define DEF_MAX_LUNS 1
111 /* With these defaults, this driver will make 1 host with 1 target
112 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT 0
118 #define DEF_DEV_SIZE_MB 8
119 #define DEF_ZBC_DEV_SIZE_MB 128
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE 0
124 #define DEF_EVERY_NTH 0
125 #define DEF_FAKE_RW 0
127 #define DEF_HOST_LOCK 0
130 #define DEF_LBPWS10 0
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0 0
135 #define DEF_NUM_PARTS 0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB 0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB 128
161 #define DEF_ZBC_MAX_OPEN_ZONES 8
162 #define DEF_ZBC_NR_CONV_ZONES 1
164 #define SDEBUG_LUN_0_VAL 0
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE 1
168 #define SDEBUG_OPT_MEDIUM_ERR 2
169 #define SDEBUG_OPT_TIMEOUT 4
170 #define SDEBUG_OPT_RECOVERED_ERR 8
171 #define SDEBUG_OPT_TRANSPORT_ERR 16
172 #define SDEBUG_OPT_DIF_ERR 32
173 #define SDEBUG_OPT_DIX_ERR 64
174 #define SDEBUG_OPT_MAC_TIMEOUT 128
175 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
176 #define SDEBUG_OPT_Q_NOISE 0x200
177 #define SDEBUG_OPT_ALL_TSF 0x400 /* ignore */
178 #define SDEBUG_OPT_RARE_TSF 0x800
179 #define SDEBUG_OPT_N_WCE 0x1000
180 #define SDEBUG_OPT_RESET_NOISE 0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
182 #define SDEBUG_OPT_HOST_BUSY 0x8000
183 #define SDEBUG_OPT_CMD_ABORT 0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 SDEBUG_OPT_TRANSPORT_ERR | \
188 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 SDEBUG_OPT_SHORT_TRANSFER | \
190 SDEBUG_OPT_HOST_BUSY | \
191 SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
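/* Example: the opts module parameter (which populates sdebug_opts) is an OR
 * of the bit values above, so loading with opts=0x3, i.e. SDEBUG_OPT_NOISE |
 * SDEBUG_OPT_MEDIUM_ERR, enables both verbose logging and medium error
 * injection, while opts=0 turns all of them off. */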
195 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
196 * priority order. In the subset implemented here, lower numbers have higher
197 * priority. The UA numbers should form a sequence starting from 0, with
198 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1 /* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
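/* These values index the per-device uas_bm bitmap: event handling sets a bit
 * (e.g. SDEBUG_UA_POR after a simulated reset) and make_ua() further down
 * reports the lowest numbered, i.e. highest priority, pending UA and then
 * clears that bit. */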
209 /* when the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium error
210 * is simulated at this sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
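/* Worked example: with the SDEBUG_OPT_MEDIUM_ERR bit set in opts, a READ
 * whose range covers LBA 0x1234 (4660) is completed with a MEDIUM ERROR /
 * UNRECOVERED READ ERROR sense; roughly OPT_MEDIUM_ERR_NUM (10) consecutive
 * sectors starting there are affected. */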
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215 * (for response) per submit queue at one time. Can be reduced by max_queue
216 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219 * but cannot exceed SDEBUG_CANQUEUE .
221 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is BITS_PER_LONG bits */
222 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
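/* Resulting ceiling, for example: on a 64-bit build BITS_PER_LONG is 64, so
 * SDEBUG_CANQUEUE = 3 * 64 = 192 commands (3 * 32 = 96 on a 32-bit build);
 * DEF_CMD_PER_LUN simply defaults the per-device queue depth to that same
 * ceiling. */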
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN 1 /* Data-in command (e.g. READ) */
227 #define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
230 #define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
233 #define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP 0x200 /* invalid opcode (not supported) */
236 #define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
238 #define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
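/* Example from the opcode table below: the INQUIRY entry uses
 * FF_RESPOND | F_D_IN, so it is allowed for the REPORT LUNS W-LUN, bypasses
 * pending unit attentions and may override the configured command delay,
 * while the READ/WRITE entries use FF_MEDIA_IO so they honour the fake_rw
 * setting. */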
247 #define SDEBUG_MAX_PARTS 4
249 #define SDEBUG_MAX_CMD_LEN 32
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
253 static struct kmem_cache *queued_cmd_cache;
255 #define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
256 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
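/* Usage sketch: the per-command state is parked in scmd->host_scribble, so
 * submission does ASSIGN_QUEUED_CMD(scmd, sqcp) and completion/abort paths
 * retrieve it with:
 *	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(scmd);
 */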
258 /* Zone types (zbcr05 table 25) */
263 /* ZBC_ZTYPE_SOBR = 0x4, */
267 /* enumeration names taken from table 26, zbcr05 */
269 ZBC_NOT_WRITE_POINTER = 0x0,
271 ZC2_IMPLICIT_OPEN = 0x2,
272 ZC3_EXPLICIT_OPEN = 0x3,
279 struct sdeb_zone_state { /* ZBC: per zone state */
280 enum sdebug_z_type z_type;
281 enum sdebug_z_cond z_cond;
282 bool z_non_seq_resource;
288 struct sdebug_dev_info {
289 struct list_head dev_list;
290 unsigned int channel;
294 struct sdebug_host_info *sdbg_host;
295 unsigned long uas_bm[1];
296 atomic_t stopped; /* 1: by SSU, 2: device start */
299 /* For ZBC devices */
300 enum blk_zoned_model zmodel;
303 unsigned int zsize_shift;
304 unsigned int nr_zones;
305 unsigned int nr_conv_zones;
306 unsigned int nr_seq_zones;
307 unsigned int nr_imp_open;
308 unsigned int nr_exp_open;
309 unsigned int nr_closed;
310 unsigned int max_open;
311 ktime_t create_ts; /* time (since boot) at which this device was created */
312 struct sdeb_zone_state *zstate;
315 struct sdebug_host_info {
316 struct list_head host_list;
317 int si_idx; /* sdeb_store_info (per host) xarray index */
318 struct Scsi_Host *shost;
320 struct list_head dev_info_list;
323 /* There is an xarray of pointers to this struct's objects, one per host */
324 struct sdeb_store_info {
325 rwlock_t macc_lck; /* for atomic media access on this store */
326 u8 *storep; /* user data storage (ram) */
327 struct t10_pi_tuple *dif_storep; /* protection info */
328 void *map_storep; /* provisioning map */
331 #define dev_to_sdebug_host(d) \
332 container_of(d, struct sdebug_host_info, dev)
334 #define shost_to_sdebug_host(shost) \
335 dev_to_sdebug_host(shost->dma_dev)
337 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
338 SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
340 struct sdebug_defer {
342 struct execute_work ew;
343 ktime_t cmpl_ts; /* time (since boot) at which to complete this cmd */
345 bool aborted; /* true when blk_abort_request() already called */
346 enum sdeb_defer_type defer_t;
349 struct sdebug_queued_cmd {
350 /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
351 * instance indicates this slot is in use.
353 struct sdebug_defer sd_dp;
354 struct scsi_cmnd *scmd;
357 struct sdebug_scsi_cmd {
361 static atomic_t sdebug_cmnd_count; /* number of incoming commands */
362 static atomic_t sdebug_completions; /* count of deferred completions */
363 static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
364 static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
365 static atomic_t sdeb_inject_pending;
366 static atomic_t sdeb_mq_poll_count; /* bumped when mq_poll returns > 0 */
368 struct opcode_info_t {
369 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
370 /* for terminating element */
371 u8 opcode; /* if num_attached > 0, preferred */
372 u16 sa; /* service action */
373 u32 flags; /* OR-ed set of SDEB_F_* */
374 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
375 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
376 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
377 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
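/* len_mask example: the INQUIRY entry below carries
 * {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, ...}; byte 0 gives the cdb length (6)
 * and the remaining bytes flag which bits may legitimately be set in each cdb
 * byte, which the strict module parameter path can use to reject cdbs with
 * reserved bits set. */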
380 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
381 enum sdeb_opcode_index {
382 SDEB_I_INVALID_OPCODE = 0,
384 SDEB_I_REPORT_LUNS = 2,
385 SDEB_I_REQUEST_SENSE = 3,
386 SDEB_I_TEST_UNIT_READY = 4,
387 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
388 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
389 SDEB_I_LOG_SENSE = 7,
390 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
391 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
392 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
393 SDEB_I_START_STOP = 11,
394 SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
395 SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
396 SDEB_I_MAINT_IN = 14,
397 SDEB_I_MAINT_OUT = 15,
398 SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
399 SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
400 SDEB_I_RESERVE = 18, /* 6, 10 */
401 SDEB_I_RELEASE = 19, /* 6, 10 */
402 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
403 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
404 SDEB_I_ATA_PT = 22, /* 12, 16 */
405 SDEB_I_SEND_DIAG = 23,
407 SDEB_I_WRITE_BUFFER = 25,
408 SDEB_I_WRITE_SAME = 26, /* 10, 16 */
409 SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
410 SDEB_I_COMP_WRITE = 28,
411 SDEB_I_PRE_FETCH = 29, /* 10, 16 */
412 SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
413 SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
414 SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
418 static const unsigned char opcode_ind_arr[256] = {
419 /* 0x0; 0x0->0x1f: 6 byte cdbs */
420 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
422 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
423 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
425 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
426 SDEB_I_ALLOW_REMOVAL, 0,
427 /* 0x20; 0x20->0x3f: 10 byte cdbs */
428 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
429 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
430 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
431 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
432 /* 0x40; 0x40->0x5f: 10 byte cdbs */
433 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
434 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
435 0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
437 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
438 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
439 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
440 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
441 0, SDEB_I_VARIABLE_LEN,
442 /* 0x80; 0x80->0x9f: 16 byte cdbs */
443 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
444 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
445 0, 0, 0, SDEB_I_VERIFY,
446 SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
447 SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
448 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
449 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
450 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
451 SDEB_I_MAINT_OUT, 0, 0, 0,
452 SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
453 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
454 0, 0, 0, 0, 0, 0, 0, 0,
455 0, 0, 0, 0, 0, 0, 0, 0,
456 /* 0xc0; 0xc0->0xff: vendor specific */
457 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
458 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
459 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
460 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
464 * The following "response" functions return the SCSI mid-level's 4 byte
465 * tuple-in-an-int. To handle commands with an IMMED bit, which complete
466 * before the requested action has finished, they can set the
467 * SDEG_RES_IMMED_MASK bit in their return value.
469 #define SDEG_RES_IMMED_MASK 0x40000000
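/* For example, returning 0 reports GOOD status, while returning
 * check_condition_result (defined further down as SAM_STAT_CHECK_CONDITION)
 * reports CHECK CONDITION together with whatever sense data the response
 * function has built via mk_sense_buffer() and friends. */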
471 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int sdebug_do_add_host(bool mk_new_store);
502 static int sdebug_add_host_helper(int per_host_idx);
503 static void sdebug_do_remove_host(bool the_end);
504 static int sdebug_add_store(void);
505 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
506 static void sdebug_erase_all_stores(bool apart_from_first);
508 static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
511 * The following are overflow arrays for cdbs that "hit" the same index in
512 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
513 * should be placed in opcode_info_arr[], the others should be placed here.
515 static const struct opcode_info_t msense_iarr[] = {
516 {0, 0x1a, 0, F_D_IN, NULL, NULL,
517 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
520 static const struct opcode_info_t mselect_iarr[] = {
521 {0, 0x15, 0, F_D_OUT, NULL, NULL,
522 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
525 static const struct opcode_info_t read_iarr[] = {
526 {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
527 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
529 {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
530 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
531 {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
532 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
536 static const struct opcode_info_t write_iarr[] = {
537 {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
538 NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
540 {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
541 NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
543 {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
544 NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
545 0xbf, 0xc7, 0, 0, 0, 0} },
548 static const struct opcode_info_t verify_iarr[] = {
549 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
550 NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
554 static const struct opcode_info_t sa_in_16_iarr[] = {
555 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
556 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
557 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
560 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
561 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
562 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
563 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
564 {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
565 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
566 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
569 static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
570 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
571 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
572 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
573 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
574 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
575 0, 0} }, /* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
578 static const struct opcode_info_t write_same_iarr[] = {
579 {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
580 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
581 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
584 static const struct opcode_info_t reserve_iarr[] = {
585 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
586 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
589 static const struct opcode_info_t release_iarr[] = {
590 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
591 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
594 static const struct opcode_info_t sync_cache_iarr[] = {
595 {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
596 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
600 static const struct opcode_info_t pre_fetch_iarr[] = {
601 {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
602 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
606 static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
607 {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
608 {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
610 {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
611 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
612 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
613 {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
614 {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
618 static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
619 {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
620 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
621 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
625 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
626 * plus the terminating elements for logic that scans this table such as
627 * REPORT SUPPORTED OPERATION CODES. */
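/* Lookup sketch: a cdb is dispatched in two steps. First opcode_ind_arr[]
 * above maps cdb[0] to an SDEB_I_* index (e.g. 0x12 -> SDEB_I_INQUIRY,
 * 0x88 -> SDEB_I_READ); then the entry at that index in this array is used
 * directly or, when num_attached > 0, its arrp[] overflow array is scanned
 * for a matching opcode and service action (e.g. READ(16) here chains to
 * read_iarr[] holding READ(10), READ(6) and READ(12)). */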
628 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
630 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
631 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
633 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
635 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
636 0, 0} }, /* REPORT LUNS */
637 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
638 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
639 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
640 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
642 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
643 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
644 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
645 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
646 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
647 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
648 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
649 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
651 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
652 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
654 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
655 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
656 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
658 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
659 resp_write_dt0, write_iarr, /* WRITE(16) */
660 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
662 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
663 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
664 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
665 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
666 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
667 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
668 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
669 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
670 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
671 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
672 resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
673 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
674 0xff, 0, 0xc7, 0, 0, 0, 0} },
676 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
677 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
678 {ARRAY_SIZE(verify_iarr), 0x8f, 0,
679 F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
680 verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
681 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
682 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
683 resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
684 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
686 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
687 NULL, reserve_iarr, /* RESERVE(10) <no response function> */
688 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
690 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
691 NULL, release_iarr, /* RELEASE(10) <no response function> */
692 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
695 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
696 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
698 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
699 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
700 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
701 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
702 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
703 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
704 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
706 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
707 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
708 0, 0, 0, 0} }, /* WRITE_BUFFER */
709 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
710 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
711 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
713 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
714 resp_sync_cache, sync_cache_iarr,
715 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
716 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
717 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
718 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
719 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
720 {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
721 resp_pre_fetch, pre_fetch_iarr,
722 {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
723 0, 0, 0, 0} }, /* PRE-FETCH (10) */
726 {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
727 resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
728 {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
729 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
730 {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
731 resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
732 {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
733 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
735 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
736 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
739 static int sdebug_num_hosts;
740 static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
741 static int sdebug_ato = DEF_ATO;
742 static int sdebug_cdb_len = DEF_CDB_LEN;
743 static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
744 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
745 static int sdebug_dif = DEF_DIF;
746 static int sdebug_dix = DEF_DIX;
747 static int sdebug_dsense = DEF_D_SENSE;
748 static int sdebug_every_nth = DEF_EVERY_NTH;
749 static int sdebug_fake_rw = DEF_FAKE_RW;
750 static unsigned int sdebug_guard = DEF_GUARD;
751 static int sdebug_host_max_queue; /* per host */
752 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
753 static int sdebug_max_luns = DEF_MAX_LUNS;
754 static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
755 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
756 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
757 static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
758 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
759 static int sdebug_no_uld;
760 static int sdebug_num_parts = DEF_NUM_PARTS;
761 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
762 static int sdebug_opt_blks = DEF_OPT_BLKS;
763 static int sdebug_opts = DEF_OPTS;
764 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
765 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
766 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
767 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
768 static int sdebug_sector_size = DEF_SECTOR_SIZE;
769 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
770 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
771 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
772 static unsigned int sdebug_lbpu = DEF_LBPU;
773 static unsigned int sdebug_lbpws = DEF_LBPWS;
774 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
775 static unsigned int sdebug_lbprz = DEF_LBPRZ;
776 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
777 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
778 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
779 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
780 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
781 static int sdebug_uuid_ctl = DEF_UUID_CTL;
782 static bool sdebug_random = DEF_RANDOM;
783 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
784 static bool sdebug_removable = DEF_REMOVABLE;
785 static bool sdebug_clustering;
786 static bool sdebug_host_lock = DEF_HOST_LOCK;
787 static bool sdebug_strict = DEF_STRICT;
788 static bool sdebug_any_injecting_opt;
789 static bool sdebug_no_rwlock;
790 static bool sdebug_verbose;
791 static bool have_dif_prot;
792 static bool write_since_sync;
793 static bool sdebug_statistics = DEF_STATISTICS;
794 static bool sdebug_wp;
795 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
796 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
797 static char *sdeb_zbc_model_s;
799 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
800 SAM_LUN_AM_FLAT = 0x1,
801 SAM_LUN_AM_LOGICAL_UNIT = 0x2,
802 SAM_LUN_AM_EXTENDED = 0x3};
803 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
804 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
806 static unsigned int sdebug_store_sectors;
807 static sector_t sdebug_capacity; /* in sectors */
809 /* Old BIOS-style geometry; the kernel may eventually drop these, but some
810 mode sense pages may still need them */
811 static int sdebug_heads; /* heads per disk */
812 static int sdebug_cylinders_per; /* cylinders per surface */
813 static int sdebug_sectors_per; /* sectors per cylinder */
815 static LIST_HEAD(sdebug_host_list);
816 static DEFINE_MUTEX(sdebug_host_list_mutex);
818 static struct xarray per_store_arr;
819 static struct xarray *per_store_ap = &per_store_arr;
820 static int sdeb_first_idx = -1; /* invalid index ==> none created */
821 static int sdeb_most_recent_idx = -1;
822 static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
824 static unsigned long map_size;
825 static int num_aborts;
826 static int num_dev_resets;
827 static int num_target_resets;
828 static int num_bus_resets;
829 static int num_host_resets;
830 static int dix_writes;
831 static int dix_reads;
832 static int dif_errors;
834 /* ZBC global data */
835 static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
836 static int sdeb_zbc_zone_cap_mb;
837 static int sdeb_zbc_zone_size_mb;
838 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
839 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
841 static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
842 static int poll_queues; /* io_uring iopoll interface */
844 static char sdebug_proc_name[] = MY_NAME;
845 static const char *my_name = MY_NAME;
847 static struct bus_type pseudo_lld_bus;
849 static struct device_driver sdebug_driverfs_driver = {
850 .name = sdebug_proc_name,
851 .bus = &pseudo_lld_bus,
854 static const int check_condition_result =
855 SAM_STAT_CHECK_CONDITION;
857 static const int illegal_condition_result =
858 (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
860 static const int device_qfull_result =
861 (DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
863 static const int condition_met_result = SAM_STAT_CONDITION_MET;
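/* Packing example: these use the "tuple-in-an-int" layout mentioned above,
 * with the host byte in bits 16..23 and the SCSI status byte in bits 0..7,
 * so illegal_condition_result combines DID_ABORT with CHECK CONDITION in a
 * single int. */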
866 /* Only do the extra work involved in logical block provisioning if one or
867 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
868 * real reads and writes (i.e. not skipping them for speed).
870 static inline bool scsi_debug_lbp(void)
872 return 0 == sdebug_fake_rw &&
873 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
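/* In other words, the thin provisioning bookkeeping (map_storep) only comes
 * into play when at least one of lbpu, lbpws or lbpws10 is set and fake_rw
 * is 0; otherwise reads and writes skip it entirely. */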
876 static void *lba2fake_store(struct sdeb_store_info *sip,
877 unsigned long long lba)
879 struct sdeb_store_info *lsip = sip;
881 lba = do_div(lba, sdebug_store_sectors);
882 if (!sip || !sip->storep) {
884 lsip = xa_load(per_store_ap, 0); /* should never be NULL */
886 return lsip->storep + lba * sdebug_sector_size;
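/* Example: if the RAM store holds sdebug_store_sectors == 16384 sectors, an
 * incoming LBA of 20000 wraps to 20000 % 16384 == 3616 via do_div() above;
 * this lets a larger virtual_gb capacity be backed by a much smaller store. */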
889 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
892 sector = sector_div(sector, sdebug_store_sectors);
894 return sip->dif_storep + sector;
897 static void sdebug_max_tgts_luns(void)
899 struct sdebug_host_info *sdbg_host;
900 struct Scsi_Host *hpnt;
902 mutex_lock(&sdebug_host_list_mutex);
903 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
904 hpnt = sdbg_host->shost;
905 if ((hpnt->this_id >= 0) &&
906 (sdebug_num_tgts > hpnt->this_id))
907 hpnt->max_id = sdebug_num_tgts + 1;
909 hpnt->max_id = sdebug_num_tgts;
910 /* sdebug_max_luns; */
911 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
913 mutex_unlock(&sdebug_host_list_mutex);
916 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
918 /* Set in_bit to -1 to indicate no bit position of invalid field */
919 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
920 enum sdeb_cmd_data c_d,
921 int in_byte, int in_bit)
923 unsigned char *sbuff;
927 sbuff = scp->sense_buffer;
929 sdev_printk(KERN_ERR, scp->device,
930 "%s: sense_buffer is NULL\n", __func__);
933 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
934 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
935 scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
936 memset(sks, 0, sizeof(sks));
942 sks[0] |= 0x7 & in_bit;
944 put_unaligned_be16(in_byte, sks + 1);
950 memcpy(sbuff + sl + 4, sks, 3);
952 memcpy(sbuff + 15, sks, 3);
954 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
955 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
956 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
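/* Usage example from later in this file: mk_sense_invalid_fld(scp,
 * SDEB_IN_CDB, 2, -1) reports ILLEGAL REQUEST with INVALID FIELD IN CDB and a
 * sense-key-specific pointer to cdb byte 2, with no particular bit flagged. */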
959 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
961 if (!scp->sense_buffer) {
962 sdev_printk(KERN_ERR, scp->device,
963 "%s: sense_buffer is NULL\n", __func__);
966 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
968 scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
971 sdev_printk(KERN_INFO, scp->device,
972 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
973 my_name, key, asc, asq);
976 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
978 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
981 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
984 if (sdebug_verbose) {
986 sdev_printk(KERN_INFO, dev,
987 "%s: BLKFLSBUF [0x1261]\n", __func__);
988 else if (0x5331 == cmd)
989 sdev_printk(KERN_INFO, dev,
990 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
993 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
997 /* return -ENOTTY; // correct return but upsets fdisk */
1000 static void config_cdb_len(struct scsi_device *sdev)
1002 switch (sdebug_cdb_len) {
1003 case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1004 sdev->use_10_for_rw = false;
1005 sdev->use_16_for_rw = false;
1006 sdev->use_10_for_ms = false;
1008 case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1009 sdev->use_10_for_rw = true;
1010 sdev->use_16_for_rw = false;
1011 sdev->use_10_for_ms = false;
1013 case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1014 sdev->use_10_for_rw = true;
1015 sdev->use_16_for_rw = false;
1016 sdev->use_10_for_ms = true;
1019 sdev->use_10_for_rw = false;
1020 sdev->use_16_for_rw = true;
1021 sdev->use_10_for_ms = true;
1023 case 32: /* No knobs to suggest this so same as 16 for now */
1024 sdev->use_10_for_rw = false;
1025 sdev->use_16_for_rw = true;
1026 sdev->use_10_for_ms = true;
1029 pr_warn("unexpected cdb_len=%d, force to 10\n",
1031 sdev->use_10_for_rw = true;
1032 sdev->use_16_for_rw = false;
1033 sdev->use_10_for_ms = false;
1034 sdebug_cdb_len = 10;
1039 static void all_config_cdb_len(void)
1041 struct sdebug_host_info *sdbg_host;
1042 struct Scsi_Host *shost;
1043 struct scsi_device *sdev;
1045 mutex_lock(&sdebug_host_list_mutex);
1046 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1047 shost = sdbg_host->shost;
1048 shost_for_each_device(sdev, shost) {
1049 config_cdb_len(sdev);
1052 mutex_unlock(&sdebug_host_list_mutex);
1055 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1057 struct sdebug_host_info *sdhp = devip->sdbg_host;
1058 struct sdebug_dev_info *dp;
1060 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1061 if ((devip->sdbg_host == dp->sdbg_host) &&
1062 (devip->target == dp->target)) {
1063 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1068 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1072 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1073 if (k != SDEBUG_NUM_UAS) {
1074 const char *cp = NULL;
1078 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1079 POWER_ON_RESET_ASCQ);
1081 cp = "power on reset";
1083 case SDEBUG_UA_POOCCUR:
1084 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1085 POWER_ON_OCCURRED_ASCQ);
1087 cp = "power on occurred";
1089 case SDEBUG_UA_BUS_RESET:
1090 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1095 case SDEBUG_UA_MODE_CHANGED:
1096 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1099 cp = "mode parameters changed";
1101 case SDEBUG_UA_CAPACITY_CHANGED:
1102 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1103 CAPACITY_CHANGED_ASCQ);
1105 cp = "capacity data changed";
1107 case SDEBUG_UA_MICROCODE_CHANGED:
1108 mk_sense_buffer(scp, UNIT_ATTENTION,
1110 MICROCODE_CHANGED_ASCQ);
1112 cp = "microcode has been changed";
1114 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1115 mk_sense_buffer(scp, UNIT_ATTENTION,
1117 MICROCODE_CHANGED_WO_RESET_ASCQ);
1119 cp = "microcode has been changed without reset";
1121 case SDEBUG_UA_LUNS_CHANGED:
1123 * SPC-3 behavior is to report a UNIT ATTENTION with
1124 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1125 * on the target, until a REPORT LUNS command is
1126 * received. SPC-4 behavior is to report it only once.
1127 * NOTE: sdebug_scsi_level does not use the same
1128 * values as struct scsi_device->scsi_level.
1130 if (sdebug_scsi_level >= 6) /* SPC-4 and above */
1131 clear_luns_changed_on_target(devip);
1132 mk_sense_buffer(scp, UNIT_ATTENTION,
1136 cp = "reported luns data has changed";
1139 pr_warn("unexpected unit attention code=%d\n", k);
1144 clear_bit(k, devip->uas_bm);
1146 sdev_printk(KERN_INFO, scp->device,
1147 "%s reports: Unit attention: %s\n",
1149 return check_condition_result;
1154 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1155 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1159 struct scsi_data_buffer *sdb = &scp->sdb;
1163 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1164 return DID_ERROR << 16;
1166 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1168 scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1173 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1174 * (DID_ERROR << 16). Can write at an offset within the data-in buffer; when
1175 * called multiple times, the calls need not write in ascending offset order.
1176 * Assumes resid was set to scsi_bufflen() prior to any calls.
1178 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1179 int arr_len, unsigned int off_dst)
1181 unsigned int act_len, n;
1182 struct scsi_data_buffer *sdb = &scp->sdb;
1183 off_t skip = off_dst;
1185 if (sdb->length <= off_dst)
1187 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1188 return DID_ERROR << 16;
1190 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1191 arr, arr_len, skip);
1192 pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1193 __func__, off_dst, scsi_bufflen(scp), act_len,
1194 scsi_get_resid(scp));
1195 n = scsi_bufflen(scp) - (off_dst + act_len);
1196 scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1200 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1201 * 'arr' or -1 if error.
1203 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1206 if (!scsi_bufflen(scp))
1208 if (scp->sc_data_direction != DMA_TO_DEVICE)
1211 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1215 static char sdebug_inq_vendor_id[9] = "Linux ";
1216 static char sdebug_inq_product_id[17] = "scsi_debug ";
1217 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1218 /* Use some locally assigned NAAs for SAS addresses. */
1219 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1220 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1221 static const u64 naa3_comp_c = 0x3111111000000000ULL;
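/* The leading nibble 0x3 marks these as NAA-3 (locally assigned) identifiers;
 * e.g. a device id number of 0x123 produces the 8-byte designator
 * naa3_comp_b + 0x123 == 0x3333333000000123 in the device identification VPD
 * page below. */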
1223 /* Device identification VPD page. Returns number of bytes placed in arr */
1224 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1225 int target_dev_id, int dev_id_num,
1226 const char *dev_id_str, int dev_id_str_len,
1227 const uuid_t *lu_name)
1232 port_a = target_dev_id + 1;
1233 /* T10 vendor identifier field format (faked) */
1234 arr[0] = 0x2; /* ASCII */
1237 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1238 memcpy(&arr[12], sdebug_inq_product_id, 16);
1239 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1240 num = 8 + 16 + dev_id_str_len;
1243 if (dev_id_num >= 0) {
1244 if (sdebug_uuid_ctl) {
1245 /* Locally assigned UUID */
1246 arr[num++] = 0x1; /* binary (not necessarily sas) */
1247 arr[num++] = 0xa; /* PIV=0, lu, naa */
1250 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1252 memcpy(arr + num, lu_name, 16);
1255 /* NAA-3, Logical unit identifier (binary) */
1256 arr[num++] = 0x1; /* binary (not necessarily sas) */
1257 arr[num++] = 0x3; /* PIV=0, lu, naa */
1260 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1263 /* Target relative port number */
1264 arr[num++] = 0x61; /* proto=sas, binary */
1265 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1266 arr[num++] = 0x0; /* reserved */
1267 arr[num++] = 0x4; /* length */
1268 arr[num++] = 0x0; /* reserved */
1269 arr[num++] = 0x0; /* reserved */
1271 arr[num++] = 0x1; /* relative port A */
1273 /* NAA-3, Target port identifier */
1274 arr[num++] = 0x61; /* proto=sas, binary */
1275 arr[num++] = 0x93; /* piv=1, target port, naa */
1278 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1280 /* NAA-3, Target port group identifier */
1281 arr[num++] = 0x61; /* proto=sas, binary */
1282 arr[num++] = 0x95; /* piv=1, target port group id */
1287 put_unaligned_be16(port_group_id, arr + num);
1289 /* NAA-3, Target device identifier */
1290 arr[num++] = 0x61; /* proto=sas, binary */
1291 arr[num++] = 0xa3; /* piv=1, target device, naa */
1294 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1296 /* SCSI name string: Target device identifier */
1297 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1298 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1301 memcpy(arr + num, "naa.32222220", 12);
1303 snprintf(b, sizeof(b), "%08X", target_dev_id);
1304 memcpy(arr + num, b, 8);
1306 memset(arr + num, 0, 4);
1311 static unsigned char vpd84_data[] = {
1312 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1313 0x22,0x22,0x22,0x0,0xbb,0x1,
1314 0x22,0x22,0x22,0x0,0xbb,0x2,
1317 /* Software interface identification VPD page */
1318 static int inquiry_vpd_84(unsigned char *arr)
1320 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1321 return sizeof(vpd84_data);
1324 /* Management network addresses VPD page */
1325 static int inquiry_vpd_85(unsigned char *arr)
1328 const char *na1 = "https://www.kernel.org/config";
1329 const char *na2 = "http://www.kernel.org/log";
1332 arr[num++] = 0x1; /* lu, storage config */
1333 arr[num++] = 0x0; /* reserved */
1338 plen = ((plen / 4) + 1) * 4;
1339 arr[num++] = plen; /* length, null terminated, padded */
1340 memcpy(arr + num, na1, olen);
1341 memset(arr + num + olen, 0, plen - olen);
1344 arr[num++] = 0x4; /* lu, logging */
1345 arr[num++] = 0x0; /* reserved */
1350 plen = ((plen / 4) + 1) * 4;
1351 arr[num++] = plen; /* length, null terminated, padded */
1352 memcpy(arr + num, na2, olen);
1353 memset(arr + num + olen, 0, plen - olen);
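/* Padding example (assuming plen starts as strlen + 1): na2 above is 25
 * characters, so 26 bytes are needed including the terminating NUL and plen
 * is rounded up to the next multiple of four, i.e. 28. */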
1359 /* SCSI ports VPD page */
1360 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1365 port_a = target_dev_id + 1;
1366 port_b = port_a + 1;
1367 arr[num++] = 0x0; /* reserved */
1368 arr[num++] = 0x0; /* reserved */
1370 arr[num++] = 0x1; /* relative port 1 (primary) */
1371 memset(arr + num, 0, 6);
1374 arr[num++] = 12; /* length tp descriptor */
1375 /* naa-5 target port identifier (A) */
1376 arr[num++] = 0x61; /* proto=sas, binary */
1377 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1378 arr[num++] = 0x0; /* reserved */
1379 arr[num++] = 0x8; /* length */
1380 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1382 arr[num++] = 0x0; /* reserved */
1383 arr[num++] = 0x0; /* reserved */
1385 arr[num++] = 0x2; /* relative port 2 (secondary) */
1386 memset(arr + num, 0, 6);
1389 arr[num++] = 12; /* length tp descriptor */
1390 /* naa-5 target port identifier (B) */
1391 arr[num++] = 0x61; /* proto=sas, binary */
1392 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1393 arr[num++] = 0x0; /* reserved */
1394 arr[num++] = 0x8; /* length */
1395 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1402 static unsigned char vpd89_data[] = {
1403 /* from 4th byte */ 0,0,0,0,
1404 'l','i','n','u','x',' ',' ',' ',
1405 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1407 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1409 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1410 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1411 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1412 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1414 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1416 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1418 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1419 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1420 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1421 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1422 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1423 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1424 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1425 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1426 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1427 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1428 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1429 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1430 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1431 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1446 /* ATA Information VPD page */
1447 static int inquiry_vpd_89(unsigned char *arr)
1449 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1450 return sizeof(vpd89_data);
1454 static unsigned char vpdb0_data[] = {
1455 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1456 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1457 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1458 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1461 /* Block limits VPD page (SBC-3) */
1462 static int inquiry_vpd_b0(unsigned char *arr)
1466 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1468 /* Optimal transfer length granularity */
1469 if (sdebug_opt_xferlen_exp != 0 &&
1470 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1471 gran = 1 << sdebug_opt_xferlen_exp;
1473 gran = 1 << sdebug_physblk_exp;
1474 put_unaligned_be16(gran, arr + 2);
1476 /* Maximum Transfer Length */
1477 if (sdebug_store_sectors > 0x400)
1478 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1480 /* Optimal Transfer Length */
1481 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1484 /* Maximum Unmap LBA Count */
1485 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1487 /* Maximum Unmap Block Descriptor Count */
1488 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1491 /* Unmap Granularity Alignment */
1492 if (sdebug_unmap_alignment) {
1493 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1494 arr[28] |= 0x80; /* UGAVALID */
1497 /* Optimal Unmap Granularity */
1498 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1500 /* Maximum WRITE SAME Length */
1501 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1503 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1506 /* Block device characteristics VPD page (SBC-3) */
1507 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1509 memset(arr, 0, 0x3c);
1511 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1513 arr[3] = 5; /* less than 1.8" */
1514 if (devip->zmodel == BLK_ZONED_HA)
1515 arr[4] = 1 << 4; /* zoned field = 01b */
1520 /* Logical block provisioning VPD page (SBC-4) */
1521 static int inquiry_vpd_b2(unsigned char *arr)
1523 memset(arr, 0, 0x4);
1524 arr[0] = 0; /* threshold exponent */
1531 if (sdebug_lbprz && scsi_debug_lbp())
1532 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1533 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1534 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1535 /* threshold_percentage=0 */
1539 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1540 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1542 memset(arr, 0, 0x3c);
1543 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1545 * Set Optimal number of open sequential write preferred zones and
1546 * Optimal number of non-sequentially written sequential write
1547 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1548 * fields set to zero, apart from Max. number of open swrz_s field.
1550 put_unaligned_be32(0xffffffff, &arr[4]);
1551 put_unaligned_be32(0xffffffff, &arr[8]);
1552 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1553 put_unaligned_be32(devip->max_open, &arr[12]);
1555 put_unaligned_be32(0xffffffff, &arr[12]);
1556 if (devip->zcap < devip->zsize) {
1557 arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1558 put_unaligned_be64(devip->zsize, &arr[20]);
1565 #define SDEBUG_LONG_INQ_SZ 96
1566 #define SDEBUG_MAX_INQ_ARR_SZ 584
1568 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1570 unsigned char pq_pdt;
1572 unsigned char *cmd = scp->cmnd;
1575 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1577 alloc_len = get_unaligned_be16(cmd + 3);
1578 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1580 return DID_REQUEUE << 16;
1581 is_disk = (sdebug_ptype == TYPE_DISK);
1582 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1583 is_disk_zbc = (is_disk || is_zbc);
1584 have_wlun = scsi_is_wlun(scp->device->lun);
1586 pq_pdt = TYPE_WLUN; /* present, wlun */
1587 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1588 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1590 pq_pdt = (sdebug_ptype & 0x1f);
1592 if (0x2 & cmd[1]) { /* CMDDT bit set */
1593 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1595 return check_condition_result;
1596 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1597 int lu_id_num, port_group_id, target_dev_id;
1600 int host_no = devip->sdbg_host->shost->host_no;
1602 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1603 (devip->channel & 0x7f);
1604 if (sdebug_vpd_use_hostno == 0)
1606 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1607 (devip->target * 1000) + devip->lun);
1608 target_dev_id = ((host_no + 1) * 2000) +
1609 (devip->target * 1000) - 3;
1610 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1611 if (0 == cmd[2]) { /* supported vital product data pages */
1612 arr[1] = cmd[2]; /*sanity */
1614 arr[n++] = 0x0; /* this page */
1615 arr[n++] = 0x80; /* unit serial number */
1616 arr[n++] = 0x83; /* device identification */
1617 arr[n++] = 0x84; /* software interface ident. */
1618 arr[n++] = 0x85; /* management network addresses */
1619 arr[n++] = 0x86; /* extended inquiry */
1620 arr[n++] = 0x87; /* mode page policy */
1621 arr[n++] = 0x88; /* SCSI ports */
1622 if (is_disk_zbc) { /* SBC or ZBC */
1623 arr[n++] = 0x89; /* ATA information */
1624 arr[n++] = 0xb0; /* Block limits */
1625 arr[n++] = 0xb1; /* Block characteristics */
1627 arr[n++] = 0xb2; /* LB Provisioning */
1629 arr[n++] = 0xb6; /* ZB dev. char. */
1631 arr[3] = n - 4; /* number of supported VPD pages */
1632 } else if (0x80 == cmd[2]) { /* unit serial number */
1633 arr[1] = cmd[2]; /*sanity */
1635 memcpy(&arr[4], lu_id_str, len);
1636 } else if (0x83 == cmd[2]) { /* device identification */
1637 arr[1] = cmd[2]; /*sanity */
1638 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1639 target_dev_id, lu_id_num,
1642 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1643 arr[1] = cmd[2]; /*sanity */
1644 arr[3] = inquiry_vpd_84(&arr[4]);
1645 } else if (0x85 == cmd[2]) { /* Management network addresses */
1646 arr[1] = cmd[2]; /*sanity */
1647 arr[3] = inquiry_vpd_85(&arr[4]);
1648 } else if (0x86 == cmd[2]) { /* extended inquiry */
1649 arr[1] = cmd[2]; /*sanity */
1650 arr[3] = 0x3c; /* number of following entries */
1651 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1652 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1653 else if (have_dif_prot)
1654 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1656 arr[4] = 0x0; /* no protection stuff */
1657 arr[5] = 0x7; /* head of q, ordered + simple q's */
1658 } else if (0x87 == cmd[2]) { /* mode page policy */
1659 arr[1] = cmd[2]; /*sanity */
1660 arr[3] = 0x8; /* number of following entries */
1661 arr[4] = 0x2; /* disconnect-reconnect mp */
1662 arr[6] = 0x80; /* mlus, shared */
1663 arr[8] = 0x18; /* protocol specific lu */
1664 arr[10] = 0x82; /* mlus, per initiator port */
1665 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1666 arr[1] = cmd[2]; /*sanity */
1667 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1668 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1669 arr[1] = cmd[2]; /*sanity */
1670 n = inquiry_vpd_89(&arr[4]);
1671 put_unaligned_be16(n, arr + 2);
1672 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1673 arr[1] = cmd[2]; /*sanity */
1674 arr[3] = inquiry_vpd_b0(&arr[4]);
1675 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1676 arr[1] = cmd[2]; /*sanity */
1677 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1678 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1679 arr[1] = cmd[2]; /*sanity */
1680 arr[3] = inquiry_vpd_b2(&arr[4]);
1681 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1682 arr[1] = cmd[2]; /*sanity */
1683 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1685 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1687 return check_condition_result;
1689 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1690 ret = fill_from_dev_buffer(scp, arr,
1691 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1695 /* drops through here for a standard inquiry */
1696 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1697 arr[2] = sdebug_scsi_level;
1698 arr[3] = 2; /* response_data_format==2 */
1699 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1700 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1701 if (sdebug_vpd_use_hostno == 0)
1702 arr[5] |= 0x10; /* claim: implicit TPGS */
1703 arr[6] = 0x10; /* claim: MultiP */
1704 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1705 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1706 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1707 memcpy(&arr[16], sdebug_inq_product_id, 16);
1708 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1709 /* Use Vendor Specific area to place driver date in ASCII */
1710 memcpy(&arr[36], sdebug_version_date, 8);
1711 /* version descriptors (2 bytes each) follow */
1712 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1713 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1715 if (is_disk) { /* SBC-4 no version claimed */
1716 put_unaligned_be16(0x600, arr + n);
1718 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1719 put_unaligned_be16(0x525, arr + n);
1721 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1722 put_unaligned_be16(0x624, arr + n);
1725 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1726 ret = fill_from_dev_buffer(scp, arr,
1727 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1732 /* See resp_iec_m_pg() for how this data is manipulated */
1733 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1736 static int resp_requests(struct scsi_cmnd *scp,
1737 struct sdebug_dev_info *devip)
1739 unsigned char *cmd = scp->cmnd;
1740 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1741 bool dsense = !!(cmd[1] & 1);
1742 u32 alloc_len = cmd[4];
1744 int stopped_state = atomic_read(&devip->stopped);
1746 memset(arr, 0, sizeof(arr));
1747 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1751 arr[2] = LOGICAL_UNIT_NOT_READY;
1752 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1756 arr[2] = NOT_READY; /* NOT_READY in sense_key */
1757 arr[7] = 0xa; /* 18 byte sense buffer */
1758 arr[12] = LOGICAL_UNIT_NOT_READY;
1759 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1761 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1762 /* Information exceptions control mode page: TEST=1, MRIE=6 */
1765 arr[1] = 0x0; /* NO_SENSE in sense_key */
1766 arr[2] = THRESHOLD_EXCEEDED;
1767 arr[3] = 0xff; /* Failure prediction(false) */
1771 arr[2] = 0x0; /* NO_SENSE in sense_key */
1772 arr[7] = 0xa; /* 18 byte sense buffer */
1773 arr[12] = THRESHOLD_EXCEEDED;
1774 arr[13] = 0xff; /* Failure prediction(false) */
1776 } else { /* nothing to report */
1779 memset(arr, 0, len);
1782 memset(arr, 0, len);
1787 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1790 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1792 unsigned char *cmd = scp->cmnd;
1793 int power_cond, want_stop, stopped_state;
1796 power_cond = (cmd[4] & 0xf0) >> 4;
1798 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1799 return check_condition_result;
1801 want_stop = !(cmd[4] & 1);
1802 stopped_state = atomic_read(&devip->stopped);
1803 if (stopped_state == 2) {
1804 ktime_t now_ts = ktime_get_boottime();
1806 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1807 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1809 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1810 /* tur_ms_to_ready timer extinguished */
1811 atomic_set(&devip->stopped, 0);
1815 if (stopped_state == 2) {
1817 stopped_state = 1; /* dummy up success */
1818 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1819 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1820 return check_condition_result;
1824 changing = (stopped_state != want_stop);
1826 atomic_xchg(&devip->stopped, want_stop);
1827 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
1828 return SDEG_RES_IMMED_MASK;
1833 static sector_t get_sdebug_capacity(void)
1835 static const unsigned int gibibyte = 1073741824;
1837 if (sdebug_virtual_gb > 0)
1838 return (sector_t)sdebug_virtual_gb *
1839 (gibibyte / sdebug_sector_size);
1841 return sdebug_store_sectors;
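/* Example with hypothetical module parameters: virtual_gb=1 and the default
 * 512-byte sector size give 1073741824 / 512 = 2097152 sectors. */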
1844 #define SDEBUG_READCAP_ARR_SZ 8
1845 static int resp_readcap(struct scsi_cmnd *scp,
1846 struct sdebug_dev_info *devip)
1848 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1851 /* following just in case virtual_gb changed */
1852 sdebug_capacity = get_sdebug_capacity();
1853 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1854 if (sdebug_capacity < 0xffffffff) {
1855 capac = (unsigned int)sdebug_capacity - 1;
1856 put_unaligned_be32(capac, arr + 0);
1858 put_unaligned_be32(0xffffffff, arr + 0);
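/* Per SBC, a capacity field of 0xffffffff in READ CAPACITY(10) tells the
 * initiator the device is too large for this command and that
 * READ CAPACITY(16) should be used instead. */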
1859 put_unaligned_be16(sdebug_sector_size, arr + 6);
1860 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1863 #define SDEBUG_READCAP16_ARR_SZ 32
1864 static int resp_readcap16(struct scsi_cmnd *scp,
1865 struct sdebug_dev_info *devip)
1867 unsigned char *cmd = scp->cmnd;
1868 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1871 alloc_len = get_unaligned_be32(cmd + 10);
1872 /* following just in case virtual_gb changed */
1873 sdebug_capacity = get_sdebug_capacity();
1874 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1875 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1876 put_unaligned_be32(sdebug_sector_size, arr + 8);
1877 arr[13] = sdebug_physblk_exp & 0xf;
1878 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1880 if (scsi_debug_lbp()) {
1881 arr[14] |= 0x80; /* LBPME */
1882 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1883 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1884 * in the wider field maps to 0 in this field.
1886 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1891 * Since the scsi_debug READ CAPACITY implementation always reports the
1892 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1894 if (devip->zmodel == BLK_ZONED_HM)
1897 arr[15] = sdebug_lowest_aligned & 0xff;
1899 if (have_dif_prot) {
1900 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1901 arr[12] |= 1; /* PROT_EN */
1904 return fill_from_dev_buffer(scp, arr,
1905 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1908 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1910 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1911 struct sdebug_dev_info *devip)
1913 unsigned char *cmd = scp->cmnd;
1915 int host_no = devip->sdbg_host->shost->host_no;
1916 int port_group_a, port_group_b, port_a, port_b;
1920 alen = get_unaligned_be32(cmd + 6);
1921 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1923 return DID_REQUEUE << 16;
1925 * EVPD page 0x88 states we have two ports, one
1926 * real and a fake port with no device connected.
1927 * So we create two port groups with one port each
1928 * and set the group with port B to unavailable.
1930 port_a = 0x1; /* relative port A */
1931 port_b = 0x2; /* relative port B */
1932 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1933 (devip->channel & 0x7f);
1934 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1935 (devip->channel & 0x7f) + 0x80;
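/* Example with hypothetical values: host_no 0 and channel 0 give
 * port_group_a = 0x0100 and port_group_b = 0x0180. */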
1938 * The asymmetric access state is cycled according to the host_id.
1941 if (sdebug_vpd_use_hostno == 0) {
1942 arr[n++] = host_no % 3; /* Asymm access state */
1943 arr[n++] = 0x0F; /* claim: all states are supported */
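/* The host_no % 3 value above cycles the reported ALUA state:
 * 0 = active/optimized, 1 = active/non-optimized, 2 = standby. */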
1945 arr[n++] = 0x0; /* Active/Optimized path */
1946 arr[n++] = 0x01; /* only support active/optimized paths */
1948 put_unaligned_be16(port_group_a, arr + n);
1950 arr[n++] = 0; /* Reserved */
1951 arr[n++] = 0; /* Status code */
1952 arr[n++] = 0; /* Vendor unique */
1953 arr[n++] = 0x1; /* One port per group */
1954 arr[n++] = 0; /* Reserved */
1955 arr[n++] = 0; /* Reserved */
1956 put_unaligned_be16(port_a, arr + n);
1958 arr[n++] = 3; /* Port unavailable */
1959 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1960 put_unaligned_be16(port_group_b, arr + n);
1962 arr[n++] = 0; /* Reserved */
1963 arr[n++] = 0; /* Status code */
1964 arr[n++] = 0; /* Vendor unique */
1965 arr[n++] = 0x1; /* One port per group */
1966 arr[n++] = 0; /* Reserved */
1967 arr[n++] = 0; /* Reserved */
1968 put_unaligned_be16(port_b, arr + n);
1972 put_unaligned_be32(rlen, arr + 0);
1975 * Return the smallest value of either
1976 * - The allocated length
1977 * - The constructed command length
1978 * - The maximum array size
1980 rlen = min(alen, n);
1981 ret = fill_from_dev_buffer(scp, arr,
1982 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1987 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1988 struct sdebug_dev_info *devip)
1991 u8 reporting_opts, req_opcode, sdeb_i, supp;
1993 u32 alloc_len, a_len;
1994 int k, offset, len, errsts, count, bump, na;
1995 const struct opcode_info_t *oip;
1996 const struct opcode_info_t *r_oip;
1998 u8 *cmd = scp->cmnd;
2000 rctd = !!(cmd[2] & 0x80);
2001 reporting_opts = cmd[2] & 0x7;
2002 req_opcode = cmd[3];
2003 req_sa = get_unaligned_be16(cmd + 4);
2004 alloc_len = get_unaligned_be32(cmd + 6);
2005 if (alloc_len < 4 || alloc_len > 0xffff) {
2006 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2007 return check_condition_result;
2009 if (alloc_len > 8192)
2013 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2015 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2017 return check_condition_result;
2019 switch (reporting_opts) {
2020 case 0: /* all commands */
2021 /* count number of commands */
2022 for (count = 0, oip = opcode_info_arr;
2023 oip->num_attached != 0xff; ++oip) {
2024 if (F_INV_OP & oip->flags)
2026 count += (oip->num_attached + 1);
2028 bump = rctd ? 20 : 8;
2029 put_unaligned_be32(count * bump, arr);
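/* Each command descriptor is 8 bytes; with RCTD set a 12-byte command
 * timeouts descriptor (its length field set to 0xa below) follows each
 * descriptor, hence bump = 20. */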
2030 for (offset = 4, oip = opcode_info_arr;
2031 oip->num_attached != 0xff && offset < a_len; ++oip) {
2032 if (F_INV_OP & oip->flags)
2034 na = oip->num_attached;
2035 arr[offset] = oip->opcode;
2036 put_unaligned_be16(oip->sa, arr + offset + 2);
2038 arr[offset + 5] |= 0x2;
2039 if (FF_SA & oip->flags)
2040 arr[offset + 5] |= 0x1;
2041 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2043 put_unaligned_be16(0xa, arr + offset + 8);
2045 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2046 if (F_INV_OP & oip->flags)
2049 arr[offset] = oip->opcode;
2050 put_unaligned_be16(oip->sa, arr + offset + 2);
2052 arr[offset + 5] |= 0x2;
2053 if (FF_SA & oip->flags)
2054 arr[offset + 5] |= 0x1;
2055 put_unaligned_be16(oip->len_mask[0],
2058 put_unaligned_be16(0xa,
2065 case 1: /* one command: opcode only */
2066 case 2: /* one command: opcode plus service action */
2067 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2068 sdeb_i = opcode_ind_arr[req_opcode];
2069 oip = &opcode_info_arr[sdeb_i];
2070 if (F_INV_OP & oip->flags) {
2074 if (1 == reporting_opts) {
2075 if (FF_SA & oip->flags) {
2076 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2079 return check_condition_result;
2082 } else if (2 == reporting_opts &&
2083 0 == (FF_SA & oip->flags)) {
2084 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2085 kfree(arr); /* point at requested sa */
2086 return check_condition_result;
2088 if (0 == (FF_SA & oip->flags) &&
2089 req_opcode == oip->opcode)
2091 else if (0 == (FF_SA & oip->flags)) {
2092 na = oip->num_attached;
2093 for (k = 0, oip = oip->arrp; k < na;
2095 if (req_opcode == oip->opcode)
2098 supp = (k >= na) ? 1 : 3;
2099 } else if (req_sa != oip->sa) {
2100 na = oip->num_attached;
2101 for (k = 0, oip = oip->arrp; k < na;
2103 if (req_sa == oip->sa)
2106 supp = (k >= na) ? 1 : 3;
2110 u = oip->len_mask[0];
2111 put_unaligned_be16(u, arr + 2);
2112 arr[4] = oip->opcode;
2113 for (k = 1; k < u; ++k)
2114 arr[4 + k] = (k < 16) ?
2115 oip->len_mask[k] : 0xff;
2120 arr[1] = (rctd ? 0x80 : 0) | supp;
2122 put_unaligned_be16(0xa, arr + offset);
2127 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2129 return check_condition_result;
2131 offset = (offset < a_len) ? offset : a_len;
2132 len = (offset < alloc_len) ? offset : alloc_len;
2133 errsts = fill_from_dev_buffer(scp, arr, len);
2138 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2139 struct sdebug_dev_info *devip)
2144 u8 *cmd = scp->cmnd;
2146 memset(arr, 0, sizeof(arr));
2147 repd = !!(cmd[2] & 0x80);
2148 alloc_len = get_unaligned_be32(cmd + 6);
2149 if (alloc_len < 4) {
2150 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2151 return check_condition_result;
2153 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2154 arr[1] = 0x1; /* ITNRS */
2161 len = (len < alloc_len) ? len : alloc_len;
2162 return fill_from_dev_buffer(scp, arr, len);
2165 /* <<Following mode page info copied from ST318451LW>> */
2167 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2168 { /* Read-Write Error Recovery page for mode_sense */
2169 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2172 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2174 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2175 return sizeof(err_recov_pg);
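/* Note for this and the other resp_*_pg helpers: pcontrol selects current
 * values (0), the changeable mask (1) or defaults (2); pcontrol == 3 (saved)
 * is rejected in resp_mode_sense() with SAVING_PARAMS_UNSUP. */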
2178 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2179 { /* Disconnect-Reconnect page for mode_sense */
2180 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2181 0, 0, 0, 0, 0, 0, 0, 0};
2183 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2185 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2186 return sizeof(disconnect_pg);
2189 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2190 { /* Format device page for mode_sense */
2191 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2192 0, 0, 0, 0, 0, 0, 0, 0,
2193 0, 0, 0, 0, 0x40, 0, 0, 0};
2195 memcpy(p, format_pg, sizeof(format_pg));
2196 put_unaligned_be16(sdebug_sectors_per, p + 10);
2197 put_unaligned_be16(sdebug_sector_size, p + 12);
2198 if (sdebug_removable)
2199 p[20] |= 0x20; /* should agree with INQUIRY */
2201 memset(p + 2, 0, sizeof(format_pg) - 2);
2202 return sizeof(format_pg);
2205 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2206 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2209 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2210 { /* Caching page for mode_sense */
2211 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2212 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2213 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2214 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2216 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2217 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2218 memcpy(p, caching_pg, sizeof(caching_pg));
2220 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2221 else if (2 == pcontrol)
2222 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2223 return sizeof(caching_pg);
2226 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2229 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2230 { /* Control mode page for mode_sense */
2231 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2233 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2237 ctrl_m_pg[2] |= 0x4;
2239 ctrl_m_pg[2] &= ~0x4;
2242 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2244 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2246 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2247 else if (2 == pcontrol)
2248 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2249 return sizeof(ctrl_m_pg);
2253 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2254 { /* Informational Exceptions control mode page for mode_sense */
2255 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2257 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2260 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2262 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2263 else if (2 == pcontrol)
2264 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2265 return sizeof(iec_m_pg);
2268 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2269 { /* SAS SSP mode page - short format for mode_sense */
2270 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2271 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2273 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2275 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2276 return sizeof(sas_sf_m_pg);
2280 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2282 { /* SAS phy control and discover mode page for mode_sense */
2283 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2284 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2285 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2286 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2287 0x2, 0, 0, 0, 0, 0, 0, 0,
2288 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2289 0, 0, 0, 0, 0, 0, 0, 0,
2290 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2291 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2292 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2293 0x3, 0, 0, 0, 0, 0, 0, 0,
2294 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2295 0, 0, 0, 0, 0, 0, 0, 0,
2299 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2300 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2301 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2302 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2303 port_a = target_dev_id + 1;
2304 port_b = port_a + 1;
2305 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2306 put_unaligned_be32(port_a, p + 20);
2307 put_unaligned_be32(port_b, p + 48 + 20);
2309 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2310 return sizeof(sas_pcd_m_pg);
2313 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2314 { /* SAS SSP shared protocol specific port mode subpage */
2315 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2316 0, 0, 0, 0, 0, 0, 0, 0,
2319 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2321 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2322 return sizeof(sas_sha_m_pg);
2325 #define SDEBUG_MAX_MSENSE_SZ 256
2327 static int resp_mode_sense(struct scsi_cmnd *scp,
2328 struct sdebug_dev_info *devip)
2330 int pcontrol, pcode, subpcode, bd_len;
2331 unsigned char dev_spec;
2332 u32 alloc_len, offset, len;
2334 int target = scp->device->id;
2336 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2337 unsigned char *cmd = scp->cmnd;
2338 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2340 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2341 pcontrol = (cmd[2] & 0xc0) >> 6;
2342 pcode = cmd[2] & 0x3f;
2344 msense_6 = (MODE_SENSE == cmd[0]);
2345 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2346 is_disk = (sdebug_ptype == TYPE_DISK);
2347 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2348 if ((is_disk || is_zbc) && !dbd)
2349 bd_len = llbaa ? 16 : 8;
2352 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2353 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2354 if (0x3 == pcontrol) { /* Saving values not supported */
2355 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2356 return check_condition_result;
2358 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2359 (devip->target * 1000) - 3;
2360 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2361 if (is_disk || is_zbc) {
2362 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2374 arr[4] = 0x1; /* set LONGLBA bit */
2375 arr[7] = bd_len; /* assume 255 or less */
2379 if ((bd_len > 0) && (!sdebug_capacity))
2380 sdebug_capacity = get_sdebug_capacity();
2383 if (sdebug_capacity > 0xfffffffe)
2384 put_unaligned_be32(0xffffffff, ap + 0);
2386 put_unaligned_be32(sdebug_capacity, ap + 0);
2387 put_unaligned_be16(sdebug_sector_size, ap + 6);
2390 } else if (16 == bd_len) {
2391 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2392 put_unaligned_be32(sdebug_sector_size, ap + 12);
2397 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2398 /* TODO: Control Extension page */
2399 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2400 return check_condition_result;
2405 case 0x1: /* Read-Write error recovery page, direct access */
2406 len = resp_err_recov_pg(ap, pcontrol, target);
2409 case 0x2: /* Disconnect-Reconnect page, all devices */
2410 len = resp_disconnect_pg(ap, pcontrol, target);
2413 case 0x3: /* Format device page, direct access */
2415 len = resp_format_pg(ap, pcontrol, target);
2420 case 0x8: /* Caching page, direct access */
2421 if (is_disk || is_zbc) {
2422 len = resp_caching_pg(ap, pcontrol, target);
2427 case 0xa: /* Control Mode page, all devices */
2428 len = resp_ctrl_m_pg(ap, pcontrol, target);
2431 case 0x19: /* if spc==1 then sas phy, control+discover */
2432 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2433 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2434 return check_condition_result;
2437 if ((0x0 == subpcode) || (0xff == subpcode))
2438 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2439 if ((0x1 == subpcode) || (0xff == subpcode))
2440 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2442 if ((0x2 == subpcode) || (0xff == subpcode))
2443 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2446 case 0x1c: /* Informational Exceptions Mode page, all devices */
2447 len = resp_iec_m_pg(ap, pcontrol, target);
2450 case 0x3f: /* Read all Mode pages */
2451 if ((0 == subpcode) || (0xff == subpcode)) {
2452 len = resp_err_recov_pg(ap, pcontrol, target);
2453 len += resp_disconnect_pg(ap + len, pcontrol, target);
2455 len += resp_format_pg(ap + len, pcontrol,
2457 len += resp_caching_pg(ap + len, pcontrol,
2459 } else if (is_zbc) {
2460 len += resp_caching_pg(ap + len, pcontrol,
2463 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2464 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2465 if (0xff == subpcode) {
2466 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2467 target, target_dev_id);
2468 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2470 len += resp_iec_m_pg(ap + len, pcontrol, target);
2473 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2474 return check_condition_result;
2482 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2483 return check_condition_result;
2486 arr[0] = offset - 1;
2488 put_unaligned_be16((offset - 2), arr + 0);
2489 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2492 #define SDEBUG_MAX_MSELECT_SZ 512
2494 static int resp_mode_select(struct scsi_cmnd *scp,
2495 struct sdebug_dev_info *devip)
2497 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2498 int param_len, res, mpage;
2499 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2500 unsigned char *cmd = scp->cmnd;
2501 int mselect6 = (MODE_SELECT == cmd[0]);
2503 memset(arr, 0, sizeof(arr));
2506 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2507 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2508 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2509 return check_condition_result;
2511 res = fetch_to_dev_buffer(scp, arr, param_len);
2513 return DID_ERROR << 16;
2514 else if (sdebug_verbose && (res < param_len))
2515 sdev_printk(KERN_INFO, scp->device,
2516 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2517 __func__, param_len, res);
2518 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2519 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2520 off = bd_len + (mselect6 ? 4 : 8);
2521 if (md_len > 2 || off >= res) {
2522 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2523 return check_condition_result;
2525 mpage = arr[off] & 0x3f;
2526 ps = !!(arr[off] & 0x80);
2528 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2529 return check_condition_result;
2531 spf = !!(arr[off] & 0x40);
2532 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2534 if ((pg_len + off) > param_len) {
2535 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2536 PARAMETER_LIST_LENGTH_ERR, 0);
2537 return check_condition_result;
2540 case 0x8: /* Caching Mode page */
2541 if (caching_pg[1] == arr[off + 1]) {
2542 memcpy(caching_pg + 2, arr + off + 2,
2543 sizeof(caching_pg) - 2);
2544 goto set_mode_changed_ua;
2547 case 0xa: /* Control Mode page */
2548 if (ctrl_m_pg[1] == arr[off + 1]) {
2549 memcpy(ctrl_m_pg + 2, arr + off + 2,
2550 sizeof(ctrl_m_pg) - 2);
2551 if (ctrl_m_pg[4] & 0x8)
2555 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2556 goto set_mode_changed_ua;
2559 case 0x1c: /* Informational Exceptions Mode page */
2560 if (iec_m_pg[1] == arr[off + 1]) {
2561 memcpy(iec_m_pg + 2, arr + off + 2,
2562 sizeof(iec_m_pg) - 2);
2563 goto set_mode_changed_ua;
2569 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2570 return check_condition_result;
2571 set_mode_changed_ua:
2572 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2576 static int resp_temp_l_pg(unsigned char *arr)
2578 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2579 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2582 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2583 return sizeof(temp_l_pg);
2586 static int resp_ie_l_pg(unsigned char *arr)
2588 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2591 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2592 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2593 arr[4] = THRESHOLD_EXCEEDED;
2596 return sizeof(ie_l_pg);
2599 static int resp_env_rep_l_spg(unsigned char *arr)
2601 unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2602 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2603 0x1, 0x0, 0x23, 0x8,
2604 0x0, 55, 72, 35, 55, 45, 0, 0,
2607 memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2608 return sizeof(env_rep_l_spg);
2611 #define SDEBUG_MAX_LSENSE_SZ 512
2613 static int resp_log_sense(struct scsi_cmnd *scp,
2614 struct sdebug_dev_info *devip)
2616 int ppc, sp, pcode, subpcode;
2617 u32 alloc_len, len, n;
2618 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2619 unsigned char *cmd = scp->cmnd;
2621 memset(arr, 0, sizeof(arr));
2625 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2626 return check_condition_result;
2628 pcode = cmd[2] & 0x3f;
2629 subpcode = cmd[3] & 0xff;
2630 alloc_len = get_unaligned_be16(cmd + 7);
2632 if (0 == subpcode) {
2634 case 0x0: /* Supported log pages log page */
2636 arr[n++] = 0x0; /* this page */
2637 arr[n++] = 0xd; /* Temperature */
2638 arr[n++] = 0x2f; /* Informational exceptions */
2641 case 0xd: /* Temperature log page */
2642 arr[3] = resp_temp_l_pg(arr + 4);
2644 case 0x2f: /* Informational exceptions log page */
2645 arr[3] = resp_ie_l_pg(arr + 4);
2648 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2649 return check_condition_result;
2651 } else if (0xff == subpcode) {
2655 case 0x0: /* Supported log pages and subpages log page */
2658 arr[n++] = 0x0; /* 0,0 page */
2660 arr[n++] = 0xff; /* this page */
2662 arr[n++] = 0x0; /* Temperature */
2664 arr[n++] = 0x1; /* Environment reporting */
2666 arr[n++] = 0xff; /* all 0xd subpages */
2668 arr[n++] = 0x0; /* Informational exceptions */
2670 arr[n++] = 0xff; /* all 0x2f subpages */
2673 case 0xd: /* Temperature subpages */
2676 arr[n++] = 0x0; /* Temperature */
2678 arr[n++] = 0x1; /* Environment reporting */
2680 arr[n++] = 0xff; /* these subpages */
2683 case 0x2f: /* Informational exceptions subpages */
2686 arr[n++] = 0x0; /* Informational exceptions */
2688 arr[n++] = 0xff; /* these subpages */
2692 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2693 return check_condition_result;
2695 } else if (subpcode > 0) {
2698 if (pcode == 0xd && subpcode == 1)
2699 arr[3] = resp_env_rep_l_spg(arr + 4);
2701 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2702 return check_condition_result;
2705 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2706 return check_condition_result;
2708 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2709 return fill_from_dev_buffer(scp, arr,
2710 min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2713 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2715 return devip->nr_zones != 0;
2718 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2719 unsigned long long lba)
2721 u32 zno = lba >> devip->zsize_shift;
2722 struct sdeb_zone_state *zsp;
2724 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
2725 return &devip->zstate[zno];
2728 * If the zone capacity is less than the zone size, adjust for gap zones.
2731 zno = 2 * zno - devip->nr_conv_zones;
2732 WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
2733 zsp = &devip->zstate[zno];
2734 if (lba >= zsp->z_start + zsp->z_size)
2736 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
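/* Illustrative mapping, assuming nr_conv_zones == 1 and zcap < zsize:
 * logical zone 1 maps to zstate[1] with its gap zone at zstate[2], logical
 * zone 2 to zstate[3] with gap at zstate[4], and so on; an LBA that falls
 * past the sequential zone's capacity lands in the following gap zone entry. */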
2740 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2742 return zsp->z_type == ZBC_ZTYPE_CNV;
2745 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2747 return zsp->z_type == ZBC_ZTYPE_GAP;
2750 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2752 return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2755 static void zbc_close_zone(struct sdebug_dev_info *devip,
2756 struct sdeb_zone_state *zsp)
2758 enum sdebug_z_cond zc;
2760 if (!zbc_zone_is_seq(zsp))
2764 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2767 if (zc == ZC2_IMPLICIT_OPEN)
2768 devip->nr_imp_open--;
2770 devip->nr_exp_open--;
2772 if (zsp->z_wp == zsp->z_start) {
2773 zsp->z_cond = ZC1_EMPTY;
2775 zsp->z_cond = ZC4_CLOSED;
2780 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2782 struct sdeb_zone_state *zsp = &devip->zstate[0];
2785 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2786 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2787 zbc_close_zone(devip, zsp);
2793 static void zbc_open_zone(struct sdebug_dev_info *devip,
2794 struct sdeb_zone_state *zsp, bool explicit)
2796 enum sdebug_z_cond zc;
2798 if (!zbc_zone_is_seq(zsp))
2802 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2803 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2806 /* Close an implicit open zone if necessary */
2807 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2808 zbc_close_zone(devip, zsp);
2809 else if (devip->max_open &&
2810 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2811 zbc_close_imp_open_zone(devip);
2813 if (zsp->z_cond == ZC4_CLOSED)
2816 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2817 devip->nr_exp_open++;
2819 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2820 devip->nr_imp_open++;
2824 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2825 struct sdeb_zone_state *zsp)
2827 switch (zsp->z_cond) {
2828 case ZC2_IMPLICIT_OPEN:
2829 devip->nr_imp_open--;
2831 case ZC3_EXPLICIT_OPEN:
2832 devip->nr_exp_open--;
2835 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2836 zsp->z_start, zsp->z_cond);
2839 zsp->z_cond = ZC5_FULL;
2842 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2843 unsigned long long lba, unsigned int num)
2845 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2846 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2848 if (!zbc_zone_is_seq(zsp))
2851 if (zsp->z_type == ZBC_ZTYPE_SWR) {
2853 if (zsp->z_wp >= zend)
2854 zbc_set_zone_full(devip, zsp);
2859 if (lba != zsp->z_wp)
2860 zsp->z_non_seq_resource = true;
2866 } else if (end > zsp->z_wp) {
2872 if (zsp->z_wp >= zend)
2873 zbc_set_zone_full(devip, zsp);
2879 zend = zsp->z_start + zsp->z_size;
2884 static int check_zbc_access_params(struct scsi_cmnd *scp,
2885 unsigned long long lba, unsigned int num, bool write)
2887 struct scsi_device *sdp = scp->device;
2888 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2889 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2890 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2893 if (devip->zmodel == BLK_ZONED_HA)
2895 /* For host-managed, reads cannot cross zone type boundaries */
2896 if (zsp->z_type != zsp_end->z_type) {
2897 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2900 return check_condition_result;
2905 /* Writing into a gap zone is not allowed */
2906 if (zbc_zone_is_gap(zsp)) {
2907 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
2908 ATTEMPT_ACCESS_GAP);
2909 return check_condition_result;
2912 /* No restrictions for writes within conventional zones */
2913 if (zbc_zone_is_conv(zsp)) {
2914 if (!zbc_zone_is_conv(zsp_end)) {
2915 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2917 WRITE_BOUNDARY_ASCQ);
2918 return check_condition_result;
2923 if (zsp->z_type == ZBC_ZTYPE_SWR) {
2924 /* Writes cannot cross sequential zone boundaries */
2925 if (zsp_end != zsp) {
2926 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2928 WRITE_BOUNDARY_ASCQ);
2929 return check_condition_result;
2931 /* Cannot write full zones */
2932 if (zsp->z_cond == ZC5_FULL) {
2933 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2934 INVALID_FIELD_IN_CDB, 0);
2935 return check_condition_result;
2937 /* Writes must be aligned to the zone WP */
2938 if (lba != zsp->z_wp) {
2939 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2941 UNALIGNED_WRITE_ASCQ);
2942 return check_condition_result;
2946 /* Handle implicit open of closed and empty zones */
2947 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2948 if (devip->max_open &&
2949 devip->nr_exp_open >= devip->max_open) {
2950 mk_sense_buffer(scp, DATA_PROTECT,
2953 return check_condition_result;
2955 zbc_open_zone(devip, zsp, false);
2961 static inline int check_device_access_params
2962 (struct scsi_cmnd *scp, unsigned long long lba,
2963 unsigned int num, bool write)
2965 struct scsi_device *sdp = scp->device;
2966 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2968 if (lba + num > sdebug_capacity) {
2969 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2970 return check_condition_result;
2972 /* transfer length excessive (tie in to block limits VPD page) */
2973 if (num > sdebug_store_sectors) {
2974 /* needs work to find which cdb byte 'num' comes from */
2975 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2976 return check_condition_result;
2978 if (write && unlikely(sdebug_wp)) {
2979 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2980 return check_condition_result;
2982 if (sdebug_dev_is_zoned(devip))
2983 return check_zbc_access_params(scp, lba, num, write);
2989 * Note: if BUG_ON() fires it usually indicates a problem with the parser
2990 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2991 * that access any of the "stores" in struct sdeb_store_info should call this
2992 * function with bug_if_fake_rw set to true.
2994 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2995 bool bug_if_fake_rw)
2997 if (sdebug_fake_rw) {
2998 BUG_ON(bug_if_fake_rw); /* See note above */
3001 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3004 /* Returns number of bytes copied or -1 if error. */
3005 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3006 u32 sg_skip, u64 lba, u32 num, bool do_write)
3009 u64 block, rest = 0;
3010 enum dma_data_direction dir;
3011 struct scsi_data_buffer *sdb = &scp->sdb;
3015 dir = DMA_TO_DEVICE;
3016 write_since_sync = true;
3018 dir = DMA_FROM_DEVICE;
3021 if (!sdb->length || !sip)
3023 if (scp->sc_data_direction != dir)
3027 block = do_div(lba, sdebug_store_sectors);
3028 if (block + num > sdebug_store_sectors)
3029 rest = block + num - sdebug_store_sectors;
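/* Wrap-around example with a hypothetical store of 1000 sectors: lba = 998,
 * num = 5 gives block = 998 and rest = 3, so 2 sectors are transferred at
 * offset 998 and the remaining 3 wrap to offset 0 of the store. */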
3031 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3032 fsp + (block * sdebug_sector_size),
3033 (num - rest) * sdebug_sector_size, sg_skip, do_write);
3034 if (ret != (num - rest) * sdebug_sector_size)
3038 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3039 fsp, rest * sdebug_sector_size,
3040 sg_skip + ((num - rest) * sdebug_sector_size),
3047 /* Returns number of bytes copied or -1 if error. */
3048 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3050 struct scsi_data_buffer *sdb = &scp->sdb;
3054 if (scp->sc_data_direction != DMA_TO_DEVICE)
3056 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3057 num * sdebug_sector_size, 0, true);
3060 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3061 * arr into sip->storep+lba and return true. If the comparison fails, return false. */
3063 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3064 const u8 *arr, bool compare_only)
3067 u64 block, rest = 0;
3068 u32 store_blks = sdebug_store_sectors;
3069 u32 lb_size = sdebug_sector_size;
3070 u8 *fsp = sip->storep;
3072 block = do_div(lba, store_blks);
3073 if (block + num > store_blks)
3074 rest = block + num - store_blks;
3076 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3080 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3086 arr += num * lb_size;
3087 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3089 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
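/* The caller's arr holds 2 * num logical blocks: the first num blocks are the
 * verify data compared above, the second num blocks are the data written here
 * once the compare succeeds (and compare_only is false). */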
3093 static __be16 dif_compute_csum(const void *buf, int len)
3098 csum = (__force __be16)ip_compute_csum(buf, len);
3100 csum = cpu_to_be16(crc_t10dif(buf, len));
3105 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3106 sector_t sector, u32 ei_lba)
3108 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3110 if (sdt->guard_tag != csum) {
3111 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3112 (unsigned long)sector,
3113 be16_to_cpu(sdt->guard_tag),
3117 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3118 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3119 pr_err("REF check failed on sector %lu\n",
3120 (unsigned long)sector);
3123 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3124 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3125 pr_err("REF check failed on sector %lu\n",
3126 (unsigned long)sector);
3132 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3133 unsigned int sectors, bool read)
3137 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3138 scp->device->hostdata, true);
3139 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3140 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3141 struct sg_mapping_iter miter;
3143 /* Bytes of protection data to copy into sgl */
3144 resid = sectors * sizeof(*dif_storep);
3146 sg_miter_start(&miter, scsi_prot_sglist(scp),
3147 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3148 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3150 while (sg_miter_next(&miter) && resid > 0) {
3151 size_t len = min_t(size_t, miter.length, resid);
3152 void *start = dif_store(sip, sector);
3155 if (dif_store_end < start + len)
3156 rest = start + len - dif_store_end;
3161 memcpy(paddr, start, len - rest);
3163 memcpy(start, paddr, len - rest);
3167 memcpy(paddr + len - rest, dif_storep, rest);
3169 memcpy(dif_storep, paddr + len - rest, rest);
3172 sector += len / sizeof(*dif_storep);
3175 sg_miter_stop(&miter);
3178 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3179 unsigned int sectors, u32 ei_lba)
3184 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3185 scp->device->hostdata, true);
3186 struct t10_pi_tuple *sdt;
3188 for (i = 0; i < sectors; i++, ei_lba++) {
3189 sector = start_sec + i;
3190 sdt = dif_store(sip, sector);
3192 if (sdt->app_tag == cpu_to_be16(0xffff))
3196 * Because scsi_debug acts as both initiator and
3197 * target we proceed to verify the PI even if
3198 * RDPROTECT=3. This is done so the "initiator" knows
3199 * which type of error to return. Otherwise we would
3200 * have to iterate over the PI twice.
3202 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3203 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3212 dif_copy_prot(scp, start_sec, sectors, true);
3219 sdeb_read_lock(struct sdeb_store_info *sip)
3221 if (sdebug_no_rwlock) {
3223 __acquire(&sip->macc_lck);
3225 __acquire(&sdeb_fake_rw_lck);
3228 read_lock(&sip->macc_lck);
3230 read_lock(&sdeb_fake_rw_lck);
3235 sdeb_read_unlock(struct sdeb_store_info *sip)
3237 if (sdebug_no_rwlock) {
3239 __release(&sip->macc_lck);
3241 __release(&sdeb_fake_rw_lck);
3244 read_unlock(&sip->macc_lck);
3246 read_unlock(&sdeb_fake_rw_lck);
3251 sdeb_write_lock(struct sdeb_store_info *sip)
3253 if (sdebug_no_rwlock) {
3255 __acquire(&sip->macc_lck);
3257 __acquire(&sdeb_fake_rw_lck);
3260 write_lock(&sip->macc_lck);
3262 write_lock(&sdeb_fake_rw_lck);
3267 sdeb_write_unlock(struct sdeb_store_info *sip)
3269 if (sdebug_no_rwlock) {
3271 __release(&sip->macc_lck);
3273 __release(&sdeb_fake_rw_lck);
3276 write_unlock(&sip->macc_lck);
3278 write_unlock(&sdeb_fake_rw_lck);
3282 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3289 struct sdeb_store_info *sip = devip2sip(devip, true);
3290 u8 *cmd = scp->cmnd;
3295 lba = get_unaligned_be64(cmd + 2);
3296 num = get_unaligned_be32(cmd + 10);
3301 lba = get_unaligned_be32(cmd + 2);
3302 num = get_unaligned_be16(cmd + 7);
3307 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3308 (u32)(cmd[1] & 0x1f) << 16;
3309 num = (0 == cmd[4]) ? 256 : cmd[4];
3314 lba = get_unaligned_be32(cmd + 2);
3315 num = get_unaligned_be32(cmd + 6);
3318 case XDWRITEREAD_10:
3320 lba = get_unaligned_be32(cmd + 2);
3321 num = get_unaligned_be16(cmd + 7);
3324 default: /* assume READ(32) */
3325 lba = get_unaligned_be64(cmd + 12);
3326 ei_lba = get_unaligned_be32(cmd + 20);
3327 num = get_unaligned_be32(cmd + 28);
3331 if (unlikely(have_dif_prot && check_prot)) {
3332 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3334 mk_sense_invalid_opcode(scp);
3335 return check_condition_result;
3337 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3338 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3339 (cmd[1] & 0xe0) == 0)
3340 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3343 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3344 atomic_read(&sdeb_inject_pending))) {
3346 atomic_set(&sdeb_inject_pending, 0);
3349 ret = check_device_access_params(scp, lba, num, false);
3352 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3353 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3354 ((lba + num) > sdebug_medium_error_start))) {
3355 /* claim unrecoverable read error */
3356 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3357 /* set info field and valid bit for fixed descriptor */
3358 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3359 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3360 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3361 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3362 put_unaligned_be32(ret, scp->sense_buffer + 3);
3364 scsi_set_resid(scp, scsi_bufflen(scp));
3365 return check_condition_result;
3368 sdeb_read_lock(sip);
3371 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3372 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3373 case 1: /* Guard tag error */
3374 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3375 sdeb_read_unlock(sip);
3376 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3377 return check_condition_result;
3378 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3379 sdeb_read_unlock(sip);
3380 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3381 return illegal_condition_result;
3384 case 3: /* Reference tag error */
3385 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3386 sdeb_read_unlock(sip);
3387 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3388 return check_condition_result;
3389 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3390 sdeb_read_unlock(sip);
3391 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3392 return illegal_condition_result;
3398 ret = do_device_access(sip, scp, 0, lba, num, false);
3399 sdeb_read_unlock(sip);
3400 if (unlikely(ret == -1))
3401 return DID_ERROR << 16;
3403 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3405 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3406 atomic_read(&sdeb_inject_pending))) {
3407 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3408 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3409 atomic_set(&sdeb_inject_pending, 0);
3410 return check_condition_result;
3411 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3412 /* Logical block guard check failed */
3413 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3414 atomic_set(&sdeb_inject_pending, 0);
3415 return illegal_condition_result;
3416 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3417 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3418 atomic_set(&sdeb_inject_pending, 0);
3419 return illegal_condition_result;
3425 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3426 unsigned int sectors, u32 ei_lba)
3429 struct t10_pi_tuple *sdt;
3431 sector_t sector = start_sec;
3434 struct sg_mapping_iter diter;
3435 struct sg_mapping_iter piter;
3437 BUG_ON(scsi_sg_count(SCpnt) == 0);
3438 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3440 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3441 scsi_prot_sg_count(SCpnt),
3442 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3443 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3444 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3446 /* For each protection page */
3447 while (sg_miter_next(&piter)) {
3449 if (WARN_ON(!sg_miter_next(&diter))) {
3454 for (ppage_offset = 0; ppage_offset < piter.length;
3455 ppage_offset += sizeof(struct t10_pi_tuple)) {
3456 /* If we're at the end of the current
3457 * data page advance to the next one
3459 if (dpage_offset >= diter.length) {
3460 if (WARN_ON(!sg_miter_next(&diter))) {
3467 sdt = piter.addr + ppage_offset;
3468 daddr = diter.addr + dpage_offset;
3470 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3471 ret = dif_verify(sdt, daddr, sector, ei_lba);
3478 dpage_offset += sdebug_sector_size;
3480 diter.consumed = dpage_offset;
3481 sg_miter_stop(&diter);
3483 sg_miter_stop(&piter);
3485 dif_copy_prot(SCpnt, start_sec, sectors, false);
3492 sg_miter_stop(&diter);
3493 sg_miter_stop(&piter);
3497 static unsigned long lba_to_map_index(sector_t lba)
3499 if (sdebug_unmap_alignment)
3500 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3501 sector_div(lba, sdebug_unmap_granularity);
3505 static sector_t map_index_to_lba(unsigned long index)
3507 sector_t lba = index * sdebug_unmap_granularity;
3509 if (sdebug_unmap_alignment)
3510 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
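/* Worked example with hypothetical parameters unmap_granularity=8 and
 * unmap_alignment=4: lba_to_map_index(4) = (4+4)/8 = 1 and
 * map_index_to_lba(1) = 8-4 = 4, so bit 0 covers LBAs 0-3 (the short leading
 * chunk) and each later bit covers a full 8-LBA granule starting at LBA 4. */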
3514 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3518 unsigned int mapped;
3519 unsigned long index;
3522 index = lba_to_map_index(lba);
3523 mapped = test_bit(index, sip->map_storep);
3526 next = find_next_zero_bit(sip->map_storep, map_size, index);
3528 next = find_next_bit(sip->map_storep, map_size, index);
3530 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3535 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3538 sector_t end = lba + len;
3541 unsigned long index = lba_to_map_index(lba);
3543 if (index < map_size)
3544 set_bit(index, sip->map_storep);
3546 lba = map_index_to_lba(index + 1);
3550 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3553 sector_t end = lba + len;
3554 u8 *fsp = sip->storep;
3557 unsigned long index = lba_to_map_index(lba);
3559 if (lba == map_index_to_lba(index) &&
3560 lba + sdebug_unmap_granularity <= end &&
3562 clear_bit(index, sip->map_storep);
3563 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff bytes */
3564 memset(fsp + lba * sdebug_sector_size,
3565 (sdebug_lbprz & 1) ? 0 : 0xff,
3566 sdebug_sector_size *
3567 sdebug_unmap_granularity);
3569 if (sip->dif_storep) {
3570 memset(sip->dif_storep + lba, 0xff,
3571 sizeof(*sip->dif_storep) *
3572 sdebug_unmap_granularity);
3575 lba = map_index_to_lba(index + 1);
3579 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3586 struct sdeb_store_info *sip = devip2sip(devip, true);
3587 u8 *cmd = scp->cmnd;
3592 lba = get_unaligned_be64(cmd + 2);
3593 num = get_unaligned_be32(cmd + 10);
3598 lba = get_unaligned_be32(cmd + 2);
3599 num = get_unaligned_be16(cmd + 7);
3604 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3605 (u32)(cmd[1] & 0x1f) << 16;
3606 num = (0 == cmd[4]) ? 256 : cmd[4];
3611 lba = get_unaligned_be32(cmd + 2);
3612 num = get_unaligned_be32(cmd + 6);
3615 case 0x53: /* XDWRITEREAD(10) */
3617 lba = get_unaligned_be32(cmd + 2);
3618 num = get_unaligned_be16(cmd + 7);
3621 default: /* assume WRITE(32) */
3622 lba = get_unaligned_be64(cmd + 12);
3623 ei_lba = get_unaligned_be32(cmd + 20);
3624 num = get_unaligned_be32(cmd + 28);
3628 if (unlikely(have_dif_prot && check_prot)) {
3629 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3631 mk_sense_invalid_opcode(scp);
3632 return check_condition_result;
3634 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3635 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3636 (cmd[1] & 0xe0) == 0)
3637 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3641 sdeb_write_lock(sip);
3642 ret = check_device_access_params(scp, lba, num, true);
3644 sdeb_write_unlock(sip);
3649 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3650 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3651 case 1: /* Guard tag error */
3652 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3653 sdeb_write_unlock(sip);
3654 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3655 return illegal_condition_result;
3656 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3657 sdeb_write_unlock(sip);
3658 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3659 return check_condition_result;
3662 case 3: /* Reference tag error */
3663 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3664 sdeb_write_unlock(sip);
3665 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3666 return illegal_condition_result;
3667 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3668 sdeb_write_unlock(sip);
3669 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3670 return check_condition_result;
3676 ret = do_device_access(sip, scp, 0, lba, num, true);
3677 if (unlikely(scsi_debug_lbp()))
3678 map_region(sip, lba, num);
3679 /* If ZBC zone then bump its write pointer */
3680 if (sdebug_dev_is_zoned(devip))
3681 zbc_inc_wp(devip, lba, num);
3682 sdeb_write_unlock(sip);
3683 if (unlikely(-1 == ret))
3684 return DID_ERROR << 16;
3685 else if (unlikely(sdebug_verbose &&
3686 (ret < (num * sdebug_sector_size))))
3687 sdev_printk(KERN_INFO, scp->device,
3688 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3689 my_name, num * sdebug_sector_size, ret);
3691 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3692 atomic_read(&sdeb_inject_pending))) {
3693 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3694 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3695 atomic_set(&sdeb_inject_pending, 0);
3696 return check_condition_result;
3697 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3698 /* Logical block guard check failed */
3699 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3700 atomic_set(&sdeb_inject_pending, 0);
3701 return illegal_condition_result;
3702 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3703 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3704 atomic_set(&sdeb_inject_pending, 0);
3705 return illegal_condition_result;
3712 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3713 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3715 static int resp_write_scat(struct scsi_cmnd *scp,
3716 struct sdebug_dev_info *devip)
3718 u8 *cmd = scp->cmnd;
3721 struct sdeb_store_info *sip = devip2sip(devip, true);
3723 u16 lbdof, num_lrd, k;
3724 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3725 u32 lb_size = sdebug_sector_size;
3730 static const u32 lrd_size = 32; /* + parameter list header size */
3732 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3734 wrprotect = (cmd[10] >> 5) & 0x7;
3735 lbdof = get_unaligned_be16(cmd + 12);
3736 num_lrd = get_unaligned_be16(cmd + 16);
3737 bt_len = get_unaligned_be32(cmd + 28);
3738 } else { /* that leaves WRITE SCATTERED(16) */
3740 wrprotect = (cmd[2] >> 5) & 0x7;
3741 lbdof = get_unaligned_be16(cmd + 4);
3742 num_lrd = get_unaligned_be16(cmd + 8);
3743 bt_len = get_unaligned_be32(cmd + 10);
3744 if (unlikely(have_dif_prot)) {
3745 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3747 mk_sense_invalid_opcode(scp);
3748 return illegal_condition_result;
3750 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3751 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3753 sdev_printk(KERN_ERR, scp->device,
3754 "Unprotected WR to DIF device\n");
3757 if ((num_lrd == 0) || (bt_len == 0))
3758 return 0; /* T10 says these do-nothings are not errors */
3761 sdev_printk(KERN_INFO, scp->device,
3762 "%s: %s: LB Data Offset field bad\n",
3764 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3765 return illegal_condition_result;
3767 lbdof_blen = lbdof * lb_size;
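/* Example with hypothetical values: lb_size = 512 and lbdof = 1 give a
 * 512-byte header area, enough for the 32-byte parameter list header plus at
 * most 15 of the 32-byte LBA range descriptors checked below. */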
3768 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3770 sdev_printk(KERN_INFO, scp->device,
3771 "%s: %s: LBA range descriptors don't fit\n",
3773 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3774 return illegal_condition_result;
3776 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3778 return SCSI_MLQUEUE_HOST_BUSY;
3780 sdev_printk(KERN_INFO, scp->device,
3781 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3782 my_name, __func__, lbdof_blen);
3783 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3785 ret = DID_ERROR << 16;
3789 sdeb_write_lock(sip);
3790 sg_off = lbdof_blen;
3791 /* Spec says Buffer xfer Length field in number of LBs in dout */
3793 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3794 lba = get_unaligned_be64(up + 0);
3795 num = get_unaligned_be32(up + 8);
3797 sdev_printk(KERN_INFO, scp->device,
3798 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3799 my_name, __func__, k, lba, num, sg_off);
3802 ret = check_device_access_params(scp, lba, num, true);
3804 goto err_out_unlock;
3805 num_by = num * lb_size;
3806 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3808 if ((cum_lb + num) > bt_len) {
3810 sdev_printk(KERN_INFO, scp->device,
3811 "%s: %s: sum of blocks > data provided\n",
3813 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3815 ret = illegal_condition_result;
3816 goto err_out_unlock;
3820 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3821 int prot_ret = prot_verify_write(scp, lba, num,
3825 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3827 ret = illegal_condition_result;
3828 goto err_out_unlock;
3832 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3833 /* If ZBC zone then bump its write pointer */
3834 if (sdebug_dev_is_zoned(devip))
3835 zbc_inc_wp(devip, lba, num);
3836 if (unlikely(scsi_debug_lbp()))
3837 map_region(sip, lba, num);
3838 if (unlikely(-1 == ret)) {
3839 ret = DID_ERROR << 16;
3840 goto err_out_unlock;
3841 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3842 sdev_printk(KERN_INFO, scp->device,
3843 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3844 my_name, num_by, ret);
3846 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3847 atomic_read(&sdeb_inject_pending))) {
3848 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3849 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3850 atomic_set(&sdeb_inject_pending, 0);
3851 ret = check_condition_result;
3852 goto err_out_unlock;
3853 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3854 /* Logical block guard check failed */
3855 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3856 atomic_set(&sdeb_inject_pending, 0);
3857 ret = illegal_condition_result;
3858 goto err_out_unlock;
3859 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3860 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3861 atomic_set(&sdeb_inject_pending, 0);
3862 ret = illegal_condition_result;
3863 goto err_out_unlock;
3871 sdeb_write_unlock(sip);
3877 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3878 u32 ei_lba, bool unmap, bool ndob)
3880 struct scsi_device *sdp = scp->device;
3881 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3882 unsigned long long i;
3884 u32 lb_size = sdebug_sector_size;
3886 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3887 scp->device->hostdata, true);
3891 sdeb_write_lock(sip);
3893 ret = check_device_access_params(scp, lba, num, true);
3895 sdeb_write_unlock(sip);
3899 if (unmap && scsi_debug_lbp()) {
3900 unmap_region(sip, lba, num);
3904 block = do_div(lbaa, sdebug_store_sectors);
3905 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3907 fs1p = fsp + (block * lb_size);
3909 memset(fs1p, 0, lb_size);
3912 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3915 sdeb_write_unlock(sip);
3916 return DID_ERROR << 16;
3917 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3918 sdev_printk(KERN_INFO, scp->device,
3919 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3920 my_name, "write same", lb_size, ret);
3922 /* Copy first sector to remaining blocks */
3923 for (i = 1 ; i < num ; i++) {
3925 block = do_div(lbaa, sdebug_store_sectors);
3926 memmove(fsp + (block * lb_size), fs1p, lb_size);
3928 if (scsi_debug_lbp())
3929 map_region(sip, lba, num);
3930 /* If ZBC zone then bump its write pointer */
3931 if (sdebug_dev_is_zoned(devip))
3932 zbc_inc_wp(devip, lba, num);
3934 sdeb_write_unlock(sip);
3939 static int resp_write_same_10(struct scsi_cmnd *scp,
3940 struct sdebug_dev_info *devip)
3942 u8 *cmd = scp->cmnd;
3949 if (sdebug_lbpws10 == 0) {
3950 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3951 return check_condition_result;
3955 lba = get_unaligned_be32(cmd + 2);
3956 num = get_unaligned_be16(cmd + 7);
3957 if (num > sdebug_write_same_length) {
3958 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3959 return check_condition_result;
3961 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3964 static int resp_write_same_16(struct scsi_cmnd *scp,
3965 struct sdebug_dev_info *devip)
3967 u8 *cmd = scp->cmnd;
3974 if (cmd[1] & 0x8) { /* UNMAP */
3975 if (sdebug_lbpws == 0) {
3976 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3977 return check_condition_result;
3981 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3983 lba = get_unaligned_be64(cmd + 2);
3984 num = get_unaligned_be32(cmd + 10);
3985 if (num > sdebug_write_same_length) {
3986 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3987 return check_condition_result;
3989 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3992 /* Note the mode field is in the same position as the (lower) service action
3993 * field. For the Report supported operation codes command, SPC-4 suggests
3994 * each mode of this command should be reported separately; left for the future. */
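/* WRITE BUFFER is used here only to simulate microcode (firmware) downloads:
 * depending on the mode, MICROCODE CHANGED style unit attentions are raised
 * on this logical unit alone or on the other logical units in the same
 * target. */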
3995 static int resp_write_buffer(struct scsi_cmnd *scp,
3996 struct sdebug_dev_info *devip)
3998 u8 *cmd = scp->cmnd;
3999 struct scsi_device *sdp = scp->device;
4000 struct sdebug_dev_info *dp;
4003 mode = cmd[1] & 0x1f;
4005 case 0x4: /* download microcode (MC) and activate (ACT) */
4006 /* set UAs on this device only */
4007 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4008 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4010 case 0x5: /* download MC, save and ACT */
4011 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4013 case 0x6: /* download MC with offsets and ACT */
4014 /* set UAs on most devices (LUs) in this target */
4015 list_for_each_entry(dp,
4016 &devip->sdbg_host->dev_info_list,
4018 if (dp->target == sdp->id) {
4019 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4021 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4025 case 0x7: /* download MC with offsets, save, and ACT */
4026 /* set UA on all devices (LUs) in this target */
4027 list_for_each_entry(dp,
4028 &devip->sdbg_host->dev_info_list,
4030 if (dp->target == sdp->id)
4031 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4035 /* do nothing for this command for other mode values */
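/* COMPARE AND WRITE: the data-out buffer carries 2 * "num" blocks; the first
 * half is compared with the current contents of the store and, only when
 * every byte matches, the second half is written to the same LBAs. On a
 * mismatch a MISCOMPARE sense is returned. */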
4041 static int resp_comp_write(struct scsi_cmnd *scp,
4042 struct sdebug_dev_info *devip)
4044 u8 *cmd = scp->cmnd;
4046 struct sdeb_store_info *sip = devip2sip(devip, true);
4049 u32 lb_size = sdebug_sector_size;
4054 lba = get_unaligned_be64(cmd + 2);
4055 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
4057 return 0; /* degenerate case, not an error */
4058 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4060 mk_sense_invalid_opcode(scp);
4061 return check_condition_result;
4063 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4064 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4065 (cmd[1] & 0xe0) == 0)
4066 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4068 ret = check_device_access_params(scp, lba, num, false);
4072 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4074 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4076 return check_condition_result;
4079 sdeb_write_lock(sip);
4081 ret = do_dout_fetch(scp, dnum, arr);
4083 retval = DID_ERROR << 16;
4085 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
4086 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4087 "indicated=%u, IO sent=%d bytes\n", my_name,
4088 dnum * lb_size, ret);
4089 if (!comp_write_worker(sip, lba, num, arr, false)) {
4090 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4091 retval = check_condition_result;
4094 if (scsi_debug_lbp())
4095 map_region(sip, lba, num);
4097 sdeb_write_unlock(sip);
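/* Each UNMAP block descriptor is 16 bytes: an 8 byte starting LBA, a 4 byte
 * block count and 4 reserved bytes; hence the (payload_len - 8) / 16
 * calculation in resp_unmap() below. */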
4102 struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};
4108 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4111 struct unmap_block_desc *desc;
4112 struct sdeb_store_info *sip = devip2sip(devip, true);
4113 unsigned int i, payload_len, descriptors;
4116 if (!scsi_debug_lbp())
4117 		return 0;	/* fib and say it's done */
4118 payload_len = get_unaligned_be16(scp->cmnd + 7);
4119 BUG_ON(scsi_bufflen(scp) != payload_len);
4121 descriptors = (payload_len - 8) / 16;
4122 if (descriptors > sdebug_unmap_max_desc) {
4123 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4124 return check_condition_result;
4127 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4129 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4131 return check_condition_result;
4134 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4136 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4137 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4139 desc = (void *)&buf[8];
4141 sdeb_write_lock(sip);
4143 for (i = 0 ; i < descriptors ; i++) {
4144 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4145 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4147 ret = check_device_access_params(scp, lba, num, true);
4151 unmap_region(sip, lba, num);
4157 sdeb_write_unlock(sip);
4163 #define SDEBUG_GET_LBA_STATUS_LEN 32
4165 static int resp_get_lba_status(struct scsi_cmnd *scp,
4166 struct sdebug_dev_info *devip)
4168 u8 *cmd = scp->cmnd;
4170 u32 alloc_len, mapped, num;
4172 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4174 lba = get_unaligned_be64(cmd + 2);
4175 alloc_len = get_unaligned_be32(cmd + 10);
4180 ret = check_device_access_params(scp, lba, 1, false);
4184 if (scsi_debug_lbp()) {
4185 struct sdeb_store_info *sip = devip2sip(devip, true);
4187 mapped = map_state(sip, lba, &num);
4190 /* following just in case virtual_gb changed */
4191 sdebug_capacity = get_sdebug_capacity();
4192 if (sdebug_capacity - lba <= 0xffffffff)
4193 num = sdebug_capacity - lba;
4198 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4199 put_unaligned_be32(20, arr); /* Parameter Data Length */
4200 put_unaligned_be64(lba, arr + 8); /* LBA */
4201 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4202 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4204 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4207 static int resp_sync_cache(struct scsi_cmnd *scp,
4208 struct sdebug_dev_info *devip)
4213 u8 *cmd = scp->cmnd;
4215 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4216 lba = get_unaligned_be32(cmd + 2);
4217 num_blocks = get_unaligned_be16(cmd + 7);
4218 } else { /* SYNCHRONIZE_CACHE(16) */
4219 lba = get_unaligned_be64(cmd + 2);
4220 num_blocks = get_unaligned_be32(cmd + 10);
4222 if (lba + num_blocks > sdebug_capacity) {
4223 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4224 return check_condition_result;
4226 if (!write_since_sync || (cmd[1] & 0x2))
4227 res = SDEG_RES_IMMED_MASK;
4228 else /* delay if write_since_sync and IMMED clear */
4229 write_since_sync = false;
4234 * Assuming the LBA+num_blocks is not out-of-range, this function returns
4235 * CONDITION MET if the specified blocks will fit (or already fit) in the
4236 * cache, and GOOD status otherwise. Model a disk with a big cache and always
4237 * yield CONDITION MET. Actually tries to bring the addressed range of main
4238 * memory into the cache associated with the CPU(s).
4240 static int resp_pre_fetch(struct scsi_cmnd *scp,
4241 struct sdebug_dev_info *devip)
4245 u64 block, rest = 0;
4247 u8 *cmd = scp->cmnd;
4248 struct sdeb_store_info *sip = devip2sip(devip, true);
4249 u8 *fsp = sip->storep;
4251 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4252 lba = get_unaligned_be32(cmd + 2);
4253 nblks = get_unaligned_be16(cmd + 7);
4254 } else { /* PRE-FETCH(16) */
4255 lba = get_unaligned_be64(cmd + 2);
4256 nblks = get_unaligned_be32(cmd + 10);
4258 if (lba + nblks > sdebug_capacity) {
4259 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4260 return check_condition_result;
4264 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4265 block = do_div(lba, sdebug_store_sectors);
4266 if (block + nblks > sdebug_store_sectors)
4267 rest = block + nblks - sdebug_store_sectors;
4269 /* Try to bring the PRE-FETCH range into CPU's cache */
4270 sdeb_read_lock(sip);
4271 prefetch_range(fsp + (sdebug_sector_size * block),
4272 (nblks - rest) * sdebug_sector_size);
4274 prefetch_range(fsp, rest * sdebug_sector_size);
4275 sdeb_read_unlock(sip);
4278 res = SDEG_RES_IMMED_MASK;
4279 return res | condition_met_result;
4282 #define RL_BUCKET_ELEMS 8
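/* The REPORT LUNS response is built RL_BUCKET_ELEMS 8 byte LUN entries
 * (64 bytes) at a time so that only a small buffer is needed on the stack. */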
4284 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4285 * (W-LUN), the normal Linux scanning logic does not associate it with a
4286 * device (e.g. /dev/sg7). The following magic will make that association:
4287 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4288 * where <n> is a host number. If there are multiple targets in a host then
4289 * the above will associate a W-LUN with each target. To get a W-LUN only
4290 * for target 2, use "echo '- 2 49409' > scan".
4292 static int resp_report_luns(struct scsi_cmnd *scp,
4293 struct sdebug_dev_info *devip)
4295 unsigned char *cmd = scp->cmnd;
4296 unsigned int alloc_len;
4297 unsigned char select_report;
4299 struct scsi_lun *lun_p;
4300 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4301 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4302 unsigned int wlun_cnt; /* report luns W-LUN count */
4303 unsigned int tlun_cnt; /* total LUN count */
4304 unsigned int rlen; /* response length (in bytes) */
4306 unsigned int off_rsp = 0;
4307 const int sz_lun = sizeof(struct scsi_lun);
4309 clear_luns_changed_on_target(devip);
4311 select_report = cmd[2];
4312 alloc_len = get_unaligned_be32(cmd + 6);
4314 if (alloc_len < 4) {
4315 pr_err("alloc len too small %d\n", alloc_len);
4316 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4317 return check_condition_result;
4320 switch (select_report) {
4321 case 0: /* all LUNs apart from W-LUNs */
4322 lun_cnt = sdebug_max_luns;
4325 case 1: /* only W-LUNs */
4329 case 2: /* all LUNs */
4330 lun_cnt = sdebug_max_luns;
4333 case 0x10: /* only administrative LUs */
4334 case 0x11: /* see SPC-5 */
4335 	case 0x12: /* only subsidiary LUs owned by referenced LU */
4337 pr_debug("select report invalid %d\n", select_report);
4338 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4339 return check_condition_result;
4342 if (sdebug_no_lun_0 && (lun_cnt > 0))
4345 tlun_cnt = lun_cnt + wlun_cnt;
4346 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4347 scsi_set_resid(scp, scsi_bufflen(scp));
4348 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4349 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4351 /* loops rely on sizeof response header same as sizeof lun (both 8) */
4352 lun = sdebug_no_lun_0 ? 1 : 0;
4353 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4354 memset(arr, 0, sizeof(arr));
4355 lun_p = (struct scsi_lun *)&arr[0];
4357 put_unaligned_be32(rlen, &arr[0]);
4361 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4362 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4364 int_to_scsilun(lun++, lun_p);
4365 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4366 lun_p->scsi_lun[0] |= 0x40;
4368 if (j < RL_BUCKET_ELEMS)
4371 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4377 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4381 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4385 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4387 bool is_bytchk3 = false;
4390 u32 vnum, a_num, off;
4391 const u32 lb_size = sdebug_sector_size;
4394 u8 *cmd = scp->cmnd;
4395 struct sdeb_store_info *sip = devip2sip(devip, true);
4397 bytchk = (cmd[1] >> 1) & 0x3;
4399 return 0; /* always claim internal verify okay */
4400 } else if (bytchk == 2) {
4401 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4402 return check_condition_result;
4403 } else if (bytchk == 3) {
4404 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4408 lba = get_unaligned_be64(cmd + 2);
4409 vnum = get_unaligned_be32(cmd + 10);
4411 case VERIFY: /* is VERIFY(10) */
4412 lba = get_unaligned_be32(cmd + 2);
4413 vnum = get_unaligned_be16(cmd + 7);
4416 mk_sense_invalid_opcode(scp);
4417 return check_condition_result;
4420 return 0; /* not an error */
4421 a_num = is_bytchk3 ? 1 : vnum;
4422 /* Treat following check like one for read (i.e. no write) access */
4423 ret = check_device_access_params(scp, lba, a_num, false);
4427 arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4429 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4431 return check_condition_result;
4433 /* Not changing store, so only need read access */
4434 sdeb_read_lock(sip);
4436 ret = do_dout_fetch(scp, a_num, arr);
4438 ret = DID_ERROR << 16;
4440 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4441 sdev_printk(KERN_INFO, scp->device,
4442 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4443 my_name, __func__, a_num * lb_size, ret);
4446 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4447 memcpy(arr + off, arr, lb_size);
4450 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4451 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4452 ret = check_condition_result;
4456 sdeb_read_unlock(sip);
4461 #define RZONES_DESC_HD 64
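/* Both the REPORT ZONES parameter data header and each zone descriptor are
 * RZONES_DESC_HD (64) bytes long. */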
4463 /* Report zones depending on start LBA and reporting options */
4464 static int resp_report_zones(struct scsi_cmnd *scp,
4465 struct sdebug_dev_info *devip)
4467 unsigned int rep_max_zones, nrz = 0;
4469 u32 alloc_len, rep_opts, rep_len;
4472 u8 *arr = NULL, *desc;
4473 u8 *cmd = scp->cmnd;
4474 struct sdeb_zone_state *zsp = NULL;
4475 struct sdeb_store_info *sip = devip2sip(devip, false);
4477 if (!sdebug_dev_is_zoned(devip)) {
4478 mk_sense_invalid_opcode(scp);
4479 return check_condition_result;
4481 zs_lba = get_unaligned_be64(cmd + 2);
4482 alloc_len = get_unaligned_be32(cmd + 10);
4484 return 0; /* not an error */
4485 rep_opts = cmd[14] & 0x3f;
4486 partial = cmd[14] & 0x80;
4488 if (zs_lba >= sdebug_capacity) {
4489 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4490 return check_condition_result;
4493 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4495 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4497 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4499 return check_condition_result;
4502 sdeb_read_lock(sip);
4505 for (lba = zs_lba; lba < sdebug_capacity;
4506 lba = zsp->z_start + zsp->z_size) {
4507 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4509 zsp = zbc_zone(devip, lba);
4516 if (zsp->z_cond != ZC1_EMPTY)
4520 /* Implicit open zones */
4521 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4525 /* Explicit open zones */
4526 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4531 if (zsp->z_cond != ZC4_CLOSED)
4536 if (zsp->z_cond != ZC5_FULL)
4543 * Read-only, offline, reset WP recommended are
4544 * not emulated: no zones to report;
4548 /* non-seq-resource set */
4549 if (!zsp->z_non_seq_resource)
4553 /* All zones except gap zones. */
4554 if (zbc_zone_is_gap(zsp))
4558 /* Not write pointer (conventional) zones */
4559 if (zbc_zone_is_seq(zsp))
4563 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4564 INVALID_FIELD_IN_CDB, 0);
4565 ret = check_condition_result;
4569 if (nrz < rep_max_zones) {
4570 /* Fill zone descriptor */
4571 desc[0] = zsp->z_type;
4572 desc[1] = zsp->z_cond << 4;
4573 if (zsp->z_non_seq_resource)
4575 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4576 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4577 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4581 if (partial && nrz >= rep_max_zones)
4588 /* Zone list length. */
4589 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4591 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4592 /* Zone starting LBA granularity. */
4593 if (devip->zcap < devip->zsize)
4594 put_unaligned_be64(devip->zsize, arr + 16);
4596 rep_len = (unsigned long)desc - (unsigned long)arr;
4597 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4600 sdeb_read_unlock(sip);
4605 /* Logic transplanted from tcmu-runner, file_zbc.c */
4606 static void zbc_open_all(struct sdebug_dev_info *devip)
4608 struct sdeb_zone_state *zsp = &devip->zstate[0];
4611 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4612 if (zsp->z_cond == ZC4_CLOSED)
4613 zbc_open_zone(devip, &devip->zstate[i], true);
4617 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4621 enum sdebug_z_cond zc;
4622 u8 *cmd = scp->cmnd;
4623 struct sdeb_zone_state *zsp;
4624 bool all = cmd[14] & 0x01;
4625 struct sdeb_store_info *sip = devip2sip(devip, false);
4627 if (!sdebug_dev_is_zoned(devip)) {
4628 mk_sense_invalid_opcode(scp);
4629 return check_condition_result;
4632 sdeb_write_lock(sip);
4635 	/* Check if all closed zones can be opened */
4636 if (devip->max_open &&
4637 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4638 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4640 res = check_condition_result;
4643 /* Open all closed zones */
4644 zbc_open_all(devip);
4648 /* Open the specified zone */
4649 z_id = get_unaligned_be64(cmd + 2);
4650 if (z_id >= sdebug_capacity) {
4651 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4652 res = check_condition_result;
4656 zsp = zbc_zone(devip, z_id);
4657 if (z_id != zsp->z_start) {
4658 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4659 res = check_condition_result;
4662 if (zbc_zone_is_conv(zsp)) {
4663 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4664 res = check_condition_result;
4669 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4672 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4673 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4675 res = check_condition_result;
4679 zbc_open_zone(devip, zsp, true);
4681 sdeb_write_unlock(sip);
4685 static void zbc_close_all(struct sdebug_dev_info *devip)
4689 for (i = 0; i < devip->nr_zones; i++)
4690 zbc_close_zone(devip, &devip->zstate[i]);
4693 static int resp_close_zone(struct scsi_cmnd *scp,
4694 struct sdebug_dev_info *devip)
4698 u8 *cmd = scp->cmnd;
4699 struct sdeb_zone_state *zsp;
4700 bool all = cmd[14] & 0x01;
4701 struct sdeb_store_info *sip = devip2sip(devip, false);
4703 if (!sdebug_dev_is_zoned(devip)) {
4704 mk_sense_invalid_opcode(scp);
4705 return check_condition_result;
4708 sdeb_write_lock(sip);
4711 zbc_close_all(devip);
4715 /* Close specified zone */
4716 z_id = get_unaligned_be64(cmd + 2);
4717 if (z_id >= sdebug_capacity) {
4718 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4719 res = check_condition_result;
4723 zsp = zbc_zone(devip, z_id);
4724 if (z_id != zsp->z_start) {
4725 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4726 res = check_condition_result;
4729 if (zbc_zone_is_conv(zsp)) {
4730 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4731 res = check_condition_result;
4735 zbc_close_zone(devip, zsp);
4737 sdeb_write_unlock(sip);
4741 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4742 struct sdeb_zone_state *zsp, bool empty)
4744 enum sdebug_z_cond zc = zsp->z_cond;
4746 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4747 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4748 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4749 zbc_close_zone(devip, zsp);
4750 if (zsp->z_cond == ZC4_CLOSED)
4752 zsp->z_wp = zsp->z_start + zsp->z_size;
4753 zsp->z_cond = ZC5_FULL;
4757 static void zbc_finish_all(struct sdebug_dev_info *devip)
4761 for (i = 0; i < devip->nr_zones; i++)
4762 zbc_finish_zone(devip, &devip->zstate[i], false);
4765 static int resp_finish_zone(struct scsi_cmnd *scp,
4766 struct sdebug_dev_info *devip)
4768 struct sdeb_zone_state *zsp;
4771 u8 *cmd = scp->cmnd;
4772 bool all = cmd[14] & 0x01;
4773 struct sdeb_store_info *sip = devip2sip(devip, false);
4775 if (!sdebug_dev_is_zoned(devip)) {
4776 mk_sense_invalid_opcode(scp);
4777 return check_condition_result;
4780 sdeb_write_lock(sip);
4783 zbc_finish_all(devip);
4787 /* Finish the specified zone */
4788 z_id = get_unaligned_be64(cmd + 2);
4789 if (z_id >= sdebug_capacity) {
4790 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4791 res = check_condition_result;
4795 zsp = zbc_zone(devip, z_id);
4796 if (z_id != zsp->z_start) {
4797 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4798 res = check_condition_result;
4801 if (zbc_zone_is_conv(zsp)) {
4802 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4803 res = check_condition_result;
4807 zbc_finish_zone(devip, zsp, true);
4809 sdeb_write_unlock(sip);
4813 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4814 struct sdeb_zone_state *zsp)
4816 enum sdebug_z_cond zc;
4817 struct sdeb_store_info *sip = devip2sip(devip, false);
4819 if (!zbc_zone_is_seq(zsp))
4823 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4824 zbc_close_zone(devip, zsp);
4826 if (zsp->z_cond == ZC4_CLOSED)
4829 if (zsp->z_wp > zsp->z_start)
4830 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4831 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4833 zsp->z_non_seq_resource = false;
4834 zsp->z_wp = zsp->z_start;
4835 zsp->z_cond = ZC1_EMPTY;
4838 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4842 for (i = 0; i < devip->nr_zones; i++)
4843 zbc_rwp_zone(devip, &devip->zstate[i]);
4846 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4848 struct sdeb_zone_state *zsp;
4851 u8 *cmd = scp->cmnd;
4852 bool all = cmd[14] & 0x01;
4853 struct sdeb_store_info *sip = devip2sip(devip, false);
4855 if (!sdebug_dev_is_zoned(devip)) {
4856 mk_sense_invalid_opcode(scp);
4857 return check_condition_result;
4860 sdeb_write_lock(sip);
4867 z_id = get_unaligned_be64(cmd + 2);
4868 if (z_id >= sdebug_capacity) {
4869 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4870 res = check_condition_result;
4874 zsp = zbc_zone(devip, z_id);
4875 if (z_id != zsp->z_start) {
4876 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4877 res = check_condition_result;
4880 if (zbc_zone_is_conv(zsp)) {
4881 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4882 res = check_condition_result;
4886 zbc_rwp_zone(devip, zsp);
4888 sdeb_write_unlock(sip);
4892 static u32 get_tag(struct scsi_cmnd *cmnd)
4894 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4897 /* Queued (deferred) command completions converge here. */
4898 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4900 struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
4901 unsigned long flags;
4902 struct scsi_cmnd *scp = sqcp->scmd;
4903 struct sdebug_scsi_cmd *sdsc;
4906 if (sdebug_statistics) {
4907 atomic_inc(&sdebug_completions);
4908 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4909 atomic_inc(&sdebug_miss_cpus);
4913 pr_err("scmd=NULL\n");
4917 sdsc = scsi_cmd_priv(scp);
4918 spin_lock_irqsave(&sdsc->lock, flags);
4919 aborted = sd_dp->aborted;
4920 if (unlikely(aborted))
4921 sd_dp->aborted = false;
4922 ASSIGN_QUEUED_CMD(scp, NULL);
4924 spin_unlock_irqrestore(&sdsc->lock, flags);
4927 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
4928 blk_abort_request(scsi_cmd_to_rq(scp));
4932 scsi_done(scp); /* callback to mid level */
4934 sdebug_free_queued_cmd(sqcp);
4937 /* When high resolution timer goes off this function is called. */
4938 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4940 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4942 sdebug_q_cmd_complete(sd_dp);
4943 return HRTIMER_NORESTART;
4946 /* When work queue schedules work, it calls this function. */
4947 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4949 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4951 sdebug_q_cmd_complete(sd_dp);
4954 static bool got_shared_uuid;
4955 static uuid_t shared_uuid;
4957 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4959 struct sdeb_zone_state *zsp;
4960 sector_t capacity = get_sdebug_capacity();
4961 sector_t conv_capacity;
4962 sector_t zstart = 0;
4966 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4967 * a zone size allowing for at least 4 zones on the device. Otherwise,
4968 * use the specified zone size checking that at least 2 zones can be
4969 * created for the device.
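 * For example, if the backing store is smaller than four zones of the default
 * size, the loop below keeps halving the zone size until at least four zones
 * fit (and gives up if that would leave fewer than two sectors per zone).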
4971 if (!sdeb_zbc_zone_size_mb) {
4972 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4973 >> ilog2(sdebug_sector_size);
4974 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4976 if (devip->zsize < 2) {
4977 pr_err("Device capacity too small\n");
4981 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4982 pr_err("Zone size is not a power of 2\n");
4985 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4986 >> ilog2(sdebug_sector_size);
4987 if (devip->zsize >= capacity) {
4988 pr_err("Zone size too large for device capacity\n");
4993 devip->zsize_shift = ilog2(devip->zsize);
4994 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4996 if (sdeb_zbc_zone_cap_mb == 0) {
4997 devip->zcap = devip->zsize;
4999 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5000 ilog2(sdebug_sector_size);
5001 if (devip->zcap > devip->zsize) {
5002 pr_err("Zone capacity too large\n");
5007 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5008 if (conv_capacity >= capacity) {
5009 pr_err("Number of conventional zones too large\n");
5012 devip->nr_conv_zones = sdeb_zbc_nr_conv;
5013 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5015 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5017 /* Add gap zones if zone capacity is smaller than the zone size */
5018 if (devip->zcap < devip->zsize)
5019 devip->nr_zones += devip->nr_seq_zones;
5021 if (devip->zmodel == BLK_ZONED_HM) {
5022 /* zbc_max_open_zones can be 0, meaning "not reported" */
5023 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5024 devip->max_open = (devip->nr_zones - 1) / 2;
5026 devip->max_open = sdeb_zbc_max_open;
5029 devip->zstate = kcalloc(devip->nr_zones,
5030 sizeof(struct sdeb_zone_state), GFP_KERNEL);
5034 for (i = 0; i < devip->nr_zones; i++) {
5035 zsp = &devip->zstate[i];
5037 zsp->z_start = zstart;
5039 if (i < devip->nr_conv_zones) {
5040 zsp->z_type = ZBC_ZTYPE_CNV;
5041 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5042 zsp->z_wp = (sector_t)-1;
5044 min_t(u64, devip->zsize, capacity - zstart);
5045 } else if ((zstart & (devip->zsize - 1)) == 0) {
5046 if (devip->zmodel == BLK_ZONED_HM)
5047 zsp->z_type = ZBC_ZTYPE_SWR;
5049 zsp->z_type = ZBC_ZTYPE_SWP;
5050 zsp->z_cond = ZC1_EMPTY;
5051 zsp->z_wp = zsp->z_start;
5053 min_t(u64, devip->zcap, capacity - zstart);
5055 zsp->z_type = ZBC_ZTYPE_GAP;
5056 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5057 zsp->z_wp = (sector_t)-1;
5058 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5062 WARN_ON_ONCE((int)zsp->z_size <= 0);
5063 zstart += zsp->z_size;
5069 static struct sdebug_dev_info *sdebug_device_create(
5070 struct sdebug_host_info *sdbg_host, gfp_t flags)
5072 struct sdebug_dev_info *devip;
5074 devip = kzalloc(sizeof(*devip), flags);
5076 if (sdebug_uuid_ctl == 1)
5077 uuid_gen(&devip->lu_name);
5078 else if (sdebug_uuid_ctl == 2) {
5079 if (got_shared_uuid)
5080 devip->lu_name = shared_uuid;
5082 uuid_gen(&shared_uuid);
5083 got_shared_uuid = true;
5084 devip->lu_name = shared_uuid;
5087 devip->sdbg_host = sdbg_host;
5088 if (sdeb_zbc_in_use) {
5089 devip->zmodel = sdeb_zbc_model;
5090 if (sdebug_device_create_zones(devip)) {
5095 devip->zmodel = BLK_ZONED_NONE;
5097 devip->create_ts = ktime_get_boottime();
5098 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5099 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5104 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5106 struct sdebug_host_info *sdbg_host;
5107 struct sdebug_dev_info *open_devip = NULL;
5108 struct sdebug_dev_info *devip;
5110 sdbg_host = shost_to_sdebug_host(sdev->host);
5112 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5113 if ((devip->used) && (devip->channel == sdev->channel) &&
5114 (devip->target == sdev->id) &&
5115 (devip->lun == sdev->lun))
5118 if ((!devip->used) && (!open_devip))
5122 if (!open_devip) { /* try and make a new one */
5123 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5125 pr_err("out of memory at line %d\n", __LINE__);
5130 open_devip->channel = sdev->channel;
5131 open_devip->target = sdev->id;
5132 open_devip->lun = sdev->lun;
5133 open_devip->sdbg_host = sdbg_host;
5134 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5135 open_devip->used = true;
5139 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5142 pr_info("slave_alloc <%u %u %u %llu>\n",
5143 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5147 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5149 struct sdebug_dev_info *devip =
5150 (struct sdebug_dev_info *)sdp->hostdata;
5153 pr_info("slave_configure <%u %u %u %llu>\n",
5154 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5155 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5156 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5157 if (devip == NULL) {
5158 devip = find_build_dev_info(sdp);
5160 return 1; /* no resources, will be marked offline */
5162 sdp->hostdata = devip;
5164 sdp->no_uld_attach = 1;
5165 config_cdb_len(sdp);
5169 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5171 struct sdebug_dev_info *devip =
5172 (struct sdebug_dev_info *)sdp->hostdata;
5175 pr_info("slave_destroy <%u %u %u %llu>\n",
5176 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5178 /* make this slot available for re-use */
5179 devip->used = false;
5180 sdp->hostdata = NULL;
5184 /* Returns true if we require the queued memory to be freed by the caller. */
5185 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5186 enum sdeb_defer_type defer_t)
5188 if (defer_t == SDEB_DEFER_HRT) {
5189 int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5192 case 0: /* Not active, it must have already run */
5193 case -1: /* -1 It's executing the CB */
5195 case 1: /* Was active, we've now cancelled */
5199 } else if (defer_t == SDEB_DEFER_WQ) {
5200 /* Cancel if pending */
5201 if (cancel_work_sync(&sd_dp->ew.work))
5203 /* Was not pending, so it must have run */
5205 } else if (defer_t == SDEB_DEFER_POLL) {
5213 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5215 enum sdeb_defer_type l_defer_t;
5216 struct sdebug_defer *sd_dp;
5217 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5218 struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5220 lockdep_assert_held(&sdsc->lock);
5224 sd_dp = &sqcp->sd_dp;
5225 l_defer_t = READ_ONCE(sd_dp->defer_t);
5226 ASSIGN_QUEUED_CMD(cmnd, NULL);
5228 if (stop_qc_helper(sd_dp, l_defer_t))
5229 sdebug_free_queued_cmd(sqcp);
5235 * Called from scsi_debug_abort() only, which is for timed-out cmd.
5237 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5239 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5240 unsigned long flags;
5243 spin_lock_irqsave(&sdsc->lock, flags);
5244 res = scsi_debug_stop_cmnd(cmnd);
5245 spin_unlock_irqrestore(&sdsc->lock, flags);
5251 * All we can do is set the cmnd as internally aborted and wait for it to
5252 * finish. We cannot call scsi_done() as the normal completion path may do that.
5254 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5256 scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5261 /* Deletes (stops) timers or work queues of all queued commands */
5262 static void stop_all_queued(void)
5264 struct sdebug_host_info *sdhp;
5266 mutex_lock(&sdebug_host_list_mutex);
5267 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5268 struct Scsi_Host *shost = sdhp->shost;
5270 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5272 mutex_unlock(&sdebug_host_list_mutex);
5275 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5277 bool ok = scsi_debug_abort_cmnd(SCpnt);
5281 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5282 sdev_printk(KERN_INFO, SCpnt->device,
5283 "%s: command%s found\n", __func__,
5289 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5291 struct scsi_device *sdp = data;
5292 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5294 if (scmd->device == sdp)
5295 scsi_debug_abort_cmnd(scmd);
5300 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5301 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5303 struct Scsi_Host *shost = sdp->host;
5305 blk_mq_tagset_busy_iter(&shost->tag_set,
5306 scsi_debug_stop_all_queued_iter, sdp);
5309 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5311 struct scsi_device *sdp = SCpnt->device;
5312 struct sdebug_dev_info *devip = sdp->hostdata;
5316 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5317 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5319 scsi_debug_stop_all_queued(sdp);
5321 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5326 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5328 struct scsi_device *sdp = SCpnt->device;
5329 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5330 struct sdebug_dev_info *devip;
5333 ++num_target_resets;
5334 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5335 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5337 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5338 if (devip->target == sdp->id) {
5339 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5344 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5345 sdev_printk(KERN_INFO, sdp,
5346 "%s: %d device(s) found in target\n", __func__, k);
5351 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5353 struct scsi_device *sdp = SCpnt->device;
5354 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5355 struct sdebug_dev_info *devip;
5360 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5361 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5363 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5364 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5368 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5369 sdev_printk(KERN_INFO, sdp,
5370 "%s: %d device(s) found in host\n", __func__, k);
5374 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5376 struct sdebug_host_info *sdbg_host;
5377 struct sdebug_dev_info *devip;
5381 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5382 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5383 mutex_lock(&sdebug_host_list_mutex);
5384 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5385 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5387 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5391 mutex_unlock(&sdebug_host_list_mutex);
5393 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5394 sdev_printk(KERN_INFO, SCpnt->device,
5395 "%s: %d device(s) found\n", __func__, k);
5399 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5401 struct msdos_partition *pp;
5402 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5403 int sectors_per_part, num_sectors, k;
5404 int heads_by_sects, start_sec, end_sec;
5406 /* assume partition table already zeroed */
5407 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5409 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5410 sdebug_num_parts = SDEBUG_MAX_PARTS;
5411 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5413 num_sectors = (int)get_sdebug_capacity();
5414 sectors_per_part = (num_sectors - sdebug_sectors_per)
5416 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5417 starts[0] = sdebug_sectors_per;
5418 max_part_secs = sectors_per_part;
5419 for (k = 1; k < sdebug_num_parts; ++k) {
5420 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5422 if (starts[k] - starts[k - 1] < max_part_secs)
5423 max_part_secs = starts[k] - starts[k - 1];
5425 starts[sdebug_num_parts] = num_sectors;
5426 starts[sdebug_num_parts + 1] = 0;
5428 ramp[510] = 0x55; /* magic partition markings */
5430 pp = (struct msdos_partition *)(ramp + 0x1be);
5431 for (k = 0; starts[k + 1]; ++k, ++pp) {
5432 start_sec = starts[k];
5433 end_sec = starts[k] + max_part_secs - 1;
5436 pp->cyl = start_sec / heads_by_sects;
5437 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5438 / sdebug_sectors_per;
5439 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5441 pp->end_cyl = end_sec / heads_by_sects;
5442 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5443 / sdebug_sectors_per;
5444 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5446 pp->start_sect = cpu_to_le32(start_sec);
5447 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5448 pp->sys_ind = 0x83; /* plain Linux partition */
5452 static void block_unblock_all_queues(bool block)
5454 struct sdebug_host_info *sdhp;
5456 lockdep_assert_held(&sdebug_host_list_mutex);
5458 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5459 struct Scsi_Host *shost = sdhp->shost;
5462 scsi_block_requests(shost);
5464 scsi_unblock_requests(shost);
5468 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5469 * commands will be processed normally before triggers occur.
5471 static void tweak_cmnd_count(void)
5475 modulo = abs(sdebug_every_nth);
5479 mutex_lock(&sdebug_host_list_mutex);
5480 block_unblock_all_queues(true);
5481 count = atomic_read(&sdebug_cmnd_count);
5482 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5483 block_unblock_all_queues(false);
5484 mutex_unlock(&sdebug_host_list_mutex);
5487 static void clear_queue_stats(void)
5489 atomic_set(&sdebug_cmnd_count, 0);
5490 atomic_set(&sdebug_completions, 0);
5491 atomic_set(&sdebug_miss_cpus, 0);
5492 atomic_set(&sdebug_a_tsf, 0);
5495 static bool inject_on_this_cmd(void)
5497 if (sdebug_every_nth == 0)
5499 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5502 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5505 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
5508 kmem_cache_free(queued_cmd_cache, sqcp);
5511 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
5513 struct sdebug_queued_cmd *sqcp;
5514 struct sdebug_defer *sd_dp;
5516 sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
5520 sd_dp = &sqcp->sd_dp;
5522 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5523 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5524 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5531 /* Complete the processing of the thread that queued a SCSI command to this
5532 * driver. It either completes the command by calling scsi_done() or
5533 * schedules an hrtimer or work queue item and then returns 0. Returns
5534 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
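 *
 * Timing selection (summary of the logic below): delta_jiff == 0 responds in
 * the caller's thread; delta_jiff > 0 or ndelay > 0 defers completion via a
 * hrtimer, or marks the command for mq_poll when the request was submitted
 * polled; a negative delta_jiff defers completion to the work queue.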
5536 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5538 int (*pfp)(struct scsi_cmnd *,
5539 struct sdebug_dev_info *),
5540 int delta_jiff, int ndelay)
5542 struct request *rq = scsi_cmd_to_rq(cmnd);
5543 bool polled = rq->cmd_flags & REQ_POLLED;
5544 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5545 unsigned long flags;
5546 u64 ns_from_boot = 0;
5547 struct sdebug_queued_cmd *sqcp;
5548 struct scsi_device *sdp;
5549 struct sdebug_defer *sd_dp;
5551 if (unlikely(devip == NULL)) {
5552 if (scsi_result == 0)
5553 scsi_result = DID_NO_CONNECT << 16;
5554 goto respond_in_thread;
5558 if (delta_jiff == 0)
5559 goto respond_in_thread;
5562 if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5563 (scsi_result == 0))) {
5564 int num_in_q = scsi_device_busy(sdp);
5565 int qdepth = cmnd->device->queue_depth;
5567 if ((num_in_q == qdepth) &&
5568 (atomic_inc_return(&sdebug_a_tsf) >=
5569 abs(sdebug_every_nth))) {
5570 atomic_set(&sdebug_a_tsf, 0);
5571 scsi_result = device_qfull_result;
5573 if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
5574 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
5575 __func__, num_in_q);
5579 sqcp = sdebug_alloc_queued_cmd(cmnd);
5581 pr_err("%s no alloc\n", __func__);
5582 return SCSI_MLQUEUE_HOST_BUSY;
5584 sd_dp = &sqcp->sd_dp;
5587 ns_from_boot = ktime_get_boottime_ns();
5589 /* one of the resp_*() response functions is called here */
5590 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5591 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5592 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5593 delta_jiff = ndelay = 0;
5595 if (cmnd->result == 0 && scsi_result != 0)
5596 cmnd->result = scsi_result;
5597 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5598 if (atomic_read(&sdeb_inject_pending)) {
5599 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5600 atomic_set(&sdeb_inject_pending, 0);
5601 cmnd->result = check_condition_result;
5605 if (unlikely(sdebug_verbose && cmnd->result))
5606 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5607 __func__, cmnd->result);
5609 if (delta_jiff > 0 || ndelay > 0) {
5612 if (delta_jiff > 0) {
5613 u64 ns = jiffies_to_nsecs(delta_jiff);
5615 if (sdebug_random && ns < U32_MAX) {
5616 ns = get_random_u32_below((u32)ns);
5617 } else if (sdebug_random) {
5618 ns >>= 12; /* scale to 4 usec precision */
5619 if (ns < U32_MAX) /* over 4 hours max */
5620 ns = get_random_u32_below((u32)ns);
5623 kt = ns_to_ktime(ns);
5624 } else { /* ndelay has a 4.2 second max */
5625 kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
5627 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5628 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5630 if (kt <= d) { /* elapsed duration >= kt */
5631 /* call scsi_done() from this thread */
5632 sdebug_free_queued_cmd(sqcp);
5636 /* otherwise reduce kt by elapsed time */
5640 if (sdebug_statistics)
5641 sd_dp->issuing_cpu = raw_smp_processor_id();
5643 spin_lock_irqsave(&sdsc->lock, flags);
5644 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5645 ASSIGN_QUEUED_CMD(cmnd, sqcp);
5646 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5647 spin_unlock_irqrestore(&sdsc->lock, flags);
5649 /* schedule the invocation of scsi_done() for a later time */
5650 spin_lock_irqsave(&sdsc->lock, flags);
5651 ASSIGN_QUEUED_CMD(cmnd, sqcp);
5652 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5653 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5655 * The completion handler will try to grab sdsc->lock,
5656 * so there is no chance that the completion handler
5657 * will call scsi_done() until we release the lock
5658 * here (so ok to keep referencing sdsc).
5660 spin_unlock_irqrestore(&sdsc->lock, flags);
5662 } else { /* jdelay < 0, use work queue */
5663 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5664 atomic_read(&sdeb_inject_pending))) {
5665 sd_dp->aborted = true;
5666 atomic_set(&sdeb_inject_pending, 0);
5667 sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
5668 blk_mq_unique_tag_to_tag(get_tag(cmnd)));
5671 if (sdebug_statistics)
5672 sd_dp->issuing_cpu = raw_smp_processor_id();
5674 spin_lock_irqsave(&sdsc->lock, flags);
5675 ASSIGN_QUEUED_CMD(cmnd, sqcp);
5676 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5677 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5678 spin_unlock_irqrestore(&sdsc->lock, flags);
5680 spin_lock_irqsave(&sdsc->lock, flags);
5681 ASSIGN_QUEUED_CMD(cmnd, sqcp);
5682 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5683 schedule_work(&sd_dp->ew.work);
5684 spin_unlock_irqrestore(&sdsc->lock, flags);
5690 respond_in_thread: /* call back to mid-layer using invocation thread */
5691 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5692 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5693 if (cmnd->result == 0 && scsi_result != 0)
5694 cmnd->result = scsi_result;
5699 /* Note: The following macros create attribute files in the
5700 /sys/module/scsi_debug/parameters directory. Unfortunately this
5701 driver is unaware of a change and cannot trigger auxiliary actions
5702 as it can when the corresponding attribute in the
5703 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
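   For example (illustrative): "echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay"
   lets the driver react to the new value, whereas writing the same value to
   /sys/module/scsi_debug/parameters/delay only updates the variable.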
5705 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5706 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5707 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5708 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5709 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5710 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5711 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5712 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5713 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5714 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5715 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5716 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5717 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5718 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5719 module_param_string(inq_product, sdebug_inq_product_id,
5720 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5721 module_param_string(inq_rev, sdebug_inq_product_rev,
5722 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5723 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5724 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5725 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5726 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5727 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5728 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5729 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5730 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5731 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5732 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5733 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5735 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5737 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5738 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5739 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5740 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5741 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5742 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5743 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5744 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5745 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5746 module_param_named(per_host_store, sdebug_per_host_store, bool,
5748 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5749 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5750 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5751 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5752 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5753 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5754 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5755 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5756 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5757 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5758 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5759 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5760 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5761 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5762 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5763 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5764 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5765 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5767 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5768 module_param_named(write_same_length, sdebug_write_same_length, int,
5770 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5771 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5772 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5773 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5774 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5776 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5777 MODULE_DESCRIPTION("SCSI debug adapter driver");
5778 MODULE_LICENSE("GPL");
5779 MODULE_VERSION(SDEBUG_VERSION);
5781 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5782 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5783 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5784 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5785 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5786 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5787 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5788 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5789 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5790 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5791 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5792 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5793 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5794 MODULE_PARM_DESC(host_max_queue,
5795 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5796 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5797 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5798 SDEBUG_VERSION "\")");
5799 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5800 MODULE_PARM_DESC(lbprz,
5801 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5802 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5803 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5804 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5805 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5806 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
5807 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5808 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5809 MODULE_PARM_DESC(medium_error_count, "count of sectors, starting at medium_error_start, for which to return a MEDIUM error");
5810 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5811 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5812 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5813 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5814 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5815 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5816 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5817 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5818 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5819 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5820 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5821 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5822 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5823 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5824 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5825 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5826 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5827 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5828 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5829 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5830 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5831 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5832 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5833 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5834 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5835 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5836 MODULE_PARM_DESC(uuid_ctl,
5837 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5838 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5839 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5840 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5841 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5842 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5843 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5844 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5845 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5846 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
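/*
 * Example module load exercising some of the above (values illustrative only):
 *   modprobe scsi_debug dev_size_mb=1024 zbc=managed zone_size_mb=128 \
 *            zone_nr_conv=4 zone_max_open=8
 */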
5848 #define SDEBUG_INFO_LEN 256
5849 static char sdebug_info[SDEBUG_INFO_LEN];
5851 static const char *scsi_debug_info(struct Scsi_Host *shp)
5855 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5856 my_name, SDEBUG_VERSION, sdebug_version_date);
5857 if (k >= (SDEBUG_INFO_LEN - 1))
5859 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5860 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5861 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5862 "statistics", (int)sdebug_statistics);
5866 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5867 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5872 int minLen = length > 15 ? 15 : length;
5874 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5876 memcpy(arr, buffer, minLen);
5878 if (1 != sscanf(arr, "%d", &opts))
5881 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5882 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5883 if (sdebug_every_nth != 0)
5888 struct sdebug_submit_queue_data {
5894 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
5896 struct sdebug_submit_queue_data *data = opaque;
5897 u32 unique_tag = blk_mq_unique_tag(rq);
5898 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
5899 u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
5900 int queue_num = data->queue_num;
5902 if (hwq != queue_num)
5905 /* Rely on iter'ing in ascending tag order */
5906 if (*data->first == -1)
5907 *data->first = *data->last = tag;
5914 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5915 * same for each scsi_debug host (if more than one). Some of the counters
5916 * shown are not atomic, so they may be inaccurate on a busy system. */
5917 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5919 struct sdebug_host_info *sdhp;
5922 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5923 SDEBUG_VERSION, sdebug_version_date);
5924 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5925 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5926 sdebug_opts, sdebug_every_nth);
5927 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5928 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5929 sdebug_sector_size, "bytes");
5930 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5931 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5933 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5934 num_dev_resets, num_target_resets, num_bus_resets,
5936 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5937 dix_reads, dix_writes, dif_errors);
5938 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5940 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5941 atomic_read(&sdebug_cmnd_count),
5942 atomic_read(&sdebug_completions),
5943 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5944 atomic_read(&sdebug_a_tsf),
5945 atomic_read(&sdeb_mq_poll_count));
5947 seq_printf(m, "submit_queues=%d\n", submit_queues);
5948 for (j = 0; j < submit_queues; ++j) {
5950 struct sdebug_submit_queue_data data = {
5955 seq_printf(m, " queue %d:\n", j);
5956 blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
5959 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5960 "first,last bits", f, l);
5964 seq_printf(m, "this host_no=%d\n", host->host_no);
5965 if (!xa_empty(per_store_ap)) {
5968 unsigned long l_idx;
5969 struct sdeb_store_info *sip;
5971 seq_puts(m, "\nhost list:\n");
5973 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5975 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5976 sdhp->shost->host_no, idx);
5979 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5980 sdeb_most_recent_idx);
5982 xa_for_each(per_store_ap, l_idx, sip) {
5983 niu = xa_get_mark(per_store_ap, l_idx,
5984 SDEB_XA_NOT_IN_USE);
5986 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5987 (niu ? " not_in_use" : ""));
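/* The driver attributes below (delay, ndelay, opts, ...) live under
 * /sys/bus/pseudo/drivers/scsi_debug (see the note before sdebug_drv_attrs[]).
 * Illustrative usage:
 *     echo 4 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * sets the per-command response delay to 4 jiffies, provided no commands are
 * currently queued (otherwise the write fails with EBUSY).
 */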
5994 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5996 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5998 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5999 * of delay is jiffies.
6001 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6006 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6008 if (sdebug_jdelay != jdelay) {
6009 struct sdebug_host_info *sdhp;
6011 mutex_lock(&sdebug_host_list_mutex);
6012 block_unblock_all_queues(true);
6014 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6015 struct Scsi_Host *shost = sdhp->shost;
6017 if (scsi_host_busy(shost)) {
6018 res = -EBUSY; /* queued commands */
6023 sdebug_jdelay = jdelay;
6026 block_unblock_all_queues(false);
6027 mutex_unlock(&sdebug_host_list_mutex);
6033 static DRIVER_ATTR_RW(delay);
6035 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6037 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6039 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6040 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6041 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6046 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6047 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6049 if (sdebug_ndelay != ndelay) {
6050 struct sdebug_host_info *sdhp;
6052 mutex_lock(&sdebug_host_list_mutex);
6053 block_unblock_all_queues(true);
6055 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6056 struct Scsi_Host *shost = sdhp->shost;
6058 if (scsi_host_busy(shost)) {
6059 res = -EBUSY; /* queued commands */
6065 sdebug_ndelay = ndelay;
6066 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6069 block_unblock_all_queues(false);
6070 mutex_unlock(&sdebug_host_list_mutex);
6076 static DRIVER_ATTR_RW(ndelay);
6078 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6080 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6083 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6089 if (sscanf(buf, "%10s", work) == 1) {
6090 if (strncasecmp(work, "0x", 2) == 0) {
6091 if (kstrtoint(work + 2, 16, &opts) == 0)
6094 if (kstrtoint(work, 10, &opts) == 0)
6101 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6102 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6106 static DRIVER_ATTR_RW(opts);
6108 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6110 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6112 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6117 /* Cannot change from or to TYPE_ZBC with sysfs */
6118 if (sdebug_ptype == TYPE_ZBC)
6121 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6129 static DRIVER_ATTR_RW(ptype);
6131 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6133 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6135 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6140 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6146 static DRIVER_ATTR_RW(dsense);
6148 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6150 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6152 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6157 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6158 bool want_store = (n == 0);
6159 struct sdebug_host_info *sdhp;
6162 sdebug_fake_rw = (sdebug_fake_rw > 0);
6163 if (sdebug_fake_rw == n)
6164 return count; /* not transitioning so do nothing */
6166 if (want_store) { /* 1 --> 0 transition, set up store */
6167 if (sdeb_first_idx < 0) {
6168 idx = sdebug_add_store();
6172 idx = sdeb_first_idx;
6173 xa_clear_mark(per_store_ap, idx,
6174 SDEB_XA_NOT_IN_USE);
6176 /* make all hosts use same store */
6177 list_for_each_entry(sdhp, &sdebug_host_list,
6179 if (sdhp->si_idx != idx) {
6180 xa_set_mark(per_store_ap, sdhp->si_idx,
6181 SDEB_XA_NOT_IN_USE);
6185 sdeb_most_recent_idx = idx;
6186 } else { /* 0 --> 1 transition is trigger for shrink */
6187 sdebug_erase_all_stores(true /* apart from first */);
6194 static DRIVER_ATTR_RW(fake_rw);
6196 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6198 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6200 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6205 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6206 sdebug_no_lun_0 = n;
6211 static DRIVER_ATTR_RW(no_lun_0);
6213 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6215 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6217 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6222 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6223 sdebug_num_tgts = n;
6224 sdebug_max_tgts_luns();
6229 static DRIVER_ATTR_RW(num_tgts);
6231 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6233 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6235 static DRIVER_ATTR_RO(dev_size_mb);
6237 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6239 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6242 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6247 if (kstrtobool(buf, &v))
6250 sdebug_per_host_store = v;
6253 static DRIVER_ATTR_RW(per_host_store);
6255 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6257 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6259 static DRIVER_ATTR_RO(num_parts);
6261 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6263 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6265 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6271 if (sscanf(buf, "%10s", work) == 1) {
6272 if (strncasecmp(work, "0x", 2) == 0) {
6273 if (kstrtoint(work + 2, 16, &nth) == 0)
6274 goto every_nth_done;
6276 if (kstrtoint(work, 10, &nth) == 0)
6277 goto every_nth_done;
6283 sdebug_every_nth = nth;
6284 if (nth && !sdebug_statistics) {
6285 pr_info("every_nth needs statistics=1, set it\n");
6286 sdebug_statistics = true;
6291 static DRIVER_ATTR_RW(every_nth);
6293 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6295 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6297 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6303 if (kstrtoint(buf, 0, &n))
6306 if (n > (int)SAM_LUN_AM_FLAT) {
6307 pr_warn("only LUN address methods 0 and 1 are supported\n");
6310 changed = ((int)sdebug_lun_am != n);
6312 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6313 struct sdebug_host_info *sdhp;
6314 struct sdebug_dev_info *dp;
6316 mutex_lock(&sdebug_host_list_mutex);
6317 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6318 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6319 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6322 mutex_unlock(&sdebug_host_list_mutex);
6328 static DRIVER_ATTR_RW(lun_format);
6330 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6332 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6334 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6340 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6342 pr_warn("max_luns can be no more than 256\n");
6345 changed = (sdebug_max_luns != n);
6346 sdebug_max_luns = n;
6347 sdebug_max_tgts_luns();
6348 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6349 struct sdebug_host_info *sdhp;
6350 struct sdebug_dev_info *dp;
6352 mutex_lock(&sdebug_host_list_mutex);
6353 list_for_each_entry(sdhp, &sdebug_host_list,
6355 list_for_each_entry(dp, &sdhp->dev_info_list,
6357 set_bit(SDEBUG_UA_LUNS_CHANGED,
6361 mutex_unlock(&sdebug_host_list_mutex);
6367 static DRIVER_ATTR_RW(max_luns);
6369 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6371 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6373 /* N.B. max_queue can be changed while there are queued commands. In flight
6374 * commands beyond the new max_queue will be completed. */
6375 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6380 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6381 (n <= SDEBUG_CANQUEUE) &&
6382 (sdebug_host_max_queue == 0)) {
6383 mutex_lock(&sdebug_host_list_mutex);
6385 /* We may only change sdebug_max_queue when we have no shosts */
6386 if (list_empty(&sdebug_host_list))
6387 sdebug_max_queue = n;
6390 mutex_unlock(&sdebug_host_list_mutex);
6395 static DRIVER_ATTR_RW(max_queue);
6397 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6399 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6402 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6404 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6407 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6411 if (kstrtobool(buf, &v))
6414 sdebug_no_rwlock = v;
6417 static DRIVER_ATTR_RW(no_rwlock);
6420 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6421 * in range [0, sdebug_host_max_queue), we can't change it.
6423 static DRIVER_ATTR_RO(host_max_queue);
6425 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6427 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6429 static DRIVER_ATTR_RO(no_uld);
6431 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6433 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6435 static DRIVER_ATTR_RO(scsi_level);
6437 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6439 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6441 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6447 /* Ignore capacity change for ZBC drives for now */
6448 if (sdeb_zbc_in_use)
6451 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6452 changed = (sdebug_virtual_gb != n);
6453 sdebug_virtual_gb = n;
6454 sdebug_capacity = get_sdebug_capacity();
6456 struct sdebug_host_info *sdhp;
6457 struct sdebug_dev_info *dp;
6459 mutex_lock(&sdebug_host_list_mutex);
6460 list_for_each_entry(sdhp, &sdebug_host_list,
6462 list_for_each_entry(dp, &sdhp->dev_info_list,
6464 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6468 mutex_unlock(&sdebug_host_list_mutex);
6474 static DRIVER_ATTR_RW(virtual_gb);
6476 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6478 /* the absolute number of currently active hosts is shown */
6479 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6482 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6487 struct sdeb_store_info *sip;
6488 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6491 if (sscanf(buf, "%d", &delta_hosts) != 1)
6493 if (delta_hosts > 0) {
6497 xa_for_each_marked(per_store_ap, idx, sip,
6498 SDEB_XA_NOT_IN_USE) {
6499 sdeb_most_recent_idx = (int)idx;
6503 if (found) /* re-use case */
6504 sdebug_add_host_helper((int)idx);
6506 sdebug_do_add_host(true);
6508 sdebug_do_add_host(false);
6510 } while (--delta_hosts);
6511 } else if (delta_hosts < 0) {
6513 sdebug_do_remove_host(false);
6514 } while (++delta_hosts);
6518 static DRIVER_ATTR_RW(add_host);
6520 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6522 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6524 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6529 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6530 sdebug_vpd_use_hostno = n;
6535 static DRIVER_ATTR_RW(vpd_use_hostno);
6537 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6539 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6541 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6546 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6548 sdebug_statistics = true;
6550 clear_queue_stats();
6551 sdebug_statistics = false;
6557 static DRIVER_ATTR_RW(statistics);
6559 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6561 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6563 static DRIVER_ATTR_RO(sector_size);
6565 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6567 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6569 static DRIVER_ATTR_RO(submit_queues);
6571 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6573 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6575 static DRIVER_ATTR_RO(dix);
6577 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6579 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6581 static DRIVER_ATTR_RO(dif);
6583 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6585 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6587 static DRIVER_ATTR_RO(guard);
6589 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6591 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6593 static DRIVER_ATTR_RO(ato);
6595 static ssize_t map_show(struct device_driver *ddp, char *buf)
6599 if (!scsi_debug_lbp())
6600 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6601 sdebug_store_sectors);
6603 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6604 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6607 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6608 (int)map_size, sip->map_storep);
6610 buf[count++] = '\n';
6615 static DRIVER_ATTR_RO(map);
6617 static ssize_t random_show(struct device_driver *ddp, char *buf)
6619 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6622 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6627 if (kstrtobool(buf, &v))
6633 static DRIVER_ATTR_RW(random);
6635 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6637 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6639 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6644 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6645 sdebug_removable = (n > 0);
6650 static DRIVER_ATTR_RW(removable);
6652 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6654 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6656 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6657 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6662 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6663 sdebug_host_lock = (n > 0);
6668 static DRIVER_ATTR_RW(host_lock);
6670 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6672 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6674 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6679 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6680 sdebug_strict = (n > 0);
6685 static DRIVER_ATTR_RW(strict);
6687 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6689 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6691 static DRIVER_ATTR_RO(uuid_ctl);
6693 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6695 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6697 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6702 ret = kstrtoint(buf, 0, &n);
6706 all_config_cdb_len();
6709 static DRIVER_ATTR_RW(cdb_len);
6711 static const char * const zbc_model_strs_a[] = {
6712 [BLK_ZONED_NONE] = "none",
6713 [BLK_ZONED_HA] = "host-aware",
6714 [BLK_ZONED_HM] = "host-managed",
6717 static const char * const zbc_model_strs_b[] = {
6718 [BLK_ZONED_NONE] = "no",
6719 [BLK_ZONED_HA] = "aware",
6720 [BLK_ZONED_HM] = "managed",
6723 static const char * const zbc_model_strs_c[] = {
6724 [BLK_ZONED_NONE] = "0",
6725 [BLK_ZONED_HA] = "1",
6726 [BLK_ZONED_HM] = "2",
6729 static int sdeb_zbc_model_str(const char *cp)
6731 int res = sysfs_match_string(zbc_model_strs_a, cp);
6734 res = sysfs_match_string(zbc_model_strs_b, cp);
6736 res = sysfs_match_string(zbc_model_strs_c, cp);
6744 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6746 return scnprintf(buf, PAGE_SIZE, "%s\n",
6747 zbc_model_strs_a[sdeb_zbc_model]);
6749 static DRIVER_ATTR_RO(zbc);
6751 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6753 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6755 static DRIVER_ATTR_RO(tur_ms_to_ready);
6757 /* Note: The following array creates attribute files in the
6758 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6759 files (over those found in the /sys/module/scsi_debug/parameters
6760 directory) is that auxiliary actions can be triggered when an attribute
6761 is changed. For example, see add_host_store() above.
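/* Illustrative example of such an auxiliary action:
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * builds two additional simulated hosts via add_host_store(), while writing
 * a negative value removes that many hosts.
 */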
6764 static struct attribute *sdebug_drv_attrs[] = {
6765 &driver_attr_delay.attr,
6766 &driver_attr_opts.attr,
6767 &driver_attr_ptype.attr,
6768 &driver_attr_dsense.attr,
6769 &driver_attr_fake_rw.attr,
6770 &driver_attr_host_max_queue.attr,
6771 &driver_attr_no_lun_0.attr,
6772 &driver_attr_num_tgts.attr,
6773 &driver_attr_dev_size_mb.attr,
6774 &driver_attr_num_parts.attr,
6775 &driver_attr_every_nth.attr,
6776 &driver_attr_lun_format.attr,
6777 &driver_attr_max_luns.attr,
6778 &driver_attr_max_queue.attr,
6779 &driver_attr_no_rwlock.attr,
6780 &driver_attr_no_uld.attr,
6781 &driver_attr_scsi_level.attr,
6782 &driver_attr_virtual_gb.attr,
6783 &driver_attr_add_host.attr,
6784 &driver_attr_per_host_store.attr,
6785 &driver_attr_vpd_use_hostno.attr,
6786 &driver_attr_sector_size.attr,
6787 &driver_attr_statistics.attr,
6788 &driver_attr_submit_queues.attr,
6789 &driver_attr_dix.attr,
6790 &driver_attr_dif.attr,
6791 &driver_attr_guard.attr,
6792 &driver_attr_ato.attr,
6793 &driver_attr_map.attr,
6794 &driver_attr_random.attr,
6795 &driver_attr_removable.attr,
6796 &driver_attr_host_lock.attr,
6797 &driver_attr_ndelay.attr,
6798 &driver_attr_strict.attr,
6799 &driver_attr_uuid_ctl.attr,
6800 &driver_attr_cdb_len.attr,
6801 &driver_attr_tur_ms_to_ready.attr,
6802 &driver_attr_zbc.attr,
6805 ATTRIBUTE_GROUPS(sdebug_drv);
6807 static struct device *pseudo_primary;
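/* Module init: validate the module parameters, size the simulated medium and
 * geometry, optionally allocate the first backing store, register the pseudo
 * root device, bus and driver, then add the initially requested host(s).
 */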
6809 static int __init scsi_debug_init(void)
6811 bool want_store = (sdebug_fake_rw == 0);
6813 int k, ret, hosts_to_add;
6816 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6817 pr_warn("ndelay must be less than 1 second, ignored\n");
6819 } else if (sdebug_ndelay > 0)
6820 sdebug_jdelay = JDELAY_OVERRIDDEN;
6822 switch (sdebug_sector_size) {
6829 pr_err("invalid sector_size %d\n", sdebug_sector_size);
6833 switch (sdebug_dif) {
6834 case T10_PI_TYPE0_PROTECTION:
6836 case T10_PI_TYPE1_PROTECTION:
6837 case T10_PI_TYPE2_PROTECTION:
6838 case T10_PI_TYPE3_PROTECTION:
6839 have_dif_prot = true;
6843 pr_err("dif must be 0, 1, 2 or 3\n");
6847 if (sdebug_num_tgts < 0) {
6848 pr_err("num_tgts must be >= 0\n");
6852 if (sdebug_guard > 1) {
6853 pr_err("guard must be 0 or 1\n");
6857 if (sdebug_ato > 1) {
6858 pr_err("ato must be 0 or 1\n");
6862 if (sdebug_physblk_exp > 15) {
6863 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6867 sdebug_lun_am = sdebug_lun_am_i;
6868 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6869 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6870 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6873 if (sdebug_max_luns > 256) {
6874 if (sdebug_max_luns > 16384) {
6875 pr_warn("max_luns can be no more than 16384, use default\n");
6876 sdebug_max_luns = DEF_MAX_LUNS;
6878 sdebug_lun_am = SAM_LUN_AM_FLAT;
6881 if (sdebug_lowest_aligned > 0x3fff) {
6882 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6886 if (submit_queues < 1) {
6887 pr_err("submit_queues must be 1 or more\n");
6891 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6892 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6896 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6897 (sdebug_host_max_queue < 0)) {
6898 pr_err("host_max_queue must be in range [0 %d]\n",
6903 if (sdebug_host_max_queue &&
6904 (sdebug_max_queue != sdebug_host_max_queue)) {
6905 sdebug_max_queue = sdebug_host_max_queue;
6906 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6911 * check for host managed zoned block device specified with
6912 * ptype=0x14 or zbc=XXX.
6914 if (sdebug_ptype == TYPE_ZBC) {
6915 sdeb_zbc_model = BLK_ZONED_HM;
6916 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6917 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6921 switch (sdeb_zbc_model) {
6922 case BLK_ZONED_NONE:
6924 sdebug_ptype = TYPE_DISK;
6927 sdebug_ptype = TYPE_ZBC;
6930 pr_err("Invalid ZBC model\n");
6934 if (sdeb_zbc_model != BLK_ZONED_NONE) {
6935 sdeb_zbc_in_use = true;
6936 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6937 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6940 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6941 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6942 if (sdebug_dev_size_mb < 1)
6943 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
6944 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6945 sdebug_store_sectors = sz / sdebug_sector_size;
6946 sdebug_capacity = get_sdebug_capacity();
6948 /* play around with geometry, don't waste too much on track 0 */
6950 sdebug_sectors_per = 32;
6951 if (sdebug_dev_size_mb >= 256)
6953 else if (sdebug_dev_size_mb >= 16)
6955 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6956 (sdebug_sectors_per * sdebug_heads);
6957 if (sdebug_cylinders_per >= 1024) {
6958 /* other LLDs do this; implies >= 1GB ram disk ... */
6960 sdebug_sectors_per = 63;
6961 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6962 (sdebug_sectors_per * sdebug_heads);
6964 if (scsi_debug_lbp()) {
6965 sdebug_unmap_max_blocks =
6966 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6968 sdebug_unmap_max_desc =
6969 clamp(sdebug_unmap_max_desc, 0U, 256U);
6971 sdebug_unmap_granularity =
6972 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6974 if (sdebug_unmap_alignment &&
6975 sdebug_unmap_granularity <=
6976 sdebug_unmap_alignment) {
6977 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6981 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6983 idx = sdebug_add_store();
6988 pseudo_primary = root_device_register("pseudo_0");
6989 if (IS_ERR(pseudo_primary)) {
6990 pr_warn("root_device_register() error\n");
6991 ret = PTR_ERR(pseudo_primary);
6994 ret = bus_register(&pseudo_lld_bus);
6996 pr_warn("bus_register error: %d\n", ret);
6999 ret = driver_register(&sdebug_driverfs_driver);
7001 pr_warn("driver_register error: %d\n", ret);
7005 hosts_to_add = sdebug_add_host;
7006 sdebug_add_host = 0;
7008 queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7009 if (!queued_cmd_cache) {
7014 for (k = 0; k < hosts_to_add; k++) {
7015 if (want_store && k == 0) {
7016 ret = sdebug_add_host_helper(idx);
7018 pr_err("add_host_helper k=%d, error=%d\n",
7023 ret = sdebug_do_add_host(want_store &&
7024 sdebug_per_host_store);
7026 pr_err("add_host k=%d error=%d\n", k, -ret);
7032 pr_info("built %d host(s)\n", sdebug_num_hosts);
7037 driver_unregister(&sdebug_driverfs_driver);
7039 bus_unregister(&pseudo_lld_bus);
7041 root_device_unregister(pseudo_primary);
7043 sdebug_erase_store(idx, NULL);
7047 static void __exit scsi_debug_exit(void)
7049 int k = sdebug_num_hosts;
7052 sdebug_do_remove_host(true);
7053 kmem_cache_destroy(queued_cmd_cache);
7054 driver_unregister(&sdebug_driverfs_driver);
7055 bus_unregister(&pseudo_lld_bus);
7056 root_device_unregister(pseudo_primary);
7058 sdebug_erase_all_stores(false);
7059 xa_destroy(per_store_ap);
7062 device_initcall(scsi_debug_init);
7063 module_exit(scsi_debug_exit);
7065 static void sdebug_release_adapter(struct device *dev)
7067 struct sdebug_host_info *sdbg_host;
7069 sdbg_host = dev_to_sdebug_host(dev);
7073 /* idx must be valid; if sip is NULL then it will be obtained using idx */
7074 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7079 if (xa_empty(per_store_ap))
7081 sip = xa_load(per_store_ap, idx);
7085 vfree(sip->map_storep);
7086 vfree(sip->dif_storep);
7088 xa_erase(per_store_ap, idx);
7092 /* Assume apart_from_first==false only in shutdown case. */
7093 static void sdebug_erase_all_stores(bool apart_from_first)
7096 struct sdeb_store_info *sip = NULL;
7098 xa_for_each(per_store_ap, idx, sip) {
7099 if (apart_from_first)
7100 apart_from_first = false;
7102 sdebug_erase_store(idx, sip);
7104 if (apart_from_first)
7105 sdeb_most_recent_idx = sdeb_first_idx;
7109 * Returns the new store's xarray element index (idx) if >= 0, else a negated errno.
7110 * Limit the number of stores to 65536.
7112 static int sdebug_add_store(void)
7116 unsigned long iflags;
7117 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7118 struct sdeb_store_info *sip = NULL;
7119 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7121 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7125 xa_lock_irqsave(per_store_ap, iflags);
7126 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7127 if (unlikely(res < 0)) {
7128 xa_unlock_irqrestore(per_store_ap, iflags);
7130 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7133 sdeb_most_recent_idx = n_idx;
7134 if (sdeb_first_idx < 0)
7135 sdeb_first_idx = n_idx;
7136 xa_unlock_irqrestore(per_store_ap, iflags);
7139 sip->storep = vzalloc(sz);
7141 pr_err("user data oom\n");
7144 if (sdebug_num_parts > 0)
7145 sdebug_build_parts(sip->storep, sz);
7147 /* DIF/DIX: what T10 calls Protection Information (PI) */
7151 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7152 sip->dif_storep = vmalloc(dif_size);
7154 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7157 if (!sip->dif_storep) {
7158 pr_err("DIX oom\n");
7161 memset(sip->dif_storep, 0xff, dif_size);
7163 /* Logical Block Provisioning */
7164 if (scsi_debug_lbp()) {
7165 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7166 sip->map_storep = vmalloc(array_size(sizeof(long),
7167 BITS_TO_LONGS(map_size)));
7169 pr_info("%lu provisioning blocks\n", map_size);
7171 if (!sip->map_storep) {
7172 pr_err("LBP map oom\n");
7176 bitmap_zero(sip->map_storep, map_size);
7178 /* Map first 1KB for partition table */
7179 if (sdebug_num_parts)
7180 map_region(sip, 0, 2);
7183 rwlock_init(&sip->macc_lck);
7186 sdebug_erase_store((int)n_idx, sip);
7187 pr_warn("%s: failed, errno=%d\n", __func__, -res);
7191 static int sdebug_add_host_helper(int per_host_idx)
7193 int k, devs_per_host, idx;
7194 int error = -ENOMEM;
7195 struct sdebug_host_info *sdbg_host;
7196 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7198 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7201 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7202 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7203 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7204 sdbg_host->si_idx = idx;
7206 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7208 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7209 for (k = 0; k < devs_per_host; k++) {
7210 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7215 mutex_lock(&sdebug_host_list_mutex);
7216 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7217 mutex_unlock(&sdebug_host_list_mutex);
7219 sdbg_host->dev.bus = &pseudo_lld_bus;
7220 sdbg_host->dev.parent = pseudo_primary;
7221 sdbg_host->dev.release = &sdebug_release_adapter;
7222 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7224 error = device_register(&sdbg_host->dev);
7226 mutex_lock(&sdebug_host_list_mutex);
7227 list_del(&sdbg_host->host_list);
7228 mutex_unlock(&sdebug_host_list_mutex);
7236 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7238 list_del(&sdbg_devinfo->dev_list);
7239 kfree(sdbg_devinfo->zstate);
7240 kfree(sdbg_devinfo);
7242 if (sdbg_host->dev.release)
7243 put_device(&sdbg_host->dev);
7246 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7250 static int sdebug_do_add_host(bool mk_new_store)
7252 int ph_idx = sdeb_most_recent_idx;
7255 ph_idx = sdebug_add_store();
7259 return sdebug_add_host_helper(ph_idx);
7262 static void sdebug_do_remove_host(bool the_end)
7265 struct sdebug_host_info *sdbg_host = NULL;
7266 struct sdebug_host_info *sdbg_host2;
7268 mutex_lock(&sdebug_host_list_mutex);
7269 if (!list_empty(&sdebug_host_list)) {
7270 sdbg_host = list_entry(sdebug_host_list.prev,
7271 struct sdebug_host_info, host_list);
7272 idx = sdbg_host->si_idx;
7274 if (!the_end && idx >= 0) {
7277 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7278 if (sdbg_host2 == sdbg_host)
7280 if (idx == sdbg_host2->si_idx) {
7286 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7287 if (idx == sdeb_most_recent_idx)
7288 --sdeb_most_recent_idx;
7292 list_del(&sdbg_host->host_list);
7293 mutex_unlock(&sdebug_host_list_mutex);
7298 device_unregister(&sdbg_host->dev);
7302 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7304 struct sdebug_dev_info *devip = sdev->hostdata;
7309 mutex_lock(&sdebug_host_list_mutex);
7310 block_unblock_all_queues(true);
7312 if (qdepth > SDEBUG_CANQUEUE) {
7313 qdepth = SDEBUG_CANQUEUE;
7314 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7315 qdepth, SDEBUG_CANQUEUE);
7319 if (qdepth != sdev->queue_depth)
7320 scsi_change_queue_depth(sdev, qdepth);
7322 block_unblock_all_queues(false);
7323 mutex_unlock(&sdebug_host_list_mutex);
7325 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7326 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7328 return sdev->queue_depth;
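/* With every_nth set, decide whether this command should be silently dropped
 * to provoke a timeout in the midlayer; SDEBUG_OPT_MAC_TIMEOUT restricts the
 * drop to medium access commands.
 */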
7331 static bool fake_timeout(struct scsi_cmnd *scp)
7333 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7334 if (sdebug_every_nth < -1)
7335 sdebug_every_nth = -1;
7336 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7337 return true; /* ignore command causing timeout */
7338 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7339 scsi_medium_access_command(scp))
7340 return true; /* time out reads and writes */
7345 /* Response to TUR or media access command when device stopped */
7346 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7350 ktime_t now_ts = ktime_get_boottime();
7351 struct scsi_device *sdp = scp->device;
7353 stopped_state = atomic_read(&devip->stopped);
7354 if (stopped_state == 2) {
7355 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7356 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7357 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7358 /* tur_ms_to_ready timer extinguished */
7359 atomic_set(&devip->stopped, 0);
7363 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7365 sdev_printk(KERN_INFO, sdp,
7366 "%s: Not ready: in process of becoming ready\n", my_name);
7367 if (scp->cmnd[0] == TEST_UNIT_READY) {
7368 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7370 if (diff_ns <= tur_nanosecs_to_ready)
7371 diff_ns = tur_nanosecs_to_ready - diff_ns;
7373 diff_ns = tur_nanosecs_to_ready;
7374 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7375 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7376 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7378 return check_condition_result;
7381 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7383 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7385 return check_condition_result;
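/* .map_queues callback: split the submit_queues hardware contexts between the
 * default and poll hctx types, giving poll_queues of them to HCTX_TYPE_POLL.
 */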
7388 static void sdebug_map_queues(struct Scsi_Host *shost)
7392 if (shost->nr_hw_queues == 1)
7395 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7396 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7400 if (i == HCTX_TYPE_DEFAULT)
7401 map->nr_queues = submit_queues - poll_queues;
7402 else if (i == HCTX_TYPE_POLL)
7403 map->nr_queues = poll_queues;
7405 if (!map->nr_queues) {
7406 BUG_ON(i == HCTX_TYPE_DEFAULT);
7410 map->queue_offset = qoff;
7411 blk_mq_map_queues(map);
7413 qoff += map->nr_queues;
7417 struct sdebug_blk_mq_poll_data {
7418 unsigned int queue_num;
7423 * We don't handle aborted commands here, but it does not seem possible to have
7424 * aborted polled commands from schedule_resp()
7426 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7428 struct sdebug_blk_mq_poll_data *data = opaque;
7429 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7430 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7431 struct sdebug_defer *sd_dp;
7432 u32 unique_tag = blk_mq_unique_tag(rq);
7433 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7434 struct sdebug_queued_cmd *sqcp;
7435 unsigned long flags;
7436 int queue_num = data->queue_num;
7439 /* We're only interested in one queue for this iteration */
7440 if (hwq != queue_num)
7443 /* Subsequent checks would fail if this failed, but check anyway */
7444 if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7447 time = ktime_get_boottime();
7449 spin_lock_irqsave(&sdsc->lock, flags);
7450 sqcp = TO_QUEUED_CMD(cmd);
7452 spin_unlock_irqrestore(&sdsc->lock, flags);
7456 sd_dp = &sqcp->sd_dp;
7457 if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
7458 spin_unlock_irqrestore(&sdsc->lock, flags);
7462 if (time < sd_dp->cmpl_ts) {
7463 spin_unlock_irqrestore(&sdsc->lock, flags);
7467 ASSIGN_QUEUED_CMD(cmd, NULL);
7468 spin_unlock_irqrestore(&sdsc->lock, flags);
7470 if (sdebug_statistics) {
7471 atomic_inc(&sdebug_completions);
7472 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7473 atomic_inc(&sdebug_miss_cpus);
7476 sdebug_free_queued_cmd(sqcp);
7478 scsi_done(cmd); /* callback to mid level */
7479 (*data->num_entries)++;
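/* .mq_poll callback: walk the busy requests of the polled hardware queue and
 * complete every command whose deferred completion time has been reached,
 * returning the number of commands completed.
 */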
7483 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7485 int num_entries = 0;
7486 struct sdebug_blk_mq_poll_data data = {
7487 .queue_num = queue_num,
7488 .num_entries = &num_entries,
7491 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
7494 if (num_entries > 0)
7495 atomic_add(num_entries, &sdeb_mq_poll_count);
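/* .queuecommand callback: look up the CDB opcode (and service action, where
 * applicable) via opcode_ind_arr[] into opcode_info_arr[], apply unit
 * attention, not-ready and strict CDB checks, then hand the chosen resp_*
 * function to schedule_resp() with the configured delay.
 */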
7499 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7500 struct scsi_cmnd *scp)
7503 struct scsi_device *sdp = scp->device;
7504 const struct opcode_info_t *oip;
7505 const struct opcode_info_t *r_oip;
7506 struct sdebug_dev_info *devip;
7507 u8 *cmd = scp->cmnd;
7508 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7509 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7512 u64 lun_index = sdp->lun & 0x3FFF;
7519 scsi_set_resid(scp, 0);
7520 if (sdebug_statistics) {
7521 atomic_inc(&sdebug_cmnd_count);
7522 inject_now = inject_on_this_cmd();
7526 if (unlikely(sdebug_verbose &&
7527 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7532 sb = (int)sizeof(b);
7534 strcpy(b, "too long, over 32 bytes");
7536 for (k = 0, n = 0; k < len && n < sb; ++k)
7537 n += scnprintf(b + n, sb - n, "%02x ",
7540 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7541 blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7543 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7544 return SCSI_MLQUEUE_HOST_BUSY;
7545 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7546 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7549 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
7550 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
7551 devip = (struct sdebug_dev_info *)sdp->hostdata;
7552 if (unlikely(!devip)) {
7553 devip = find_build_dev_info(sdp);
7557 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7558 atomic_set(&sdeb_inject_pending, 1);
7560 na = oip->num_attached;
7562 if (na) { /* multiple commands with this opcode */
7564 if (FF_SA & r_oip->flags) {
7565 if (F_SA_LOW & oip->flags)
7568 sa = get_unaligned_be16(cmd + 8);
7569 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7570 if (opcode == oip->opcode && sa == oip->sa)
7573 } else { /* since no service action only check opcode */
7574 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7575 if (opcode == oip->opcode)
7580 if (F_SA_LOW & r_oip->flags)
7581 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7582 else if (F_SA_HIGH & r_oip->flags)
7583 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7585 mk_sense_invalid_opcode(scp);
7588 } /* else (when na==0) we assume the oip is a match */
7590 if (unlikely(F_INV_OP & flags)) {
7591 mk_sense_invalid_opcode(scp);
7594 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7596 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7597 my_name, opcode, " supported for wlun");
7598 mk_sense_invalid_opcode(scp);
7601 if (unlikely(sdebug_strict)) { /* check cdb against mask */
7605 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7606 rem = ~oip->len_mask[k] & cmd[k];
7608 for (j = 7; j >= 0; --j, rem <<= 1) {
7612 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7617 if (unlikely(!(F_SKIP_UA & flags) &&
7618 find_first_bit(devip->uas_bm,
7619 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7620 errsts = make_ua(scp, devip);
7624 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7625 atomic_read(&devip->stopped))) {
7626 errsts = resp_not_ready(scp, devip);
7630 if (sdebug_fake_rw && (F_FAKE_RW & flags))
7632 if (unlikely(sdebug_every_nth)) {
7633 if (fake_timeout(scp))
7634 return 0; /* ignore command: make trouble */
7636 if (likely(oip->pfp))
7637 pfp = oip->pfp; /* calls a resp_* function */
7639 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
7642 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
7643 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7644 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7645 sdebug_ndelay > 10000)) {
7647 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7648 * for Start Stop Unit (SSU) want at least 1 second delay and
7649 * if sdebug_jdelay>1 want a long delay of that many seconds.
7650 * For Synchronize Cache want 1/20 of SSU's delay.
7652 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7653 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7655 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7656 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7658 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7661 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7663 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7666 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
7668 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7670 spin_lock_init(&sdsc->lock);
7676 static struct scsi_host_template sdebug_driver_template = {
7677 .show_info = scsi_debug_show_info,
7678 .write_info = scsi_debug_write_info,
7679 .proc_name = sdebug_proc_name,
7680 .name = "SCSI DEBUG",
7681 .info = scsi_debug_info,
7682 .slave_alloc = scsi_debug_slave_alloc,
7683 .slave_configure = scsi_debug_slave_configure,
7684 .slave_destroy = scsi_debug_slave_destroy,
7685 .ioctl = scsi_debug_ioctl,
7686 .queuecommand = scsi_debug_queuecommand,
7687 .change_queue_depth = sdebug_change_qdepth,
7688 .map_queues = sdebug_map_queues,
7689 .mq_poll = sdebug_blk_mq_poll,
7690 .eh_abort_handler = scsi_debug_abort,
7691 .eh_device_reset_handler = scsi_debug_device_reset,
7692 .eh_target_reset_handler = scsi_debug_target_reset,
7693 .eh_bus_reset_handler = scsi_debug_bus_reset,
7694 .eh_host_reset_handler = scsi_debug_host_reset,
7695 .can_queue = SDEBUG_CANQUEUE,
7697 .sg_tablesize = SG_MAX_SEGMENTS,
7698 .cmd_per_lun = DEF_CMD_PER_LUN,
7700 .max_segment_size = -1U,
7701 .module = THIS_MODULE,
7702 .track_queue_depth = 1,
7703 .cmd_size = sizeof(struct sdebug_scsi_cmd),
7704 .init_cmd_priv = sdebug_init_cmd_priv,
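/* Bus probe for each simulated adapter: allocate a Scsi_Host from the
 * template above, size its hardware and poll queues, advertise the configured
 * DIF/DIX protection and guard type, then add and scan the host.
 */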
7707 static int sdebug_driver_probe(struct device *dev)
7710 struct sdebug_host_info *sdbg_host;
7711 struct Scsi_Host *hpnt;
7714 sdbg_host = dev_to_sdebug_host(dev);
7716 sdebug_driver_template.can_queue = sdebug_max_queue;
7717 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7718 if (!sdebug_clustering)
7719 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7721 hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
7723 pr_err("scsi_host_alloc failed\n");
7727 if (submit_queues > nr_cpu_ids) {
7728 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7729 my_name, submit_queues, nr_cpu_ids);
7730 submit_queues = nr_cpu_ids;
7733 * Decide whether to tell scsi subsystem that we want mq. The
7734 * following should give the same answer for each host.
7736 hpnt->nr_hw_queues = submit_queues;
7737 if (sdebug_host_max_queue)
7738 hpnt->host_tagset = 1;
7740 /* poll queues are possible for nr_hw_queues > 1 */
7741 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7742 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7743 my_name, poll_queues, hpnt->nr_hw_queues);
7748 * Poll queues don't need interrupts, but we need at least one I/O queue
7749 * left over for non-polled I/O.
7750 * If that condition is not met, trim poll_queues to 1 (just for simplicity).
7752 if (poll_queues >= submit_queues) {
7753 if (submit_queues < 3)
7754 pr_warn("%s: trim poll_queues to 1\n", my_name);
7756 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7757 my_name, submit_queues - 1);
7763 sdbg_host->shost = hpnt;
7764 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7765 hpnt->max_id = sdebug_num_tgts + 1;
7767 hpnt->max_id = sdebug_num_tgts;
7768 /* = sdebug_max_luns; */
7769 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7773 switch (sdebug_dif) {
7775 case T10_PI_TYPE1_PROTECTION:
7776 hprot = SHOST_DIF_TYPE1_PROTECTION;
7778 hprot |= SHOST_DIX_TYPE1_PROTECTION;
7781 case T10_PI_TYPE2_PROTECTION:
7782 hprot = SHOST_DIF_TYPE2_PROTECTION;
7784 hprot |= SHOST_DIX_TYPE2_PROTECTION;
7787 case T10_PI_TYPE3_PROTECTION:
7788 hprot = SHOST_DIF_TYPE3_PROTECTION;
7790 hprot |= SHOST_DIX_TYPE3_PROTECTION;
7795 hprot |= SHOST_DIX_TYPE0_PROTECTION;
7799 scsi_host_set_prot(hpnt, hprot);
7801 if (have_dif_prot || sdebug_dix)
7802 pr_info("host protection%s%s%s%s%s%s%s\n",
7803 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7804 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7805 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7806 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7807 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7808 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7809 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7811 if (sdebug_guard == 1)
7812 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7814 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7816 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7817 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7818 if (sdebug_every_nth) /* need stats counters for every_nth */
7819 sdebug_statistics = true;
7820 error = scsi_add_host(hpnt, &sdbg_host->dev);
7822 pr_err("scsi_add_host failed\n");
7824 scsi_host_put(hpnt);
7826 scsi_scan_host(hpnt);
7832 static void sdebug_driver_remove(struct device *dev)
7834 struct sdebug_host_info *sdbg_host;
7835 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7837 sdbg_host = dev_to_sdebug_host(dev);
7839 scsi_remove_host(sdbg_host->shost);
7841 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7843 list_del(&sdbg_devinfo->dev_list);
7844 kfree(sdbg_devinfo->zstate);
7845 kfree(sdbg_devinfo);
7848 scsi_host_put(sdbg_host->shost);
7851 static struct bus_type pseudo_lld_bus = {
7853 .probe = sdebug_driver_probe,
7854 .remove = sdebug_driver_remove,
7855 .drv_groups = sdebug_drv_groups,