1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4 * Copyright (C) 1992 Eric Youngdale
5 * Simulate a host adapter with 2 disks attached. Do a lot of checking
6 * to make sure that we are not getting blocks mixed up, and PANIC if
7 * anything out of the ordinary is seen.
8 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
10 * Copyright (C) 2001 - 2021 Douglas Gilbert
12 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
45 #include <net/checksum.h>
47 #include <asm/unaligned.h>
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
59 #include "scsi_logging.h"
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191" /* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
65 #define MY_NAME "scsi_debug"
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST 1
109 #define DEF_NUM_TGTS 1
110 #define DEF_MAX_LUNS 1
111 /* With these defaults, this driver will make 1 host with 1 target
112 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT 0
118 #define DEF_DEV_SIZE_MB 8
119 #define DEF_ZBC_DEV_SIZE_MB 128
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE 0
124 #define DEF_EVERY_NTH 0
125 #define DEF_FAKE_RW 0
127 #define DEF_HOST_LOCK 0
130 #define DEF_LBPWS10 0
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0 0
135 #define DEF_NUM_PARTS 0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB 0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB 128
161 #define DEF_ZBC_MAX_OPEN_ZONES 8
162 #define DEF_ZBC_NR_CONV_ZONES 1
164 #define SDEBUG_LUN_0_VAL 0
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE 1
168 #define SDEBUG_OPT_MEDIUM_ERR 2
169 #define SDEBUG_OPT_TIMEOUT 4
170 #define SDEBUG_OPT_RECOVERED_ERR 8
171 #define SDEBUG_OPT_TRANSPORT_ERR 16
172 #define SDEBUG_OPT_DIF_ERR 32
173 #define SDEBUG_OPT_DIX_ERR 64
174 #define SDEBUG_OPT_MAC_TIMEOUT 128
175 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
176 #define SDEBUG_OPT_Q_NOISE 0x200
177 #define SDEBUG_OPT_ALL_TSF 0x400 /* ignore */
178 #define SDEBUG_OPT_RARE_TSF 0x800
179 #define SDEBUG_OPT_N_WCE 0x1000
180 #define SDEBUG_OPT_RESET_NOISE 0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
182 #define SDEBUG_OPT_HOST_BUSY 0x8000
183 #define SDEBUG_OPT_CMD_ABORT 0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 SDEBUG_OPT_TRANSPORT_ERR | \
188 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 SDEBUG_OPT_SHORT_TRANSFER | \
190 SDEBUG_OPT_HOST_BUSY | \
191 SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
195 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
196 * priority order. In the subset implemented here lower numbers have higher
197 * priority. The UA numbers should be a sequence starting from 0 with
198 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1 /* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
209 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
210 * sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215 * (for response) per submit queue at one time. Can be reduced by max_queue
216 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219 * but cannot exceed SDEBUG_CANQUEUE .
221 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN 1 /* Data-in command (e.g. READ) */
227 #define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
230 #define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
233 #define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP 0x200 /* invalid opcode (not supported) */
236 #define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
238 #define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
247 #define SDEBUG_MAX_PARTS 4
249 #define SDEBUG_MAX_CMD_LEN 32
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
253 static struct kmem_cache *queued_cmd_cache;
/*
 * This driver stashes a pointer to its per-command state in the SCSI
 * mid-layer's scsi_cmnd::host_scribble field. TO_QUEUED_CMD() reads that
 * pointer back; ASSIGN_QUEUED_CMD() stores it.
 *
 * ASSIGN_QUEUED_CMD() is wrapped in do { } while (0) so it expands to a
 * single statement and composes safely with un-braced if/else; the qc
 * argument is parenthesized so any expression may be passed.
 */
#define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
#define ASSIGN_QUEUED_CMD(scmnd, qc) \
	do { (scmnd)->host_scribble = (void *)(qc); } while (0)
258 /* Zone types (zbcr05 table 25) */
263 /* ZBC_ZTYPE_SOBR = 0x4, */
267 /* enumeration names taken from table 26, zbcr05 */
269 ZBC_NOT_WRITE_POINTER = 0x0,
271 ZC2_IMPLICIT_OPEN = 0x2,
272 ZC3_EXPLICIT_OPEN = 0x3,
279 struct sdeb_zone_state { /* ZBC: per zone state */
280 enum sdebug_z_type z_type;
281 enum sdebug_z_cond z_cond;
282 bool z_non_seq_resource;
288 struct sdebug_dev_info {
289 struct list_head dev_list;
290 unsigned int channel;
294 struct sdebug_host_info *sdbg_host;
295 unsigned long uas_bm[1];
296 atomic_t stopped; /* 1: by SSU, 2: device start */
299 /* For ZBC devices */
300 enum blk_zoned_model zmodel;
303 unsigned int zsize_shift;
304 unsigned int nr_zones;
305 unsigned int nr_conv_zones;
306 unsigned int nr_seq_zones;
307 unsigned int nr_imp_open;
308 unsigned int nr_exp_open;
309 unsigned int nr_closed;
310 unsigned int max_open;
311 ktime_t create_ts; /* time since bootup that this device was created */
312 struct sdeb_zone_state *zstate;
315 struct sdebug_host_info {
316 struct list_head host_list;
317 int si_idx; /* sdeb_store_info (per host) xarray index */
318 struct Scsi_Host *shost;
320 struct list_head dev_info_list;
323 /* There is an xarray of pointers to this struct's objects, one per host */
324 struct sdeb_store_info {
325 rwlock_t macc_lck; /* for atomic media access on this store */
326 u8 *storep; /* user data storage (ram) */
327 struct t10_pi_tuple *dif_storep; /* protection info */
328 void *map_storep; /* provisioning map */
/*
 * Map a struct device embedded in a sdebug_host_info back to its
 * container, and map a Scsi_Host (whose dma_dev points at that embedded
 * device) to the owning sdebug_host_info. The shost argument is
 * parenthesized so the macro expands safely for any expression argument.
 */
#define dev_to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

#define shost_to_sdebug_host(shost)	\
	dev_to_sdebug_host((shost)->dma_dev)
/* Mechanism used to defer a command's completion (SDEB_DEFER_NONE: none) */
enum sdeb_defer_type {
	SDEB_DEFER_NONE = 0,
	SDEB_DEFER_HRT = 1,	/* hrtimer based — TODO confirm against users */
	SDEB_DEFER_WQ = 2,	/* workqueue based — TODO confirm against users */
	SDEB_DEFER_POLL = 3,	/* completed via mq_poll — TODO confirm */
};
340 struct sdebug_defer {
342 struct execute_work ew;
343 ktime_t cmpl_ts;/* time since boot to complete this cmd */
345 bool aborted; /* true when blk_abort_request() already called */
346 enum sdeb_defer_type defer_t;
349 struct sdebug_queued_cmd {
350 /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
351 * instance indicates this slot is in use.
353 struct sdebug_defer sd_dp;
354 struct scsi_cmnd *scmd;
357 struct sdebug_scsi_cmd {
361 static atomic_t sdebug_cmnd_count; /* number of incoming commands */
362 static atomic_t sdebug_completions; /* count of deferred completions */
363 static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
364 static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
365 static atomic_t sdeb_inject_pending;
366 static atomic_t sdeb_mq_poll_count; /* bumped when mq_poll returns > 0 */
368 struct opcode_info_t {
369 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
370 /* for terminating element */
371 u8 opcode; /* if num_attached > 0, preferred */
372 u16 sa; /* service action */
373 u32 flags; /* OR-ed set of SDEB_F_* */
374 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
375 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
376 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
377 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
380 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
381 enum sdeb_opcode_index {
382 SDEB_I_INVALID_OPCODE = 0,
384 SDEB_I_REPORT_LUNS = 2,
385 SDEB_I_REQUEST_SENSE = 3,
386 SDEB_I_TEST_UNIT_READY = 4,
387 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
388 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
389 SDEB_I_LOG_SENSE = 7,
390 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
391 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
392 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
393 SDEB_I_START_STOP = 11,
394 SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
395 SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
396 SDEB_I_MAINT_IN = 14,
397 SDEB_I_MAINT_OUT = 15,
398 SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
399 SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
400 SDEB_I_RESERVE = 18, /* 6, 10 */
401 SDEB_I_RELEASE = 19, /* 6, 10 */
402 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
403 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
404 SDEB_I_ATA_PT = 22, /* 12, 16 */
405 SDEB_I_SEND_DIAG = 23,
407 SDEB_I_WRITE_BUFFER = 25,
408 SDEB_I_WRITE_SAME = 26, /* 10, 16 */
409 SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
410 SDEB_I_COMP_WRITE = 28,
411 SDEB_I_PRE_FETCH = 29, /* 10, 16 */
412 SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
413 SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
414 SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
418 static const unsigned char opcode_ind_arr[256] = {
419 /* 0x0; 0x0->0x1f: 6 byte cdbs */
420 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
422 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
423 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
425 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
426 SDEB_I_ALLOW_REMOVAL, 0,
427 /* 0x20; 0x20->0x3f: 10 byte cdbs */
428 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
429 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
430 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
431 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
432 /* 0x40; 0x40->0x5f: 10 byte cdbs */
433 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
434 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
435 0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
437 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
438 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
439 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
440 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
441 0, SDEB_I_VARIABLE_LEN,
442 /* 0x80; 0x80->0x9f: 16 byte cdbs */
443 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
444 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
445 0, 0, 0, SDEB_I_VERIFY,
446 SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
447 SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
448 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
449 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
450 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
451 SDEB_I_MAINT_OUT, 0, 0, 0,
452 SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
453 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
454 0, 0, 0, 0, 0, 0, 0, 0,
455 0, 0, 0, 0, 0, 0, 0, 0,
456 /* 0xc0; 0xc0->0xff: vendor specific */
457 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
458 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
459 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
460 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
464 * The following "response" functions return the SCSI mid-level's 4 byte
465 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
466 * command completion, they can mask their return value with
467 * SDEG_RES_IMMED_MASK .
469 #define SDEG_RES_IMMED_MASK 0x40000000
471 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int sdebug_do_add_host(bool mk_new_store);
502 static int sdebug_add_host_helper(int per_host_idx);
503 static void sdebug_do_remove_host(bool the_end);
504 static int sdebug_add_store(void);
505 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
506 static void sdebug_erase_all_stores(bool apart_from_first);
508 static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
511 * The following are overflow arrays for cdbs that "hit" the same index in
512 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
513 * should be placed in opcode_info_arr[], the others should be placed here.
515 static const struct opcode_info_t msense_iarr[] = {
516 {0, 0x1a, 0, F_D_IN, NULL, NULL,
517 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
520 static const struct opcode_info_t mselect_iarr[] = {
521 {0, 0x15, 0, F_D_OUT, NULL, NULL,
522 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
525 static const struct opcode_info_t read_iarr[] = {
526 {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
527 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
529 {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
530 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
531 {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
532 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
536 static const struct opcode_info_t write_iarr[] = {
537 {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
538 NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
540 {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
541 NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
543 {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
544 NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
545 0xbf, 0xc7, 0, 0, 0, 0} },
548 static const struct opcode_info_t verify_iarr[] = {
549 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
550 NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
554 static const struct opcode_info_t sa_in_16_iarr[] = {
555 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
556 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
557 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
560 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
561 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
562 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
563 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
564 {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
565 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
566 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
569 static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
570 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
571 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
572 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
573 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
574 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
575 0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
578 static const struct opcode_info_t write_same_iarr[] = {
579 {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
580 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
581 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
584 static const struct opcode_info_t reserve_iarr[] = {
585 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
586 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
589 static const struct opcode_info_t release_iarr[] = {
590 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
591 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
594 static const struct opcode_info_t sync_cache_iarr[] = {
595 {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
596 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
600 static const struct opcode_info_t pre_fetch_iarr[] = {
601 {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
602 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
606 static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
607 {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
608 {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
610 {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
611 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
612 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
613 {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
614 {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
618 static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
619 {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
620 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
621 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
625 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
626 * plus the terminating elements for logic that scans this table such as
627 * REPORT SUPPORTED OPERATION CODES. */
628 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
630 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
631 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
633 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
635 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
636 0, 0} }, /* REPORT LUNS */
637 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
638 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
639 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
640 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
642 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
643 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
644 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
645 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
646 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
647 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
648 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
649 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
651 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
652 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
654 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
655 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
656 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
658 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
659 resp_write_dt0, write_iarr, /* WRITE(16) */
660 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
662 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
663 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
664 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
665 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
666 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
667 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
668 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
669 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
670 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
671 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
672 resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
673 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
674 0xff, 0, 0xc7, 0, 0, 0, 0} },
676 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
677 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
678 {ARRAY_SIZE(verify_iarr), 0x8f, 0,
679 F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
680 verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
681 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
682 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
683 resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
684 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
686 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
687 NULL, reserve_iarr, /* RESERVE(10) <no response function> */
688 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
690 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
691 NULL, release_iarr, /* RELEASE(10) <no response function> */
692 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
695 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
696 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
698 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
699 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
700 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
701 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
702 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
703 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
704 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
706 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
707 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
708 0, 0, 0, 0} }, /* WRITE_BUFFER */
709 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
710 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
711 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
713 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
714 resp_sync_cache, sync_cache_iarr,
715 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
716 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
717 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
718 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
719 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
720 {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
721 resp_pre_fetch, pre_fetch_iarr,
722 {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
723 0, 0, 0, 0} }, /* PRE-FETCH (10) */
726 {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
727 resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
728 {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
729 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
730 {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
731 resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
732 {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
733 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
735 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
736 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
739 static int sdebug_num_hosts;
740 static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
741 static int sdebug_ato = DEF_ATO;
742 static int sdebug_cdb_len = DEF_CDB_LEN;
743 static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
744 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
745 static int sdebug_dif = DEF_DIF;
746 static int sdebug_dix = DEF_DIX;
747 static int sdebug_dsense = DEF_D_SENSE;
/* Driver-wide tunables; most are initialised from DEF_* module-parameter
 * defaults declared earlier in this file. */
748 static int sdebug_every_nth = DEF_EVERY_NTH;
749 static int sdebug_fake_rw = DEF_FAKE_RW;
750 static unsigned int sdebug_guard = DEF_GUARD;
751 static int sdebug_host_max_queue;	/* per host */
752 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
753 static int sdebug_max_luns = DEF_MAX_LUNS;
754 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
755 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
756 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
757 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
758 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
759 static int sdebug_no_uld;
760 static int sdebug_num_parts = DEF_NUM_PARTS;
761 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
762 static int sdebug_opt_blks = DEF_OPT_BLKS;
763 static int sdebug_opts = DEF_OPTS;
764 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
765 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
766 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
767 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
768 static int sdebug_sector_size = DEF_SECTOR_SIZE;
769 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
770 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
771 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
/* Logical block provisioning (thin provisioning) knobs; consumed by
 * scsi_debug_lbp() and the VPD page builders below. */
772 static unsigned int sdebug_lbpu = DEF_LBPU;
773 static unsigned int sdebug_lbpws = DEF_LBPWS;
774 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
775 static unsigned int sdebug_lbprz = DEF_LBPRZ;
776 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
777 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
778 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
779 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
780 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
781 static int sdebug_uuid_ctl = DEF_UUID_CTL;
782 static bool sdebug_random = DEF_RANDOM;
783 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
784 static bool sdebug_removable = DEF_REMOVABLE;
785 static bool sdebug_clustering;
786 static bool sdebug_host_lock = DEF_HOST_LOCK;
787 static bool sdebug_strict = DEF_STRICT;
788 static bool sdebug_any_injecting_opt;
789 static bool sdebug_no_rwlock;
790 static bool sdebug_verbose;
791 static bool have_dif_prot;
792 static bool write_since_sync;
793 static bool sdebug_statistics = DEF_STATISTICS;
794 static bool sdebug_wp;
795 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
796 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
797 static char *sdeb_zbc_model_s;
/* SAM-5 LUN addressing method, selectable via module parameter. */
799 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
800 SAM_LUN_AM_FLAT = 0x1,
801 SAM_LUN_AM_LOGICAL_UNIT = 0x2,
802 SAM_LUN_AM_EXTENDED = 0x3};
803 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
804 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
806 static unsigned int sdebug_store_sectors;
807 static sector_t sdebug_capacity;	/* in sectors */
809 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
810 may still need them */
811 static int sdebug_heads;	/* heads per disk */
812 static int sdebug_cylinders_per;	/* cylinders per surface */
813 static int sdebug_sectors_per;	/* sectors per cylinder */
/* List of all simulated hosts, protected by sdebug_host_list_mutex. */
815 static LIST_HEAD(sdebug_host_list);
816 static DEFINE_MUTEX(sdebug_host_list_mutex);
/* xarray of backing stores; sdeb_first_idx/sdeb_most_recent_idx track
 * creation order (-1 means no store created yet). */
818 static struct xarray per_store_arr;
819 static struct xarray *per_store_ap = &per_store_arr;
820 static int sdeb_first_idx = -1;	/* invalid index ==> none created */
821 static int sdeb_most_recent_idx = -1;
822 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
/* Simple counters for the various reset/abort paths and DIX/DIF activity. */
824 static unsigned long map_size;
825 static int num_aborts;
826 static int num_dev_resets;
827 static int num_target_resets;
828 static int num_bus_resets;
829 static int num_host_resets;
830 static int dix_writes;
831 static int dix_reads;
832 static int dif_errors;
834 /* ZBC global data */
835 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
836 static int sdeb_zbc_zone_cap_mb;
837 static int sdeb_zbc_zone_size_mb;
838 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
839 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
841 static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
842 static int poll_queues; /* iouring iopoll interface.*/
844 static DEFINE_RWLOCK(atomic_rw);
845 static DEFINE_RWLOCK(atomic_rw2);
847 static rwlock_t *ramdisk_lck_a[2];
849 static char sdebug_proc_name[] = MY_NAME;
850 static const char *my_name = MY_NAME;
/* Pseudo bus/driver pair used to register this LLD with the driver core. */
852 static struct bus_type pseudo_lld_bus;
854 static struct device_driver sdebug_driverfs_driver = {
855 	.name 		= sdebug_proc_name,
856 	.bus		= &pseudo_lld_bus,
/* Canned SCSI status results returned by the command handlers below. */
859 static const int check_condition_result =
860 	SAM_STAT_CHECK_CONDITION;
862 static const int illegal_condition_result =
863 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
865 static const int device_qfull_result =
866 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
868 static const int condition_met_result = SAM_STAT_CONDITION_MET;
871 /* Only do the extra work involved in logical block provisioning if one or
872 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
873 * real reads and writes (i.e. not skipping them for speed).
875 static inline bool scsi_debug_lbp(void)
877 return 0 == sdebug_fake_rw &&
878 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
/*
 * Map an LBA to the byte address of its sector inside the backing (ram)
 * store. The LBA is wrapped modulo sdebug_store_sectors via do_div(); if
 * sip or its storep is NULL, the first store in per_store_ap is used.
 */
881 static void *lba2fake_store(struct sdeb_store_info *sip,
882 unsigned long long lba)
884 	struct sdeb_store_info *lsip = sip;
886 	lba = do_div(lba, sdebug_store_sectors);
887 	if (!sip || !sip->storep) {
889 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
891 	return lsip->storep + lba * sdebug_sector_size;
/*
 * Return the T10 protection-information tuple for a sector; the sector
 * number is wrapped modulo sdebug_store_sectors (sector_div) before
 * indexing dif_storep.
 * NOTE(review): the remainder of the parameter list (the sector argument)
 * is elided in this listing.
 */
894 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
897 	sector = sector_div(sector, sdebug_store_sectors);
899 	return sip->dif_storep + sector;
/*
 * Propagate the current sdebug_num_tgts setting to every simulated host.
 * Under sdebug_host_list_mutex: set each Scsi_Host's max_id (leaving room
 * for this_id when it falls inside the target range) and set max_lun just
 * past the well-known REPORT LUNS W-LUN.
 */
902 static void sdebug_max_tgts_luns(void)
904 	struct sdebug_host_info *sdbg_host;
905 	struct Scsi_Host *hpnt;
907 	mutex_lock(&sdebug_host_list_mutex);
908 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
909 		hpnt = sdbg_host->shost;
910 		if ((hpnt->this_id >= 0) &&
911 		    (sdebug_num_tgts > hpnt->this_id))
912 			hpnt->max_id = sdebug_num_tgts + 1;
914 			hpnt->max_id = sdebug_num_tgts;
915 		/* sdebug_max_luns; */
916 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
918 	mutex_unlock(&sdebug_host_list_mutex);
/* Whether the invalid field was in the data-out buffer or the CDB. */
921 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
923 /* Set in_bit to -1 to indicate no bit position of invalid field */
/*
 * Build ILLEGAL REQUEST sense data for an invalid field, including the
 * sense-key-specific (SKS) field pointer identifying the offending byte
 * (in_byte) and bit (in_bit). c_d selects the ASC: INVALID FIELD IN CDB
 * vs INVALID FIELD IN PARAMETER LIST. Logs the result when verbose.
 */
924 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
925 				 enum sdeb_cmd_data c_d,
926 				 int in_byte, int in_bit)
928 	unsigned char *sbuff;
932 	sbuff = scp->sense_buffer;
934 		sdev_printk(KERN_ERR, scp->device,
935 			    "%s: sense_buffer is NULL\n", __func__);
938 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
939 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
940 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
941 	memset(sks, 0, sizeof(sks));
947 		sks[0] |= 0x7 & in_bit;
949 	put_unaligned_be16(in_byte, sks + 1);
/* NOTE(review): SKS bytes are written at two different offsets below;
 * the selection logic between them is elided in this listing. */
955 		memcpy(sbuff + sl + 4, sks, 3);
957 		memcpy(sbuff + 15, sks, 3);
959 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
960 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
961 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
/*
 * Build sense data with the given key/asc/ascq in scp's sense buffer
 * (fixed or descriptor format per sdebug_dsense) and log it when
 * verbose. Bails out with an error log if the sense buffer is missing.
 */
964 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
966 	if (!scp->sense_buffer) {
967 		sdev_printk(KERN_ERR, scp->device,
968 			    "%s: sense_buffer is NULL\n", __func__);
971 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
973 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
976 		sdev_printk(KERN_INFO, scp->device,
977 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
978 			    my_name, key, asc, asq);
981 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
983 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
/*
 * ioctl entry point: when verbose, log a description of recognised cmd
 * values (BLKFLSBUF, CDROM_GET_CAPABILITY) or the raw cmd otherwise.
 * NOTE(review): the actual return statement is elided in this listing;
 * the trailing comment shows -ENOTTY was deliberately NOT used.
 */
986 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
989 	if (sdebug_verbose) {
991 			sdev_printk(KERN_INFO, dev,
992 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
993 		else if (0x5331 == cmd)
994 			sdev_printk(KERN_INFO, dev,
995 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
998 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1002 	/* return -ENOTTY; // correct return but upsets fdisk */
/*
 * Apply the sdebug_cdb_len module setting to one scsi_device by steering
 * the mid-layer's CDB-size hints (use_10_for_rw, use_16_for_rw,
 * use_10_for_ms). Unexpected lengths are forced back to 10 with a warning.
 */
1005 static void config_cdb_len(struct scsi_device *sdev)
1007 	switch (sdebug_cdb_len) {
1008 	case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1009 		sdev->use_10_for_rw = false;
1010 		sdev->use_16_for_rw = false;
1011 		sdev->use_10_for_ms = false;
1013 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1014 		sdev->use_10_for_rw = true;
1015 		sdev->use_16_for_rw = false;
1016 		sdev->use_10_for_ms = false;
1018 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1019 		sdev->use_10_for_rw = true;
1020 		sdev->use_16_for_rw = false;
1021 		sdev->use_10_for_ms = true;
1024 		sdev->use_10_for_rw = false;
1025 		sdev->use_16_for_rw = true;
1026 		sdev->use_10_for_ms = true;
1028 	case 32: /* No knobs to suggest this so same as 16 for now */
1029 		sdev->use_10_for_rw = false;
1030 		sdev->use_16_for_rw = true;
1031 		sdev->use_10_for_ms = true;
1034 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1036 		sdev->use_10_for_rw = true;
1037 		sdev->use_16_for_rw = false;
1038 		sdev->use_10_for_ms = false;
1039 		sdebug_cdb_len = 10;
/*
 * Apply config_cdb_len() to every scsi_device on every simulated host,
 * while holding sdebug_host_list_mutex to keep the host list stable.
 */
1044 static void all_config_cdb_len(void)
1046 	struct sdebug_host_info *sdbg_host;
1047 	struct Scsi_Host *shost;
1048 	struct scsi_device *sdev;
1050 	mutex_lock(&sdebug_host_list_mutex);
1051 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1052 		shost = sdbg_host->shost;
1053 		shost_for_each_device(sdev, shost) {
1054 			config_cdb_len(sdev);
1057 	mutex_unlock(&sdebug_host_list_mutex);
/*
 * Clear the LUNS_CHANGED unit-attention bit on every device that shares
 * devip's host and target (i.e. all LUNs of the same target). Called when
 * SPC-4 semantics require the UA to be reported only once per target.
 */
1060 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1062 	struct sdebug_host_info *sdhp = devip->sdbg_host;
1063 	struct sdebug_dev_info *dp;
1065 	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1066 		if ((devip->sdbg_host == dp->sdbg_host) &&
1067 		    (devip->target == dp->target)) {
1068 			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
/*
 * If a unit attention (UA) is pending for devip, build the matching sense
 * data, clear that UA bit, optionally log it, and return
 * check_condition_result. The lowest set bit in devip->uas_bm is serviced
 * first. NOTE(review): the "no UA pending" return path is elided in this
 * listing.
 */
1073 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1077 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1078 	if (k != SDEBUG_NUM_UAS) {
1079 		const char *cp = NULL;
1083 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1084 					POWER_ON_RESET_ASCQ);
1086 				cp = "power on reset";
1088 		case SDEBUG_UA_POOCCUR:
1089 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1090 					POWER_ON_OCCURRED_ASCQ);
1092 				cp = "power on occurred";
1094 		case SDEBUG_UA_BUS_RESET:
1095 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1100 		case SDEBUG_UA_MODE_CHANGED:
1101 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1104 				cp = "mode parameters changed";
1106 		case SDEBUG_UA_CAPACITY_CHANGED:
1107 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1108 					CAPACITY_CHANGED_ASCQ);
1110 				cp = "capacity data changed";
1112 		case SDEBUG_UA_MICROCODE_CHANGED:
1113 			mk_sense_buffer(scp, UNIT_ATTENTION,
1115 					MICROCODE_CHANGED_ASCQ);
1117 				cp = "microcode has been changed";
1119 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1120 			mk_sense_buffer(scp, UNIT_ATTENTION,
1122 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1124 				cp = "microcode has been changed without reset";
1126 		case SDEBUG_UA_LUNS_CHANGED:
1128 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1129 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1130 			 * on the target, until a REPORT LUNS command is
1131 			 * received.  SPC-4 behavior is to report it only once.
1132 			 * NOTE: sdebug_scsi_level does not use the same
1133 			 * values as struct scsi_device->scsi_level.
1135 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1136 				clear_luns_changed_on_target(devip);
1137 			mk_sense_buffer(scp, UNIT_ATTENTION,
1141 				cp = "reported luns data has changed";
1144 			pr_warn("unexpected unit attention code=%d\n", k);
1149 		clear_bit(k, devip->uas_bm);
1151 			sdev_printk(KERN_INFO, scp->device,
1152 				   "%s reports: Unit attention: %s\n",
1154 		return check_condition_result;
1159 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
/* Copies arr into scp's scatter-gather list and updates the residual to
 * reflect how many bytes were actually transferred. */
1160 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1164 	struct scsi_data_buffer *sdb = &scp->sdb;
1168 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1169 		return DID_ERROR << 16;
1171 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1173 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1178 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1179  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1180  * calls, not required to write in ascending offset order. Assumes resid
1181  * set to scsi_bufflen() prior to any calls.
/* Residual is only ever shrunk (min of current resid and bytes beyond
 * this write), so out-of-order partial writes compose correctly. */
1183 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1184 				  int arr_len, unsigned int off_dst)
1186 	unsigned int act_len, n;
1187 	struct scsi_data_buffer *sdb = &scp->sdb;
1188 	off_t skip = off_dst;
1190 	if (sdb->length <= off_dst)
1192 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1193 		return DID_ERROR << 16;
1195 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1196 				       arr, arr_len, skip);
1197 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1198 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1199 		 scsi_get_resid(scp));
1200 	n = scsi_bufflen(scp) - (off_dst + act_len);
1201 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1205 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1206  * 'arr' or -1 if error.
/* Used by commands with a parameter list (e.g. MODE SELECT, UNMAP). */
1208 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1211 	if (!scsi_bufflen(scp))
1213 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1216 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
/* INQUIRY identity strings (space padded per SPC) and locally assigned
 * NAA-3 components used to fabricate SAS addresses in the VPD pages. */
1220 static char sdebug_inq_vendor_id[9] = "Linux   ";
1221 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1222 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1223 /* Use some locally assigned NAAs for SAS addresses. */
1224 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1225 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1226 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1228 /* Device identification VPD page. Returns number of bytes placed in arr */
/* Emits, in order: a faked T10 vendor-id designator, a logical unit
 * designator (UUID or NAA-3 depending on sdebug_uuid_ctl), the relative
 * target port, target port NAA-3, target port group, target device NAA-3,
 * and a SCSI name string built from naa3_comp_a/target_dev_id. */
1229 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1230 			  int target_dev_id, int dev_id_num,
1231 			  const char *dev_id_str, int dev_id_str_len,
1232 			  const uuid_t *lu_name)
1237 	port_a = target_dev_id + 1;
1238 	/* T10 vendor identifier field format (faked) */
1239 	arr[0] = 0x2;	/* ASCII */
1242 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1243 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1244 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1245 	num = 8 + 16 + dev_id_str_len;
1248 	if (dev_id_num >= 0) {
1249 		if (sdebug_uuid_ctl) {
1250 			/* Locally assigned UUID */
1251 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1252 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1255 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1257 			memcpy(arr + num, lu_name, 16);
1260 			/* NAA-3, Logical unit identifier (binary) */
1261 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1262 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1265 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1268 		/* Target relative port number */
1269 		arr[num++] = 0x61;	/* proto=sas, binary */
1270 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1271 		arr[num++] = 0x0;	/* reserved */
1272 		arr[num++] = 0x4;	/* length */
1273 		arr[num++] = 0x0;	/* reserved */
1274 		arr[num++] = 0x0;	/* reserved */
1276 		arr[num++] = 0x1;	/* relative port A */
1278 	/* NAA-3, Target port identifier */
1279 	arr[num++] = 0x61;	/* proto=sas, binary */
1280 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1283 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1285 	/* NAA-3, Target port group identifier */
1286 	arr[num++] = 0x61;	/* proto=sas, binary */
1287 	arr[num++] = 0x95;	/* piv=1, target port group id */
1292 	put_unaligned_be16(port_group_id, arr + num);
1294 	/* NAA-3, Target device identifier */
1295 	arr[num++] = 0x61;	/* proto=sas, binary */
1296 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1299 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1301 	/* SCSI name string: Target device identifier */
1302 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1303 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1306 	memcpy(arr + num, "naa.32222220", 12);
1308 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1309 	memcpy(arr + num, b, 8);
1311 	memset(arr + num, 0, 4);
/* Canned payload for the Software interface identification VPD page
 * (0x84); copied verbatim by inquiry_vpd_84(). */
1316 static unsigned char vpd84_data[] = {
1317 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1318     0x22,0x22,0x22,0x0,0xbb,0x1,
1319     0x22,0x22,0x22,0x0,0xbb,0x2,
1322 /* Software interface identification VPD page */
1323 static int inquiry_vpd_84(unsigned char *arr)
1325 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1326 return sizeof(vpd84_data);
1329 /* Management network addresses VPD page */
/* Emits two network-address descriptors (storage-config URL and logging
 * URL), each NUL terminated and padded to a 4-byte multiple. */
1330 static int inquiry_vpd_85(unsigned char *arr)
1333 	const char *na1 = "https://www.kernel.org/config";
1334 	const char *na2 = "http://www.kernel.org/log";
1337 	arr[num++] = 0x1;	/* lu, storage config */
1338 	arr[num++] = 0x0;	/* reserved */
1343 	plen = ((plen / 4) + 1) * 4;
1344 	arr[num++] = plen;	/* length, null termianted, padded */
1345 	memcpy(arr + num, na1, olen);
1346 	memset(arr + num + olen, 0, plen - olen);
1349 	arr[num++] = 0x4;	/* lu, logging */
1350 	arr[num++] = 0x0;	/* reserved */
1355 	plen = ((plen / 4) + 1) * 4;
1356 	arr[num++] = plen;	/* length, null terminated, padded */
1357 	memcpy(arr + num, na2, olen);
1358 	memset(arr + num + olen, 0, plen - olen);
1364 /* SCSI ports VPD page */
/* Reports two relative ports (primary A and secondary B), each carrying a
 * 12-byte NAA-3 target port designator derived from target_dev_id. */
1365 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1370 	port_a = target_dev_id + 1;
1371 	port_b = port_a + 1;
1372 	arr[num++] = 0x0;	/* reserved */
1373 	arr[num++] = 0x0;	/* reserved */
1375 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1376 	memset(arr + num, 0, 6);
1379 	arr[num++] = 12;	/* length tp descriptor */
1380 	/* naa-5 target port identifier (A) */
1381 	arr[num++] = 0x61;	/* proto=sas, binary */
1382 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1383 	arr[num++] = 0x0;	/* reserved */
1384 	arr[num++] = 0x8;	/* length */
1385 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1387 	arr[num++] = 0x0;	/* reserved */
1388 	arr[num++] = 0x0;	/* reserved */
1390 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1391 	memset(arr + num, 0, 6);
1394 	arr[num++] = 12;	/* length tp descriptor */
1395 	/* naa-5 target port identifier (B) */
1396 	arr[num++] = 0x61;	/* proto=sas, binary */
1397 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1398 	arr[num++] = 0x0;	/* reserved */
1399 	arr[num++] = 0x8;	/* length */
1400 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
/* Canned payload for the ATA Information VPD page (0x89); copied verbatim
 * by inquiry_vpd_89(). */
1407 static unsigned char vpd89_data[] = {
1408 /* from 4th byte */ 0,0,0,0,
1409 'l','i','n','u','x',' ',' ',' ',
1410 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1412 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1414 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1415 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1416 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1417 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1419 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1421 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1423 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1424 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1425 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1426 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1427 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1428 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1429 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1430 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1434 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1435 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1436 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1451 /* ATA Information VPD page */
1452 static int inquiry_vpd_89(unsigned char *arr)
1454 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1455 return sizeof(vpd89_data);
/* Template for the Block limits VPD page (0xb0); inquiry_vpd_b0() copies
 * this then overwrites the tunable fields. */
1459 static unsigned char vpdb0_data[] = {
1460 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1461 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1462 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1463 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1466 /* Block limits VPD page (SBC-3) */
/* Fills arr from the vpdb0_data template, then patches in the transfer
 * length granularity/limits and the UNMAP/WRITE SAME limits derived from
 * the sdebug_* module parameters. Returns the mandatory 0x3c length. */
1467 static int inquiry_vpd_b0(unsigned char *arr)
1471 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1473 	/* Optimal transfer length granularity */
1474 	if (sdebug_opt_xferlen_exp != 0 &&
1475 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1476 		gran = 1 << sdebug_opt_xferlen_exp;
1478 		gran = 1 << sdebug_physblk_exp;
1479 	put_unaligned_be16(gran, arr + 2);
1481 	/* Maximum Transfer Length */
1482 	if (sdebug_store_sectors > 0x400)
1483 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1485 	/* Optimal Transfer Length */
1486 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1489 		/* Maximum Unmap LBA Count */
1490 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1492 		/* Maximum Unmap Block Descriptor Count */
1493 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1496 	/* Unmap Granularity Alignment */
1497 	if (sdebug_unmap_alignment) {
1498 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1499 		arr[28] |= 0x80; /* UGAVALID */
1502 	/* Optimal Unmap Granularity */
1503 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1505 	/* Maximum WRITE SAME Length */
1506 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1508 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1511 /* Block device characteristics VPD page (SBC-3) */
/* Advertises a non-rotating (SSD-like) medium; the ZONED field is set to
 * 01b (host aware) when the device model is BLK_ZONED_HA. Returns 0x3c. */
1512 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1514 	memset(arr, 0, 0x3c);
1516 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1518 	arr[3] = 5;	/* less than 1.8" */
1519 	if (devip->zmodel == BLK_ZONED_HA)
1520 		arr[4] = 1 << 4;	/* zoned field = 01b */
1525 /* Logical block provisioning VPD page (SBC-4) */
/* Reports LBPRZ (read-zeros-after-unmap) bits derived from sdebug_lbprz
 * when provisioning is active; all other fields are left at zero. */
1526 static int inquiry_vpd_b2(unsigned char *arr)
1528 	memset(arr, 0, 0x4);
1529 	arr[0] = 0;			/* threshold exponent */
1536 	if (sdebug_lbprz && scsi_debug_lbp())
1537 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1538 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1539 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1540 	/* threshold_percentage=0 */
1544 /* Zoned block device characteristics VPD page (ZBC mandatory) */
/* Sets URSWRZ, reports max open zones (host-managed only), and when zone
 * capacity is smaller than zone size, advertises the constant zone start
 * offset together with the zone size. */
1545 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1547 	memset(arr, 0, 0x3c);
1548 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1550 	 * Set Optimal number of open sequential write preferred zones and
1551 	 * Optimal number of non-sequentially written sequential write
1552 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1553 	 * fields set to zero, apart from Max. number of open swrz_s field.
1555 	put_unaligned_be32(0xffffffff, &arr[4]);
1556 	put_unaligned_be32(0xffffffff, &arr[8]);
1557 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1558 		put_unaligned_be32(devip->max_open, &arr[12]);
1560 		put_unaligned_be32(0xffffffff, &arr[12]);
1561 	if (devip->zcap < devip->zsize) {
1562 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1563 		put_unaligned_be64(devip->zsize, &arr[20]);
1570 #define SDEBUG_LONG_INQ_SZ 96
1571 #define SDEBUG_MAX_INQ_ARR_SZ 584
/*
 * INQUIRY command handler. Dispatches on CDB bits: CMDDT set -> invalid
 * field; EVPD set -> build the requested VPD page via the inquiry_vpd_*
 * helpers above; otherwise build a standard INQUIRY response with faked
 * identity strings and version descriptors. Response is assembled in a
 * GFP_ATOMIC scratch buffer, then copied to the data-in buffer clipped to
 * the allocation length.
 */
1573 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1575 	unsigned char pq_pdt;
1577 	unsigned char *cmd = scp->cmnd;
1580 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1582 	alloc_len = get_unaligned_be16(cmd + 3);
1583 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1585 		return DID_REQUEUE << 16;
1586 	is_disk = (sdebug_ptype == TYPE_DISK);
1587 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1588 	is_disk_zbc = (is_disk || is_zbc);
1589 	have_wlun = scsi_is_wlun(scp->device->lun);
1591 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1592 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1593 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1595 		pq_pdt = (sdebug_ptype & 0x1f);
1597 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1598 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1600 		return check_condition_result;
1601 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1602 		int lu_id_num, port_group_id, target_dev_id;
1605 		int host_no = devip->sdbg_host->shost->host_no;
1607 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1608 		    (devip->channel & 0x7f);
1609 		if (sdebug_vpd_use_hostno == 0)
1611 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1612 			    (devip->target * 1000) + devip->lun);
1613 		target_dev_id = ((host_no + 1) * 2000) +
1614 				 (devip->target * 1000) - 3;
1615 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1616 		if (0 == cmd[2]) { /* supported vital product data pages */
1617 			arr[1] = cmd[2];	/*sanity */
1619 			arr[n++] = 0x0;   /* this page */
1620 			arr[n++] = 0x80;  /* unit serial number */
1621 			arr[n++] = 0x83;  /* device identification */
1622 			arr[n++] = 0x84;  /* software interface ident. */
1623 			arr[n++] = 0x85;  /* management network addresses */
1624 			arr[n++] = 0x86;  /* extended inquiry */
1625 			arr[n++] = 0x87;  /* mode page policy */
1626 			arr[n++] = 0x88;  /* SCSI ports */
1627 			if (is_disk_zbc) {	  /* SBC or ZBC */
1628 				arr[n++] = 0x89;  /* ATA information */
1629 				arr[n++] = 0xb0;  /* Block limits */
1630 				arr[n++] = 0xb1;  /* Block characteristics */
1632 					arr[n++] = 0xb2;  /* LB Provisioning */
1634 					arr[n++] = 0xb6;  /* ZB dev. char. */
1636 			arr[3] = n - 4;	  /* number of supported VPD pages */
1637 		} else if (0x80 == cmd[2]) { /* unit serial number */
1638 			arr[1] = cmd[2];	/*sanity */
1640 			memcpy(&arr[4], lu_id_str, len);
1641 		} else if (0x83 == cmd[2]) { /* device identification */
1642 			arr[1] = cmd[2];	/*sanity */
1643 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1644 						target_dev_id, lu_id_num,
1647 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1648 			arr[1] = cmd[2];	/*sanity */
1649 			arr[3] = inquiry_vpd_84(&arr[4]);
1650 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1651 			arr[1] = cmd[2];	/*sanity */
1652 			arr[3] = inquiry_vpd_85(&arr[4]);
1653 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1654 			arr[1] = cmd[2];	/*sanity */
1655 			arr[3] = 0x3c;	/* number of following entries */
1656 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1657 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1658 			else if (have_dif_prot)
1659 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1661 				arr[4] = 0x0;   /* no protection stuff */
1662 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1663 		} else if (0x87 == cmd[2]) { /* mode page policy */
1664 			arr[1] = cmd[2];	/*sanity */
1665 			arr[3] = 0x8;	/* number of following entries */
1666 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1667 			arr[6] = 0x80;	/* mlus, shared */
1668 			arr[8] = 0x18;	 /* protocol specific lu */
1669 			arr[10] = 0x82;	 /* mlus, per initiator port */
1670 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1671 			arr[1] = cmd[2];	/*sanity */
1672 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1673 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1674 			arr[1] = cmd[2];        /*sanity */
1675 			n = inquiry_vpd_89(&arr[4]);
1676 			put_unaligned_be16(n, arr + 2);
1677 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1678 			arr[1] = cmd[2];        /*sanity */
1679 			arr[3] = inquiry_vpd_b0(&arr[4]);
1680 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1681 			arr[1] = cmd[2];        /*sanity */
1682 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1683 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1684 			arr[1] = cmd[2];        /*sanity */
1685 			arr[3] = inquiry_vpd_b2(&arr[4]);
1686 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1687 			arr[1] = cmd[2];        /*sanity */
1688 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1690 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1692 			return check_condition_result;
1694 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1695 		ret = fill_from_dev_buffer(scp, arr,
1696 			  min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1700 	/* drops through here for a standard inquiry */
1701 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1702 	arr[2] = sdebug_scsi_level;
1703 	arr[3] = 2;    /* response_data_format==2 */
1704 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1705 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1706 	if (sdebug_vpd_use_hostno == 0)
1707 		arr[5] |= 0x10; /* claim: implicit TPGS */
1708 	arr[6] = 0x10; /* claim: MultiP */
1709 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1710 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1711 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1712 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1713 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1714 	/* Use Vendor Specific area to place driver date in ASCII hex */
1715 	memcpy(&arr[36], sdebug_version_date, 8);
1716 	/* version descriptors (2 bytes each) follow */
1717 	put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1718 	put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1720 	if (is_disk) {		/* SBC-4 no version claimed */
1721 		put_unaligned_be16(0x600, arr + n);
1723 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1724 		put_unaligned_be16(0x525, arr + n);
1726 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1727 		put_unaligned_be16(0x624, arr + n);
1730 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1731 	ret = fill_from_dev_buffer(scp, arr,
1732 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1737 /* See resp_iec_m_pg() for how this data is manipulated */
/* Informational exceptions control mode page (0x1c); resp_requests()
 * inspects bytes 2 and 3 to decide whether to fake TEST/MRIE=6 data. */
1738 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
/*
 * REQUEST SENSE handler. Three cases, in fixed or descriptor format per
 * the DESC bit in the CDB: (1) device in a "stopped" state -> LOGICAL
 * UNIT NOT READY pollable data; (2) informational exceptions page has
 * TEST=1 and MRIE=6 -> THRESHOLD EXCEEDED / failure prediction false;
 * (3) otherwise nothing to report. Response is clipped to alloc_len.
 */
1741 static int resp_requests(struct scsi_cmnd *scp,
1742 			 struct sdebug_dev_info *devip)
1744 	unsigned char *cmd = scp->cmnd;
1745 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1746 	bool dsense = !!(cmd[1] & 1);
1747 	u32 alloc_len = cmd[4];
1749 	int stopped_state = atomic_read(&devip->stopped);
1751 	memset(arr, 0, sizeof(arr));
1752 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1756 			arr[2] = LOGICAL_UNIT_NOT_READY;
1757 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1761 			arr[2] = NOT_READY;	/* NO_SENSE in sense_key */
1762 			arr[7] = 0xa;	/* 18 byte sense buffer */
1763 			arr[12] = LOGICAL_UNIT_NOT_READY;
1764 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1766 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1767 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1770 			arr[1] = 0x0;	/* NO_SENSE in sense_key */
1771 			arr[2] = THRESHOLD_EXCEEDED;
1772 			arr[3] = 0xff;	/* Failure prediction(false) */
1776 			arr[2] = 0x0;	/* NO_SENSE in sense_key */
1777 			arr[7] = 0xa;	/* 18 byte sense buffer */
1778 			arr[12] = THRESHOLD_EXCEEDED;
1779 			arr[13] = 0xff;	/* Failure prediction(false) */
1781 	} else {	/* nothing to report */
1784 			memset(arr, 0, len);
1787 			memset(arr, 0, len);
1792 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
/*
 * START STOP UNIT handler. Rejects non-zero POWER CONDITION fields.
 * Handles the special "becoming ready" state (stopped_state == 2) driven
 * by the tur_ms_to_ready timer: a START during that window is dummied up
 * as success while a STOP is rejected. Otherwise toggles the stopped
 * state; an unchanged state or the IMMED bit returns immediately.
 */
1795 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1797 	unsigned char *cmd = scp->cmnd;
1798 	int power_cond, want_stop, stopped_state;
1801 	power_cond = (cmd[4] & 0xf0) >> 4;
1803 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1804 		return check_condition_result;
1806 	want_stop = !(cmd[4] & 1);
1807 	stopped_state = atomic_read(&devip->stopped);
1808 	if (stopped_state == 2) {
1809 		ktime_t now_ts = ktime_get_boottime();
1811 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1812 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1814 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1815 				/* tur_ms_to_ready timer extinguished */
1816 				atomic_set(&devip->stopped, 0);
1820 		if (stopped_state == 2) {
1822 				stopped_state = 1;	/* dummy up success */
1823 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1824 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1825 				return check_condition_result;
1829 	changing = (stopped_state != want_stop);
1831 		atomic_xchg(&devip->stopped, want_stop);
1832 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1833 		return SDEG_RES_IMMED_MASK;
1838 static sector_t get_sdebug_capacity(void)
1840 static const unsigned int gibibyte = 1073741824;
1842 if (sdebug_virtual_gb > 0)
1843 return (sector_t)sdebug_virtual_gb *
1844 (gibibyte / sdebug_sector_size);
1846 return sdebug_store_sectors;
1849 #define SDEBUG_READCAP_ARR_SZ 8
/*
 * READ CAPACITY (10) handler. Reports last-LBA and block size; per SBC,
 * a capacity that does not fit in 32 bits is reported as 0xffffffff so
 * the initiator falls back to READ CAPACITY (16).
 */
1850 static int resp_readcap(struct scsi_cmnd *scp,
1851 			struct sdebug_dev_info *devip)
1853 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1856 	/* following just in case virtual_gb changed */
1857 	sdebug_capacity = get_sdebug_capacity();
1858 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1859 	if (sdebug_capacity < 0xffffffff) {
1860 		capac = (unsigned int)sdebug_capacity - 1;
1861 		put_unaligned_be32(capac, arr + 0);
1863 		put_unaligned_be32(0xffffffff, arr + 0);
1864 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1865 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1868 #define SDEBUG_READCAP16_ARR_SZ 32
/*
 * READ CAPACITY (16) handler. Reports 64-bit last-LBA, block size,
 * physical block exponent, lowest aligned LBA, provisioning (LBPME/LBPRZ)
 * and protection (P_TYPE/PROT_EN) bits; RC BASIS is set for host-managed
 * ZBC devices since total capacity is always reported.
 */
1869 static int resp_readcap16(struct scsi_cmnd *scp,
1870 			  struct sdebug_dev_info *devip)
1872 	unsigned char *cmd = scp->cmnd;
1873 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1876 	alloc_len = get_unaligned_be32(cmd + 10);
1877 	/* following just in case virtual_gb changed */
1878 	sdebug_capacity = get_sdebug_capacity();
1879 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1880 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1881 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1882 	arr[13] = sdebug_physblk_exp & 0xf;
1883 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1885 	if (scsi_debug_lbp()) {
1886 		arr[14] |= 0x80; /* LBPME */
1887 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1888 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1889 		 * in the wider field maps to 0 in this field.
1891 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1896 	 * Since the scsi_debug READ CAPACITY implementation always reports the
1897 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1899 	if (devip->zmodel == BLK_ZONED_HM)
1902 	arr[15] = sdebug_lowest_aligned & 0xff;
1904 	if (have_dif_prot) {
1905 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1906 		arr[12] |= 1; /* PROT_EN */
1909 	return fill_from_dev_buffer(scp, arr,
1910 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1913 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
/*
 * Respond to REPORT TARGET PORT GROUPS (SPC maintenance-in).  Fabricates two
 * port groups -- group A with one usable relative port, group B unavailable --
 * matching the two-port claim in VPD page 0x88.
 * NOTE(review): this extract is truncated -- allocation-failure branches,
 * some array writes and the kfree/return tail are missing from the view.
 */
1915 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1916 struct sdebug_dev_info *devip)
1918 unsigned char *cmd = scp->cmnd;
1920 int host_no = devip->sdbg_host->shost->host_no;
1921 int port_group_a, port_group_b, port_a, port_b;
1925 alen = get_unaligned_be32(cmd + 6);
1926 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
/* allocation failure: ask the midlayer to requeue the command */
1928 return DID_REQUEUE << 16;
1930 * EVPD page 0x88 states we have two ports, one
1931 * real and a fake port with no device connected.
1932 * So we create two port groups with one port each
1933 * and set the group with port B to unavailable.
1935 port_a = 0x1; /* relative port A */
1936 port_b = 0x2; /* relative port B */
1937 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1938 (devip->channel & 0x7f);
1939 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1940 (devip->channel & 0x7f) + 0x80;
1943 * The asymmetric access state is cycled according to the host_id.
1946 if (sdebug_vpd_use_hostno == 0) {
1947 arr[n++] = host_no % 3; /* Asymm access state */
1948 arr[n++] = 0x0F; /* claim: all states are supported */
1950 arr[n++] = 0x0; /* Active/Optimized path */
1951 arr[n++] = 0x01; /* only support active/optimized paths */
1953 put_unaligned_be16(port_group_a, arr + n);
1955 arr[n++] = 0; /* Reserved */
1956 arr[n++] = 0; /* Status code */
1957 arr[n++] = 0; /* Vendor unique */
1958 arr[n++] = 0x1; /* One port per group */
1959 arr[n++] = 0; /* Reserved */
1960 arr[n++] = 0; /* Reserved */
1961 put_unaligned_be16(port_a, arr + n);
1963 arr[n++] = 3; /* Port unavailable */
1964 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1965 put_unaligned_be16(port_group_b, arr + n);
1967 arr[n++] = 0; /* Reserved */
1968 arr[n++] = 0; /* Status code */
1969 arr[n++] = 0; /* Vendor unique */
1970 arr[n++] = 0x1; /* One port per group */
1971 arr[n++] = 0; /* Reserved */
1972 arr[n++] = 0; /* Reserved */
1973 put_unaligned_be16(port_b, arr + n);
/* RETURN DATA LENGTH field at the front of the parameter data */
1977 put_unaligned_be32(rlen, arr + 0);
1980 * Return the smallest value of either
1981 * - The allocated length
1982 * - The constructed command length
1983 * - The maximum array size
1985 rlen = min(alen, n);
1986 ret = fill_from_dev_buffer(scp, arr,
1987 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
/*
 * Respond to REPORT SUPPORTED OPERATION CODES (SPC maintenance-in).
 * reporting_opts 0 lists every supported opcode (with attached service-action
 * variants); 1..3 report a single opcode (optionally with service action),
 * returning its CDB usage mask.  rctd adds a 12-byte timeouts descriptor
 * per entry (hence bump 20 vs 8).
 * NOTE(review): this extract is heavily truncated -- declarations (arr, rctd,
 * req_sa, u), 'continue'/'break' statements, offset updates, closing braces
 * and the kfree/return tail are missing from the view.
 */
1992 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1993 struct sdebug_dev_info *devip)
1996 u8 reporting_opts, req_opcode, sdeb_i, supp;
1998 u32 alloc_len, a_len;
1999 int k, offset, len, errsts, count, bump, na;
2000 const struct opcode_info_t *oip;
2001 const struct opcode_info_t *r_oip;
2003 u8 *cmd = scp->cmnd;
2005 rctd = !!(cmd[2] & 0x80);
2006 reporting_opts = cmd[2] & 0x7;
2007 req_opcode = cmd[3];
2008 req_sa = get_unaligned_be16(cmd + 4);
2009 alloc_len = get_unaligned_be32(cmd + 6);
2010 if (alloc_len < 4 || alloc_len > 0xffff) {
2011 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2012 return check_condition_result;
2014 if (alloc_len > 8192)
2018 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2020 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2022 return check_condition_result;
2024 switch (reporting_opts) {
2025 case 0: /* all commands */
2026 /* count number of commands */
2027 for (count = 0, oip = opcode_info_arr;
2028 oip->num_attached != 0xff; ++oip) {
2029 if (F_INV_OP & oip->flags)
2031 count += (oip->num_attached + 1);
2033 bump = rctd ? 20 : 8;
2034 put_unaligned_be32(count * bump, arr);
2035 for (offset = 4, oip = opcode_info_arr;
2036 oip->num_attached != 0xff && offset < a_len; ++oip) {
2037 if (F_INV_OP & oip->flags)
2039 na = oip->num_attached;
2040 arr[offset] = oip->opcode;
2041 put_unaligned_be16(oip->sa, arr + offset + 2);
2043 arr[offset + 5] |= 0x2;
2044 if (FF_SA & oip->flags)
2045 arr[offset + 5] |= 0x1;
2046 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2048 put_unaligned_be16(0xa, arr + offset + 8);
2050 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2051 if (F_INV_OP & oip->flags)
2054 arr[offset] = oip->opcode;
2055 put_unaligned_be16(oip->sa, arr + offset + 2);
2057 arr[offset + 5] |= 0x2;
2058 if (FF_SA & oip->flags)
2059 arr[offset + 5] |= 0x1;
2060 put_unaligned_be16(oip->len_mask[0],
2063 put_unaligned_be16(0xa,
2070 case 1: /* one command: opcode only */
2071 case 2: /* one command: opcode plus service action */
2072 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2073 sdeb_i = opcode_ind_arr[req_opcode];
2074 oip = &opcode_info_arr[sdeb_i];
2075 if (F_INV_OP & oip->flags) {
2079 if (1 == reporting_opts) {
2080 if (FF_SA & oip->flags) {
2081 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2084 return check_condition_result;
2087 } else if (2 == reporting_opts &&
2088 0 == (FF_SA & oip->flags)) {
2089 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2090 kfree(arr); /* point at requested sa */
2091 return check_condition_result;
2093 if (0 == (FF_SA & oip->flags) &&
2094 req_opcode == oip->opcode)
2096 else if (0 == (FF_SA & oip->flags)) {
2097 na = oip->num_attached;
2098 for (k = 0, oip = oip->arrp; k < na;
2100 if (req_opcode == oip->opcode)
2103 supp = (k >= na) ? 1 : 3;
2104 } else if (req_sa != oip->sa) {
2105 na = oip->num_attached;
2106 for (k = 0, oip = oip->arrp; k < na;
2108 if (req_sa == oip->sa)
2111 supp = (k >= na) ? 1 : 3;
/* supp==3: supported in conformance with standard; build CDB usage data */
2115 u = oip->len_mask[0];
2116 put_unaligned_be16(u, arr + 2);
2117 arr[4] = oip->opcode;
2118 for (k = 1; k < u; ++k)
2119 arr[4 + k] = (k < 16) ?
2120 oip->len_mask[k] : 0xff;
2125 arr[1] = (rctd ? 0x80 : 0) | supp;
2127 put_unaligned_be16(0xa, arr + offset);
2132 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2134 return check_condition_result;
2136 offset = (offset < a_len) ? offset : a_len;
2137 len = (offset < alloc_len) ? offset : alloc_len;
2138 errsts = fill_from_dev_buffer(scp, arr, len);
2143 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2144 struct sdebug_dev_info *devip)
2149 u8 *cmd = scp->cmnd;
2151 memset(arr, 0, sizeof(arr));
2152 repd = !!(cmd[2] & 0x80);
2153 alloc_len = get_unaligned_be32(cmd + 6);
2154 if (alloc_len < 4) {
2155 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2156 return check_condition_result;
2158 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2159 arr[1] = 0x1; /* ITNRS */
2166 len = (len < alloc_len) ? len : alloc_len;
2167 return fill_from_dev_buffer(scp, arr, len);
2170 /* <<Following mode page info copied from ST318451LW>> */
/*
 * Build the Read-Write Error Recovery mode page (0x01, 12 bytes) at p.
 * pcontrol==1 (changeable values) zeroes everything after the header,
 * advertising that no field is changeable.  Returns the page length.
 * NOTE(review): second initializer line and the pcontrol test restored;
 * the extract had dropped them -- tail bytes per upstream scsi_debug.
 */
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
					5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
/*
 * Build the Disconnect-Reconnect mode page (0x02, 16 bytes) at p.
 * pcontrol==1 (changeable values) zeroes everything after the header.
 * Returns the page length.
 * NOTE(review): braces and the pcontrol test restored; the extract had
 * dropped those lines.
 */
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect page for mode_sense */
	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
2194 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2195 { /* Format device page for mode_sense */
2196 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2197 0, 0, 0, 0, 0, 0, 0, 0,
2198 0, 0, 0, 0, 0x40, 0, 0, 0};
2200 memcpy(p, format_pg, sizeof(format_pg));
2201 put_unaligned_be16(sdebug_sectors_per, p + 10);
2202 put_unaligned_be16(sdebug_sector_size, p + 12);
2203 if (sdebug_removable)
2204 p[20] |= 0x20; /* should agree with INQUIRY */
2206 memset(p + 2, 0, sizeof(format_pg) - 2);
2207 return sizeof(format_pg);
/*
 * Current (and saved) Caching mode page (0x08, 20 bytes).  Byte 2 bit 0x4 is
 * WCE (write cache enable), toggled by resp_caching_pg() according to the
 * SDEBUG_OPT_N_WCE option; MODE SELECT may overwrite bytes 2..19.
 * NOTE(review): trailing initializer line restored; the extract had
 * truncated it.
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2214 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2215 { /* Caching page for mode_sense */
2216 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2217 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2218 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2219 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2221 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2222 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2223 memcpy(p, caching_pg, sizeof(caching_pg));
2225 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2226 else if (2 == pcontrol)
2227 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2228 return sizeof(caching_pg);
/*
 * Current (and saved) Control mode page (0xa, 12 bytes).  Byte 2 bit 0x4 is
 * D_SENSE (descriptor sense), byte 5 bit 0x80 is ATO; both are adjusted by
 * resp_ctrl_m_pg() and MODE SELECT.
 * NOTE(review): trailing initializer line restored; the extract had
 * truncated it -- tail bytes per upstream scsi_debug.
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2234 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2235 { /* Control mode page for mode_sense */
2236 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2238 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2242 ctrl_m_pg[2] |= 0x4;
2244 ctrl_m_pg[2] &= ~0x4;
2247 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2249 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2251 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2252 else if (2 == pcontrol)
2253 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2254 return sizeof(ctrl_m_pg);
2258 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2259 { /* Informational Exceptions control mode page for mode_sense */
2260 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2262 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2265 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2267 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2268 else if (2 == pcontrol)
2269 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2270 return sizeof(iec_m_pg);
/*
 * Build the SAS SSP short-format protocol-specific port mode page
 * (0x19, 8 bytes) at p.  pcontrol==1 (changeable values) zeroes everything
 * after the header.  Returns the page length.
 * NOTE(review): the pcontrol test and braces restored; the extract had
 * dropped those lines.
 */
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
/*
 * Build the SAS Phy Control And Discover mode subpage (0x59/0x1) at p for
 * two phys, patching in NAA-3 SAS addresses and relative target port ids
 * derived from target_dev_id.  pcontrol==1 zeroes the changeable area.
 * NOTE(review): this extract is truncated -- the second signature line
 * (int target_dev_id), local declarations, the closing initializer brace
 * and the pcontrol test are missing from the view.
 */
2285 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2287 { /* SAS phy control and discover mode page for mode_sense */
2288 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2289 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2290 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2291 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2292 0x2, 0, 0, 0, 0, 0, 0, 0,
2293 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2294 0, 0, 0, 0, 0, 0, 0, 0,
2295 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2296 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2297 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2298 0x3, 0, 0, 0, 0, 0, 0, 0,
2299 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2300 0, 0, 0, 0, 0, 0, 0, 0,
/* patch both phys' SAS and attached-SAS addresses into the template */
2304 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2305 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2306 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2307 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2308 port_a = target_dev_id + 1;
2309 port_b = port_a + 1;
2310 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2311 put_unaligned_be32(port_a, p + 20);
2312 put_unaligned_be32(port_b, p + 48 + 20);
2314 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2315 return sizeof(sas_pcd_m_pg);
2318 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2319 { /* SAS SSP shared protocol specific port mode subpage */
2320 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2321 0, 0, 0, 0, 0, 0, 0, 0,
2324 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2326 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2327 return sizeof(sas_sha_m_pg);
2330 #define SDEBUG_MAX_MSENSE_SZ 256
/*
 * Respond to MODE SENSE (6) and MODE SENSE (10): builds the mode parameter
 * header, optional block descriptor (8 or 16 bytes with LLBAA) and the
 * requested mode page(s), dispatching on page code (0x3f = all pages).
 * NOTE(review): this extract is heavily truncated -- the mode-header
 * assembly, several 'break' statements, the bad_pcode path and closing
 * braces are missing from the view.
 */
2332 static int resp_mode_sense(struct scsi_cmnd *scp,
2333 struct sdebug_dev_info *devip)
2335 int pcontrol, pcode, subpcode, bd_len;
2336 unsigned char dev_spec;
2337 u32 alloc_len, offset, len;
2339 int target = scp->device->id;
2341 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2342 unsigned char *cmd = scp->cmnd;
2343 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2345 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2346 pcontrol = (cmd[2] & 0xc0) >> 6;
2347 pcode = cmd[2] & 0x3f;
2349 msense_6 = (MODE_SENSE == cmd[0]);
2350 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2351 is_disk = (sdebug_ptype == TYPE_DISK);
2352 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2353 if ((is_disk || is_zbc) && !dbd)
2354 bd_len = llbaa ? 16 : 8;
2357 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2358 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2359 if (0x3 == pcontrol) { /* Saving values not supported */
2360 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2361 return check_condition_result;
2363 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2364 (devip->target * 1000) - 3;
2365 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2366 if (is_disk || is_zbc) {
2367 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2379 arr[4] = 0x1; /* set LONGLBA bit */
2380 arr[7] = bd_len; /* assume 255 or less */
/* lazily compute capacity for the block descriptor */
2384 if ((bd_len > 0) && (!sdebug_capacity))
2385 sdebug_capacity = get_sdebug_capacity();
2388 if (sdebug_capacity > 0xfffffffe)
2389 put_unaligned_be32(0xffffffff, ap + 0);
2391 put_unaligned_be32(sdebug_capacity, ap + 0);
2392 put_unaligned_be16(sdebug_sector_size, ap + 6);
2395 } else if (16 == bd_len) {
2396 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2397 put_unaligned_be32(sdebug_sector_size, ap + 12);
2402 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2403 /* TODO: Control Extension page */
2404 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2405 return check_condition_result;
2410 case 0x1: /* Read-Write error recovery page, direct access */
2411 len = resp_err_recov_pg(ap, pcontrol, target);
2414 case 0x2: /* Disconnect-Reconnect page, all devices */
2415 len = resp_disconnect_pg(ap, pcontrol, target);
2418 case 0x3: /* Format device page, direct access */
2420 len = resp_format_pg(ap, pcontrol, target);
2425 case 0x8: /* Caching page, direct access */
2426 if (is_disk || is_zbc) {
2427 len = resp_caching_pg(ap, pcontrol, target);
2432 case 0xa: /* Control Mode page, all devices */
2433 len = resp_ctrl_m_pg(ap, pcontrol, target);
2436 case 0x19: /* if spc==1 then sas phy, control+discover */
2437 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2438 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2439 return check_condition_result;
2442 if ((0x0 == subpcode) || (0xff == subpcode))
2443 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2444 if ((0x1 == subpcode) || (0xff == subpcode))
2445 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2447 if ((0x2 == subpcode) || (0xff == subpcode))
2448 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2451 case 0x1c: /* Informational Exceptions Mode page, all devices */
2452 len = resp_iec_m_pg(ap, pcontrol, target);
2455 case 0x3f: /* Read all Mode pages */
2456 if ((0 == subpcode) || (0xff == subpcode)) {
2457 len = resp_err_recov_pg(ap, pcontrol, target);
2458 len += resp_disconnect_pg(ap + len, pcontrol, target);
2460 len += resp_format_pg(ap + len, pcontrol,
2462 len += resp_caching_pg(ap + len, pcontrol,
2464 } else if (is_zbc) {
2465 len += resp_caching_pg(ap + len, pcontrol,
2468 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2469 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2470 if (0xff == subpcode) {
2471 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2472 target, target_dev_id);
2473 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2475 len += resp_iec_m_pg(ap + len, pcontrol, target);
2478 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2479 return check_condition_result;
2487 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2488 return check_condition_result;
/* mode data length: excludes itself (1 byte for msense_6, 2 for 10) */
2491 arr[0] = offset - 1;
2493 put_unaligned_be16((offset - 2), arr + 0);
2494 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2497 #define SDEBUG_MAX_MSELECT_SZ 512
/*
 * Respond to MODE SELECT (6)/(10): fetches the parameter list, validates the
 * header/block-descriptor layout, and applies changes to the Caching (0x8),
 * Control (0xa) or Informational Exceptions (0x1c) pages, raising the
 * MODE PARAMETERS CHANGED unit attention on success.
 * NOTE(review): this extract is truncated -- pf/sp extraction, some error
 * branches, switch framing, the nm_trans setting under ctrl page and the
 * final return are missing from the view.
 */
2499 static int resp_mode_select(struct scsi_cmnd *scp,
2500 struct sdebug_dev_info *devip)
2502 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2503 int param_len, res, mpage;
2504 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2505 unsigned char *cmd = scp->cmnd;
2506 int mselect6 = (MODE_SELECT == cmd[0]);
2508 memset(arr, 0, sizeof(arr));
2511 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2512 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2513 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2514 return check_condition_result;
2516 res = fetch_to_dev_buffer(scp, arr, param_len);
2518 return DID_ERROR << 16;
2519 else if (sdebug_verbose && (res < param_len))
2520 sdev_printk(KERN_INFO, scp->device,
2521 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2522 __func__, param_len, res);
2523 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2524 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2525 off = bd_len + (mselect6 ? 4 : 8);
2526 if (md_len > 2 || off >= res) {
2527 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2528 return check_condition_result;
2530 mpage = arr[off] & 0x3f;
2531 ps = !!(arr[off] & 0x80);
/* PS bit must be zero in MODE SELECT data */
2533 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2534 return check_condition_result;
2536 spf = !!(arr[off] & 0x40);
2537 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2539 if ((pg_len + off) > param_len) {
2540 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2541 PARAMETER_LIST_LENGTH_ERR, 0);
2542 return check_condition_result;
2545 case 0x8: /* Caching Mode page */
2546 if (caching_pg[1] == arr[off + 1]) {
2547 memcpy(caching_pg + 2, arr + off + 2,
2548 sizeof(caching_pg) - 2);
2549 goto set_mode_changed_ua;
2552 case 0xa: /* Control Mode page */
2553 if (ctrl_m_pg[1] == arr[off + 1]) {
2554 memcpy(ctrl_m_pg + 2, arr + off + 2,
2555 sizeof(ctrl_m_pg) - 2);
2556 if (ctrl_m_pg[4] & 0x8)
/* mirror the new D_SENSE setting into the module state */
2560 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2561 goto set_mode_changed_ua;
2564 case 0x1c: /* Informational Exceptions Mode page */
2565 if (iec_m_pg[1] == arr[off + 1]) {
2566 memcpy(iec_m_pg + 2, arr + off + 2,
2567 sizeof(iec_m_pg) - 2);
2568 goto set_mode_changed_ua;
2574 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2575 return check_condition_result;
2576 set_mode_changed_ua:
2577 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
/*
 * Build the Temperature log page parameters (two 6-byte parameters:
 * current temperature 38C, reference temperature 65C) at arr.
 * Returns the number of bytes written (12).
 * NOTE(review): braces/closing restored; the extract had dropped them.
 */
static int resp_temp_l_pg(unsigned char *arr)
{
	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
				     };

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2591 static int resp_ie_l_pg(unsigned char *arr)
2593 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2596 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2597 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2598 arr[4] = THRESHOLD_EXCEEDED;
2601 return sizeof(ie_l_pg);
/*
 * Build the Environmental Reporting log subpage parameters (two 8-byte-
 * payload parameters: a temperature report and a humidity report) at arr,
 * returning the number of bytes written.
 * NOTE(review): this extract is truncated -- the opening/closing braces,
 * initializer terminator and return statement are missing from the view.
 */
2604 static int resp_env_rep_l_spg(unsigned char *arr)
2606 unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2607 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2608 0x1, 0x0, 0x23, 0x8,
2609 0x0, 55, 72, 35, 55, 45, 0, 0,
2612 memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2613 return sizeof(env_rep_l_spg);
2616 #define SDEBUG_MAX_LSENSE_SZ 512
/*
 * Respond to LOG SENSE: serves the supported-pages list (0x0), Temperature
 * (0xd), Informational Exceptions (0x2f) and -- with subpage codes -- the
 * supported subpages lists and Environmental Reporting (0xd/0x1).
 * NOTE(review): this extract is truncated -- ppc/sp extraction, switch
 * framing, several n-counter assignments and 'break' statements are
 * missing from the view.  The final clamp against SDEBUG_MAX_INQ_ARR_SZ
 * (not SDEBUG_MAX_LSENSE_SZ) matches upstream -- presumably intentional
 * reuse of the smaller bound; verify against upstream before "fixing".
 */
2618 static int resp_log_sense(struct scsi_cmnd *scp,
2619 struct sdebug_dev_info *devip)
2621 int ppc, sp, pcode, subpcode;
2622 u32 alloc_len, len, n;
2623 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2624 unsigned char *cmd = scp->cmnd;
2626 memset(arr, 0, sizeof(arr));
/* PPC and SP bits are not supported */
2630 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2631 return check_condition_result;
2633 pcode = cmd[2] & 0x3f;
2634 subpcode = cmd[3] & 0xff;
2635 alloc_len = get_unaligned_be16(cmd + 7);
2637 if (0 == subpcode) {
2639 case 0x0: /* Supported log pages log page */
2641 arr[n++] = 0x0; /* this page */
2642 arr[n++] = 0xd; /* Temperature */
2643 arr[n++] = 0x2f; /* Informational exceptions */
2646 case 0xd: /* Temperature log page */
2647 arr[3] = resp_temp_l_pg(arr + 4);
2649 case 0x2f: /* Informational exceptions log page */
2650 arr[3] = resp_ie_l_pg(arr + 4);
2653 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2654 return check_condition_result;
2656 } else if (0xff == subpcode) {
2660 case 0x0: /* Supported log pages and subpages log page */
2663 arr[n++] = 0x0; /* 0,0 page */
2665 arr[n++] = 0xff; /* this page */
2667 arr[n++] = 0x0; /* Temperature */
2669 arr[n++] = 0x1; /* Environment reporting */
2671 arr[n++] = 0xff; /* all 0xd subpages */
2673 arr[n++] = 0x0; /* Informational exceptions */
2675 arr[n++] = 0xff; /* all 0x2f subpages */
2678 case 0xd: /* Temperature subpages */
2681 arr[n++] = 0x0; /* Temperature */
2683 arr[n++] = 0x1; /* Environment reporting */
2685 arr[n++] = 0xff; /* these subpages */
2688 case 0x2f: /* Informational exceptions subpages */
2691 arr[n++] = 0x0; /* Informational exceptions */
2693 arr[n++] = 0xff; /* these subpages */
2697 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2698 return check_condition_result;
2700 } else if (subpcode > 0) {
2703 if (pcode == 0xd && subpcode == 1)
2704 arr[3] = resp_env_rep_l_spg(arr + 4);
2706 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2707 return check_condition_result;
2710 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2711 return check_condition_result;
2713 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2714 return fill_from_dev_buffer(scp, arr,
2715 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2718 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2720 return devip->nr_zones != 0;
/*
 * Map an LBA to its sdeb_zone_state.  When zone capacity < zone size every
 * sequential zone is followed by a gap zone, so the index is remapped
 * (2 * zno - nr_conv_zones) and nudged to the gap zone when the LBA falls
 * past the zone's end.
 * NOTE(review): this extract is truncated -- the comment terminator, the
 * zsp++ adjustment into the gap zone and the final return are missing
 * from the view.
 */
2723 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2724 unsigned long long lba)
2726 u32 zno = lba >> devip->zsize_shift;
2727 struct sdeb_zone_state *zsp;
2729 if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
2730 return &devip->zstate[zno];
2733 * If the zone capacity is less than the zone size, adjust for gap
2736 zno = 2 * zno - devip->nr_conv_zones;
2737 WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
2738 zsp = &devip->zstate[zno];
2739 if (lba >= zsp->z_start + zsp->z_size)
2741 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2745 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2747 return zsp->z_type == ZBC_ZTYPE_CNV;
2750 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2752 return zsp->z_type == ZBC_ZTYPE_GAP;
2755 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2757 return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2760 static void zbc_close_zone(struct sdebug_dev_info *devip,
2761 struct sdeb_zone_state *zsp)
2763 enum sdebug_z_cond zc;
2765 if (!zbc_zone_is_seq(zsp))
2769 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2772 if (zc == ZC2_IMPLICIT_OPEN)
2773 devip->nr_imp_open--;
2775 devip->nr_exp_open--;
2777 if (zsp->z_wp == zsp->z_start) {
2778 zsp->z_cond = ZC1_EMPTY;
2780 zsp->z_cond = ZC4_CLOSED;
2785 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2787 struct sdeb_zone_state *zsp = &devip->zstate[0];
2790 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2791 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2792 zbc_close_zone(devip, zsp);
/*
 * Open a sequential zone, explicitly (OPEN ZONE) or implicitly (first
 * write).  Already-open-in-the-requested-mode zones are a no-op; when the
 * max_open limit would be exceeded an implicitly-open zone is closed first.
 * NOTE(review): this extract is truncated -- early 'return's, the zc
 * assignment, the nr_closed-- adjustment and the explicit/implicit branch
 * framing are missing from the view.
 */
2798 static void zbc_open_zone(struct sdebug_dev_info *devip,
2799 struct sdeb_zone_state *zsp, bool explicit)
2801 enum sdebug_z_cond zc;
2803 if (!zbc_zone_is_seq(zsp))
2807 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2808 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2811 /* Close an implicit open zone if necessary */
2812 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2813 zbc_close_zone(devip, zsp);
2814 else if (devip->max_open &&
2815 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2816 zbc_close_imp_open_zone(devip);
2818 if (zsp->z_cond == ZC4_CLOSED)
2821 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2822 devip->nr_exp_open++;
2824 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2825 devip->nr_imp_open++;
/*
 * Mark a zone FULL (write pointer reached the zone end), releasing its
 * open-zone slot if it was implicitly or explicitly open.
 * NOTE(review): this extract is truncated -- 'break' statements, the
 * default label and closing braces are missing from the view.
 */
2829 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2830 struct sdeb_zone_state *zsp)
2832 switch (zsp->z_cond) {
2833 case ZC2_IMPLICIT_OPEN:
2834 devip->nr_imp_open--;
2836 case ZC3_EXPLICIT_OPEN:
2837 devip->nr_exp_open--;
/* reaching FULL from any other condition indicates a state bug */
2840 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2841 zsp->z_start, zsp->z_cond);
2844 zsp->z_cond = ZC5_FULL;
/*
 * Advance a sequential zone's write pointer after a write of num blocks at
 * lba.  SWR zones advance linearly and go FULL at the zone end; SWP
 * (sequential-write-preferred) zones tolerate non-sequential writes and
 * track a non-seq resource, possibly spanning zones.
 * NOTE(review): this extract is truncated -- the z_wp advancement
 * statements, the end/n computations and the loop framing for the SWP
 * case are missing from the view.
 */
2847 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2848 unsigned long long lba, unsigned int num)
2850 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2851 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2853 if (!zbc_zone_is_seq(zsp))
2856 if (zsp->z_type == ZBC_ZTYPE_SWR) {
2858 if (zsp->z_wp >= zend)
2859 zbc_set_zone_full(devip, zsp);
2864 if (lba != zsp->z_wp)
2865 zsp->z_non_seq_resource = true;
2871 } else if (end > zsp->z_wp) {
2877 if (zsp->z_wp >= zend)
2878 zbc_set_zone_full(devip, zsp);
2884 zend = zsp->z_start + zsp->z_size;
/*
 * Enforce ZBC access rules for a read or write spanning [lba, lba+num):
 * reads may not cross zone types on host-managed devices; writes may not
 * touch gap zones, cross out of conventional zones or sequential zone
 * boundaries, write FULL zones, or land off the write pointer in SWR
 * zones.  A permitted write implicitly opens an EMPTY/CLOSED zone and
 * advances the write pointer.  Returns 0 or check_condition_result.
 * NOTE(review): this extract is truncated -- the read/write branch
 * framing, several 'return 0's, sense-code constants and the final
 * zbc_inc_wp call are missing from the view.
 */
2889 static int check_zbc_access_params(struct scsi_cmnd *scp,
2890 unsigned long long lba, unsigned int num, bool write)
2892 struct scsi_device *sdp = scp->device;
2893 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2894 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2895 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
/* host-aware devices place no restrictions on reads */
2898 if (devip->zmodel == BLK_ZONED_HA)
2900 /* For host-managed, reads cannot cross zone types boundaries */
2901 if (zsp->z_type != zsp_end->z_type) {
2902 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2905 return check_condition_result;
2910 /* Writing into a gap zone is not allowed */
2911 if (zbc_zone_is_gap(zsp)) {
2912 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
2913 ATTEMPT_ACCESS_GAP);
2914 return check_condition_result;
2917 /* No restrictions for writes within conventional zones */
2918 if (zbc_zone_is_conv(zsp)) {
2919 if (!zbc_zone_is_conv(zsp_end)) {
2920 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2922 WRITE_BOUNDARY_ASCQ);
2923 return check_condition_result;
2928 if (zsp->z_type == ZBC_ZTYPE_SWR) {
2929 /* Writes cannot cross sequential zone boundaries */
2930 if (zsp_end != zsp) {
2931 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2933 WRITE_BOUNDARY_ASCQ);
2934 return check_condition_result;
2936 /* Cannot write full zones */
2937 if (zsp->z_cond == ZC5_FULL) {
2938 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2939 INVALID_FIELD_IN_CDB, 0);
2940 return check_condition_result;
2942 /* Writes must be aligned to the zone WP */
2943 if (lba != zsp->z_wp) {
2944 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2946 UNALIGNED_WRITE_ASCQ);
2947 return check_condition_result;
2951 /* Handle implicit open of closed and empty zones */
2952 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2953 if (devip->max_open &&
2954 devip->nr_exp_open >= devip->max_open) {
2955 mk_sense_buffer(scp, DATA_PROTECT,
2958 return check_condition_result;
2960 zbc_open_zone(devip, zsp, false);
/*
 * Common validation for media access commands: reject out-of-range LBAs,
 * excessive transfer lengths and writes to a write-protected device, then
 * defer to the ZBC checks for zoned devices.  Returns 0 when the access
 * is allowed, else check_condition_result.
 * NOTE(review): this extract is truncated -- the final 'return 0' and
 * closing brace are missing from the view.
 */
2966 static inline int check_device_access_params
2967 (struct scsi_cmnd *scp, unsigned long long lba,
2968 unsigned int num, bool write)
2970 struct scsi_device *sdp = scp->device;
2971 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2973 if (lba + num > sdebug_capacity) {
2974 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2975 return check_condition_result;
2977 /* transfer length excessive (tie in to block limits VPD page) */
2978 if (num > sdebug_store_sectors) {
2979 /* needs work to find which cdb byte 'num' comes from */
2980 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2981 return check_condition_result;
2983 if (write && unlikely(sdebug_wp)) {
2984 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2985 return check_condition_result;
2987 if (sdebug_dev_is_zoned(devip))
2988 return check_zbc_access_params(scp, lba, num, write);
2994 * Note: if BUG_ON() fires it usually indicates a problem with the parser
2995 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2996 * that access any of the "stores" in struct sdeb_store_info should call this
2997 * function with bug_if_fake_rw set to true.
2999 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3000 bool bug_if_fake_rw)
3002 if (sdebug_fake_rw) {
3003 BUG_ON(bug_if_fake_rw); /* See note above */
3006 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3009 /* Returns number of bytes copied or -1 if error. */
/*
 * Copy num blocks starting at lba between the command's scatter-gather
 * list and the backing store (direction per do_write), wrapping at the
 * end of the store.  Returns the number of bytes copied or -1 on a
 * direction mismatch.
 * NOTE(review): this extract is truncated -- 'ret' and 'fsp' declarations,
 * the do_write branch framing, an early-exit return and the final return
 * are missing from the view.
 */
3010 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3011 u32 sg_skip, u64 lba, u32 num, bool do_write)
3014 u64 block, rest = 0;
3015 enum dma_data_direction dir;
3016 struct scsi_data_buffer *sdb = &scp->sdb;
3020 dir = DMA_TO_DEVICE;
3021 write_since_sync = true;
3023 dir = DMA_FROM_DEVICE;
3026 if (!sdb->length || !sip)
3028 if (scp->sc_data_direction != dir)
/* do_div returns the remainder and truncates lba in place */
3032 block = do_div(lba, sdebug_store_sectors);
3033 if (block + num > sdebug_store_sectors)
3034 rest = block + num - sdebug_store_sectors;
3036 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3037 fsp + (block * sdebug_sector_size),
3038 (num - rest) * sdebug_sector_size, sg_skip, do_write);
3039 if (ret != (num - rest) * sdebug_sector_size)
/* wrapped tail: copy the remainder from the start of the store */
3043 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3044 fsp, rest * sdebug_sector_size,
3045 sg_skip + ((num - rest) * sdebug_sector_size),
3052 /* Returns number of bytes copied or -1 if error. */
3053 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3055 struct scsi_data_buffer *sdb = &scp->sdb;
3059 if (scp->sc_data_direction != DMA_TO_DEVICE)
3061 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3062 num * sdebug_sector_size, 0, true);
3065 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3066 * arr into sip->storep+lba and return true. If comparison fails then
/*
 * COMPARE AND WRITE worker: compare the first num blocks of arr against
 * the store at lba (wrapping at the store end); on match, and unless
 * compare_only, write the second num blocks of arr to the same location.
 * Returns true when the comparison matched.
 * NOTE(review): this extract is truncated -- the 'res' declaration,
 * early-return on mismatch/compare_only and the final 'return true'
 * are missing from the view.
 */
3068 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3069 const u8 *arr, bool compare_only)
3072 u64 block, rest = 0;
3073 u32 store_blks = sdebug_store_sectors;
3074 u32 lb_size = sdebug_sector_size;
3075 u8 *fsp = sip->storep;
3077 block = do_div(lba, store_blks);
3078 if (block + num > store_blks)
3079 rest = block + num - store_blks;
3081 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3085 res = memcmp(fsp, arr + ((num - rest) * lb_size),
/* comparison matched: write the top half of arr over the store */
3091 arr += num * lb_size;
3092 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3094 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3098 static __be16 dif_compute_csum(const void *buf, int len)
3103 csum = (__force __be16)ip_compute_csum(buf, len);
3105 csum = cpu_to_be16(crc_t10dif(buf, len));
/*
 * Verify one sector's protection information tuple: guard tag against the
 * recomputed checksum, and the reference tag against the sector number
 * (Type 1) or expected initial LBA (Type 2).  Returns 0x01/0x03 sense
 * qualifiers on failure per the missing return statements.
 * NOTE(review): this extract is truncated -- the error-path 'return'
 * values, a pr_err argument line and the final 'return 0' are missing
 * from the view.
 */
3110 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3111 sector_t sector, u32 ei_lba)
3113 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3115 if (sdt->guard_tag != csum) {
3116 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3117 (unsigned long)sector,
3118 be16_to_cpu(sdt->guard_tag),
3122 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3123 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3124 pr_err("REF check failed on sector %lu\n",
3125 (unsigned long)sector);
3128 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3129 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3130 pr_err("REF check failed on sector %lu\n",
3131 (unsigned long)sector);
/*
 * Copy protection-information tuples between the command's protection
 * scatter-gather list and the PI store for 'sectors' sectors starting at
 * 'sector', wrapping at the end of the store.  'read' selects the copy
 * direction (store -> sgl for reads).
 * NOTE(review): this extract is truncated -- declarations (paddr, resid,
 * rest), the paddr assignment, the read/write branch framing and loop
 * bookkeeping are missing from the view.
 */
3137 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3138 unsigned int sectors, bool read)
3142 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3143 scp->device->hostdata, true);
3144 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3145 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3146 struct sg_mapping_iter miter;
3148 /* Bytes of protection data to copy into sgl */
3149 resid = sectors * sizeof(*dif_storep);
3151 sg_miter_start(&miter, scsi_prot_sglist(scp),
3152 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3153 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3155 while (sg_miter_next(&miter) && resid > 0) {
3156 size_t len = min_t(size_t, miter.length, resid);
3157 void *start = dif_store(sip, sector);
/* 'rest' is the tail that wraps past the end of the PI store */
3160 if (dif_store_end < start + len)
3161 rest = start + len - dif_store_end;
3166 memcpy(paddr, start, len - rest);
3168 memcpy(start, paddr, len - rest);
3172 memcpy(paddr + len - rest, dif_storep, rest);
3174 memcpy(dif_storep, paddr + len - rest, rest);
3177 sector += len / sizeof(*dif_storep);
3180 sg_miter_stop(&miter);
/*
 * prot_verify_read() - verify PI for a read and copy it to the caller.
 * Walks each sector in [start_sec, start_sec + sectors), skipping tuples
 * whose application tag is 0xffff (the "don't check" escape), verifying
 * the rest with dif_verify() when the cdb's RDPROTECT field is non-zero,
 * and finally copies the protection data into the command's prot sgl.
 */
3183 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3184 unsigned int sectors, u32 ei_lba)
3189 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3190 scp->device->hostdata, true);
3191 struct t10_pi_tuple *sdt;
3193 for (i = 0; i < sectors; i++, ei_lba++) {
3194 sector = start_sec + i;
3195 sdt = dif_store(sip, sector);
/* app tag 0xffff disables checking for this sector */
3197 if (sdt->app_tag == cpu_to_be16(0xffff))
3201 * Because scsi_debug acts as both initiator and
3202 * target we proceed to verify the PI even if
3203 * RDPROTECT=3. This is done so the "initiator" knows
3204 * which type of error to return. Otherwise we would
3205 * have to iterate over the PI twice.
3207 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3208 ret = dif_verify(sdt, lba2fake_store(sip, sector),
/* hand the (verified) PI back to the initiator side */
3217 dif_copy_prot(scp, start_sec, sectors, true);
/*
 * sdeb_read_lock() - take the store's reader lock.
 * With sdebug_no_rwlock the lock is skipped and only sparse annotations
 * (__acquire) are emitted; otherwise a real read_lock is taken, either on
 * the per-store lock or on the global fake-rw lock (the selector between
 * the two, presumably sip != NULL, is on an elided line -- TODO confirm).
 */
3224 sdeb_read_lock(struct sdeb_store_info *sip)
3226 if (sdebug_no_rwlock) {
3228 __acquire(&sip->macc_lck);
3230 __acquire(&sdeb_fake_rw_lck);
3233 read_lock(&sip->macc_lck);
3235 read_lock(&sdeb_fake_rw_lck);
/*
 * sdeb_read_unlock() - release what sdeb_read_lock() took: sparse-only
 * __release annotations in no-rwlock mode, else the matching read_unlock
 * on the per-store or global fake-rw lock.
 */
3240 sdeb_read_unlock(struct sdeb_store_info *sip)
3242 if (sdebug_no_rwlock) {
3244 __release(&sip->macc_lck);
3246 __release(&sdeb_fake_rw_lck);
3249 read_unlock(&sip->macc_lck);
3251 read_unlock(&sdeb_fake_rw_lck);
/*
 * sdeb_write_lock() - writer-side counterpart of sdeb_read_lock():
 * annotation-only in no-rwlock mode, else write_lock on the per-store
 * lock or the global fake-rw lock.
 */
3256 sdeb_write_lock(struct sdeb_store_info *sip)
3258 if (sdebug_no_rwlock) {
3260 __acquire(&sip->macc_lck);
3262 __acquire(&sdeb_fake_rw_lck);
3265 write_lock(&sip->macc_lck);
3267 write_lock(&sdeb_fake_rw_lck);
/*
 * sdeb_write_unlock() - release what sdeb_write_lock() took, mirroring
 * its three cases (annotation-only, per-store lock, global fake-rw lock).
 */
3272 sdeb_write_unlock(struct sdeb_store_info *sip)
3274 if (sdebug_no_rwlock) {
3276 __release(&sip->macc_lck);
3278 __release(&sdeb_fake_rw_lck);
3281 write_unlock(&sip->macc_lck);
3283 write_unlock(&sdeb_fake_rw_lck);
/*
 * resp_read_dt0() - respond to READ(6/10/12/16/32) and XDWRITEREAD(10).
 * Decodes LBA/transfer-length per cdb variant, applies DIF/DIX sanity
 * checks and optional fault injection (short transfer, medium error,
 * recovered/DIF/DIX errors), verifies protection info when present, then
 * performs the actual data movement via do_device_access() under the
 * store's read lock.  The switch labels for the cdb variants are on
 * elided lines; each decode pair below corresponds to one opcode.
 */
3287 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3294 struct sdeb_store_info *sip = devip2sip(devip, true);
3295 u8 *cmd = scp->cmnd;
/* READ(16): 8-byte LBA at cdb[2], 4-byte length at cdb[10] */
3300 lba = get_unaligned_be64(cmd + 2);
3301 num = get_unaligned_be32(cmd + 10);
/* READ(10): 4-byte LBA, 2-byte length */
3306 lba = get_unaligned_be32(cmd + 2);
3307 num = get_unaligned_be16(cmd + 7);
/* READ(6): 21-bit LBA packed into cdb[1..3]; length 0 means 256 */
3312 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3313 (u32)(cmd[1] & 0x1f) << 16;
3314 num = (0 == cmd[4]) ? 256 : cmd[4];
/* READ(12): 4-byte LBA, 4-byte length */
3319 lba = get_unaligned_be32(cmd + 2);
3320 num = get_unaligned_be32(cmd + 6);
3323 case XDWRITEREAD_10:
3325 lba = get_unaligned_be32(cmd + 2);
3326 num = get_unaligned_be16(cmd + 7);
3329 default: /* assume READ(32) */
3330 lba = get_unaligned_be64(cmd + 12);
3331 ei_lba = get_unaligned_be32(cmd + 20);
3332 num = get_unaligned_be32(cmd + 28);
/* DIF formatted device: type 2 only allows protection via READ(32) */
3336 if (unlikely(have_dif_prot && check_prot)) {
3337 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3339 mk_sense_invalid_opcode(scp);
3340 return check_condition_result;
3342 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3343 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3344 (cmd[1] & 0xe0) == 0)
3345 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
/* one-shot injected short transfer */
3348 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3349 atomic_read(&sdeb_inject_pending))) {
3351 atomic_set(&sdeb_inject_pending, 0);
3354 ret = check_device_access_params(scp, lba, num, false);
/* simulated unrecoverable medium error over a configured LBA window */
3357 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3358 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3359 ((lba + num) > sdebug_medium_error_start))) {
3360 /* claim unrecoverable read error */
3361 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0)
3362 /* set info field and valid bit for fixed descriptor */
3363 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3364 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3365 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3366 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3367 put_unaligned_be32(ret, scp->sense_buffer + 3);
3369 scsi_set_resid(scp, scsi_bufflen(scp));
3370 return check_condition_result;
3373 sdeb_read_lock(sip);
/* DIX: verify protection information before delivering data */
3376 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3377 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3378 case 1: /* Guard tag error */
3379 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3380 sdeb_read_unlock(sip);
3381 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3382 return check_condition_result;
3383 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3384 sdeb_read_unlock(sip);
3385 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3386 return illegal_condition_result;
3389 case 3: /* Reference tag error */
3390 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3391 sdeb_read_unlock(sip);
3392 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3393 return check_condition_result;
3394 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3395 sdeb_read_unlock(sip);
3396 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3397 return illegal_condition_result;
/* the actual read from the fake store into the data sgl */
3403 ret = do_device_access(sip, scp, 0, lba, num, false);
3404 sdeb_read_unlock(sip);
3405 if (unlikely(ret == -1))
3406 return DID_ERROR << 16;
3408 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
/* one-shot injected recovered / DIF / DIX error after a good read */
3410 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3411 atomic_read(&sdeb_inject_pending))) {
3412 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3413 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3414 atomic_set(&sdeb_inject_pending, 0);
3415 return check_condition_result;
3416 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3417 /* Logical block guard check failed */
3418 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3419 atomic_set(&sdeb_inject_pending, 0);
3420 return illegal_condition_result;
3421 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3422 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3423 atomic_set(&sdeb_inject_pending, 0);
3424 return illegal_condition_result;
/*
 * prot_verify_write() - verify incoming PI for a write, then store it.
 * Walks the protection and data scatter-gather lists in lock-step (one
 * 8-byte tuple per sdebug_sector_size data block), verifying each tuple
 * with dif_verify() unless the cdb's WRPROTECT field is 3, and finally
 * commits the protection data to dif_storep via dif_copy_prot().
 */
3430 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3431 unsigned int sectors, u32 ei_lba)
3434 struct t10_pi_tuple *sdt;
3436 sector_t sector = start_sec;
3439 struct sg_mapping_iter diter;
3440 struct sg_mapping_iter piter;
3442 BUG_ON(scsi_sg_count(SCpnt) == 0);
3443 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3445 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3446 scsi_prot_sg_count(SCpnt),
3447 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3448 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3449 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3451 /* For each protection page */
3452 while (sg_miter_next(&piter)) {
/* a protection page must always have matching data pages */
3454 if (WARN_ON(!sg_miter_next(&diter))) {
3459 for (ppage_offset = 0; ppage_offset < piter.length;
3460 ppage_offset += sizeof(struct t10_pi_tuple)) {
3461 /* If we're at the end of the current
3462 * data page advance to the next one
3464 if (dpage_offset >= diter.length) {
3465 if (WARN_ON(!sg_miter_next(&diter))) {
3472 sdt = piter.addr + ppage_offset;
3473 daddr = diter.addr + dpage_offset;
3475 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3476 ret = dif_verify(sdt, daddr, sector, ei_lba);
3483 dpage_offset += sdebug_sector_size;
3485 diter.consumed = dpage_offset;
3486 sg_miter_stop(&diter);
3488 sg_miter_stop(&piter);
/* all tuples verified: persist PI into the store (write direction) */
3490 dif_copy_prot(SCpnt, start_sec, sectors, false);
/* error path: stop both iterators before returning (label elided) */
3497 sg_miter_stop(&diter);
3498 sg_miter_stop(&piter);
/*
 * lba_to_map_index() - translate an LBA to its provisioning-bitmap index.
 * Compensates for the unmap alignment offset before dividing by the
 * unmap granularity (the return of the quotient is on an elided line).
 */
3502 static unsigned long lba_to_map_index(sector_t lba)
3504 if (sdebug_unmap_alignment)
3505 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3506 sector_div(lba, sdebug_unmap_granularity);
/*
 * map_index_to_lba() - inverse of lba_to_map_index(): first LBA covered
 * by the given provisioning-bitmap index, undoing the alignment offset.
 */
3510 static sector_t map_index_to_lba(unsigned long index)
3512 sector_t lba = index * sdebug_unmap_granularity;
3514 if (sdebug_unmap_alignment)
3515 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
/*
 * map_state() - report whether 'lba' is mapped and the run length.
 * Looks up the provisioning bitmap bit for 'lba', then scans for the
 * next bit of opposite state so the caller (GET LBA STATUS) learns how
 * many consecutive blocks share the same mapped/unmapped state.
 */
3519 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3523 unsigned int mapped;
3524 unsigned long index;
3527 index = lba_to_map_index(lba);
3528 mapped = test_bit(index, sip->map_storep);
/* find where the current run of same-state granules ends */
3531 next = find_next_zero_bit(sip->map_storep, map_size, index);
3533 next = find_next_bit(sip->map_storep, map_size, index);
/* clamp the run to the store capacity */
3535 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
/*
 * map_region() - mark [lba, lba + len) as mapped (provisioned) by
 * setting every provisioning-bitmap bit the range touches.  The loop
 * construct enclosing these lines is elided; each iteration advances
 * lba to the first LBA of the next granule.
 */
3540 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3543 sector_t end = lba + len;
3546 unsigned long index = lba_to_map_index(lba);
3548 if (index < map_size)
3549 set_bit(index, sip->map_storep);
3551 lba = map_index_to_lba(index + 1);
/*
 * unmap_region() - mark [lba, lba + len) as unmapped.
 * Only granules completely covered by the range are cleared (partial
 * granules stay mapped).  When a granule is deallocated, its data is
 * scrubbed to 0x00 or 0xff according to sdebug_lbprz, and its PI tuples
 * (if any) are set to the 0xff "don't check" escape pattern.
 */
3555 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3558 sector_t end = lba + len;
3559 u8 *fsp = sip->storep;
3562 unsigned long index = lba_to_map_index(lba);
/* clear only if lba is granule-aligned and the whole granule fits */
3564 if (lba == map_index_to_lba(index) &&
3565 lba + sdebug_unmap_granularity <= end &&
3567 clear_bit(index, sip->map_storep);
3568 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3569 memset(fsp + lba * sdebug_sector_size,
3570 (sdebug_lbprz & 1) ? 0 : 0xff,
3571 sdebug_sector_size *
3572 sdebug_unmap_granularity);
/* invalidate protection info for the deallocated granule */
3574 if (sip->dif_storep) {
3575 memset(sip->dif_storep + lba, 0xff,
3576 sizeof(*sip->dif_storep) *
3577 sdebug_unmap_granularity);
3580 lba = map_index_to_lba(index + 1);
/*
 * resp_write_dt0() - respond to WRITE(6/10/12/16/32) and XDWRITEREAD(10).
 * Mirrors resp_read_dt0(): decodes LBA/length per cdb variant, applies
 * DIF sanity checks, verifies incoming protection info when present,
 * writes the data under the store's write lock, updates the provisioning
 * map and any ZBC write pointer, then applies optional one-shot error
 * injection.  Switch labels for the cdb variants are on elided lines.
 */
3584 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3591 struct sdeb_store_info *sip = devip2sip(devip, true);
3592 u8 *cmd = scp->cmnd;
/* WRITE(16) */
3597 lba = get_unaligned_be64(cmd + 2);
3598 num = get_unaligned_be32(cmd + 10);
/* WRITE(10) */
3603 lba = get_unaligned_be32(cmd + 2);
3604 num = get_unaligned_be16(cmd + 7);
/* WRITE(6): 21-bit LBA; length 0 means 256 */
3609 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3610 (u32)(cmd[1] & 0x1f) << 16;
3611 num = (0 == cmd[4]) ? 256 : cmd[4];
/* WRITE(12) */
3616 lba = get_unaligned_be32(cmd + 2);
3617 num = get_unaligned_be32(cmd + 6);
3620 case 0x53: /* XDWRITEREAD(10) */
3622 lba = get_unaligned_be32(cmd + 2);
3623 num = get_unaligned_be16(cmd + 7);
3626 default: /* assume WRITE(32) */
3627 lba = get_unaligned_be64(cmd + 12);
3628 ei_lba = get_unaligned_be32(cmd + 20);
3629 num = get_unaligned_be32(cmd + 28);
/* type 2 DIF only allows protection via WRITE(32) */
3633 if (unlikely(have_dif_prot && check_prot)) {
3634 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3636 mk_sense_invalid_opcode(scp);
3637 return check_condition_result;
3639 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3640 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3641 (cmd[1] & 0xe0) == 0)
3642 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3646 sdeb_write_lock(sip);
3647 ret = check_device_access_params(scp, lba, num, true);
3649 sdeb_write_unlock(sip);
/* DIX: verify the PI that arrived with the data */
3654 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3655 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3656 case 1: /* Guard tag error */
3657 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3658 sdeb_write_unlock(sip);
3659 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3660 return illegal_condition_result;
3661 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3662 sdeb_write_unlock(sip);
3663 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3664 return check_condition_result;
3667 case 3: /* Reference tag error */
3668 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3669 sdeb_write_unlock(sip);
3670 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3671 return illegal_condition_result;
3672 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3673 sdeb_write_unlock(sip);
3674 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3675 return check_condition_result;
/* the actual write from the data sgl into the fake store */
3681 ret = do_device_access(sip, scp, 0, lba, num, true);
3682 if (unlikely(scsi_debug_lbp()))
3683 map_region(sip, lba, num);
3684 /* If ZBC zone then bump its write pointer */
3685 if (sdebug_dev_is_zoned(devip))
3686 zbc_inc_wp(devip, lba, num);
3687 sdeb_write_unlock(sip);
3688 if (unlikely(-1 == ret))
3689 return DID_ERROR << 16;
3690 else if (unlikely(sdebug_verbose &&
3691 (ret < (num * sdebug_sector_size))))
3692 sdev_printk(KERN_INFO, scp->device,
3693 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3694 my_name, num * sdebug_sector_size, ret);
/* one-shot injected recovered / DIF / DIX error after a good write */
3696 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3697 atomic_read(&sdeb_inject_pending))) {
3698 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3699 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3700 atomic_set(&sdeb_inject_pending, 0);
3701 return check_condition_result;
3702 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3703 /* Logical block guard check failed */
3704 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3705 atomic_set(&sdeb_inject_pending, 0);
3706 return illegal_condition_result;
3707 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3708 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3709 atomic_set(&sdeb_inject_pending, 0);
3710 return illegal_condition_result;
/*
 * resp_write_scat() - respond to WRITE SCATTERED(16) and (32).
 * Fetches a parameter list whose first lbdof blocks hold a header plus
 * num_lrd 32-byte LBA-range descriptors, then writes each described
 * range from the data that follows.  All descriptor writes happen under
 * one write-lock hold; any failure mid-list unwinds via err_out_unlock.
 */
3717 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3718 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3720 static int resp_write_scat(struct scsi_cmnd *scp,
3721 struct sdebug_dev_info *devip)
3723 u8 *cmd = scp->cmnd;
3726 struct sdeb_store_info *sip = devip2sip(devip, true);
3728 u16 lbdof, num_lrd, k;
3729 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3730 u32 lb_size = sdebug_sector_size;
3735 static const u32 lrd_size = 32; /* + parameter list header size */
/* 32-byte cdb variant: fields live at different offsets than the 16 */
3737 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3739 wrprotect = (cmd[10] >> 5) & 0x7;
3740 lbdof = get_unaligned_be16(cmd + 12);
3741 num_lrd = get_unaligned_be16(cmd + 16);
3742 bt_len = get_unaligned_be32(cmd + 28);
3743 } else { /* that leaves WRITE SCATTERED(16) */
3745 wrprotect = (cmd[2] >> 5) & 0x7;
3746 lbdof = get_unaligned_be16(cmd + 4);
3747 num_lrd = get_unaligned_be16(cmd + 8);
3748 bt_len = get_unaligned_be32(cmd + 10);
3749 if (unlikely(have_dif_prot)) {
3750 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3752 mk_sense_invalid_opcode(scp);
3753 return illegal_condition_result;
3755 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3756 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3758 sdev_printk(KERN_ERR, scp->device,
3759 "Unprotected WR to DIF device\n");
3762 if ((num_lrd == 0) || (bt_len == 0))
3763 return 0; /* T10 says these do-nothings are not errors */
/* the descriptor header must occupy at least one logical block */
3766 sdev_printk(KERN_INFO, scp->device,
3767 "%s: %s: LB Data Offset field bad\n",
3769 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3770 return illegal_condition_result;
3772 lbdof_blen = lbdof * lb_size;
3773 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3775 sdev_printk(KERN_INFO, scp->device,
3776 "%s: %s: LBA range descriptors don't fit\n",
3778 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3779 return illegal_condition_result;
/* staging buffer for the header + descriptor list */
3781 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3783 return SCSI_MLQUEUE_HOST_BUSY;
3785 sdev_printk(KERN_INFO, scp->device,
3786 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3787 my_name, __func__, lbdof_blen);
3788 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3790 ret = DID_ERROR << 16;
3794 sdeb_write_lock(sip);
3795 sg_off = lbdof_blen;
3796 /* Spec says Buffer xfer Length field in number of LBs in dout */
/* iterate the LBA range descriptors; first one follows the header */
3798 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3799 lba = get_unaligned_be64(up + 0);
3800 num = get_unaligned_be32(up + 8);
3802 sdev_printk(KERN_INFO, scp->device,
3803 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3804 my_name, __func__, k, lba, num, sg_off);
3807 ret = check_device_access_params(scp, lba, num, true);
3809 goto err_out_unlock;
3810 num_by = num * lb_size;
3811 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
/* descriptors must not claim more blocks than the buffer holds */
3813 if ((cum_lb + num) > bt_len) {
3815 sdev_printk(KERN_INFO, scp->device,
3816 "%s: %s: sum of blocks > data provided\n",
3818 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3820 ret = illegal_condition_result;
3821 goto err_out_unlock;
3825 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3826 int prot_ret = prot_verify_write(scp, lba, num,
3830 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3832 ret = illegal_condition_result;
3833 goto err_out_unlock;
/* write this range, data taken from sg_off into the dout buffer */
3837 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3838 /* If ZBC zone then bump its write pointer */
3839 if (sdebug_dev_is_zoned(devip))
3840 zbc_inc_wp(devip, lba, num);
3841 if (unlikely(scsi_debug_lbp()))
3842 map_region(sip, lba, num);
3843 if (unlikely(-1 == ret)) {
3844 ret = DID_ERROR << 16;
3845 goto err_out_unlock;
3846 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3847 sdev_printk(KERN_INFO, scp->device,
3848 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3849 my_name, num_by, ret);
/* one-shot injected recovered / DIF / DIX error per descriptor */
3851 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3852 atomic_read(&sdeb_inject_pending))) {
3853 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3854 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3855 atomic_set(&sdeb_inject_pending, 0);
3856 ret = check_condition_result;
3857 goto err_out_unlock;
3858 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3859 /* Logical block guard check failed */
3860 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3861 atomic_set(&sdeb_inject_pending, 0);
3862 ret = illegal_condition_result;
3863 goto err_out_unlock;
3864 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3865 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3866 atomic_set(&sdeb_inject_pending, 0);
3867 ret = illegal_condition_result;
3868 goto err_out_unlock;
3876 sdeb_write_unlock(sip);
/*
 * resp_write_same() - common worker for WRITE SAME(10) and (16).
 * With 'unmap' (and LBP enabled) the range is deallocated instead of
 * written.  Otherwise the first logical block is either zero-filled
 * ('ndob' - no data-out buffer) or fetched from the initiator, then
 * replicated across the remaining num-1 blocks of the (circular) store.
 */
3882 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3883 u32 ei_lba, bool unmap, bool ndob)
3885 struct scsi_device *sdp = scp->device;
3886 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3887 unsigned long long i;
3889 u32 lb_size = sdebug_sector_size;
3891 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3892 scp->device->hostdata, true);
3896 sdeb_write_lock(sip);
3898 ret = check_device_access_params(scp, lba, num, true);
3900 sdeb_write_unlock(sip);
/* UNMAP bit set and logical block provisioning active: deallocate */
3904 if (unmap && scsi_debug_lbp()) {
3905 unmap_region(sip, lba, num);
/* locate the first block within the circular store */
3909 block = do_div(lbaa, sdebug_store_sectors);
3910 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3912 fs1p = fsp + (block * lb_size);
3914 memset(fs1p, 0, lb_size);
3917 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3920 sdeb_write_unlock(sip);
3921 return DID_ERROR << 16;
3922 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3923 sdev_printk(KERN_INFO, scp->device,
3924 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3925 my_name, "write same", lb_size, ret);
3927 /* Copy first sector to remaining blocks */
3928 for (i = 1 ; i < num ; i++) {
3930 block = do_div(lbaa, sdebug_store_sectors);
3931 memmove(fsp + (block * lb_size), fs1p, lb_size);
3933 if (scsi_debug_lbp())
3934 map_region(sip, lba, num);
3935 /* If ZBC zone then bump its write pointer */
3936 if (sdebug_dev_is_zoned(devip))
3937 zbc_inc_wp(devip, lba, num);
3939 sdeb_write_unlock(sip);
/*
 * resp_write_same_10() - decode WRITE SAME(10) and delegate to
 * resp_write_same().  Rejects the UNMAP bit when lbpws10 is disabled and
 * enforces the configured maximum write-same length.  NDOB is not
 * available in the 10-byte cdb, hence the hard-coded 'false'.
 */
3944 static int resp_write_same_10(struct scsi_cmnd *scp,
3945 struct sdebug_dev_info *devip)
3947 u8 *cmd = scp->cmnd;
/* UNMAP requested but WRITE SAME(10) unmap support is off */
3954 if (sdebug_lbpws10 == 0) {
3955 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3956 return check_condition_result;
3960 lba = get_unaligned_be32(cmd + 2);
3961 num = get_unaligned_be16(cmd + 7);
3962 if (num > sdebug_write_same_length) {
3963 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3964 return check_condition_result;
3966 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
/*
 * resp_write_same_16() - decode WRITE SAME(16) and delegate to
 * resp_write_same().  Honors both the UNMAP bit (gated on lbpws) and the
 * NDOB bit (no data-out buffer, meaning write zeroes), and enforces the
 * configured maximum write-same length.
 */
3969 static int resp_write_same_16(struct scsi_cmnd *scp,
3970 struct sdebug_dev_info *devip)
3972 u8 *cmd = scp->cmnd;
3979 if (cmd[1] & 0x8) { /* UNMAP */
3980 if (sdebug_lbpws == 0) {
3981 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3982 return check_condition_result;
3986 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3988 lba = get_unaligned_be64(cmd + 2);
3989 num = get_unaligned_be32(cmd + 10);
3990 if (num > sdebug_write_same_length) {
3991 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3992 return check_condition_result;
3994 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
/*
 * resp_write_buffer() - respond to WRITE BUFFER.
 * No microcode is actually stored; the modes that would activate new
 * microcode instead raise the corresponding unit attentions -- on this
 * LU only (modes 4/5) or on every LU sharing this target (modes 6/7).
 * All other mode values are silently accepted.
 */
3997 /* Note the mode field is in the same position as the (lower) service action
3998 * field. For the Report supported operation codes command, SPC-4 suggests
3999 * each mode of this command should be reported separately; for future. */
4000 static int resp_write_buffer(struct scsi_cmnd *scp,
4001 struct sdebug_dev_info *devip)
4003 u8 *cmd = scp->cmnd;
4004 struct scsi_device *sdp = scp->device;
4005 struct sdebug_dev_info *dp;
4008 mode = cmd[1] & 0x1f;
4010 case 0x4: /* download microcode (MC) and activate (ACT) */
4011 /* set UAs on this device only */
4012 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4013 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4015 case 0x5: /* download MC, save and ACT */
4016 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4018 case 0x6: /* download MC with offsets and ACT */
4019 /* set UAs on most devices (LUs) in this target */
4020 list_for_each_entry(dp,
4021 &devip->sdbg_host->dev_info_list,
4023 if (dp->target == sdp->id) {
4024 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4026 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4030 case 0x7: /* download MC with offsets, save, and ACT */
4031 /* set UA on all devices (LUs) in this target */
4032 list_for_each_entry(dp,
4033 &devip->sdbg_host->dev_info_list,
4035 if (dp->target == sdp->id)
4036 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4040 /* do nothing for this command for other mode values */
/*
 * resp_comp_write() - respond to COMPARE AND WRITE(16).
 * Fetches 2 * num blocks from the initiator (compare half followed by
 * write half -- dnum, declared on an elided line, presumably equals
 * 2 * num), then uses comp_write_worker() to compare the first half
 * against the store and, on match, commit the second half.  A miscompare
 * raises MISCOMPARE sense.
 */
4046 static int resp_comp_write(struct scsi_cmnd *scp,
4047 struct sdebug_dev_info *devip)
4049 u8 *cmd = scp->cmnd;
4051 struct sdeb_store_info *sip = devip2sip(devip, true);
4054 u32 lb_size = sdebug_sector_size;
4059 lba = get_unaligned_be64(cmd + 2);
4060 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
4062 return 0; /* degenerate case, not an error */
/* type 2 DIF devices do not support this opcode's protection usage */
4063 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4065 mk_sense_invalid_opcode(scp);
4066 return check_condition_result;
4068 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4069 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4070 (cmd[1] & 0xe0) == 0)
4071 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4073 ret = check_device_access_params(scp, lba, num, false);
/* staging buffer for compare data + write data */
4077 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4079 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4081 return check_condition_result;
4084 sdeb_write_lock(sip);
4086 ret = do_dout_fetch(scp, dnum, arr);
4088 retval = DID_ERROR << 16;
4090 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
4091 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4092 "indicated=%u, IO sent=%d bytes\n", my_name,
4093 dnum * lb_size, ret);
/* compare-then-write; false => do perform the write on match */
4094 if (!comp_write_worker(sip, lba, num, arr, false)) {
4095 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4096 retval = check_condition_result;
4099 if (scsi_debug_lbp())
4100 map_region(sip, lba, num);
4102 sdeb_write_unlock(sip);
/* One 16-byte UNMAP block descriptor as received in the parameter list
 * (members -- 8-byte lba, 4-byte blocks, reserved -- are on elided
 * lines; resp_unmap() below reads .lba and .blocks). */
4107 struct unmap_block_desc {
/*
 * resp_unmap() - respond to UNMAP.
 * Copies the parameter list (header + 16-byte block descriptors) into a
 * temporary buffer, validates the advertised lengths, then deallocates
 * each descriptor's range via unmap_region() under the write lock.  When
 * logical block provisioning is disabled the command is a silent no-op.
 */
4113 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4116 struct unmap_block_desc *desc;
4117 struct sdeb_store_info *sip = devip2sip(devip, true);
4118 unsigned int i, payload_len, descriptors;
4121 if (!scsi_debug_lbp())
4122 return 0; /* fib and say its done */
4123 payload_len = get_unaligned_be16(scp->cmnd + 7);
4124 BUG_ON(scsi_bufflen(scp) != payload_len);
/* 8-byte parameter list header precedes the 16-byte descriptors */
4126 descriptors = (payload_len - 8) / 16;
4127 if (descriptors > sdebug_unmap_max_desc) {
4128 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4129 return check_condition_result;
4132 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4134 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4136 return check_condition_result;
4139 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
/* cross-check the lengths embedded in the parameter list header */
4141 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4142 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4144 desc = (void *)&buf[8];
4146 sdeb_write_lock(sip);
4148 for (i = 0 ; i < descriptors ; i++) {
4149 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4150 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4152 ret = check_device_access_params(scp, lba, num, true);
4156 unmap_region(sip, lba, num);
4162 sdeb_write_unlock(sip);
/*
 * resp_get_lba_status() - respond to GET LBA STATUS(16).
 * Builds a single LBA-status descriptor: with LBP enabled it reports the
 * mapped/deallocated state and run length from the provisioning bitmap;
 * without LBP everything up to the capacity limit is reported as mapped.
 */
4168 #define SDEBUG_GET_LBA_STATUS_LEN 32
4170 static int resp_get_lba_status(struct scsi_cmnd *scp,
4171 struct sdebug_dev_info *devip)
4173 u8 *cmd = scp->cmnd;
4175 u32 alloc_len, mapped, num;
4177 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4179 lba = get_unaligned_be64(cmd + 2);
4180 alloc_len = get_unaligned_be32(cmd + 10);
4185 ret = check_device_access_params(scp, lba, 1, false);
4189 if (scsi_debug_lbp()) {
4190 struct sdeb_store_info *sip = devip2sip(devip, true);
4192 mapped = map_state(sip, lba, &num);
4195 /* following just in case virtual_gb changed */
4196 sdebug_capacity = get_sdebug_capacity();
/* no LBP: run extends to capacity (clamped to 32 bits) */
4197 if (sdebug_capacity - lba <= 0xffffffff)
4198 num = sdebug_capacity - lba;
4203 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4204 put_unaligned_be32(20, arr); /* Parameter Data Length */
4205 put_unaligned_be64(lba, arr + 8); /* LBA */
4206 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4207 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4209 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
/*
 * resp_sync_cache() - respond to SYNCHRONIZE CACHE(10/16).
 * No cache exists, so this only range-checks the request.  The response
 * is flagged immediate (SDEG_RES_IMMED_MASK) when the IMMED bit is set
 * or when nothing has been written since the last sync; otherwise the
 * write_since_sync flag is cleared and the normal (delayed) path runs.
 */
4212 static int resp_sync_cache(struct scsi_cmnd *scp,
4213 struct sdebug_dev_info *devip)
4218 u8 *cmd = scp->cmnd;
4220 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4221 lba = get_unaligned_be32(cmd + 2);
4222 num_blocks = get_unaligned_be16(cmd + 7);
4223 } else { /* SYNCHRONIZE_CACHE(16) */
4224 lba = get_unaligned_be64(cmd + 2);
4225 num_blocks = get_unaligned_be32(cmd + 10);
4227 if (lba + num_blocks > sdebug_capacity) {
4228 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4229 return check_condition_result;
4231 if (!write_since_sync || (cmd[1] & 0x2))
4232 res = SDEG_RES_IMMED_MASK;
4233 else /* delay if write_since_sync and IMMED clear */
4234 write_since_sync = false;
/*
 * resp_pre_fetch() - respond to PRE-FETCH(10/16).
 * Range-checks, then touches the corresponding bytes of the fake store
 * with prefetch_range() (handling wrap-around at the end of the circular
 * store) and returns CONDITION MET, modelling a disk whose cache always
 * fits the requested range.
 */
4239 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4240 * CONDITION MET if the specified blocks will/have fitted in the cache, and
4241 * a GOOD status otherwise. Model a disk with a big cache and yield
4242 * CONDITION MET. Actually tries to bring range in main memory into the
4243 * cache associated with the CPU(s).
4245 static int resp_pre_fetch(struct scsi_cmnd *scp,
4246 struct sdebug_dev_info *devip)
4250 u64 block, rest = 0;
4252 u8 *cmd = scp->cmnd;
4253 struct sdeb_store_info *sip = devip2sip(devip, true);
4254 u8 *fsp = sip->storep;
4256 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4257 lba = get_unaligned_be32(cmd + 2);
4258 nblks = get_unaligned_be16(cmd + 7);
4259 } else { /* PRE-FETCH(16) */
4260 lba = get_unaligned_be64(cmd + 2);
4261 nblks = get_unaligned_be32(cmd + 10);
4263 if (lba + nblks > sdebug_capacity) {
4264 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4265 return check_condition_result;
4269 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
/* 'rest' = blocks wrapping past the end of the circular store */
4270 block = do_div(lba, sdebug_store_sectors);
4271 if (block + nblks > sdebug_store_sectors)
4272 rest = block + nblks - sdebug_store_sectors;
4274 /* Try to bring the PRE-FETCH range into CPU's cache */
4275 sdeb_read_lock(sip);
4276 prefetch_range(fsp + (sdebug_sector_size * block),
4277 (nblks - rest) * sdebug_sector_size);
4279 prefetch_range(fsp, rest * sdebug_sector_size);
4280 sdeb_read_unlock(sip);
4283 res = SDEG_RES_IMMED_MASK;
4284 return res | condition_met_result;
/*
 * resp_report_luns() - respond to REPORT LUNS.
 * Supports SELECT REPORT 0 (normal LUNs), 1 (W-LUNs only) and 2 (both);
 * 0x10-0x12 fall through to an invalid-field rejection.  The response is
 * streamed in RL_BUCKET_ELEMS-sized buckets of 8-byte LUN entries so the
 * stack buffer stays small regardless of sdebug_max_luns.
 */
4287 #define RL_BUCKET_ELEMS 8
4289 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4290 * (W-LUN), the normal Linux scanning logic does not associate it with a
4291 * device (e.g. /dev/sg7). The following magic will make that association:
4292 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4293 * where <n> is a host number. If there are multiple targets in a host then
4294 * the above will associate a W-LUN to each target. To only get a W-LUN
4295 * for target 2, then use "echo '- 2 49409' > scan" .
4297 static int resp_report_luns(struct scsi_cmnd *scp,
4298 struct sdebug_dev_info *devip)
4300 unsigned char *cmd = scp->cmnd;
4301 unsigned int alloc_len;
4302 unsigned char select_report;
4304 struct scsi_lun *lun_p;
4305 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4306 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4307 unsigned int wlun_cnt; /* report luns W-LUN count */
4308 unsigned int tlun_cnt; /* total LUN count */
4309 unsigned int rlen; /* response length (in bytes) */
4311 unsigned int off_rsp = 0;
4312 const int sz_lun = sizeof(struct scsi_lun);
4314 clear_luns_changed_on_target(devip);
4316 select_report = cmd[2];
4317 alloc_len = get_unaligned_be32(cmd + 6);
4319 if (alloc_len < 4) {
4320 pr_err("alloc len too small %d\n", alloc_len);
4321 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4322 return check_condition_result;
4325 switch (select_report) {
4326 case 0: /* all LUNs apart from W-LUNs */
4327 lun_cnt = sdebug_max_luns;
4330 case 1: /* only W-LUNs */
4334 case 2: /* all LUNs */
4335 lun_cnt = sdebug_max_luns;
4338 case 0x10: /* only administrative LUs */
4339 case 0x11: /* see SPC-5 */
4340 case 0x12: /* only subsiduary LUs owned by referenced LU */
4342 pr_debug("select report invalid %d\n", select_report);
4343 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4344 return check_condition_result;
4347 if (sdebug_no_lun_0 && (lun_cnt > 0))
4350 tlun_cnt = lun_cnt + wlun_cnt;
4351 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4352 scsi_set_resid(scp, scsi_bufflen(scp));
4353 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4354 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4356 /* loops rely on sizeof response header same as sizeof lun (both 8) */
4357 lun = sdebug_no_lun_0 ? 1 : 0;
4358 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4359 memset(arr, 0, sizeof(arr));
4360 lun_p = (struct scsi_lun *)&arr[0];
/* bucket 0 starts with the 8-byte header carrying the total length */
4362 put_unaligned_be32(rlen, &arr[0]);
4366 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4367 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4369 int_to_scsilun(lun++, lun_p);
4370 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4371 lun_p->scsi_lun[0] |= 0x40;
4373 if (j < RL_BUCKET_ELEMS)
4376 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
/* append the well-known REPORT LUNS W-LUN when requested */
4382 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4386 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
/*
 * resp_verify() - respond to VERIFY(10/16).
 * BYTCHK=0 claims success without touching data; BYTCHK=2 is rejected;
 * BYTCHK=3 means one block is sent and compared against every block in
 * the range, BYTCHK=1 means the full range is sent.  The fetched data is
 * compared to the store via comp_write_worker(compare_only=true), so the
 * store is never modified and only the read lock is needed.
 */
4390 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4392 bool is_bytchk3 = false;
4395 u32 vnum, a_num, off;
4396 const u32 lb_size = sdebug_sector_size;
4399 u8 *cmd = scp->cmnd;
4400 struct sdeb_store_info *sip = devip2sip(devip, true);
4402 bytchk = (cmd[1] >> 1) & 0x3;
4404 return 0; /* always claim internal verify okay */
4405 } else if (bytchk == 2) {
4406 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4407 return check_condition_result;
4408 } else if (bytchk == 3) {
4409 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
/* VERIFY(16) decode */
4413 lba = get_unaligned_be64(cmd + 2);
4414 vnum = get_unaligned_be32(cmd + 10);
4416 case VERIFY: /* is VERIFY(10) */
4417 lba = get_unaligned_be32(cmd + 2);
4418 vnum = get_unaligned_be16(cmd + 7);
4421 mk_sense_invalid_opcode(scp);
4422 return check_condition_result;
4425 return 0; /* not an error */
/* BYTCHK=3 transfers only one block from the initiator */
4426 a_num = is_bytchk3 ? 1 : vnum;
4427 /* Treat following check like one for read (i.e. no write) access */
4428 ret = check_device_access_params(scp, lba, a_num, false);
4432 arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4434 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4436 return check_condition_result;
4438 /* Not changing store, so only need read access */
4439 sdeb_read_lock(sip);
4441 ret = do_dout_fetch(scp, a_num, arr);
4443 ret = DID_ERROR << 16;
4445 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4446 sdev_printk(KERN_INFO, scp->device,
4447 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4448 my_name, __func__, a_num * lb_size, ret);
/* BYTCHK=3: replicate the single sent block across the whole range */
4451 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4452 memcpy(arr + off, arr, lb_size);
4455 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4456 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4457 ret = check_condition_result;
4461 sdeb_read_unlock(sip);
4466 #define RZONES_DESC_HD 64
4468 /* Report zones depending on start LBA and reporting options */
/*
 * REPORT ZONES handler (ZBC).  Walks zones from the requested start LBA,
 * filters them by the reporting options in CDB byte 14, fills 64-byte
 * zone descriptors and a 64-byte header, then copies min(alloc_len,
 * rep_len) back to the initiator.
 * NOTE(review): excerpt elides the switch on rep_opts and several
 * break/continue lines — verify filter fall-through in the full source.
 */
4469 static int resp_report_zones(struct scsi_cmnd *scp,
4470 struct sdebug_dev_info *devip)
4472 unsigned int rep_max_zones, nrz = 0;
4474 u32 alloc_len, rep_opts, rep_len;
4477 u8 *arr = NULL, *desc;
4478 u8 *cmd = scp->cmnd;
4479 struct sdeb_zone_state *zsp = NULL;
4480 struct sdeb_store_info *sip = devip2sip(devip, false);
/* REPORT ZONES is only valid on a zoned pseudo-device */
4482 if (!sdebug_dev_is_zoned(devip)) {
4483 mk_sense_invalid_opcode(scp);
4484 return check_condition_result;
4486 zs_lba = get_unaligned_be64(cmd + 2);
4487 alloc_len = get_unaligned_be32(cmd + 10);
4489 return 0; /* not an error */
4490 rep_opts = cmd[14] & 0x3f;
4491 partial = cmd[14] & 0x80;
4493 if (zs_lba >= sdebug_capacity) {
4494 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4495 return check_condition_result;
/* how many 64-byte descriptors fit after the 64-byte header */
4498 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD)
4500 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4502 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4504 return check_condition_result;
4507 sdeb_read_lock(sip);
/* advance zone by zone from the starting LBA */
4510 for (lba = zs_lba; lba < sdebug_capacity;
4511 lba = zsp->z_start + zsp->z_size) {
/* guard against a zone lookup that fails to advance (infinite loop) */
4512 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4514 zsp = zbc_zone(devip, lba);
/* Empty zones */
4521 if (zsp->z_cond != ZC1_EMPTY)
4525 /* Implicit open zones */
4526 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4530 /* Explicit open zones */
4531 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
/* Closed zones */
4536 if (zsp->z_cond != ZC4_CLOSED)
/* Full zones */
4541 if (zsp->z_cond != ZC5_FULL)
4548 * Read-only, offline, reset WP recommended are
4549 * not emulated: no zones to report;
4553 /* non-seq-resource set */
4554 if (!zsp->z_non_seq_resource)
4558 /* All zones except gap zones. */
4559 if (zbc_zone_is_gap(zsp))
4563 /* Not write pointer (conventional) zones */
4564 if (zbc_zone_is_seq(zsp))
/* unknown reporting option */
4568 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4569 INVALID_FIELD_IN_CDB, 0);
4570 ret = check_condition_result;
4574 if (nrz < rep_max_zones) {
4575 /* Fill zone descriptor */
4576 desc[0] = zsp->z_type;
4577 desc[1] = zsp->z_cond << 4;
4578 if (zsp->z_non_seq_resource)
4580 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4581 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4582 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
/* PARTIAL bit: stop once the buffer's descriptor quota is reached */
4586 if (partial && nrz >= rep_max_zones)
4593 /* Zone list length. */
4594 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
/* Maximum LBA */
4596 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4597 /* Zone starting LBA granularity. */
4598 if (devip->zcap < devip->zsize)
4599 put_unaligned_be64(devip->zsize, arr + 16);
4601 rep_len = (unsigned long)desc - (unsigned long)arr;
4602 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4605 sdeb_read_unlock(sip);
4610 /* Logic transplanted from tcmu-runner, file_zbc.c */
/* Explicitly open every zone currently in the CLOSED condition. */
4611 static void zbc_open_all(struct sdebug_dev_info *devip)
4613 struct sdeb_zone_state *zsp = &devip->zstate[0];
4616 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4617 if (zsp->z_cond == ZC4_CLOSED)
/* explicit=true: transition CLOSED -> EXPLICIT_OPEN */
4618 zbc_open_zone(devip, &devip->zstate[i], true);
/*
 * OPEN ZONE handler (ZBC).  With the ALL bit set, opens every closed
 * zone (subject to the max_open limit); otherwise validates the zone id
 * in the CDB and explicitly opens that single zone.
 */
4622 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4626 enum sdebug_z_cond zc;
4627 u8 *cmd = scp->cmnd;
4628 struct sdeb_zone_state *zsp;
/* ALL bit: bit 0 of CDB byte 14 */
4629 bool all = cmd[14] & 0x01;
4630 struct sdeb_store_info *sip = devip2sip(devip, false);
4632 if (!sdebug_dev_is_zoned(devip)) {
4633 mk_sense_invalid_opcode(scp);
4634 return check_condition_result;
/* zone state changes require the write lock */
4637 sdeb_write_lock(sip);
4640 /* Check if all closed zones can be open */
4641 if (devip->max_open &&
4642 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4643 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4645 res = check_condition_result;
4648 /* Open all closed zones */
4649 zbc_open_all(devip);
4653 /* Open the specified zone */
4654 z_id = get_unaligned_be64(cmd + 2);
4655 if (z_id >= sdebug_capacity) {
4656 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4657 res = check_condition_result;
4661 zsp = zbc_zone(devip, z_id);
/* the zone id must be the exact starting LBA of a zone */
4662 if (z_id != zsp->z_start) {
4663 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4664 res = check_condition_result;
/* conventional zones cannot be opened */
4667 if (zbc_zone_is_conv(zsp)) {
4668 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4669 res = check_condition_result;
/* already explicitly open or full: nothing to do */
4674 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4677 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4678 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4680 res = check_condition_result;
4684 zbc_open_zone(devip, zsp, true);
4686 sdeb_write_unlock(sip);
/* Close every zone of the device (zbc_close_zone skips ineligible ones). */
4690 static void zbc_close_all(struct sdebug_dev_info *devip)
4694 for (i = 0; i < devip->nr_zones; i++)
4695 zbc_close_zone(devip, &devip->zstate[i]);
/*
 * CLOSE ZONE handler (ZBC).  ALL bit closes every zone; otherwise the
 * zone id in the CDB is validated (in range, zone-aligned, sequential)
 * and that single zone is closed.
 */
4698 static int resp_close_zone(struct scsi_cmnd *scp,
4699 struct sdebug_dev_info *devip)
4703 u8 *cmd = scp->cmnd;
4704 struct sdeb_zone_state *zsp;
4705 bool all = cmd[14] & 0x01;
4706 struct sdeb_store_info *sip = devip2sip(devip, false);
4708 if (!sdebug_dev_is_zoned(devip)) {
4709 mk_sense_invalid_opcode(scp);
4710 return check_condition_result;
4713 sdeb_write_lock(sip);
4716 zbc_close_all(devip);
4720 /* Close specified zone */
4721 z_id = get_unaligned_be64(cmd + 2);
4722 if (z_id >= sdebug_capacity) {
4723 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4724 res = check_condition_result;
4728 zsp = zbc_zone(devip, z_id);
/* zone id must match the zone's starting LBA exactly */
4729 if (z_id != zsp->z_start) {
4730 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4731 res = check_condition_result;
/* conventional zones have no open/closed state */
4734 if (zbc_zone_is_conv(zsp)) {
4735 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4736 res = check_condition_result;
4740 zbc_close_zone(devip, zsp);
4742 sdeb_write_unlock(sip);
/*
 * Transition a zone to FULL: applies to closed/open zones, and to empty
 * zones only when @empty is true.  Open zones are closed first so the
 * open/closed accounting stays consistent; the write pointer is advanced
 * to the end of the zone.
 */
4746 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4747 struct sdeb_zone_state *zsp, bool empty)
4749 enum sdebug_z_cond zc = zsp->z_cond;
4751 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4752 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4753 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4754 zbc_close_zone(devip, zsp);
4755 if (zsp->z_cond == ZC4_CLOSED)
4757 zsp->z_wp = zsp->z_start + zsp->z_size;
4758 zsp->z_cond = ZC5_FULL;
/* Finish all zones; empty=false so empty zones are left untouched. */
4762 static void zbc_finish_all(struct sdebug_dev_info *devip)
4766 for (i = 0; i < devip->nr_zones; i++)
4767 zbc_finish_zone(devip, &devip->zstate[i], false);
/*
 * FINISH ZONE handler (ZBC).  ALL bit finishes every eligible zone;
 * otherwise validates the zone id and finishes that single zone
 * (empty=true, so an empty zone is also forced to FULL).
 */
4770 static int resp_finish_zone(struct scsi_cmnd *scp,
4771 struct sdebug_dev_info *devip)
4773 struct sdeb_zone_state *zsp;
4776 u8 *cmd = scp->cmnd;
4777 bool all = cmd[14] & 0x01;
4778 struct sdeb_store_info *sip = devip2sip(devip, false);
4780 if (!sdebug_dev_is_zoned(devip)) {
4781 mk_sense_invalid_opcode(scp);
4782 return check_condition_result;
4785 sdeb_write_lock(sip);
4788 zbc_finish_all(devip);
4792 /* Finish the specified zone */
4793 z_id = get_unaligned_be64(cmd + 2);
4794 if (z_id >= sdebug_capacity) {
4795 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4796 res = check_condition_result;
4800 zsp = zbc_zone(devip, z_id);
/* zone id must match the zone's starting LBA exactly */
4801 if (z_id != zsp->z_start) {
4802 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4803 res = check_condition_result;
/* conventional zones cannot be finished */
4806 if (zbc_zone_is_conv(zsp)) {
4807 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4808 res = check_condition_result;
4812 zbc_finish_zone(devip, zsp, true);
4814 sdeb_write_unlock(sip);
/*
 * Reset the write pointer of one sequential zone: close it if open,
 * zero the zone's written data in the backing store, then return it to
 * the EMPTY condition with the write pointer back at z_start.
 * Conventional (non-sequential) zones are ignored.
 */
4818 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4819 struct sdeb_zone_state *zsp)
4821 enum sdebug_z_cond zc;
4822 struct sdeb_store_info *sip = devip2sip(devip, false);
4824 if (!zbc_zone_is_seq(zsp))
4828 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4829 zbc_close_zone(devip, zsp);
4831 if (zsp->z_cond == ZC4_CLOSED)
/* wipe only the bytes between z_start and the current write pointer */
4834 if (zsp->z_wp > zsp->z_start)
4835 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4836 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4838 zsp->z_non_seq_resource = false;
4839 zsp->z_wp = zsp->z_start;
4840 zsp->z_cond = ZC1_EMPTY;
/* Reset the write pointer of every zone on the device. */
4843 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4847 for (i = 0; i < devip->nr_zones; i++)
4848 zbc_rwp_zone(devip, &devip->zstate[i]);
/*
 * RESET WRITE POINTER handler (ZBC).  ALL bit resets every zone;
 * otherwise validates the zone id and resets that single zone.
 */
4851 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4853 struct sdeb_zone_state *zsp;
4856 u8 *cmd = scp->cmnd;
4857 bool all = cmd[14] & 0x01;
4858 struct sdeb_store_info *sip = devip2sip(devip, false);
4860 if (!sdebug_dev_is_zoned(devip)) {
4861 mk_sense_invalid_opcode(scp);
4862 return check_condition_result;
4865 sdeb_write_lock(sip);
4872 z_id = get_unaligned_be64(cmd + 2);
4873 if (z_id >= sdebug_capacity) {
4874 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4875 res = check_condition_result;
4879 zsp = zbc_zone(devip, z_id);
/* zone id must match the zone's starting LBA exactly */
4880 if (z_id != zsp->z_start) {
4881 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4882 res = check_condition_result;
/* conventional zones have no write pointer */
4885 if (zbc_zone_is_conv(zsp)) {
4886 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4887 res = check_condition_result;
4891 zbc_rwp_zone(devip, zsp);
4893 sdeb_write_unlock(sip);
/* Return the block layer's unique tag for the request behind @cmnd. */
4897 static u32 get_tag(struct scsi_cmnd *cmnd)
4899 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4902 /* Queued (deferred) command completions converge here. */
/*
 * Common completion path for hrtimer- and workqueue-deferred commands.
 * Under the per-command lock it detaches the queued-cmd from the scmd
 * and snapshots the aborted flag; an aborted command skips scsi_done()
 * and kicks off error handling via blk_abort_request() instead.
 */
4903 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4905 struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
4906 unsigned long flags;
4907 struct scsi_cmnd *scp = sqcp->scmd;
4908 struct sdebug_scsi_cmd *sdsc;
4911 if (sdebug_statistics) {
4912 atomic_inc(&sdebug_completions);
/* count completions that migrated off the issuing CPU */
4913 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4914 atomic_inc(&sdebug_miss_cpus);
4918 pr_err("scmd=NULL\n");
4922 sdsc = scsi_cmd_priv(scp);
4923 spin_lock_irqsave(&sdsc->lock, flags);
/* read-and-clear under the lock so the abort fires exactly once */
4924 aborted = sd_dp->aborted;
4925 if (unlikely(aborted))
4926 sd_dp->aborted = false;
4927 ASSIGN_QUEUED_CMD(scp, NULL);
4929 spin_unlock_irqrestore(&sdsc->lock, flags);
4932 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
4933 blk_abort_request(scsi_cmd_to_rq(scp));
4937 scsi_done(scp); /* callback to mid level */
4939 sdebug_free_queued_cmd(sqcp);
4942 /* When high resolution timer goes off this function is called. */
/* hrtimer callback: hand off to the common completion path, one-shot. */
4943 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4945 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4947 sdebug_q_cmd_complete(sd_dp);
4948 return HRTIMER_NORESTART;
4951 /* When work queue schedules work, it calls this function. */
/* Workqueue callback: hand off to the common completion path. */
4952 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4954 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4956 sdebug_q_cmd_complete(sd_dp);
/* One UUID shared by all LUs when uuid_ctl==2 (see sdebug_device_create). */
4959 static bool got_shared_uuid;
4960 static uuid_t shared_uuid;
/*
 * Build the zone table for a zoned pseudo-device: derive zone size and
 * capacity from the module parameters, lay out conventional, sequential
 * and (when zcap < zsize) gap zones, and initialize each zone's type,
 * condition and write pointer.
 * NOTE(review): excerpt elides several return/error lines — confirm the
 * exact error codes in the full source.
 */
4962 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4964 struct sdeb_zone_state *zsp;
4965 sector_t capacity = get_sdebug_capacity();
4966 sector_t conv_capacity;
4967 sector_t zstart = 0;
4971 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4972 * a zone size allowing for at least 4 zones on the device. Otherwise,
4973 * use the specified zone size checking that at least 2 zones can be
4974 * created for the device.
4976 if (!sdeb_zbc_zone_size_mb) {
4977 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4978 >> ilog2(sdebug_sector_size);
/* halve the zone size until at least 4 zones fit */
4979 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4981 if (devip->zsize < 2) {
4982 pr_err("Device capacity too small\n");
4986 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4987 pr_err("Zone size is not a power of 2\n");
4990 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4991 >> ilog2(sdebug_sector_size);
4992 if (devip->zsize >= capacity) {
4993 pr_err("Zone size too large for device capacity\n");
4998 devip->zsize_shift = ilog2(devip->zsize);
4999 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
/* zone capacity defaults to the full zone size */
5001 if (sdeb_zbc_zone_cap_mb == 0) {
5002 devip->zcap = devip->zsize;
5004 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5005 ilog2(sdebug_sector_size);
5006 if (devip->zcap > devip->zsize) {
5007 pr_err("Zone capacity too large\n");
5012 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5013 if (conv_capacity >= capacity) {
5014 pr_err("Number of conventional zones too large\n");
5017 devip->nr_conv_zones = sdeb_zbc_nr_conv;
5018 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5020 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5022 /* Add gap zones if zone capacity is smaller than the zone size */
5023 if (devip->zcap < devip->zsize)
5024 devip->nr_zones += devip->nr_seq_zones;
5026 if (devip->zmodel == BLK_ZONED_HM) {
5027 /* zbc_max_open_zones can be 0, meaning "not reported" */
5028 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5029 devip->max_open = (devip->nr_zones - 1) / 2;
5031 devip->max_open = sdeb_zbc_max_open;
5034 devip->zstate = kcalloc(devip->nr_zones,
5035 sizeof(struct sdeb_zone_state), GFP_KERNEL);
5039 for (i = 0; i < devip->nr_zones; i++) {
5040 zsp = &devip->zstate[i];
5042 zsp->z_start = zstart;
/* leading zones are conventional: no write pointer */
5044 if (i < devip->nr_conv_zones) {
5045 zsp->z_type = ZBC_ZTYPE_CNV;
5046 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5047 zsp->z_wp = (sector_t)-1;
5049 min_t(u64, devip->zsize, capacity - zstart);
/* zone-size-aligned start: a sequential write zone */
5050 } else if ((zstart & (devip->zsize - 1)) == 0) {
5051 if (devip->zmodel == BLK_ZONED_HM)
5052 zsp->z_type = ZBC_ZTYPE_SWR;
5054 zsp->z_type = ZBC_ZTYPE_SWP;
5055 zsp->z_cond = ZC1_EMPTY;
5056 zsp->z_wp = zsp->z_start;
5058 min_t(u64, devip->zcap, capacity - zstart);
/* otherwise: a gap zone filling zsize - zcap sectors */
5060 zsp->z_type = ZBC_ZTYPE_GAP;
5061 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5062 zsp->z_wp = (sector_t)-1;
5063 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5067 WARN_ON_ONCE((int)zsp->z_size <= 0);
5068 zstart += zsp->z_size;
/*
 * Allocate and initialize one pseudo-device (LU): assign its LU name
 * UUID per sdebug_uuid_ctl (1 = unique per LU, 2 = one shared UUID),
 * create zones when ZBC is in use, then link it into the host's list.
 * Returns the new devip, or NULL-ish on failure (alloc/zone error paths
 * are partly elided in this excerpt).
 */
5074 static struct sdebug_dev_info *sdebug_device_create(
5075 struct sdebug_host_info *sdbg_host, gfp_t flags)
5077 struct sdebug_dev_info *devip;
5079 devip = kzalloc(sizeof(*devip), flags);
5081 if (sdebug_uuid_ctl == 1)
5082 uuid_gen(&devip->lu_name);
5083 else if (sdebug_uuid_ctl == 2) {
/* lazily generate the shared UUID on first use */
5084 if (got_shared_uuid)
5085 devip->lu_name = shared_uuid;
5087 uuid_gen(&shared_uuid);
5088 got_shared_uuid = true;
5089 devip->lu_name = shared_uuid;
5092 devip->sdbg_host = sdbg_host;
5093 if (sdeb_zbc_in_use) {
5094 devip->zmodel = sdeb_zbc_model;
5095 if (sdebug_device_create_zones(devip)) {
5100 devip->zmodel = BLK_ZONED_NONE;
5102 devip->create_ts = ktime_get_boottime();
/* stopped=2 when a TUR ramp-up delay is configured, else ready */
5103 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5104 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
/*
 * Find the devip matching @sdev's channel/target/lun, reusing a free
 * (unused) slot or allocating a new one when no match exists.  A reused
 * or new slot is stamped with the sdev address and gets a power-on-occurred
 * unit attention.
 */
5109 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5111 struct sdebug_host_info *sdbg_host;
5112 struct sdebug_dev_info *open_devip = NULL;
5113 struct sdebug_dev_info *devip;
5115 sdbg_host = shost_to_sdebug_host(sdev->host);
5117 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5118 if ((devip->used) && (devip->channel == sdev->channel) &&
5119 (devip->target == sdev->id) &&
5120 (devip->lun == sdev->lun))
/* remember the first unused slot for possible reuse */
5123 if ((!devip->used) && (!open_devip))
5127 if (!open_devip) { /* try and make a new one */
5128 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5130 pr_err("out of memory at line %d\n", __LINE__);
5135 open_devip->channel = sdev->channel;
5136 open_devip->target = sdev->id;
5137 open_devip->lun = sdev->lun;
5138 open_devip->sdbg_host = sdbg_host;
/* report POWER ON OCCURRED on first use of this nexus */
5139 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5140 open_devip->used = true;
/* slave_alloc hook: only logs the nexus when verbose logging is on. */
5144 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5147 pr_info("slave_alloc <%u %u %u %llu>\n",
5148 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
/*
 * slave_configure hook: binds (or builds) the devip for this sdev,
 * raises the host's max CDB length if needed, optionally suppresses
 * upper-level driver attach, and applies the configured CDB length.
 * Returns 1 when no devip could be built (device marked offline).
 */
5152 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5154 struct sdebug_dev_info *devip =
5155 (struct sdebug_dev_info *)sdp->hostdata;
5158 pr_info("slave_configure <%u %u %u %llu>\n",
5159 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5160 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5161 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5162 if (devip == NULL) {
5163 devip = find_build_dev_info(sdp);
5165 return 1; /* no resources, will be marked offline */
5167 sdp->hostdata = devip;
/* presumably gated on sdebug_no_uld in an elided line — confirm */
5169 sdp->no_uld_attach = 1;
5170 config_cdb_len(sdp);
/*
 * slave_destroy hook: detach the devip from the sdev and mark its slot
 * unused so find_build_dev_info() can recycle it.
 */
5174 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5176 struct sdebug_dev_info *devip =
5177 (struct sdebug_dev_info *)sdp->hostdata;
5180 pr_info("slave_destroy <%u %u %u %llu>\n",
5181 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5183 /* make this slot available for re-use */
5184 devip->used = false;
5185 sdp->hostdata = NULL;
5189 /* Returns true if we require the queued memory to be freed by the caller. */
/*
 * Cancel a deferred completion by its defer type.  For hrtimer/workqueue
 * deferral the caller must free the queued cmd only when the callback was
 * cancelled before running (otherwise the callback frees it itself);
 * poll-mode deferral never has a pending callback.
 */
5190 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5191 enum sdeb_defer_type defer_t)
5193 if (defer_t == SDEB_DEFER_HRT) {
5194 int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5197 case 0: /* Not active, it must have already run */
5198 case -1: /* -1 It's executing the CB */
5200 case 1: /* Was active, we've now cancelled */
5204 } else if (defer_t == SDEB_DEFER_WQ) {
5205 /* Cancel if pending */
5206 if (cancel_work_sync(&sd_dp->ew.work))
5208 /* Was not pending, so it must have run */
5210 } else if (defer_t == SDEB_DEFER_POLL) {
/*
 * Stop a queued command's deferred completion.  Caller must hold the
 * per-command lock (asserted below).  Detaches the queued cmd from the
 * scmd and frees it when stop_qc_helper() says the callback will not.
 */
5218 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5220 enum sdeb_defer_type l_defer_t;
5221 struct sdebug_defer *sd_dp;
5222 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5223 struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5225 lockdep_assert_held(&sdsc->lock);
5229 sd_dp = &sqcp->sd_dp;
5230 l_defer_t = READ_ONCE(sd_dp->defer_t);
5231 ASSIGN_QUEUED_CMD(cmnd, NULL);
5233 if (stop_qc_helper(sd_dp, l_defer_t))
5234 sdebug_free_queued_cmd(sqcp);
5240 * Called from scsi_debug_abort() only, which is for timed-out cmd.
/* Take the per-command lock and stop the command's deferred completion. */
5242 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5244 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5245 unsigned long flags;
5248 spin_lock_irqsave(&sdsc->lock, flags);
5249 res = scsi_debug_stop_cmnd(cmnd);
5250 spin_unlock_irqrestore(&sdsc->lock, flags);
5256 * All we can do is set the cmnd as internally aborted and wait for it to
5257 * finish. We cannot call scsi_done() as normal completion path may do that.
/* blk_mq_tagset_busy_iter callback: abort the command behind each request. */
5259 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5261 scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5266 /* Deletes (stops) timers or work queues of all queued commands */
/* Iterate every busy request on every emulated host and stop it. */
5267 static void stop_all_queued(void)
5269 struct sdebug_host_info *sdhp;
5271 mutex_lock(&sdebug_host_list_mutex);
5272 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5273 struct Scsi_Host *shost = sdhp->shost;
5275 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5277 mutex_unlock(&sdebug_host_list_mutex);
/*
 * eh_abort_handler: stop the timed-out command's deferred completion
 * and optionally log whether it was found queued.
 */
5280 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5282 bool ok = scsi_debug_abort_cmnd(SCpnt);
5286 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5287 sdev_printk(KERN_INFO, SCpnt->device,
5288 "%s: command%s found\n", __func__,
/*
 * blk_mq_tagset_busy_iter callback: abort only the commands belonging
 * to the scsi_device passed in @data.
 */
5294 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5296 struct scsi_device *sdp = data;
5297 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5299 if (scmd->device == sdp)
5300 scsi_debug_abort_cmnd(scmd);
5305 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5306 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5308 struct Scsi_Host *shost = sdp->host;
5310 blk_mq_tagset_busy_iter(&shost->tag_set,
5311 scsi_debug_stop_all_queued_iter, sdp);
/*
 * eh_device_reset_handler: stop all queued commands on this sdev and
 * post a power-on/reset unit attention on its devip.
 */
5314 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5316 struct scsi_device *sdp = SCpnt->device;
5317 struct sdebug_dev_info *devip = sdp->hostdata;
5321 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5322 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5324 scsi_debug_stop_all_queued(sdp);
5326 set_bit(SDEBUG_UA_POR, devip->uas_bm);
/*
 * eh_target_reset_handler: post a bus-reset unit attention on every
 * devip whose target id matches, counting how many were hit.
 */
5333 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5334 struct scsi_device *sdp = SCpnt->device;
5335 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5336 struct sdebug_dev_info *devip;
5338 ++num_target_resets;
5339 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5340 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5342 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5343 if (devip->target == sdp->id) {
5344 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5349 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5350 sdev_printk(KERN_INFO, sdp,
5351 "%s: %d device(s) found in target\n", __func__, k);
/*
 * eh_bus_reset_handler: post a bus-reset unit attention on every devip
 * of this emulated host.
 */
5358 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5359 struct scsi_device *sdp = SCpnt->device;
5360 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5361 struct sdebug_dev_info *devip;
5365 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5366 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5368 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5369 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5373 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5374 sdev_printk(KERN_INFO, sdp,
5375 "%s: %d device(s) found in host\n", __func__, k);
/*
 * eh_host_reset_handler: post a bus-reset unit attention on every devip
 * of every emulated host, under the host-list mutex.
 */
5381 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5382 struct sdebug_host_info *sdbg_host;
5383 struct sdebug_dev_info *devip;
5386 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5387 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5388 mutex_lock(&sdebug_host_list_mutex);
5389 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5390 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5392 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5396 mutex_unlock(&sdebug_host_list_mutex);
5398 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5399 sdev_printk(KERN_INFO, SCpnt->device,
5400 "%s: %d device(s) found\n", __func__, k)
/*
 * Write a classic MSDOS partition table into the first sector of the
 * ram store: up to sdebug_num_parts equal-sized Linux (0x83) partitions
 * aligned to cylinder boundaries, plus the 0x55 magic marker.
 * No-op for stores under 1 MiB or when no partitions were requested.
 */
5404 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5406 struct msdos_partition *pp;
5407 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5408 int sectors_per_part, num_sectors, k;
5409 int heads_by_sects, start_sec, end_sec;
5411 /* assume partition table already zeroed */
5412 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5414 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5415 sdebug_num_parts = SDEBUG_MAX_PARTS;
5416 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5418 num_sectors = (int)get_sdebug_capacity();
5419 sectors_per_part = (num_sectors - sdebug_sectors_per)
5421 heads_by_sects = sdebug_heads * sdebug_sectors_per;
/* first partition starts after the first track */
5422 starts[0] = sdebug_sectors_per;
5423 max_part_secs = sectors_per_part;
/* align every partition start to a cylinder boundary */
5424 for (k = 1; k < sdebug_num_parts; ++k) {
5425 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5427 if (starts[k] - starts[k - 1] < max_part_secs)
5428 max_part_secs = starts[k] - starts[k - 1];
5430 starts[sdebug_num_parts] = num_sectors;
5431 starts[sdebug_num_parts + 1] = 0;
5433 ramp[510] = 0x55; /* magic partition markings */
5435 pp = (struct msdos_partition *)(ramp + 0x1be);
5436 for (k = 0; starts[k + 1]; ++k, ++pp) {
5437 start_sec = starts[k];
5438 end_sec = starts[k] + max_part_secs - 1;
/* CHS encoding of the partition start */
5441 pp->cyl = start_sec / heads_by_sects;
5442 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5443 / sdebug_sectors_per;
5444 pp->sector = (start_sec % sdebug_sectors_per) + 1;
/* CHS encoding of the partition end */
5446 pp->end_cyl = end_sec / heads_by_sects;
5447 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5448 / sdebug_sectors_per;
5449 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5451 pp->start_sect = cpu_to_le32(start_sec);
5452 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5453 pp->sys_ind = 0x83; /* plain Linux partition */
/*
 * Block or unblock request submission on every emulated host.
 * Caller must hold sdebug_host_list_mutex (asserted).
 */
5457 static void block_unblock_all_queues(bool block)
5459 struct sdebug_host_info *sdhp;
5461 lockdep_assert_held(&sdebug_host_list_mutex);
5463 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5464 struct Scsi_Host *shost = sdhp->shost;
5467 scsi_block_requests(shost);
5469 scsi_unblock_requests(shost);
5473 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5474 * commands will be processed normally before triggers occur.
/* Queues are blocked while the counter is rewritten to avoid races. */
5476 static void tweak_cmnd_count(void)
5480 modulo = abs(sdebug_every_nth);
5484 mutex_lock(&sdebug_host_list_mutex);
5485 block_unblock_all_queues(true);
5486 count = atomic_read(&sdebug_cmnd_count);
/* round down to the nearest multiple of |every_nth| */
5487 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5488 block_unblock_all_queues(false);
5489 mutex_unlock(&sdebug_host_list_mutex);
/* Zero the driver-wide command/completion statistics counters. */
5492 static void clear_queue_stats(void)
5494 atomic_set(&sdebug_cmnd_count, 0);
5495 atomic_set(&sdebug_completions, 0);
5496 atomic_set(&sdebug_miss_cpus, 0);
5497 atomic_set(&sdebug_a_tsf, 0);
/*
 * True when the current command count lands on a multiple of
 * |every_nth| — used to trigger injected errors periodically.
 */
5500 static bool inject_on_this_cmd(void)
5502 if (sdebug_every_nth == 0)
5504 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5507 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
/* Return a queued-command object to its slab cache. */
5510 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
5513 kmem_cache_free(queued_cmd_cache, sqcp);
/*
 * Allocate a zeroed queued-command object and prime its deferral
 * machinery: pinned-relative hrtimer and work item, both routed to the
 * common completion path.  Returns NULL on allocation failure.
 */
5516 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
5518 struct sdebug_queued_cmd *sqcp;
5519 struct sdebug_defer *sd_dp;
5521 sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
5525 sd_dp = &sqcp->sd_dp;
5527 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5528 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5529 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5536 /* Complete the processing of the thread that queued a SCSI command to this
5537 * driver. It either completes the command by calling cmnd_done() or
5538 * schedules a hr timer or work queue then returns 0. Returns
5539 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
/*
 * @cmnd:        the command being serviced
 * @devip:       pseudo-device state (NULL -> DID_NO_CONNECT)
 * @scsi_result: pre-computed result to impose when pfp reports success
 * @pfp:         response function that actually executes the command
 * @delta_jiff:  >0 delay in jiffies, 0 respond inline, <0 use workqueue
 * @ndelay:      nanosecond delay alternative to delta_jiff
 * NOTE(review): this excerpt elides many lines (labels, else arms);
 * comments below describe only the visible logic.
 */
5541 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5543 int (*pfp)(struct scsi_cmnd *,
5544 struct sdebug_dev_info *),
5545 int delta_jiff, int ndelay)
5547 struct request *rq = scsi_cmd_to_rq(cmnd);
5548 bool polled = rq->cmd_flags & REQ_POLLED;
5549 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5550 unsigned long flags;
5551 u64 ns_from_boot = 0;
5552 struct sdebug_queued_cmd *sqcp;
5553 struct scsi_device *sdp;
5554 struct sdebug_defer *sd_dp;
5556 if (unlikely(devip == NULL)) {
5557 if (scsi_result == 0)
5558 scsi_result = DID_NO_CONNECT << 16;
5559 goto respond_in_thread;
/* zero delay: answer synchronously from the submitting thread */
5563 if (delta_jiff == 0)
5564 goto respond_in_thread;
/* rare TASK SET FULL injection when the queue is exactly full */
5567 if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5568 (scsi_result == 0))) {
5569 int num_in_q = scsi_device_busy(sdp);
5570 int qdepth = cmnd->device->queue_depth;
5572 if ((num_in_q == qdepth) &&
5573 (atomic_inc_return(&sdebug_a_tsf) >=
5574 abs(sdebug_every_nth))) {
5575 atomic_set(&sdebug_a_tsf, 0);
5576 scsi_result = device_qfull_result;
5578 if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
5579 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
5580 __func__, num_in_q);
5584 sqcp = sdebug_alloc_queued_cmd(cmnd);
5586 pr_err("%s no alloc\n", __func__);
5587 return SCSI_MLQUEUE_HOST_BUSY;
5589 sd_dp = &sqcp->sd_dp;
5592 ns_from_boot = ktime_get_boottime_ns();
5594 /* one of the resp_*() response functions is called here */
5595 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
/* SDEG_RES_IMMED_MASK: respond immediately regardless of delay knobs */
5596 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5597 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5598 delta_jiff = ndelay = 0;
5600 if (cmnd->result == 0 && scsi_result != 0)
5601 cmnd->result = scsi_result;
/* one-shot transport error injection */
5602 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5603 if (atomic_read(&sdeb_inject_pending)) {
5604 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5605 atomic_set(&sdeb_inject_pending, 0);
5606 cmnd->result = check_condition_result;
5610 if (unlikely(sdebug_verbose && cmnd->result))
5611 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5612 __func__, cmnd->result);
5614 if (delta_jiff > 0 || ndelay > 0) {
5617 if (delta_jiff > 0) {
5618 u64 ns = jiffies_to_nsecs(delta_jiff);
/* optional randomization of the completion delay */
5620 if (sdebug_random && ns < U32_MAX) {
5621 ns = get_random_u32_below((u32)ns);
5622 } else if (sdebug_random) {
5623 ns >>= 12; /* scale to 4 usec precision */
5624 if (ns < U32_MAX) /* over 4 hours max */
5625 ns = get_random_u32_below((u32)ns);
5628 kt = ns_to_ktime(ns);
5629 } else { /* ndelay has a 4.2 second max */
5630 kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
/* very short delays: check whether the deadline already passed */
5632 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5633 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5635 if (kt <= d) { /* elapsed duration >= kt */
5636 /* call scsi_done() from this thread */
5637 sdebug_free_queued_cmd(sqcp);
5641 /* otherwise reduce kt by elapsed time */
5645 if (sdebug_statistics)
5646 sd_dp->issuing_cpu = raw_smp_processor_id();
/* polled request: record completion timestamp, completion via mq_poll */
5648 spin_lock_irqsave(&sdsc->lock, flags);
5649 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5650 ASSIGN_QUEUED_CMD(cmnd, sqcp);
5651 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5652 spin_unlock_irqrestore(&sdsc->lock, flags);
5654 /* schedule the invocation of scsi_done() for a later time */
5655 spin_lock_irqsave(&sdsc->lock, flags);
5656 ASSIGN_QUEUED_CMD(cmnd, sqcp);
5657 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5658 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5660 * The completion handler will try to grab sqcp->lock,
5661 * so there is no chance that the completion handler
5662 * will call scsi_done() until we release the lock
5663 * here (so ok to keep referencing sdsc).
5665 spin_unlock_irqrestore(&sdsc->lock, flags);
5667 } else { /* jdelay < 0, use work queue */
5668 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5669 atomic_read(&sdeb_inject_pending))) {
5670 sd_dp->aborted = true;
5671 atomic_set(&sdeb_inject_pending, 0);
5672 sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
5673 blk_mq_unique_tag_to_tag(get_tag(cmnd)));
5676 if (sdebug_statistics)
5677 sd_dp->issuing_cpu = raw_smp_processor_id();
5679 spin_lock_irqsave(&sdsc->lock, flags);
5680 ASSIGN_QUEUED_CMD(cmnd, sqcp);
5681 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5682 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5683 spin_unlock_irqrestore(&sdsc->lock, flags);
5685 spin_lock_irqsave(&sdsc->lock, flags);
5686 ASSIGN_QUEUED_CMD(cmnd, sqcp);
5687 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5688 schedule_work(&sd_dp->ew.work);
5689 spin_unlock_irqrestore(&sdsc->lock, flags);
5695 respond_in_thread: /* call back to mid-layer using invocation thread */
5696 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5697 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5698 if (cmnd->result == 0 && scsi_result != 0)
5699 cmnd->result = scsi_result;
5704 /* Note: The following macros create attribute files in the
5705 /sys/module/scsi_debug/parameters directory. Unfortunately this
5706 driver is unaware of a change and cannot trigger auxiliary actions
5707 as it can when the corresponding attribute in the
5708 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
*/
/* NOTE(review): several multi-line invocations below are missing their
 * continuation line carrying the permissions argument in this extract
 * (medium_error_count, medium_error_start, per_host_store,
 * vpd_use_hostno, write_same_length) -- restore from the full file. */
5710 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5711 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5712 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5713 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5714 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5715 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5716 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5717 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5718 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5719 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5720 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5721 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5722 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5723 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5724 module_param_string(inq_product, sdebug_inq_product_id,
5725 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5726 module_param_string(inq_rev, sdebug_inq_product_rev,
5727 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5728 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5729 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5730 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5731 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5732 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5733 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5734 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5735 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5736 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5737 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5738 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5740 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5742 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5743 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5744 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5745 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5746 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5747 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5748 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5749 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5750 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5751 module_param_named(per_host_store, sdebug_per_host_store, bool,
5753 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5754 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5755 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5756 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5757 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5758 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5759 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5760 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5761 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5762 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5763 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5764 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5765 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5766 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5767 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5768 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5769 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5770 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5772 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5773 module_param_named(write_same_length, sdebug_write_same_length, int,
5775 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5776 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5777 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5778 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5779 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
/* Standard module identification metadata. */
5781 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5782 MODULE_DESCRIPTION("SCSI debug adapter driver");
5783 MODULE_LICENSE("GPL");
5784 MODULE_VERSION(SDEBUG_VERSION);
/* One-line usage strings, shown by modinfo, for each parameter above. */
5786 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5787 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5788 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5789 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5790 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5791 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5792 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5793 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5794 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5795 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5796 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5797 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5798 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5799 MODULE_PARM_DESC(host_max_queue,
5800 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5801 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5802 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5803 SDEBUG_VERSION "\")");
5804 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5805 MODULE_PARM_DESC(lbprz,
5806 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5807 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5808 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5809 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5810 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5811 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5812 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5813 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5814 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5815 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5816 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5817 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5818 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5819 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
5820 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5821 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5822 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5823 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5824 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5825 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5826 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5827 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5828 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5829 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5830 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5831 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5832 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5833 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5834 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5835 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5836 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5837 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5838 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5839 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5840 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5841 MODULE_PARM_DESC(uuid_ctl,
5842 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5843 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5844 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5845 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5846 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5847 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5848 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5849 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5850 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5851 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5853 #define SDEBUG_INFO_LEN 256
5854 static char sdebug_info[SDEBUG_INFO_LEN];
/*
 * Host template .info() callback: format the driver name, version and
 * date, then (if space remains) append key config values, into the
 * static sdebug_info buffer.
 * NOTE(review): local declarations, the early-return body and the final
 * return statement are elided in this extract; presumably returns
 * sdebug_info -- confirm against the full file.
 */
5856 static const char *scsi_debug_info(struct Scsi_Host *shp)
5860 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5861 my_name, SDEBUG_VERSION, sdebug_version_date);
5862 if (k >= (SDEBUG_INFO_LEN - 1))
5864 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5865 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5866 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5867 "statistics", (int)sdebug_statistics);
5871 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
/*
 * Host template .write_info() callback: copy at most 15 bytes of the
 * user buffer, parse one decimal integer, and refresh sdebug_verbose /
 * sdebug_any_injecting_opt from it.  Requires both CAP_SYS_ADMIN and
 * CAP_SYS_RAWIO.
 * NOTE(review): the local buffer declaration, error-return paths, the
 * sdebug_opts assignment and the closing brace are elided here.
 */
5872 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5877 int minLen = length > 15 ? 15 : length;
5879 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5881 memcpy(arr, buffer, minLen);
5883 if (1 != sscanf(arr, "%d", &opts))
5886 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5887 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5888 if (sdebug_every_nth != 0)
/* Per-hw-queue scan state passed via 'opaque' to the iterator below.
 * NOTE(review): member declarations are elided in this extract; the
 * iterator reads queue_num, first and last from it. */
5893 struct sdebug_submit_queue_data {
/*
 * blk_mq_tagset_busy_iter() callback: for busy requests mapped to hw
 * queue data->queue_num, record the first busy tag and keep updating
 * the last one (relies on ascending tag iteration order).
 * NOTE(review): the return statements are elided in this extract.
 */
5899 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
5901 struct sdebug_submit_queue_data *data = opaque;
5902 u32 unique_tag = blk_mq_unique_tag(rq);
5903 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
5904 u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
5905 int queue_num = data->queue_num;
5907 if (hwq != queue_num)
5910 /* Rely on iter'ing in ascending tag order */
5911 if (*data->first == -1)
5912 *data->first = *data->last = tag;
5919 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5920 * same for each scsi_debug host (if more than one). Some of the counters
5921 * output are not atomics so might be inaccurate in a busy system. */
/*
 * Host template .show_info() callback: dump driver version, global
 * settings, counters, per-submit-queue busy-tag ranges (gathered via
 * sdebug_submit_queue_iter), the host list and the per-store xarray.
 * NOTE(review): many lines (locals, initializers, loop-variable updates,
 * closing braces, return) are elided in this extract.
 */
5922 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5924 struct sdebug_host_info *sdhp;
5927 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5928 SDEBUG_VERSION, sdebug_version_date);
5929 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5930 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5931 sdebug_opts, sdebug_every_nth);
5932 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5933 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5934 sdebug_sector_size, "bytes");
5935 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5936 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5938 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5939 num_dev_resets, num_target_resets, num_bus_resets,
5941 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5942 dix_reads, dix_writes, dif_errors);
5943 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5945 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5946 atomic_read(&sdebug_cmnd_count),
5947 atomic_read(&sdebug_completions),
5948 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5949 atomic_read(&sdebug_a_tsf),
5950 atomic_read(&sdeb_mq_poll_count));
5952 seq_printf(m, "submit_queues=%d\n", submit_queues);
5953 for (j = 0; j < submit_queues; ++j) {
5955 struct sdebug_submit_queue_data data = {
5960 seq_printf(m, " queue %d:\n", j);
5961 blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
5964 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5965 "first,last bits", f, l);
5969 seq_printf(m, "this host_no=%d\n", host->host_no);
5970 if (!xa_empty(per_store_ap)) {
5973 unsigned long l_idx;
5974 struct sdeb_store_info *sip;
5976 seq_puts(m, "\nhost list:\n");
5978 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5980 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5981 sdhp->shost->host_no, idx);
5984 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5985 sdeb_most_recent_idx);
5987 xa_for_each(per_store_ap, l_idx, sip) {
5988 niu = xa_get_mark(per_store_ap, l_idx,
5989 SDEB_XA_NOT_IN_USE);
5991 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5992 (niu ? " not_in_use" : ""));
/* sysfs (driver) 'delay' attribute: reports sdebug_jdelay in jiffies. */
5999 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6001 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6003 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6004 * of delay is jiffies.
/*
 * Store: under sdebug_host_list_mutex with all queues blocked, refuse
 * the change (-EBUSY) if any host has busy commands, else apply it.
 * NOTE(review): parse-failure path, loop close and return are elided.
 */
6006 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6011 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6013 if (sdebug_jdelay != jdelay) {
6014 struct sdebug_host_info *sdhp;
6016 mutex_lock(&sdebug_host_list_mutex);
6017 block_unblock_all_queues(true);
6019 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6020 struct Scsi_Host *shost = sdhp->shost;
6022 if (scsi_host_busy(shost)) {
6023 res = -EBUSY; /* queued commands */
6028 sdebug_jdelay = jdelay;
6031 block_unblock_all_queues(false);
6032 mutex_unlock(&sdebug_host_list_mutex);
6038 static DRIVER_ATTR_RW(delay);
/* sysfs 'ndelay' attribute: response delay in nanoseconds. */
6040 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6042 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6044 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6045 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
/*
 * Store: accepts 0 <= ndelay < 1e9; same busy-host check as delay_store.
 * NOTE(review): the JDELAY_OVERRIDDEN else-arm and returns are elided.
 */
6046 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6051 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6052 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6054 if (sdebug_ndelay != ndelay) {
6055 struct sdebug_host_info *sdhp;
6057 mutex_lock(&sdebug_host_list_mutex);
6058 block_unblock_all_queues(true);
6060 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6061 struct Scsi_Host *shost = sdhp->shost;
6063 if (scsi_host_busy(shost)) {
6064 res = -EBUSY; /* queued commands */
6070 sdebug_ndelay = ndelay;
6071 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6074 block_unblock_all_queues(false);
6075 mutex_unlock(&sdebug_host_list_mutex);
6081 static DRIVER_ATTR_RW(ndelay);
/* sysfs 'opts' attribute: option bit-mask, accepts "0x..." hex or decimal. */
6083 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6085 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6088 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6094 if (sscanf(buf, "%10s", work) == 1) {
6095 if (strncasecmp(work, "0x", 2) == 0) {
6096 if (kstrtoint(work + 2, 16, &opts) == 0)
6099 if (kstrtoint(work, 10, &opts) == 0)
/* On success, refresh the derived verbose / injecting flags. */
6106 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6107 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6111 static DRIVER_ATTR_RW(opts);
/* sysfs 'ptype' attribute: SCSI peripheral type; cannot switch to or from
 * TYPE_ZBC at runtime. */
6113 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6115 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6117 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6122 /* Cannot change from or to TYPE_ZBC with sysfs */
6123 if (sdebug_ptype == TYPE_ZBC)
6126 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6134 static DRIVER_ATTR_RW(ptype);
/* sysfs 'dsense' attribute: descriptor (1) vs fixed (0) sense format. */
6136 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6138 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6140 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6145 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6151 static DRIVER_ATTR_RW(dsense);
/* sysfs 'fake_rw' attribute: when set, reads/writes are faked (no ram
 * store copy).  The store side also creates or shrinks backing stores on
 * 1->0 / 0->1 transitions. */
6153 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6155 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6157 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6162 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6163 bool want_store = (n == 0);
6164 struct sdebug_host_info *sdhp;
/* Normalize current value to 0/1 before comparing with new value. */
6167 sdebug_fake_rw = (sdebug_fake_rw > 0);
6168 if (sdebug_fake_rw == n)
6169 return count; /* not transitioning so do nothing */
6171 if (want_store) { /* 1 --> 0 transition, set up store */
6172 if (sdeb_first_idx < 0) {
6173 idx = sdebug_add_store();
6177 idx = sdeb_first_idx;
6178 xa_clear_mark(per_store_ap, idx,
6179 SDEB_XA_NOT_IN_USE);
6181 /* make all hosts use same store */
6182 list_for_each_entry(sdhp, &sdebug_host_list,
6184 if (sdhp->si_idx != idx) {
6185 xa_set_mark(per_store_ap, sdhp->si_idx,
6186 SDEB_XA_NOT_IN_USE);
6190 sdeb_most_recent_idx = idx;
6191 } else { /* 0 --> 1 transition is trigger for shrink */
6192 sdebug_erase_all_stores(true /* apart from first */);
6199 static DRIVER_ATTR_RW(fake_rw);
/* sysfs 'no_lun_0' attribute: suppress LUN 0 when non-zero. */
6201 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6203 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6205 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6210 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6211 sdebug_no_lun_0 = n;
6216 static DRIVER_ATTR_RW(no_lun_0);
/* sysfs 'num_tgts' attribute: targets per host; re-runs LUN/target scan. */
6218 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6220 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6222 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6227 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6228 sdebug_num_tgts = n;
6229 sdebug_max_tgts_luns();
6234 static DRIVER_ATTR_RW(num_tgts);
/* sysfs 'dev_size_mb' attribute: read-only ram store size. */
6236 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6238 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6240 static DRIVER_ATTR_RO(dev_size_mb);
/* sysfs 'per_host_store' attribute: boolean toggle via kstrtobool. */
6242 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6244 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6247 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6252 if (kstrtobool(buf, &v))
6255 sdebug_per_host_store = v;
6258 static DRIVER_ATTR_RW(per_host_store);
/* sysfs 'num_parts' attribute: read-only partition count. */
6260 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6262 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6264 static DRIVER_ATTR_RO(num_parts);
/* sysfs 'every_nth' attribute: act on every nth command; accepts hex
 * ("0x...") or decimal.  Setting it non-zero forces statistics on. */
6266 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6268 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6270 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6276 if (sscanf(buf, "%10s", work) == 1) {
6277 if (strncasecmp(work, "0x", 2) == 0) {
6278 if (kstrtoint(work + 2, 16, &nth) == 0)
6279 goto every_nth_done;
6281 if (kstrtoint(work, 10, &nth) == 0)
6282 goto every_nth_done;
/* NOTE(review): the every_nth_done label line is elided in this extract. */
6288 sdebug_every_nth = nth;
6289 if (nth && !sdebug_statistics) {
6290 pr_info("every_nth needs statistics=1, set it\n");
6291 sdebug_statistics = true;
6296 static DRIVER_ATTR_RW(every_nth);
/* sysfs 'lun_format' attribute: LUN address method (0 peripheral,
 * 1 flat).  On change at SPC-3+ level, raise a LUNS_CHANGED unit
 * attention on every device. */
6298 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6300 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6302 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6308 if (kstrtoint(buf, 0, &n))
6311 if (n > (int)SAM_LUN_AM_FLAT) {
6312 pr_warn("only LUN address methods 0 and 1 are supported\n");
6315 changed = ((int)sdebug_lun_am != n);
6317 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6318 struct sdebug_host_info *sdhp;
6319 struct sdebug_dev_info *dp;
6321 mutex_lock(&sdebug_host_list_mutex);
6322 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6323 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6324 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6327 mutex_unlock(&sdebug_host_list_mutex);
6333 static DRIVER_ATTR_RW(lun_format);
/* sysfs 'max_luns' attribute: LUNs per target (sysfs cap 256); a change
 * at SPC-3+ level raises LUNS_CHANGED unit attentions. */
6335 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6337 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6339 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6345 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6347 pr_warn("max_luns can be no more than 256\n");
6350 changed = (sdebug_max_luns != n);
6351 sdebug_max_luns = n;
6352 sdebug_max_tgts_luns();
6353 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6354 struct sdebug_host_info *sdhp;
6355 struct sdebug_dev_info *dp;
6357 mutex_lock(&sdebug_host_list_mutex);
6358 list_for_each_entry(sdhp, &sdebug_host_list,
6360 list_for_each_entry(dp, &sdhp->dev_info_list,
6362 set_bit(SDEBUG_UA_LUNS_CHANGED,
6366 mutex_unlock(&sdebug_host_list_mutex);
6372 static DRIVER_ATTR_RW(max_luns);
/* sysfs 'max_queue' attribute: queue depth [1, SDEBUG_CANQUEUE]; only
 * writable while no hosts exist and host_max_queue is 0. */
6374 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6376 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6378 /* N.B. max_queue can be changed while there are queued commands. In flight
6379 * commands beyond the new max_queue will be completed. */
6380 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6385 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6386 (n <= SDEBUG_CANQUEUE) &&
6387 (sdebug_host_max_queue == 0)) {
6388 mutex_lock(&sdebug_host_list_mutex);
6390 /* We may only change sdebug_max_queue when we have no shosts */
6391 if (list_empty(&sdebug_host_list))
6392 sdebug_max_queue = n;
6395 mutex_unlock(&sdebug_host_list_mutex);
6400 static DRIVER_ATTR_RW(max_queue);
/* sysfs 'host_max_queue' attribute (read-only, see note below). */
6402 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6404 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
/* sysfs 'no_rwlock' attribute: skip read/write data locking when set. */
6407 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6409 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6412 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6416 if (kstrtobool(buf, &v))
6419 sdebug_no_rwlock = v;
6422 static DRIVER_ATTR_RW(no_rwlock);
6425 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6426 * in range [0, sdebug_host_max_queue), we can't change it.
6428 static DRIVER_ATTR_RO(host_max_queue);
/* sysfs 'no_uld' attribute: read-only. */
6430 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6432 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6434 static DRIVER_ATTR_RO(no_uld);
/* sysfs 'scsi_level' attribute: read-only simulated SCSI level. */
6436 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6438 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6440 static DRIVER_ATTR_RO(scsi_level);
/* sysfs 'virtual_gb' attribute: virtual capacity in GiB.  Ignored for
 * ZBC; a change recomputes capacity and raises CAPACITY_CHANGED unit
 * attentions on all devices. */
6442 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6444 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6446 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6452 /* Ignore capacity change for ZBC drives for now */
6453 if (sdeb_zbc_in_use)
6456 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6457 changed = (sdebug_virtual_gb != n);
6458 sdebug_virtual_gb = n;
6459 sdebug_capacity = get_sdebug_capacity();
6461 struct sdebug_host_info *sdhp;
6462 struct sdebug_dev_info *dp;
6464 mutex_lock(&sdebug_host_list_mutex);
6465 list_for_each_entry(sdhp, &sdebug_host_list,
6467 list_for_each_entry(dp, &sdhp->dev_info_list,
6469 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6473 mutex_unlock(&sdebug_host_list_mutex);
6479 static DRIVER_ATTR_RW(virtual_gb);
/* sysfs 'add_host' attribute: positive value adds that many hosts
 * (re-using a not-in-use store when per-host stores are wanted),
 * negative removes; show reports current host count. */
6481 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6483 /* absolute number of hosts currently active is what is shown */
6484 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6487 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6492 struct sdeb_store_info *sip;
6493 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6496 if (sscanf(buf, "%d", &delta_hosts) != 1)
6498 if (delta_hosts > 0) {
/* Look for an existing store marked not-in-use to re-use. */
6502 xa_for_each_marked(per_store_ap, idx, sip,
6503 SDEB_XA_NOT_IN_USE) {
6504 sdeb_most_recent_idx = (int)idx;
6508 if (found) /* re-use case */
6509 sdebug_add_host_helper((int)idx);
6511 sdebug_do_add_host(true);
6513 sdebug_do_add_host(false);
6515 } while (--delta_hosts);
6516 } else if (delta_hosts < 0) {
6518 sdebug_do_remove_host(false);
6519 } while (++delta_hosts);
6523 static DRIVER_ATTR_RW(add_host);
/* sysfs 'vpd_use_hostno' attribute: include host number in VPD ids. */
6525 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6527 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6529 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6534 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6535 sdebug_vpd_use_hostno = n;
6540 static DRIVER_ATTR_RW(vpd_use_hostno);
/* sysfs 'statistics' attribute: enable/disable stats; disabling also
 * clears the queue statistics. */
6542 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6544 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6546 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6551 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6553 sdebug_statistics = true;
6555 clear_queue_stats();
6556 sdebug_statistics = false;
6562 static DRIVER_ATTR_RW(statistics);
/* sysfs 'sector_size' attribute: read-only logical block size. */
6564 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6566 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6568 static DRIVER_ATTR_RO(sector_size);
/* sysfs 'submit_queues' attribute: read-only hw queue count. */
6570 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6572 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6574 static DRIVER_ATTR_RO(submit_queues);
/* Read-only protection-information attributes: dix, dif, guard, ato. */
6576 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6578 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6580 static DRIVER_ATTR_RO(dix);
6582 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6584 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6586 static DRIVER_ATTR_RO(dif);
6588 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6590 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6592 static DRIVER_ATTR_RO(guard);
6594 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6596 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6598 static DRIVER_ATTR_RO(ato);
/* sysfs 'map' attribute: with LBP enabled and a real store, print the
 * provisioning map bitmap as a range list; otherwise the full range. */
6600 static ssize_t map_show(struct device_driver *ddp, char *buf)
6604 if (!scsi_debug_lbp())
6605 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6606 sdebug_store_sectors);
6608 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6609 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6612 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6613 (int)map_size, sip->map_storep);
6615 buf[count++] = '\n';
6620 static DRIVER_ATTR_RO(map);
/* sysfs 'random' attribute: boolean, randomize command durations. */
6622 static ssize_t random_show(struct device_driver *ddp, char *buf)
6624 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6627 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6632 if (kstrtobool(buf, &v))
/* NOTE(review): the assignment to sdebug_random is elided here. */
6638 static DRIVER_ATTR_RW(random);
/* sysfs 'removable' attribute: claim removable media when > 0. */
6640 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6642 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6644 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6649 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6650 sdebug_removable = (n > 0);
6655 static DRIVER_ATTR_RW(removable);
/* sysfs 'host_lock' attribute: accepted but has no effect. */
6657 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6659 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6661 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6662 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6667 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6668 sdebug_host_lock = (n > 0);
6673 static DRIVER_ATTR_RW(host_lock);
/* sysfs 'strict' attribute: stricter CDB checking when > 0. */
6675 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6677 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6679 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6684 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6685 sdebug_strict = (n > 0);
6690 static DRIVER_ATTR_RW(strict);
/* sysfs 'uuid_ctl' attribute: read-only. */
6692 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6694 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6696 static DRIVER_ATTR_RO(uuid_ctl);
/* sysfs 'cdb_len' attribute: suggested CDB length; a change reconfigures
 * all attached devices via all_config_cdb_len(). */
6698 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6700 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6702 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6707 ret = kstrtoint(buf, 0, &n);
6711 all_config_cdb_len();
6714 static DRIVER_ATTR_RW(cdb_len);
/* Accepted spellings for the 'zbc' module parameter, indexed by zoned
 * model; three equivalent sets ("none"/"no"/"0", etc.). */
6716 static const char * const zbc_model_strs_a[] = {
6717 [BLK_ZONED_NONE] = "none",
6718 [BLK_ZONED_HA] = "host-aware",
6719 [BLK_ZONED_HM] = "host-managed",
6722 static const char * const zbc_model_strs_b[] = {
6723 [BLK_ZONED_NONE] = "no",
6724 [BLK_ZONED_HA] = "aware",
6725 [BLK_ZONED_HM] = "managed",
6728 static const char * const zbc_model_strs_c[] = {
6729 [BLK_ZONED_NONE] = "0",
6730 [BLK_ZONED_HA] = "1",
6731 [BLK_ZONED_HM] = "2",
/* Map a user-supplied zbc model string to its BLK_ZONED_* index, trying
 * each spelling table in turn.  NOTE(review): return lines elided. */
6734 static int sdeb_zbc_model_str(const char *cp)
6736 int res = sysfs_match_string(zbc_model_strs_a, cp);
6739 res = sysfs_match_string(zbc_model_strs_b, cp);
6741 res = sysfs_match_string(zbc_model_strs_c, cp);
/* sysfs 'zbc' attribute: read-only model name. */
6749 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6751 return scnprintf(buf, PAGE_SIZE, "%s\n",
6752 zbc_model_strs_a[sdeb_zbc_model]);
6754 static DRIVER_ATTR_RO(zbc);
/* sysfs 'tur_ms_to_ready' attribute: read-only. */
6756 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6758 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6760 static DRIVER_ATTR_RO(tur_ms_to_ready);
6762 /* Note: The following array creates attribute files in the
6763 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6764 files (over those found in the /sys/module/scsi_debug/parameters
6765 directory) is that auxiliary actions can be triggered when an attribute
6766 is changed. For example see: add_host_store() above.
*/
/* NOTE(review): the terminating NULL sentinel and closing brace of this
 * array (around original line 6808) are elided in this extract. */
6769 static struct attribute *sdebug_drv_attrs[] = {
6770 &driver_attr_delay.attr,
6771 &driver_attr_opts.attr,
6772 &driver_attr_ptype.attr,
6773 &driver_attr_dsense.attr,
6774 &driver_attr_fake_rw.attr,
6775 &driver_attr_host_max_queue.attr,
6776 &driver_attr_no_lun_0.attr,
6777 &driver_attr_num_tgts.attr,
6778 &driver_attr_dev_size_mb.attr,
6779 &driver_attr_num_parts.attr,
6780 &driver_attr_every_nth.attr,
6781 &driver_attr_lun_format.attr,
6782 &driver_attr_max_luns.attr,
6783 &driver_attr_max_queue.attr,
6784 &driver_attr_no_rwlock.attr,
6785 &driver_attr_no_uld.attr,
6786 &driver_attr_scsi_level.attr,
6787 &driver_attr_virtual_gb.attr,
6788 &driver_attr_add_host.attr,
6789 &driver_attr_per_host_store.attr,
6790 &driver_attr_vpd_use_hostno.attr,
6791 &driver_attr_sector_size.attr,
6792 &driver_attr_statistics.attr,
6793 &driver_attr_submit_queues.attr,
6794 &driver_attr_dix.attr,
6795 &driver_attr_dif.attr,
6796 &driver_attr_guard.attr,
6797 &driver_attr_ato.attr,
6798 &driver_attr_map.attr,
6799 &driver_attr_random.attr,
6800 &driver_attr_removable.attr,
6801 &driver_attr_host_lock.attr,
6802 &driver_attr_ndelay.attr,
6803 &driver_attr_strict.attr,
6804 &driver_attr_uuid_ctl.attr,
6805 &driver_attr_cdb_len.attr,
6806 &driver_attr_tur_ms_to_ready.attr,
6807 &driver_attr_zbc.attr,
6810 ATTRIBUTE_GROUPS(sdebug_drv);
/* Parent device for the driver's pseudo hosts -- presumably registered
 * during scsi_debug_init(); confirm against the full file. */
6812 static struct device *pseudo_primary;
/*
 * Module initialization: validate module parameters, size the ramdisk
 * backing store(s), register the pseudo root device / bus / driver, and
 * create the initially requested emulated host(s).
 * Returns 0 on success, negated errno on failure.
 * NOTE(review): gaps in the embedded line numbers indicate elided
 * statements (braces, returns, goto labels) not visible in this excerpt.
 */
6814 static int __init scsi_debug_init(void)
6816 bool want_store = (sdebug_fake_rw == 0);
6818 int k, ret, hosts_to_add;
6821 ramdisk_lck_a[0] = &atomic_rw;
6822 ramdisk_lck_a[1] = &atomic_rw2;
/* ndelay (nanoseconds) >= 1 s is rejected; a positive ndelay overrides
 * the jiffies-based delay (jdelay). */
6824 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6825 pr_warn("ndelay must be less than 1 second, ignored\n");
6827 } else if (sdebug_ndelay > 0)
6828 sdebug_jdelay = JDELAY_OVERRIDDEN;
/* Only a fixed set of logical block sizes is accepted (cases elided). */
6830 switch (sdebug_sector_size) {
6837 pr_err("invalid sector_size %d\n", sdebug_sector_size);
/* Validate the T10 protection information (DIF) type parameter. */
6841 switch (sdebug_dif) {
6842 case T10_PI_TYPE0_PROTECTION:
6844 case T10_PI_TYPE1_PROTECTION:
6845 case T10_PI_TYPE2_PROTECTION:
6846 case T10_PI_TYPE3_PROTECTION:
6847 have_dif_prot = true;
6851 pr_err("dif must be 0, 1, 2 or 3\n");
/* Range checks on simple integer parameters (error exits elided). */
6855 if (sdebug_num_tgts < 0) {
6856 pr_err("num_tgts must be >= 0\n");
6860 if (sdebug_guard > 1) {
6861 pr_err("guard must be 0 or 1\n");
6865 if (sdebug_ato > 1) {
6866 pr_err("ato must be 0 or 1\n");
6870 if (sdebug_physblk_exp > 15) {
6871 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
/* LUN address method: fall back to peripheral addressing if invalid. */
6875 sdebug_lun_am = sdebug_lun_am_i;
6876 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6877 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6878 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
/* More than 256 LUNs forces flat addressing; hard cap at 16384. */
6881 if (sdebug_max_luns > 256) {
6882 if (sdebug_max_luns > 16384) {
6883 pr_warn("max_luns can be no more than 16384, use default\n");
6884 sdebug_max_luns = DEF_MAX_LUNS;
6886 sdebug_lun_am = SAM_LUN_AM_FLAT;
6889 if (sdebug_lowest_aligned > 0x3fff) {
6890 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6894 if (submit_queues < 1) {
6895 pr_err("submit_queues must be 1 or more\n");
6899 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6900 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6904 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6905 (sdebug_host_max_queue < 0)) {
6906 pr_err("host_max_queue must be in range [0 %d]\n",
/* When host_max_queue is set it overrides max_queue. */
6911 if (sdebug_host_max_queue &&
6912 (sdebug_max_queue != sdebug_host_max_queue)) {
6913 sdebug_max_queue = sdebug_host_max_queue;
6914 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6919 * check for host managed zoned block device specified with
6920 * ptype=0x14 or zbc=XXX.
6922 if (sdebug_ptype == TYPE_ZBC) {
6923 sdeb_zbc_model = BLK_ZONED_HM;
6924 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6925 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
/* Keep peripheral device type consistent with the chosen ZBC model. */
6929 switch (sdeb_zbc_model) {
6930 case BLK_ZONED_NONE:
6932 sdebug_ptype = TYPE_DISK;
6935 sdebug_ptype = TYPE_ZBC;
6938 pr_err("Invalid ZBC model\n");
6942 if (sdeb_zbc_model != BLK_ZONED_NONE) {
6943 sdeb_zbc_in_use = true;
6944 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6945 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
/* Size the ramdisk (in MB) and derive sector counts / capacity. */
6948 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6949 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6950 if (sdebug_dev_size_mb < 1)
6951 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
6952 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6953 sdebug_store_sectors = sz / sdebug_sector_size;
6954 sdebug_capacity = get_sdebug_capacity();
6956 /* play around with geometry, don't waste too much on track 0 */
6958 sdebug_sectors_per = 32;
6959 if (sdebug_dev_size_mb >= 256)
6961 else if (sdebug_dev_size_mb >= 16)
6963 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6964 (sdebug_sectors_per * sdebug_heads);
6965 if (sdebug_cylinders_per >= 1024) {
6966 /* other LLDs do this; implies >= 1GB ram disk ... */
6968 sdebug_sectors_per = 63;
6969 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6970 (sdebug_sectors_per * sdebug_heads);
/* Clamp logical block provisioning (UNMAP) parameters to sane ranges. */
6972 if (scsi_debug_lbp()) {
6973 sdebug_unmap_max_blocks =
6974 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6976 sdebug_unmap_max_desc =
6977 clamp(sdebug_unmap_max_desc, 0U, 256U);
6979 sdebug_unmap_granularity =
6980 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6982 if (sdebug_unmap_alignment &&
6983 sdebug_unmap_granularity <=
6984 sdebug_unmap_alignment) {
6985 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
/* xarray of backing stores; IRQ-safe lock since allocation can occur
 * with interrupts disabled. */
6989 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6991 idx = sdebug_add_store();
/* Register pseudo root device, bus and driver — in that order. */
6996 pseudo_primary = root_device_register("pseudo_0");
6997 if (IS_ERR(pseudo_primary)) {
6998 pr_warn("root_device_register() error\n");
6999 ret = PTR_ERR(pseudo_primary);
7002 ret = bus_register(&pseudo_lld_bus);
7004 pr_warn("bus_register error: %d\n", ret);
7007 ret = driver_register(&sdebug_driverfs_driver);
7009 pr_warn("driver_register error: %d\n", ret);
/* Consume the add_host parameter; hosts are built explicitly below. */
7013 hosts_to_add = sdebug_add_host;
7014 sdebug_add_host = 0;
7016 queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7017 if (!queued_cmd_cache) {
/* First host may reuse the store created above; subsequent hosts may
 * get per-host stores depending on sdebug_per_host_store. */
7022 for (k = 0; k < hosts_to_add; k++) {
7023 if (want_store && k == 0) {
7024 ret = sdebug_add_host_helper(idx);
7026 pr_err("add_host_helper k=%d, error=%d\n",
7031 ret = sdebug_do_add_host(want_store &&
7032 sdebug_per_host_store);
7034 pr_err("add_host k=%d error=%d\n", k, -ret);
7040 pr_info("built %d host(s)\n", sdebug_num_hosts);
/* Error unwind (labels elided): undo registrations in reverse order. */
7045 driver_unregister(&sdebug_driverfs_driver);
7047 bus_unregister(&pseudo_lld_bus);
7049 root_device_unregister(pseudo_primary);
7051 sdebug_erase_store(idx, NULL);
/*
 * Module teardown: remove every emulated host, then unregister the
 * driver / bus / root device and free all backing stores.
 */
7055 static void __exit scsi_debug_exit(void)
7057 int k = sdebug_num_hosts;
/* Loop header elided: presumably iterates k times removing hosts. */
7060 sdebug_do_remove_host(true);
7061 kmem_cache_destroy(queued_cmd_cache);
7062 driver_unregister(&sdebug_driverfs_driver);
7063 bus_unregister(&pseudo_lld_bus);
7064 root_device_unregister(pseudo_primary);
/* false: erase ALL stores, including the first one. */
7066 sdebug_erase_all_stores(false);
7067 xa_destroy(per_store_ap);
/* Registered as a device-level initcall rather than module_init(). */
7070 device_initcall(scsi_debug_init);
7071 module_exit(scsi_debug_exit);
/* Device .release callback: frees the sdebug_host_info embedding @dev
 * (kfree elided from this excerpt). */
7073 static void sdebug_release_adapter(struct device *dev)
7075 struct sdebug_host_info *sdbg_host;
7077 sdbg_host = dev_to_sdebug_host(dev);
7081 /* idx must be valid, if sip is NULL then it will be obtained using idx */
7082 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
/* Nothing to erase when no stores exist. */
7087 if (xa_empty(per_store_ap))
7089 sip = xa_load(per_store_ap, idx);
/* Free the LBP map and PI buffers; data store free elided here. */
7093 vfree(sip->map_storep);
7094 vfree(sip->dif_storep);
/* Remove the slot from the per-store xarray. */
7096 xa_erase(per_store_ap, idx);
7100 /* Assume apart_from_first==false only in shutdown case. */
7101 static void sdebug_erase_all_stores(bool apart_from_first)
7104 struct sdeb_store_info *sip = NULL;
/* Walk every store; when requested, skip (keep) the first one seen. */
7106 xa_for_each(per_store_ap, idx, sip) {
7107 if (apart_from_first)
7108 apart_from_first = false;
7110 sdebug_erase_store(idx, sip);
/* After the loop: if the first store was preserved, it becomes the
 * most recently used store index again. */
7112 if (apart_from_first)
7113 sdeb_most_recent_idx = sdeb_first_idx;
/*
 * Allocate and populate one ramdisk backing store (data, optional PI
 * buffer, optional provisioning bitmap) and insert it into per_store_ap.
 */
7117 * Returns store xarray new element index (idx) if >=0 else negated errno.
7118 * Limit the number of stores to 65536.
7120 static int sdebug_add_store(void)
7124 unsigned long iflags;
7125 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7126 struct sdeb_store_info *sip = NULL;
/* xa_alloc limit: allowed indices are [0, 1 << 16]. */
7127 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7129 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
/* Insert under the xarray IRQ-safe lock; GFP_ATOMIC because the lock
 * is held with interrupts disabled. */
7133 xa_lock_irqsave(per_store_ap, iflags);
7134 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7135 if (unlikely(res < 0)) {
7136 xa_unlock_irqrestore(per_store_ap, iflags);
7138 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
/* Remember first and most-recent store indices for host reuse. */
7141 sdeb_most_recent_idx = n_idx;
7142 if (sdeb_first_idx < 0)
7143 sdeb_first_idx = n_idx;
7144 xa_unlock_irqrestore(per_store_ap, iflags);
/* Zero-filled ramdisk holding the logical block data. */
7147 sip->storep = vzalloc(sz);
7149 pr_err("user data oom\n");
7152 if (sdebug_num_parts > 0)
7153 sdebug_build_parts(sip->storep, sz);
7155 /* DIF/DIX: what T10 calls Protection Information (PI) */
7159 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7160 sip->dif_storep = vmalloc(dif_size);
7162 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7165 if (!sip->dif_storep) {
7166 pr_err("DIX oom\n");
/* 0xff fill marks protection info as not yet written. */
7169 memset(sip->dif_storep, 0xff, dif_size);
7171 /* Logical Block Provisioning */
7172 if (scsi_debug_lbp()) {
7173 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7174 sip->map_storep = vmalloc(array_size(sizeof(long),
7175 BITS_TO_LONGS(map_size)));
7177 pr_info("%lu provisioning blocks\n", map_size);
7179 if (!sip->map_storep) {
7180 pr_err("LBP map oom\n");
/* All blocks start unmapped. */
7184 bitmap_zero(sip->map_storep, map_size);
7186 /* Map first 1KB for partition table */
7187 if (sdebug_num_parts)
7188 map_region(sip, 0, 2);
/* Per-store access lock guarding ramdisk reads/writes. */
7191 rwlock_init(&sip->macc_lck);
/* Error path (label elided): tear down the partially built store. */
7194 sdebug_erase_store((int)n_idx, sip);
7195 pr_warn("%s: failed, errno=%d\n", __func__, -res);
/*
 * Create one emulated adapter bound to backing store @per_host_idx
 * (or the first store when negative), create its target/LUN device
 * info entries, and register it on the pseudo bus.
 * Returns 0 on success, negated errno on failure.
 */
7199 static int sdebug_add_host_helper(int per_host_idx)
7201 int k, devs_per_host, idx;
7202 int error = -ENOMEM;
7203 struct sdebug_host_info *sdbg_host;
7204 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7206 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
/* Bind to the requested store and mark it in use. */
7209 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7210 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7211 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7212 sdbg_host->si_idx = idx;
7214 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
/* Pre-create a dev_info for every target/LUN pair on this host. */
7216 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7217 for (k = 0; k < devs_per_host; k++) {
7218 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
/* Publish the host on the global list under the host-list mutex. */
7223 mutex_lock(&sdebug_host_list_mutex);
7224 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7225 mutex_unlock(&sdebug_host_list_mutex);
7227 sdbg_host->dev.bus = &pseudo_lld_bus;
7228 sdbg_host->dev.parent = pseudo_primary;
7229 sdbg_host->dev.release = &sdebug_release_adapter;
7230 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
/* Registration triggers sdebug_driver_probe() via the pseudo bus. */
7232 error = device_register(&sdbg_host->dev);
7234 mutex_lock(&sdebug_host_list_mutex);
7235 list_del(&sdbg_host->host_list);
7236 mutex_unlock(&sdebug_host_list_mutex);
/* Error path (labels elided): free all created dev_info entries. */
7244 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7246 list_del(&sdbg_devinfo->dev_list);
7247 kfree(sdbg_devinfo->zstate);
7248 kfree(sdbg_devinfo);
/* If the release callback was set, device_register() partly ran:
 * drop the reference instead of freeing directly. */
7250 if (sdbg_host->dev.release)
7251 put_device(&sdbg_host->dev);
7254 pr_warn("%s: failed, errno=%d\n", __func__, -error);
/* Add one host; when @mk_new_store, first create a fresh backing store
 * (otherwise reuse the most recently created one). */
7258 static int sdebug_do_add_host(bool mk_new_store)
7260 int ph_idx = sdeb_most_recent_idx;
7263 ph_idx = sdebug_add_store();
7267 return sdebug_add_host_helper(ph_idx);
/*
 * Remove the most recently added host.  @the_end is true only during
 * module unload; otherwise the host's backing store is marked unused
 * if no remaining host shares it.
 */
7270 static void sdebug_do_remove_host(bool the_end)
7273 struct sdebug_host_info *sdbg_host = NULL;
7274 struct sdebug_host_info *sdbg_host2;
7276 mutex_lock(&sdebug_host_list_mutex);
/* Pick the last host on the global list (most recently added). */
7277 if (!list_empty(&sdebug_host_list)) {
7278 sdbg_host = list_entry(sdebug_host_list.prev,
7279 struct sdebug_host_info, host_list);
7280 idx = sdbg_host->si_idx;
/* Not shutting down: check whether any other host still uses this
 * store before marking it not-in-use. */
7282 if (!the_end && idx >= 0) {
7285 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7286 if (sdbg_host2 == sdbg_host)
7288 if (idx == sdbg_host2->si_idx) {
7294 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7295 if (idx == sdeb_most_recent_idx)
7296 --sdeb_most_recent_idx;
7300 list_del(&sdbg_host->host_list);
7301 mutex_unlock(&sdebug_host_list_mutex);
/* Unregister outside the mutex; release callback frees the host. */
7306 device_unregister(&sdbg_host->dev);
/*
 * scsi_host_template .change_queue_depth: clamp the requested depth to
 * SDEBUG_CANQUEUE and apply it via scsi_change_queue_depth().
 * Returns the device's resulting queue depth.
 */
7310 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7312 struct sdebug_dev_info *devip = sdev->hostdata;
/* Quiesce all queues while the depth changes. */
7317 mutex_lock(&sdebug_host_list_mutex);
7318 block_unblock_all_queues(true);
7320 if (qdepth > SDEBUG_CANQUEUE) {
7321 qdepth = SDEBUG_CANQUEUE;
7322 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7323 qdepth, SDEBUG_CANQUEUE);
/* Only touch the midlayer when the depth actually changes. */
7327 if (qdepth != sdev->queue_depth)
7328 scsi_change_queue_depth(sdev, qdepth);
7330 block_unblock_all_queues(false);
7331 mutex_unlock(&sdebug_host_list_mutex);
/* Optional noise controlled by the opts bitmask. */
7333 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7334 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7336 return sdev->queue_depth;
/*
 * Decide whether to fake a command timeout: true means "drop" this
 * command (never complete it) so the midlayer's error handling fires.
 * Triggered every |sdebug_every_nth| commands, per the opts bits.
 */
7339 static bool fake_timeout(struct scsi_cmnd *scp)
7341 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
/* A value < -1 collapses to -1 after firing once. */
7342 if (sdebug_every_nth < -1)
7343 sdebug_every_nth = -1;
7344 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7345 return true; /* ignore command causing timeout */
7346 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7347 scsi_medium_access_command(scp))
7348 return true; /* time out reads and writes */
7353 /* Response to TUR or media access command when device stopped */
7354 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7358 ktime_t now_ts = ktime_get_boottime();
7359 struct scsi_device *sdp = scp->device;
/* stopped==2: device is "becoming ready" (tur_ms_to_ready timer). */
7361 stopped_state = atomic_read(&devip->stopped);
7362 if (stopped_state == 2) {
7363 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7364 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7365 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7366 /* tur_ms_to_ready timer extinguished */
7367 atomic_set(&devip->stopped, 0);
/* Still not ready: NOT READY / LUN is in process of becoming ready. */
7371 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7373 sdev_printk(KERN_INFO, sdp,
7374 "%s: Not ready: in process of becoming ready\n", my_name);
/* For TUR, report the remaining time (in ms) in the sense data's
 * information field, per T10 proposal 20-061r2 (spc6). */
7375 if (scp->cmnd[0] == TEST_UNIT_READY) {
7376 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7378 if (diff_ns <= tur_nanosecs_to_ready)
7379 diff_ns = tur_nanosecs_to_ready - diff_ns;
7381 diff_ns = tur_nanosecs_to_ready;
7382 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7383 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7384 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7386 return check_condition_result;
/* Otherwise: NOT READY / initializing command required (ASCQ 0x2). */
7389 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7391 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7393 return check_condition_result;
/*
 * scsi_host_template .map_queues: split submit_queues between the
 * default and poll hardware-queue types and map them to CPUs.
 * No-op for a single hardware queue.
 */
7396 static void sdebug_map_queues(struct Scsi_Host *shost)
7400 if (shost->nr_hw_queues == 1)
7403 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7404 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
/* Default queues get whatever poll queues do not take. */
7408 if (i == HCTX_TYPE_DEFAULT)
7409 map->nr_queues = submit_queues - poll_queues;
7410 else if (i == HCTX_TYPE_POLL)
7411 map->nr_queues = poll_queues;
/* There must always be at least one default queue. */
7413 if (!map->nr_queues) {
7414 BUG_ON(i == HCTX_TYPE_DEFAULT);
7418 map->queue_offset = qoff;
7419 blk_mq_map_queues(map);
7421 qoff += map->nr_queues;
/* Iterator context for sdebug_blk_mq_poll_iter(); the completion
 * counter field is elided from this excerpt. */
7425 struct sdebug_blk_mq_poll_data {
7426 unsigned int queue_num;
7431 * We don't handle aborted commands here, but it does not seem possible to have
7432 * aborted polled commands from schedule_resp()
/*
 * blk_mq_tagset_busy_iter() callback: complete one deferred-poll command
 * on the hardware queue named in @opaque if its completion time has
 * arrived.  Return value (elided on some paths) tells the iterator
 * whether to continue.
 */
7434 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7436 struct sdebug_blk_mq_poll_data *data = opaque;
7437 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7438 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7439 struct sdebug_defer *sd_dp;
7440 u32 unique_tag = blk_mq_unique_tag(rq);
7441 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7442 struct sdebug_queued_cmd *sqcp;
7443 unsigned long flags;
7444 int queue_num = data->queue_num;
7447 /* We're only interested in one queue for this iteration */
7448 if (hwq != queue_num)
7451 /* Subsequent checks would fail if this failed, but check anyway */
7452 if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7455 time = ktime_get_boottime();
/* Per-command lock protects the queued-cmd pointer and defer state. */
7457 spin_lock_irqsave(&sdsc->lock, flags);
7458 sqcp = TO_QUEUED_CMD(cmd);
7460 spin_unlock_irqrestore(&sdsc->lock, flags);
7464 sd_dp = &sqcp->sd_dp;
/* Only commands deferred for polling are completed here. */
7465 if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
7466 spin_unlock_irqrestore(&sdsc->lock, flags);
/* Not yet due: leave for a later poll pass. */
7470 if (time < sd_dp->cmpl_ts) {
7471 spin_unlock_irqrestore(&sdsc->lock, flags);
/* Claim the command (detach its queued-cmd) before dropping the lock. */
7475 ASSIGN_QUEUED_CMD(cmd, NULL);
7476 spin_unlock_irqrestore(&sdsc->lock, flags);
7478 if (sdebug_statistics) {
7479 atomic_inc(&sdebug_completions);
7480 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7481 atomic_inc(&sdebug_miss_cpus);
7484 sdebug_free_queued_cmd(sqcp);
7486 scsi_done(cmd); /* callback to mid level */
7487 (*data->num_entries)++;
/*
 * scsi_host_template .mq_poll: walk the busy tags of @queue_num and
 * complete any due deferred-poll commands; returns the number completed.
 */
7491 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7493 int num_entries = 0;
7494 struct sdebug_blk_mq_poll_data data = {
7495 .queue_num = queue_num,
7496 .num_entries = &num_entries,
7499 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
/* Global statistics counter for poll-path completions. */
7502 if (num_entries > 0)
7503 atomic_add(num_entries, &sdeb_mq_poll_count);
/*
 * scsi_host_template .queuecommand: decode the CDB against the opcode
 * table, run the option-driven checks (strict CDB mask, unit attention,
 * not-ready, fake timeouts, error injection), then hand the matched
 * resp_* handler to schedule_resp() with the configured delay.
 * NOTE(review): gaps in the embedded numbering hide early-exit paths.
 */
7507 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7508 struct scsi_cmnd *scp)
7511 struct scsi_device *sdp = scp->device;
7512 const struct opcode_info_t *oip;
7513 const struct opcode_info_t *r_oip;
7514 struct sdebug_dev_info *devip;
7515 u8 *cmd = scp->cmnd;
7516 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7517 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
/* 14-bit flat LUN index, per the flat addressing method. */
7520 u64 lun_index = sdp->lun & 0x3FFF;
7527 scsi_set_resid(scp, 0);
7528 if (sdebug_statistics) {
7529 atomic_inc(&sdebug_cmnd_count);
7530 inject_now = inject_on_this_cmd();
/* Optionally dump the CDB bytes (hex) unless CDB noise is masked. */
7534 if (unlikely(sdebug_verbose &&
7535 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7540 sb = (int)sizeof(b);
7542 strcpy(b, "too long, over 32 bytes");
7544 for (k = 0, n = 0; k < len && n < sb; ++k)
7545 n += scnprintf(b + n, sb - n, "%02x ",
7548 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7549 blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
/* Optional host-busy injection for this command. */
7551 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7552 return SCSI_MLQUEUE_HOST_BUSY;
7553 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7554 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
/* Look up this opcode in the command table. */
7557 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
7558 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
7559 devip = (struct sdebug_dev_info *)sdp->hostdata;
7560 if (unlikely(!devip)) {
7561 devip = find_build_dev_info(sdp);
7565 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7566 atomic_set(&sdeb_inject_pending, 1);
7568 na = oip->num_attached;
/* Opcodes shared by several commands: disambiguate by service action. */
7570 if (na) { /* multiple commands with this opcode */
7572 if (FF_SA & r_oip->flags) {
7573 if (F_SA_LOW & oip->flags)
7576 sa = get_unaligned_be16(cmd + 8);
7577 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7578 if (opcode == oip->opcode && sa == oip->sa)
7581 } else { /* since no service action only check opcode */
7582 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7583 if (opcode == oip->opcode)
/* No match: report an invalid service-action field or opcode. */
7588 if (F_SA_LOW & r_oip->flags)
7589 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7590 else if (F_SA_HIGH & r_oip->flags)
7591 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7593 mk_sense_invalid_opcode(scp);
7596 } /* else (when na==0) we assume the oip is a match */
7598 if (unlikely(F_INV_OP & flags)) {
7599 mk_sense_invalid_opcode(scp);
/* Only some opcodes are valid on the REPORT LUNS well-known LUN. */
7602 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7604 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7605 my_name, opcode, " supported for wlun");
7606 mk_sense_invalid_opcode(scp);
/* Strict mode: reject CDB bytes outside the per-opcode length mask. */
7609 if (unlikely(sdebug_strict)) { /* check cdb against mask */
7613 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7614 rem = ~oip->len_mask[k] & cmd[k];
7616 for (j = 7; j >= 0; --j, rem <<= 1) {
7620 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
/* Pending unit attention conditions are reported first. */
7625 if (unlikely(!(F_SKIP_UA & flags) &&
7626 find_first_bit(devip->uas_bm,
7627 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7628 errsts = make_ua(scp, devip);
/* Stopped device: media access and TUR get a NOT READY response. */
7632 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7633 atomic_read(&devip->stopped))) {
7634 errsts = resp_not_ready(scp, devip);
/* fake_rw: skip the data transfer for read/write-class commands. */
7638 if (sdebug_fake_rw && (F_FAKE_RW & flags))
7640 if (unlikely(sdebug_every_nth)) {
7641 if (fake_timeout(scp))
7642 return 0; /* ignore command: make trouble */
7644 if (likely(oip->pfp))
7645 pfp = oip->pfp; /* calls a resp_* function */
7647 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
/* Choose the response delay: immediate, long (SSU/sync-cache), or
 * the configured default. */
7650 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
7651 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7652 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7653 sdebug_ndelay > 10000)) {
7655 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7656 * for Start Stop Unit (SSU) want at least 1 second delay and
7657 * if sdebug_jdelay>1 want a long delay of that many seconds.
7658 * For Synchronize Cache want 1/20 of SSU's delay.
7660 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7661 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7663 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7664 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7666 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
/* Error exits (labels elided): check condition, or no device. */
7669 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7671 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
/* scsi_host_template .init_cmd_priv: initialize the per-command
 * spinlock in our private command data. */
7674 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
7676 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7678 spin_lock_init(&sdsc->lock);
/* SCSI host template for the emulated adapters.  can_queue and
 * cmd_per_lun are overwritten from module parameters in
 * sdebug_driver_probe() before scsi_host_alloc(). */
7684 static struct scsi_host_template sdebug_driver_template = {
7685 .show_info = scsi_debug_show_info,
7686 .write_info = scsi_debug_write_info,
7687 .proc_name = sdebug_proc_name,
7688 .name = "SCSI DEBUG",
7689 .info = scsi_debug_info,
7690 .slave_alloc = scsi_debug_slave_alloc,
7691 .slave_configure = scsi_debug_slave_configure,
7692 .slave_destroy = scsi_debug_slave_destroy,
7693 .ioctl = scsi_debug_ioctl,
7694 .queuecommand = scsi_debug_queuecommand,
7695 .change_queue_depth = sdebug_change_qdepth,
7696 .map_queues = sdebug_map_queues,
7697 .mq_poll = sdebug_blk_mq_poll,
7698 .eh_abort_handler = scsi_debug_abort,
7699 .eh_device_reset_handler = scsi_debug_device_reset,
7700 .eh_target_reset_handler = scsi_debug_target_reset,
7701 .eh_bus_reset_handler = scsi_debug_bus_reset,
7702 .eh_host_reset_handler = scsi_debug_host_reset,
7703 .can_queue = SDEBUG_CANQUEUE,
7705 .sg_tablesize = SG_MAX_SEGMENTS,
7706 .cmd_per_lun = DEF_CMD_PER_LUN,
7708 .max_segment_size = -1U,
7709 .module = THIS_MODULE,
7710 .track_queue_depth = 1,
7711 .cmd_size = sizeof(struct sdebug_scsi_cmd),
7712 .init_cmd_priv = sdebug_init_cmd_priv,
/*
 * Pseudo-bus .probe: allocate a Scsi_Host for the adapter device,
 * configure queues, DIF/DIX protection and guard type from the module
 * parameters, then add and scan the host.
 * NOTE(review): several error-exit lines are elided from this excerpt.
 */
7715 static int sdebug_driver_probe(struct device *dev)
7718 struct sdebug_host_info *sdbg_host;
7719 struct Scsi_Host *hpnt;
7722 sdbg_host = dev_to_sdebug_host(dev);
/* Template fields are patched from module parameters before alloc. */
7724 sdebug_driver_template.can_queue = sdebug_max_queue;
7725 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7726 if (!sdebug_clustering)
7727 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7729 hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
7731 pr_err("scsi_host_alloc failed\n");
/* Never ask for more hardware queues than CPUs. */
7735 if (submit_queues > nr_cpu_ids) {
7736 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7737 my_name, submit_queues, nr_cpu_ids);
7738 submit_queues = nr_cpu_ids;
7741 * Decide whether to tell scsi subsystem that we want mq. The
7742 * following should give the same answer for each host.
7744 hpnt->nr_hw_queues = submit_queues;
7745 if (sdebug_host_max_queue)
7746 hpnt->host_tagset = 1;
7748 /* poll queues are possible for nr_hw_queues > 1 */
7749 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7750 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7751 my_name, poll_queues, hpnt->nr_hw_queues);
7756 * Poll queues don't need interrupts, but we need at least one I/O queue
7757 * left over for non-polled I/O.
7758 * If condition not met, trim poll_queues to 1 (just for simplicity).
7760 if (poll_queues >= submit_queues) {
7761 if (submit_queues < 3)
7762 pr_warn("%s: trim poll_queues to 1\n", my_name);
7764 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7765 my_name, submit_queues - 1);
7771 sdbg_host->shost = hpnt;
/* Leave room for the host's own initiator id inside max_id. */
7772 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7773 hpnt->max_id = sdebug_num_tgts + 1;
7775 hpnt->max_id = sdebug_num_tgts;
7776 /* = sdebug_max_luns; */
7777 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
/* Build the host protection mask from the dif/dix parameters. */
7781 switch (sdebug_dif) {
7783 case T10_PI_TYPE1_PROTECTION:
7784 hprot = SHOST_DIF_TYPE1_PROTECTION;
7786 hprot |= SHOST_DIX_TYPE1_PROTECTION;
7789 case T10_PI_TYPE2_PROTECTION:
7790 hprot = SHOST_DIF_TYPE2_PROTECTION;
7792 hprot |= SHOST_DIX_TYPE2_PROTECTION;
7795 case T10_PI_TYPE3_PROTECTION:
7796 hprot = SHOST_DIF_TYPE3_PROTECTION;
7798 hprot |= SHOST_DIX_TYPE3_PROTECTION;
7803 hprot |= SHOST_DIX_TYPE0_PROTECTION;
7807 scsi_host_set_prot(hpnt, hprot);
7809 if (have_dif_prot || sdebug_dix)
7810 pr_info("host protection%s%s%s%s%s%s%s\n",
7811 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7812 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7813 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7814 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7815 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7816 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7817 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
/* guard=1 selects IP checksum, otherwise T10 CRC. */
7819 if (sdebug_guard == 1)
7820 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7822 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7824 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7825 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7826 if (sdebug_every_nth) /* need stats counters for every_nth */
7827 sdebug_statistics = true;
7828 error = scsi_add_host(hpnt, &sdbg_host->dev);
7830 pr_err("scsi_add_host failed\n");
7832 scsi_host_put(hpnt);
7834 scsi_scan_host(hpnt);
/*
 * Pseudo-bus .remove: tear down the Scsi_Host and free every per-device
 * info entry belonging to this adapter.
 */
7840 static void sdebug_driver_remove(struct device *dev)
7842 struct sdebug_host_info *sdbg_host;
7843 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7845 sdbg_host = dev_to_sdebug_host(dev);
7847 scsi_remove_host(sdbg_host->shost);
/* Free every dev_info (and any ZBC zone state) on this host. */
7849 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7851 list_del(&sdbg_devinfo->dev_list);
7852 kfree(sdbg_devinfo->zstate);
7853 kfree(sdbg_devinfo);
/* Drop the final host reference taken by scsi_host_alloc(). */
7856 scsi_host_put(sdbg_host->shost);
7859 static struct bus_type pseudo_lld_bus = {
7861 .probe = sdebug_driver_probe,
7862 .remove = sdebug_driver_remove,
7863 .drv_groups = sdebug_drv_groups,