// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2020 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20200710";

#define MY_NAME "scsi_debug"
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999
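/*
 * Illustrative examples (not part of the driver): with the defaults above
 *   modprobe scsi_debug
 * creates 1 host with 1 target and 1 LUN, while
 *   modprobe scsi_debug add_host=2 num_tgts=4 max_luns=2
 * creates 2 hosts, each with 4 targets of 2 LUNs. The parameter names
 * match this driver's module_param declarations.
 */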
/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
/* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7
/* when the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium error
 * is simulated at this sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */
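/*
 * Illustrative example (not part of the driver): loading the module with
 *   modprobe scsi_debug opts=2
 * sets SDEBUG_OPT_MEDIUM_ERR, so read commands that cover LBA 0x1234
 * (sector 4660) report UNRECOVERED_READ_ERR; the read handling elsewhere
 * in this driver uses sdebug_medium_error_start/_count for the range.
 */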
/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
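/*
 * Illustrative example (not part of the driver): on a 64 bit build the
 * definitions above yield SDEBUG_CANQUEUE == 3 * 64 == 192, so a device's
 * queue depth can be lowered, but never raised past 192, with e.g.:
 *   echo 32 > /sys/class/scsi_device/0:0:0:0/device/queue_depth
 * (the 0:0:0:0 <h:c:t:l> address is hypothetical).
 */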
/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,
	ZBC_ZONE_TYPE_SWR	= 0x2,
	ZBC_ZONE_TYPE_SWP	= 0x3,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};
struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};
/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool init_poll;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};
static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;		/* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;	/* bumped when mq_poll returns > 0 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK.
 */
#define SDEG_RES_IMMED_MASK 0x40000000
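/*
 * Illustrative sketch (not one of this driver's handlers): a response
 * function honouring an IMMED bit in its cdb could finish with
 *
 *	if (cmd[1] & 0x2)	// IMMED bit position varies by command
 *		return res | SDEG_RES_IMMED_MASK;
 *	return res;
 *
 * so the deferral machinery can treat the command as needing little or
 * no simulated delay.
 */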
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);
/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
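/*
 * Illustrative walk-through of the two tables above: when a READ(10) cdb
 * (opcode 0x28) arrives, opcode_ind_arr[0x28] yields SDEB_I_READ, and
 * opcode_info_arr[SDEB_I_READ] is the READ(16) leader whose read_iarr
 * overflow array carries the READ(10)/(6)/(12) variants. For opcodes with
 * service actions (F_SA_LOW/F_SA_HIGH), e.g. 0x94 (ZONE OUT), the scan
 * continues through zone_out_iarr comparing the sa field. A sketch of
 * that lookup, using a helper name of our own choosing:
 *
 *	static const struct opcode_info_t *
 *	find_opcode_info(u8 opcode, u16 sa)	// hypothetical helper
 *	{
 *		const struct opcode_info_t *oip =
 *			&opcode_info_arr[opcode_ind_arr[opcode]];
 *
 *		if ((oip->flags & FF_SA) && oip->sa != sa) {
 *			int k;
 *
 *			for (k = 0; k < oip->num_attached; ++k)
 *				if (oip->arrp[k].sa == sa)
 *					return &oip->arrp[k];
 *			return NULL;	// service action not supported
 *		}
 *		return oip;
 *	}
 */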
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;
/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues;		/* iouring iopoll interface.*/
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
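/*
 * Illustrative example (not part of the driver): logical block
 * provisioning is only simulated when the module is loaded with, e.g.:
 *   modprobe scsi_debug lbpu=1 lbpws=1
 * (fake_rw left at its default of 0), which makes scsi_debug_lbp() above
 * return true and enables UNMAP/WRITE SAME use of the provisioning map.
 */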
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
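/*
 * Illustrative example (values follow from the code above): for
 * mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1) with fixed format sense
 * (sdebug_dsense == 0), the initiator sees ILLEGAL REQUEST (key 0x5) with
 * asc INVALID_FIELD_IN_CDB (0x24), and the three sense-key-specific bytes
 * written at sbuff[15..17] are 0xc0, 0x00, 0x02 (C/D set, byte pointer 2).
 */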
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}
static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
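/*
 * Illustrative example (not part of the driver): loading the module with
 *   modprobe scsi_debug cdb_len=16
 * drives the case 16 branch above for every attached scsi_device, so the
 * sd driver will prefer READ(16)/WRITE(16) and 10 byte MODE SENSE/SELECT.
 */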
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
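/*
 * Illustrative example (not part of the driver): the designators built
 * above can be inspected from userspace with sg3_utils, e.g.
 *   sg_vpd --page=di /dev/sdX
 * which decodes the T10 vendor, NAA-3 and SCSI name string descriptors
 * (/dev/sdX being whichever node this driver exposes).
 */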
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}
/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */

	return sizeof(vpdb0_data);
}
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;
	arr[3] = 5;	/* less than 1.8" */
	if (devip->zmodel == BLK_ZONED_HA)
		arr[4] = 1 << 4;	/* zoned field = 01b */

	return 0x3c;
}
/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;			/* threshold exponent */
	if (sdebug_lbpu)
		arr[1] = 1 << 7;
	if (sdebug_lbpws)
		arr[1] |= 1 << 6;
	if (sdebug_lbpws10)
		arr[1] |= 1 << 5;
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
	return 0x4;
}
/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	return 0x3c;
}
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	u32 alloc_len, n;
	int ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id;
		u32 len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	/* protocol specific lu */
			arr[10] = 0x82;	/* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			arr[1] = cmd[2];	/*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1724 /* See resp_iec_m_pg() for how this data is manipulated */
1725 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1728 static int resp_requests(struct scsi_cmnd *scp,
1729 struct sdebug_dev_info *devip)
1731 unsigned char *cmd = scp->cmnd;
1732 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1733 bool dsense = !!(cmd[1] & 1);
1734 u32 alloc_len = cmd[4];
1736 int stopped_state = atomic_read(&devip->stopped);
1738 memset(arr, 0, sizeof(arr));
1739 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1743 arr[2] = LOGICAL_UNIT_NOT_READY;
1744 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1748 arr[2] = NOT_READY; /* NO_SENSE in sense_key */
1749 arr[7] = 0xa; /* 18 byte sense buffer */
1750 arr[12] = LOGICAL_UNIT_NOT_READY;
1751 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1753 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1754 /* Information exceptions control mode page: TEST=1, MRIE=6 */
1757 arr[1] = 0x0; /* NO_SENSE in sense_key */
1758 arr[2] = THRESHOLD_EXCEEDED;
1759 arr[3] = 0xff; /* Failure prediction(false) */
1763 arr[2] = 0x0; /* NO_SENSE in sense_key */
1764 arr[7] = 0xa; /* 18 byte sense buffer */
1765 arr[12] = THRESHOLD_EXCEEDED;
1766 arr[13] = 0xff; /* Failure prediction(false) */
1768 } else { /* nothing to report */
1771 memset(arr, 0, len);
1774 memset(arr, 0, len);
1779 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1782 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1784 unsigned char *cmd = scp->cmnd;
1785 int power_cond, want_stop, stopped_state;
1788 power_cond = (cmd[4] & 0xf0) >> 4;
1790 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1791 return check_condition_result;
1793 want_stop = !(cmd[4] & 1);
1794 stopped_state = atomic_read(&devip->stopped);
1795 if (stopped_state == 2) {
1796 ktime_t now_ts = ktime_get_boottime();
1798 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1799 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1801 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1802 /* tur_ms_to_ready timer extinguished */
1803 atomic_set(&devip->stopped, 0);
1807 if (stopped_state == 2) {
1809 stopped_state = 1; /* dummy up success */
1810 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1811 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1812 return check_condition_result;
1816 changing = (stopped_state != want_stop);
1818 atomic_xchg(&devip->stopped, want_stop);
1819 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
1820 return SDEG_RES_IMMED_MASK;
1825 static sector_t get_sdebug_capacity(void)
1827 static const unsigned int gibibyte = 1073741824;
1829 if (sdebug_virtual_gb > 0)
1830 return (sector_t)sdebug_virtual_gb *
1831 (gibibyte / sdebug_sector_size);
1833 return sdebug_store_sectors;
1836 #define SDEBUG_READCAP_ARR_SZ 8
1837 static int resp_readcap(struct scsi_cmnd *scp,
1838 struct sdebug_dev_info *devip)
1840 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1843 /* following just in case virtual_gb changed */
1844 sdebug_capacity = get_sdebug_capacity();
1845 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1846 if (sdebug_capacity < 0xffffffff) {
1847 capac = (unsigned int)sdebug_capacity - 1;
1848 put_unaligned_be32(capac, arr + 0);
1850 put_unaligned_be32(0xffffffff, arr + 0);
1851 put_unaligned_be16(sdebug_sector_size, arr + 6);
1852 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1855 #define SDEBUG_READCAP16_ARR_SZ 32
1856 static int resp_readcap16(struct scsi_cmnd *scp,
1857 struct sdebug_dev_info *devip)
1859 unsigned char *cmd = scp->cmnd;
1860 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1863 alloc_len = get_unaligned_be32(cmd + 10);
1864 /* following just in case virtual_gb changed */
1865 sdebug_capacity = get_sdebug_capacity();
1866 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1867 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1868 put_unaligned_be32(sdebug_sector_size, arr + 8);
1869 arr[13] = sdebug_physblk_exp & 0xf;
1870 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1872 if (scsi_debug_lbp()) {
1873 arr[14] |= 0x80; /* LBPME */
1874 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1875 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1876 * in the wider field maps to 0 in this field.
1878 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1882 arr[15] = sdebug_lowest_aligned & 0xff;
1884 if (have_dif_prot) {
1885 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1886 arr[12] |= 1; /* PROT_EN */
1889 return fill_from_dev_buffer(scp, arr,
1890 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1893 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1895 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1896 struct sdebug_dev_info *devip)
1898 unsigned char *cmd = scp->cmnd;
1900 int host_no = devip->sdbg_host->shost->host_no;
1901 int port_group_a, port_group_b, port_a, port_b;
1905 alen = get_unaligned_be32(cmd + 6);
1906 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1908 return DID_REQUEUE << 16;
1910 * EVPD page 0x88 states we have two ports, one
1911 * real and a fake port with no device connected.
1912 * So we create two port groups with one port each
1913 * and set the group with port B to unavailable.
1915 port_a = 0x1; /* relative port A */
1916 port_b = 0x2; /* relative port B */
1917 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1918 (devip->channel & 0x7f);
1919 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1920 (devip->channel & 0x7f) + 0x80;
1923 * The asymmetric access state is cycled according to the host_id.
1926 if (sdebug_vpd_use_hostno == 0) {
1927 arr[n++] = host_no % 3; /* Asymm access state */
1928 arr[n++] = 0x0F; /* claim: all states are supported */
1930 arr[n++] = 0x0; /* Active/Optimized path */
1931 arr[n++] = 0x01; /* only support active/optimized paths */
1933 put_unaligned_be16(port_group_a, arr + n);
1935 arr[n++] = 0; /* Reserved */
1936 arr[n++] = 0; /* Status code */
1937 arr[n++] = 0; /* Vendor unique */
1938 arr[n++] = 0x1; /* One port per group */
1939 arr[n++] = 0; /* Reserved */
1940 arr[n++] = 0; /* Reserved */
1941 put_unaligned_be16(port_a, arr + n);
1943 arr[n++] = 3; /* Port unavailable */
1944 arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1945 put_unaligned_be16(port_group_b, arr + n);
1947 arr[n++] = 0; /* Reserved */
1948 arr[n++] = 0; /* Status code */
1949 arr[n++] = 0; /* Vendor unique */
1950 arr[n++] = 0x1; /* One port per group */
1951 arr[n++] = 0; /* Reserved */
1952 arr[n++] = 0; /* Reserved */
1953 put_unaligned_be16(port_b, arr + n);
1957 put_unaligned_be32(rlen, arr + 0);
1960 * Return the smallest value of either
1961 * - The allocated length
1962 * - The constructed command length
1963 * - The maximum array size
1965 rlen = min(alen, n);
1966 ret = fill_from_dev_buffer(scp, arr,
1967 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1972 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1973 struct sdebug_dev_info *devip)
1976 u8 reporting_opts, req_opcode, sdeb_i, supp;
1978 u32 alloc_len, a_len;
1979 int k, offset, len, errsts, count, bump, na;
1980 const struct opcode_info_t *oip;
1981 const struct opcode_info_t *r_oip;
1983 u8 *cmd = scp->cmnd;
1985 rctd = !!(cmd[2] & 0x80);
1986 reporting_opts = cmd[2] & 0x7;
1987 req_opcode = cmd[3];
1988 req_sa = get_unaligned_be16(cmd + 4);
1989 alloc_len = get_unaligned_be32(cmd + 6);
1990 if (alloc_len < 4 || alloc_len > 0xffff) {
1991 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1992 return check_condition_result;
1994 if (alloc_len > 8192)
1998 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2000 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2002 return check_condition_result;
2004 switch (reporting_opts) {
2005 case 0: /* all commands */
2006 /* count number of commands */
2007 for (count = 0, oip = opcode_info_arr;
2008 oip->num_attached != 0xff; ++oip) {
2009 if (F_INV_OP & oip->flags)
2011 count += (oip->num_attached + 1);
2013 bump = rctd ? 20 : 8;
2014 put_unaligned_be32(count * bump, arr);
2015 for (offset = 4, oip = opcode_info_arr;
2016 oip->num_attached != 0xff && offset < a_len; ++oip) {
2017 if (F_INV_OP & oip->flags)
2019 na = oip->num_attached;
2020 arr[offset] = oip->opcode;
2021 put_unaligned_be16(oip->sa, arr + offset + 2);
2023 arr[offset + 5] |= 0x2;
2024 if (FF_SA & oip->flags)
2025 arr[offset + 5] |= 0x1;
2026 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2028 put_unaligned_be16(0xa, arr + offset + 8);
2030 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2031 if (F_INV_OP & oip->flags)
2034 arr[offset] = oip->opcode;
2035 put_unaligned_be16(oip->sa, arr + offset + 2);
2037 arr[offset + 5] |= 0x2;
2038 if (FF_SA & oip->flags)
2039 arr[offset + 5] |= 0x1;
2040 put_unaligned_be16(oip->len_mask[0],
2043 put_unaligned_be16(0xa,
2050 case 1: /* one command: opcode only */
2051 case 2: /* one command: opcode plus service action */
2052 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2053 sdeb_i = opcode_ind_arr[req_opcode];
2054 oip = &opcode_info_arr[sdeb_i];
2055 if (F_INV_OP & oip->flags) {
2059 if (1 == reporting_opts) {
2060 if (FF_SA & oip->flags) {
2061 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2064 return check_condition_result;
2067 } else if (2 == reporting_opts &&
2068 0 == (FF_SA & oip->flags)) {
2069 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2070 kfree(arr); /* point at requested sa */
2071 return check_condition_result;
2073 if (0 == (FF_SA & oip->flags) &&
2074 req_opcode == oip->opcode)
2076 else if (0 == (FF_SA & oip->flags)) {
2077 na = oip->num_attached;
2078 for (k = 0, oip = oip->arrp; k < na;
2080 if (req_opcode == oip->opcode)
2083 supp = (k >= na) ? 1 : 3;
2084 } else if (req_sa != oip->sa) {
2085 na = oip->num_attached;
2086 for (k = 0, oip = oip->arrp; k < na;
2088 if (req_sa == oip->sa)
2091 supp = (k >= na) ? 1 : 3;
2095 u = oip->len_mask[0];
2096 put_unaligned_be16(u, arr + 2);
2097 arr[4] = oip->opcode;
2098 for (k = 1; k < u; ++k)
2099 arr[4 + k] = (k < 16) ?
2100 oip->len_mask[k] : 0xff;
2105 arr[1] = (rctd ? 0x80 : 0) | supp;
2107 put_unaligned_be16(0xa, arr + offset);
2112 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2114 return check_condition_result;
2116 offset = (offset < a_len) ? offset : a_len;
2117 len = (offset < alloc_len) ? offset : alloc_len;
2118 errsts = fill_from_dev_buffer(scp, arr, len);
2123 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2124 struct sdebug_dev_info *devip)
2129 u8 *cmd = scp->cmnd;
2131 memset(arr, 0, sizeof(arr));
2132 repd = !!(cmd[2] & 0x80);
2133 alloc_len = get_unaligned_be32(cmd + 6);
2134 if (alloc_len < 4) {
2135 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2136 return check_condition_result;
2138 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2139 arr[1] = 0x1; /* ITNRS */
2146 len = (len < alloc_len) ? len : alloc_len;
2147 return fill_from_dev_buffer(scp, arr, len);
2150 /* <<Following mode page info copied from ST318451LW>> */
2152 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2153 { /* Read-Write Error Recovery page for mode_sense */
2154 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2157 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2159 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2160 return sizeof(err_recov_pg);
2163 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2164 { /* Disconnect-Reconnect page for mode_sense */
2165 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2166 0, 0, 0, 0, 0, 0, 0, 0};
2168 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2170 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2171 return sizeof(disconnect_pg);
2174 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2175 { /* Format device page for mode_sense */
2176 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2177 0, 0, 0, 0, 0, 0, 0, 0,
2178 0, 0, 0, 0, 0x40, 0, 0, 0};
2180 memcpy(p, format_pg, sizeof(format_pg));
2181 put_unaligned_be16(sdebug_sectors_per, p + 10);
2182 put_unaligned_be16(sdebug_sector_size, p + 12);
2183 if (sdebug_removable)
2184 p[20] |= 0x20; /* should agree with INQUIRY */
2186 memset(p + 2, 0, sizeof(format_pg) - 2);
2187 return sizeof(format_pg);
2190 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2191 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2194 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2195 { /* Caching page for mode_sense */
2196 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2197 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2198 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2199 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2201 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2202 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2203 memcpy(p, caching_pg, sizeof(caching_pg));
2205 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2206 else if (2 == pcontrol)
2207 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2208 return sizeof(caching_pg);
2211 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2214 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2215 { /* Control mode page for mode_sense */
2216 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2218 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2222 ctrl_m_pg[2] |= 0x4;
2224 ctrl_m_pg[2] &= ~0x4;
2227 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2229 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2231 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2232 else if (2 == pcontrol)
2233 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2234 return sizeof(ctrl_m_pg);
2238 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2239 { /* Informational Exceptions control mode page for mode_sense */
2240 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2242 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2245 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2247 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2248 else if (2 == pcontrol)
2249 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2250 return sizeof(iec_m_pg);
2253 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2254 { /* SAS SSP mode page - short format for mode_sense */
2255 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2256 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2258 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2260 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2261 return sizeof(sas_sf_m_pg);
2265 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2267 { /* SAS phy control and discover mode page for mode_sense */
2268 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2269 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2270 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2271 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2272 0x2, 0, 0, 0, 0, 0, 0, 0,
2273 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2274 0, 0, 0, 0, 0, 0, 0, 0,
2275 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2276 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2277 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2278 0x3, 0, 0, 0, 0, 0, 0, 0,
2279 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2280 0, 0, 0, 0, 0, 0, 0, 0,
2284 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2285 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2286 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2287 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2288 port_a = target_dev_id + 1;
2289 port_b = port_a + 1;
2290 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2291 put_unaligned_be32(port_a, p + 20);
2292 put_unaligned_be32(port_b, p + 48 + 20);
2294 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2295 return sizeof(sas_pcd_m_pg);
2298 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2299 { /* SAS SSP shared protocol specific port mode subpage */
2300 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2301 0, 0, 0, 0, 0, 0, 0, 0,
2304 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2306 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2307 return sizeof(sas_sha_m_pg);
2310 #define SDEBUG_MAX_MSENSE_SZ 256
2312 static int resp_mode_sense(struct scsi_cmnd *scp,
2313 struct sdebug_dev_info *devip)
2315 int pcontrol, pcode, subpcode, bd_len;
2316 unsigned char dev_spec;
2317 u32 alloc_len, offset, len;
2319 int target = scp->device->id;
2321 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2322 unsigned char *cmd = scp->cmnd;
2323 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2325 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2326 pcontrol = (cmd[2] & 0xc0) >> 6;
2327 pcode = cmd[2] & 0x3f;
2329 msense_6 = (MODE_SENSE == cmd[0]);
2330 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2331 is_disk = (sdebug_ptype == TYPE_DISK);
2332 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2333 if ((is_disk || is_zbc) && !dbd)
2334 bd_len = llbaa ? 16 : 8;
2337 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2338 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2339 if (0x3 == pcontrol) { /* Saving values not supported */
2340 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2341 return check_condition_result;
2343 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2344 (devip->target * 1000) - 3;
2345 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2346 if (is_disk || is_zbc) {
2347 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2359 arr[4] = 0x1; /* set LONGLBA bit */
2360 arr[7] = bd_len; /* assume 255 or less */
2364 if ((bd_len > 0) && (!sdebug_capacity))
2365 sdebug_capacity = get_sdebug_capacity();
2368 if (sdebug_capacity > 0xfffffffe)
2369 put_unaligned_be32(0xffffffff, ap + 0);
2371 put_unaligned_be32(sdebug_capacity, ap + 0);
2372 put_unaligned_be16(sdebug_sector_size, ap + 6);
2375 } else if (16 == bd_len) {
2376 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2377 put_unaligned_be32(sdebug_sector_size, ap + 12);
2382 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2383 /* TODO: Control Extension page */
2384 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2385 return check_condition_result;
2390 case 0x1: /* Read-Write error recovery page, direct access */
2391 len = resp_err_recov_pg(ap, pcontrol, target);
2394 case 0x2: /* Disconnect-Reconnect page, all devices */
2395 len = resp_disconnect_pg(ap, pcontrol, target);
2398 case 0x3: /* Format device page, direct access */
2400 len = resp_format_pg(ap, pcontrol, target);
2405 case 0x8: /* Caching page, direct access */
2406 if (is_disk || is_zbc) {
2407 len = resp_caching_pg(ap, pcontrol, target);
2412 case 0xa: /* Control Mode page, all devices */
2413 len = resp_ctrl_m_pg(ap, pcontrol, target);
2416 case 0x19: /* if spc==1 then sas phy, control+discover */
2417 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2418 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2419 return check_condition_result;
2422 if ((0x0 == subpcode) || (0xff == subpcode))
2423 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2424 if ((0x1 == subpcode) || (0xff == subpcode))
2425 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2427 if ((0x2 == subpcode) || (0xff == subpcode))
2428 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2431 case 0x1c: /* Informational Exceptions Mode page, all devices */
2432 len = resp_iec_m_pg(ap, pcontrol, target);
2435 case 0x3f: /* Read all Mode pages */
2436 if ((0 == subpcode) || (0xff == subpcode)) {
2437 len = resp_err_recov_pg(ap, pcontrol, target);
2438 len += resp_disconnect_pg(ap + len, pcontrol, target);
2440 len += resp_format_pg(ap + len, pcontrol,
2442 len += resp_caching_pg(ap + len, pcontrol,
2444 } else if (is_zbc) {
2445 len += resp_caching_pg(ap + len, pcontrol,
2448 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2449 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2450 if (0xff == subpcode) {
2451 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2452 target, target_dev_id);
2453 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2455 len += resp_iec_m_pg(ap + len, pcontrol, target);
2458 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2459 return check_condition_result;
2467 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2468 return check_condition_result;
2471 arr[0] = offset - 1;
2473 put_unaligned_be16((offset - 2), arr + 0);
2474 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2477 #define SDEBUG_MAX_MSELECT_SZ 512
2479 static int resp_mode_select(struct scsi_cmnd *scp,
2480 struct sdebug_dev_info *devip)
2482 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2483 int param_len, res, mpage;
2484 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2485 unsigned char *cmd = scp->cmnd;
2486 int mselect6 = (MODE_SELECT == cmd[0]);
2488 memset(arr, 0, sizeof(arr));
2491 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2492 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2493 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2494 return check_condition_result;
2496 res = fetch_to_dev_buffer(scp, arr, param_len);
2498 return DID_ERROR << 16;
2499 else if (sdebug_verbose && (res < param_len))
2500 sdev_printk(KERN_INFO, scp->device,
2501 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2502 __func__, param_len, res);
2503 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2504 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2505 off = bd_len + (mselect6 ? 4 : 8);
2506 if (md_len > 2 || off >= res) {
2507 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2508 return check_condition_result;
2510 mpage = arr[off] & 0x3f;
2511 ps = !!(arr[off] & 0x80);
2513 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2514 return check_condition_result;
2516 spf = !!(arr[off] & 0x40);
2517 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2519 if ((pg_len + off) > param_len) {
2520 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2521 PARAMETER_LIST_LENGTH_ERR, 0);
2522 return check_condition_result;
2525 case 0x8: /* Caching Mode page */
2526 if (caching_pg[1] == arr[off + 1]) {
2527 memcpy(caching_pg + 2, arr + off + 2,
2528 sizeof(caching_pg) - 2);
2529 goto set_mode_changed_ua;
2532 case 0xa: /* Control Mode page */
2533 if (ctrl_m_pg[1] == arr[off + 1]) {
2534 memcpy(ctrl_m_pg + 2, arr + off + 2,
2535 sizeof(ctrl_m_pg) - 2);
2536 if (ctrl_m_pg[4] & 0x8)
2540 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2541 goto set_mode_changed_ua;
2544 case 0x1c: /* Informational Exceptions Mode page */
2545 if (iec_m_pg[1] == arr[off + 1]) {
2546 memcpy(iec_m_pg + 2, arr + off + 2,
2547 sizeof(iec_m_pg) - 2);
2548 goto set_mode_changed_ua;
2554 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2555 return check_condition_result;
2556 set_mode_changed_ua:
2557 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2561 static int resp_temp_l_pg(unsigned char *arr)
2563 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2564 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2567 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2568 return sizeof(temp_l_pg);
2571 static int resp_ie_l_pg(unsigned char *arr)
2573 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2576 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2577 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2578 arr[4] = THRESHOLD_EXCEEDED;
2581 return sizeof(ie_l_pg);
2584 #define SDEBUG_MAX_LSENSE_SZ 512
2586 static int resp_log_sense(struct scsi_cmnd *scp,
2587 struct sdebug_dev_info *devip)
2589 int ppc, sp, pcode, subpcode;
2590 u32 alloc_len, len, n;
2591 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2592 unsigned char *cmd = scp->cmnd;
2594 memset(arr, 0, sizeof(arr));
2598 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2599 return check_condition_result;
2601 pcode = cmd[2] & 0x3f;
2602 subpcode = cmd[3] & 0xff;
2603 alloc_len = get_unaligned_be16(cmd + 7);
2605 if (0 == subpcode) {
2607 case 0x0: /* Supported log pages log page */
2609 arr[n++] = 0x0; /* this page */
2610 arr[n++] = 0xd; /* Temperature */
2611 arr[n++] = 0x2f; /* Informational exceptions */
2614 case 0xd: /* Temperature log page */
2615 arr[3] = resp_temp_l_pg(arr + 4);
2617 case 0x2f: /* Informational exceptions log page */
2618 arr[3] = resp_ie_l_pg(arr + 4);
2621 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2622 return check_condition_result;
2624 } else if (0xff == subpcode) {
2628 case 0x0: /* Supported log pages and subpages log page */
2631 arr[n++] = 0x0; /* 0,0 page */
2633 arr[n++] = 0xff; /* this page */
2635 arr[n++] = 0x0; /* Temperature */
2637 arr[n++] = 0x0; /* Informational exceptions */
2640 case 0xd: /* Temperature subpages */
2643 arr[n++] = 0x0; /* Temperature */
2646 case 0x2f: /* Informational exceptions subpages */
2649 arr[n++] = 0x0; /* Informational exceptions */
2653 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2654 return check_condition_result;
2657 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2658 return check_condition_result;
2660 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2661 return fill_from_dev_buffer(scp, arr,
2662 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
2665 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2667 return devip->nr_zones != 0;
2670 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2671 unsigned long long lba)
2673 return &devip->zstate[lba >> devip->zsize_shift];
2676 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2678 return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2681 static void zbc_close_zone(struct sdebug_dev_info *devip,
2682 struct sdeb_zone_state *zsp)
2684 enum sdebug_z_cond zc;
2686 if (zbc_zone_is_conv(zsp))
2690 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2693 if (zc == ZC2_IMPLICIT_OPEN)
2694 devip->nr_imp_open--;
2696 devip->nr_exp_open--;
2698 if (zsp->z_wp == zsp->z_start) {
2699 zsp->z_cond = ZC1_EMPTY;
2701 zsp->z_cond = ZC4_CLOSED;
2706 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2708 struct sdeb_zone_state *zsp = &devip->zstate[0];
2711 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2712 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2713 zbc_close_zone(devip, zsp);
2719 static void zbc_open_zone(struct sdebug_dev_info *devip,
2720 struct sdeb_zone_state *zsp, bool explicit)
2722 enum sdebug_z_cond zc;
2724 if (zbc_zone_is_conv(zsp))
2728 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2729 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2732 /* Close an implicit open zone if necessary */
2733 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2734 zbc_close_zone(devip, zsp);
2735 else if (devip->max_open &&
2736 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2737 zbc_close_imp_open_zone(devip);
2739 if (zsp->z_cond == ZC4_CLOSED)
2742 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2743 devip->nr_exp_open++;
2745 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2746 devip->nr_imp_open++;
2750 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2751 unsigned long long lba, unsigned int num)
2753 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2754 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2756 if (zbc_zone_is_conv(zsp))
2759 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2761 if (zsp->z_wp >= zend)
2762 zsp->z_cond = ZC5_FULL;
2767 if (lba != zsp->z_wp)
2768 zsp->z_non_seq_resource = true;
2774 } else if (end > zsp->z_wp) {
2780 if (zsp->z_wp >= zend)
2781 zsp->z_cond = ZC5_FULL;
2787 zend = zsp->z_start + zsp->z_size;
2792 static int check_zbc_access_params(struct scsi_cmnd *scp,
2793 unsigned long long lba, unsigned int num, bool write)
2795 struct scsi_device *sdp = scp->device;
2796 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2797 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2798 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2801 if (devip->zmodel == BLK_ZONED_HA)
2803 /* For host-managed, reads cannot cross zone types boundaries */
2804 if (zsp_end != zsp &&
2805 zbc_zone_is_conv(zsp) &&
2806 !zbc_zone_is_conv(zsp_end)) {
2807 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2810 return check_condition_result;
2815 /* No restrictions for writes within conventional zones */
2816 if (zbc_zone_is_conv(zsp)) {
2817 if (!zbc_zone_is_conv(zsp_end)) {
2818 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2820 WRITE_BOUNDARY_ASCQ);
2821 return check_condition_result;
2826 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2827 /* Writes cannot cross sequential zone boundaries */
2828 if (zsp_end != zsp) {
2829 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2831 WRITE_BOUNDARY_ASCQ);
2832 return check_condition_result;
2834 /* Cannot write full zones */
2835 if (zsp->z_cond == ZC5_FULL) {
2836 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2837 INVALID_FIELD_IN_CDB, 0);
2838 return check_condition_result;
2840 /* Writes must be aligned to the zone WP */
2841 if (lba != zsp->z_wp) {
2842 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2844 UNALIGNED_WRITE_ASCQ);
2845 return check_condition_result;
2849 /* Handle implicit open of closed and empty zones */
2850 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2851 if (devip->max_open &&
2852 devip->nr_exp_open >= devip->max_open) {
2853 mk_sense_buffer(scp, DATA_PROTECT,
2856 return check_condition_result;
2858 zbc_open_zone(devip, zsp, false);
2864 static inline int check_device_access_params
2865 (struct scsi_cmnd *scp, unsigned long long lba,
2866 unsigned int num, bool write)
2868 struct scsi_device *sdp = scp->device;
2869 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2871 if (lba + num > sdebug_capacity) {
2872 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2873 return check_condition_result;
2875 /* transfer length excessive (tie in to block limits VPD page) */
2876 if (num > sdebug_store_sectors) {
2877 /* needs work to find which cdb byte 'num' comes from */
2878 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2879 return check_condition_result;
2881 if (write && unlikely(sdebug_wp)) {
2882 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2883 return check_condition_result;
2885 if (sdebug_dev_is_zoned(devip))
2886 return check_zbc_access_params(scp, lba, num, write);
2892 * Note: if BUG_ON() fires it usually indicates a problem with the parser
2893 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2894 * that access any of the "stores" in struct sdeb_store_info should call this
2895 * function with bug_if_fake_rw set to true.
2897 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2898 bool bug_if_fake_rw)
2900 if (sdebug_fake_rw) {
2901 BUG_ON(bug_if_fake_rw); /* See note above */
2904 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2907 /* Returns number of bytes copied or -1 if error. */
2908 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2909 u32 sg_skip, u64 lba, u32 num, bool do_write)
2912 u64 block, rest = 0;
2913 enum dma_data_direction dir;
2914 struct scsi_data_buffer *sdb = &scp->sdb;
2918 dir = DMA_TO_DEVICE;
2919 write_since_sync = true;
2921 dir = DMA_FROM_DEVICE;
2924 if (!sdb->length || !sip)
2926 if (scp->sc_data_direction != dir)
2930 block = do_div(lba, sdebug_store_sectors);
2931 if (block + num > sdebug_store_sectors)
2932 rest = block + num - sdebug_store_sectors;
2934 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2935 fsp + (block * sdebug_sector_size),
2936 (num - rest) * sdebug_sector_size, sg_skip, do_write);
2937 if (ret != (num - rest) * sdebug_sector_size)
2941 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2942 fsp, rest * sdebug_sector_size,
2943 sg_skip + ((num - rest) * sdebug_sector_size),
2950 /* Returns number of bytes copied or -1 if error. */
2951 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2953 struct scsi_data_buffer *sdb = &scp->sdb;
2957 if (scp->sc_data_direction != DMA_TO_DEVICE)
2959 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2960 num * sdebug_sector_size, 0, true);
2963 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2964 * arr into sip->storep+lba and return true. If comparison fails then
2966 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2967 const u8 *arr, bool compare_only)
2970 u64 block, rest = 0;
2971 u32 store_blks = sdebug_store_sectors;
2972 u32 lb_size = sdebug_sector_size;
2973 u8 *fsp = sip->storep;
2975 block = do_div(lba, store_blks);
2976 if (block + num > store_blks)
2977 rest = block + num - store_blks;
2979 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2983 res = memcmp(fsp, arr + ((num - rest) * lb_size),
2989 arr += num * lb_size;
2990 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2992 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2996 static __be16 dif_compute_csum(const void *buf, int len)
3001 csum = (__force __be16)ip_compute_csum(buf, len);
3003 csum = cpu_to_be16(crc_t10dif(buf, len));
3008 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3009 sector_t sector, u32 ei_lba)
3011 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3013 if (sdt->guard_tag != csum) {
3014 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3015 (unsigned long)sector,
3016 be16_to_cpu(sdt->guard_tag),
3020 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3021 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3022 pr_err("REF check failed on sector %lu\n",
3023 (unsigned long)sector);
3026 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3027 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3028 pr_err("REF check failed on sector %lu\n",
3029 (unsigned long)sector);
3035 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3036 unsigned int sectors, bool read)
3040 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3041 scp->device->hostdata, true);
3042 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3043 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3044 struct sg_mapping_iter miter;
3046 /* Bytes of protection data to copy into sgl */
3047 resid = sectors * sizeof(*dif_storep);
3049 sg_miter_start(&miter, scsi_prot_sglist(scp),
3050 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3051 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3053 while (sg_miter_next(&miter) && resid > 0) {
3054 size_t len = min_t(size_t, miter.length, resid);
3055 void *start = dif_store(sip, sector);
3058 if (dif_store_end < start + len)
3059 rest = start + len - dif_store_end;
3064 memcpy(paddr, start, len - rest);
3066 memcpy(start, paddr, len - rest);
3070 memcpy(paddr + len - rest, dif_storep, rest);
3072 memcpy(dif_storep, paddr + len - rest, rest);
3075 sector += len / sizeof(*dif_storep);
3078 sg_miter_stop(&miter);
3081 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3082 unsigned int sectors, u32 ei_lba)
3087 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3088 scp->device->hostdata, true);
3089 struct t10_pi_tuple *sdt;
3091 for (i = 0; i < sectors; i++, ei_lba++) {
3092 sector = start_sec + i;
3093 sdt = dif_store(sip, sector);
3095 if (sdt->app_tag == cpu_to_be16(0xffff))
3099 * Because scsi_debug acts as both initiator and
3100 * target we proceed to verify the PI even if
3101 * RDPROTECT=3. This is done so the "initiator" knows
3102 * which type of error to return. Otherwise we would
3103 * have to iterate over the PI twice.
3105 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3106 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3115 dif_copy_prot(scp, start_sec, sectors, true);
3121 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3128 struct sdeb_store_info *sip = devip2sip(devip, true);
3129 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3130 u8 *cmd = scp->cmnd;
3135 lba = get_unaligned_be64(cmd + 2);
3136 num = get_unaligned_be32(cmd + 10);
3141 lba = get_unaligned_be32(cmd + 2);
3142 num = get_unaligned_be16(cmd + 7);
3147 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3148 (u32)(cmd[1] & 0x1f) << 16;
3149 num = (0 == cmd[4]) ? 256 : cmd[4];
3154 lba = get_unaligned_be32(cmd + 2);
3155 num = get_unaligned_be32(cmd + 6);
3158 case XDWRITEREAD_10:
3160 lba = get_unaligned_be32(cmd + 2);
3161 num = get_unaligned_be16(cmd + 7);
3164 default: /* assume READ(32) */
3165 lba = get_unaligned_be64(cmd + 12);
3166 ei_lba = get_unaligned_be32(cmd + 20);
3167 num = get_unaligned_be32(cmd + 28);
3171 if (unlikely(have_dif_prot && check_prot)) {
3172 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3174 mk_sense_invalid_opcode(scp);
3175 return check_condition_result;
3177 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3178 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3179 (cmd[1] & 0xe0) == 0)
3180 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3183 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3184 atomic_read(&sdeb_inject_pending))) {
3186 atomic_set(&sdeb_inject_pending, 0);
3189 ret = check_device_access_params(scp, lba, num, false);
3192 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3193 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3194 ((lba + num) > sdebug_medium_error_start))) {
3195 /* claim unrecoverable read error */
3196 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3197 /* set info field and valid bit for fixed descriptor */
3198 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3199 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3200 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3201 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3202 put_unaligned_be32(ret, scp->sense_buffer + 3);
3204 scsi_set_resid(scp, scsi_bufflen(scp));
3205 return check_condition_result;
3208 read_lock(macc_lckp);
3211 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3212 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3213 case 1: /* Guard tag error */
3214 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3215 read_unlock(macc_lckp);
3216 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3217 return check_condition_result;
3218 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3219 read_unlock(macc_lckp);
3220 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3221 return illegal_condition_result;
3224 case 3: /* Reference tag error */
3225 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3226 read_unlock(macc_lckp);
3227 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3228 return check_condition_result;
3229 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3230 read_unlock(macc_lckp);
3231 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3232 return illegal_condition_result;
3238 ret = do_device_access(sip, scp, 0, lba, num, false);
3239 read_unlock(macc_lckp);
3240 if (unlikely(ret == -1))
3241 return DID_ERROR << 16;
3243 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3245 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3246 atomic_read(&sdeb_inject_pending))) {
3247 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3248 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3249 atomic_set(&sdeb_inject_pending, 0);
3250 return check_condition_result;
3251 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3252 /* Logical block guard check failed */
3253 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3254 atomic_set(&sdeb_inject_pending, 0);
3255 return illegal_condition_result;
3256 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3257 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3258 atomic_set(&sdeb_inject_pending, 0);
3259 return illegal_condition_result;
3265 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3266 unsigned int sectors, u32 ei_lba)
3269 struct t10_pi_tuple *sdt;
3271 sector_t sector = start_sec;
3274 struct sg_mapping_iter diter;
3275 struct sg_mapping_iter piter;
3277 BUG_ON(scsi_sg_count(SCpnt) == 0);
3278 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3280 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3281 scsi_prot_sg_count(SCpnt),
3282 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3283 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3284 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3286 /* For each protection page */
3287 while (sg_miter_next(&piter)) {
3289 if (WARN_ON(!sg_miter_next(&diter))) {
3294 for (ppage_offset = 0; ppage_offset < piter.length;
3295 ppage_offset += sizeof(struct t10_pi_tuple)) {
3296 /* If we're at the end of the current
3297 * data page advance to the next one
3299 if (dpage_offset >= diter.length) {
3300 if (WARN_ON(!sg_miter_next(&diter))) {
3307 sdt = piter.addr + ppage_offset;
3308 daddr = diter.addr + dpage_offset;
3310 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3311 ret = dif_verify(sdt, daddr, sector, ei_lba);
3318 dpage_offset += sdebug_sector_size;
3320 diter.consumed = dpage_offset;
3321 sg_miter_stop(&diter);
3323 sg_miter_stop(&piter);
3325 dif_copy_prot(SCpnt, start_sec, sectors, false);
3332 sg_miter_stop(&diter);
3333 sg_miter_stop(&piter);
3337 static unsigned long lba_to_map_index(sector_t lba)
3339 if (sdebug_unmap_alignment)
3340 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3341 sector_div(lba, sdebug_unmap_granularity);
3345 static sector_t map_index_to_lba(unsigned long index)
3347 sector_t lba = index * sdebug_unmap_granularity;
3349 if (sdebug_unmap_alignment)
3350 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3354 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3358 unsigned int mapped;
3359 unsigned long index;
3362 index = lba_to_map_index(lba);
3363 mapped = test_bit(index, sip->map_storep);
3366 next = find_next_zero_bit(sip->map_storep, map_size, index);
3368 next = find_next_bit(sip->map_storep, map_size, index);
3370 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3375 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3378 sector_t end = lba + len;
3381 unsigned long index = lba_to_map_index(lba);
3383 if (index < map_size)
3384 set_bit(index, sip->map_storep);
3386 lba = map_index_to_lba(index + 1);
3390 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3393 sector_t end = lba + len;
3394 u8 *fsp = sip->storep;
3397 unsigned long index = lba_to_map_index(lba);
3399 if (lba == map_index_to_lba(index) &&
3400 lba + sdebug_unmap_granularity <= end &&
3402 clear_bit(index, sip->map_storep);
3403 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3404 memset(fsp + lba * sdebug_sector_size,
3405 (sdebug_lbprz & 1) ? 0 : 0xff,
3406 sdebug_sector_size *
3407 sdebug_unmap_granularity);
3409 if (sip->dif_storep) {
3410 memset(sip->dif_storep + lba, 0xff,
3411 sizeof(*sip->dif_storep) *
3412 sdebug_unmap_granularity);
3415 lba = map_index_to_lba(index + 1);
3419 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3426 struct sdeb_store_info *sip = devip2sip(devip, true);
3427 rwlock_t *macc_lckp = &sip->macc_lck;
3428 u8 *cmd = scp->cmnd;
3433 lba = get_unaligned_be64(cmd + 2);
3434 num = get_unaligned_be32(cmd + 10);
3439 lba = get_unaligned_be32(cmd + 2);
3440 num = get_unaligned_be16(cmd + 7);
3445 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3446 (u32)(cmd[1] & 0x1f) << 16;
3447 num = (0 == cmd[4]) ? 256 : cmd[4];
3452 lba = get_unaligned_be32(cmd + 2);
3453 num = get_unaligned_be32(cmd + 6);
3456 case 0x53: /* XDWRITEREAD(10) */
3458 lba = get_unaligned_be32(cmd + 2);
3459 num = get_unaligned_be16(cmd + 7);
3462 default: /* assume WRITE(32) */
3463 lba = get_unaligned_be64(cmd + 12);
3464 ei_lba = get_unaligned_be32(cmd + 20);
3465 num = get_unaligned_be32(cmd + 28);
3469 if (unlikely(have_dif_prot && check_prot)) {
3470 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3472 mk_sense_invalid_opcode(scp);
3473 return check_condition_result;
3475 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3476 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3477 (cmd[1] & 0xe0) == 0)
3478 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3482 write_lock(macc_lckp);
3483 ret = check_device_access_params(scp, lba, num, true);
3485 write_unlock(macc_lckp);
3490 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3491 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3492 case 1: /* Guard tag error */
3493 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3494 write_unlock(macc_lckp);
3495 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3496 return illegal_condition_result;
3497 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3498 write_unlock(macc_lckp);
3499 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3500 return check_condition_result;
3503 case 3: /* Reference tag error */
3504 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3505 write_unlock(macc_lckp);
3506 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3507 return illegal_condition_result;
3508 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3509 write_unlock(macc_lckp);
3510 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3511 return check_condition_result;
3517 ret = do_device_access(sip, scp, 0, lba, num, true);
3518 if (unlikely(scsi_debug_lbp()))
3519 map_region(sip, lba, num);
3520 /* If ZBC zone then bump its write pointer */
3521 if (sdebug_dev_is_zoned(devip))
3522 zbc_inc_wp(devip, lba, num);
3523 write_unlock(macc_lckp);
3524 if (unlikely(-1 == ret))
3525 return DID_ERROR << 16;
3526 else if (unlikely(sdebug_verbose &&
3527 (ret < (num * sdebug_sector_size))))
3528 sdev_printk(KERN_INFO, scp->device,
3529 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3530 my_name, num * sdebug_sector_size, ret);
3532 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3533 atomic_read(&sdeb_inject_pending))) {
3534 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3535 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3536 atomic_set(&sdeb_inject_pending, 0);
3537 return check_condition_result;
3538 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3539 /* Logical block guard check failed */
3540 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3541 atomic_set(&sdeb_inject_pending, 0);
3542 return illegal_condition_result;
3543 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3544 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3545 atomic_set(&sdeb_inject_pending, 0);
3546 return illegal_condition_result;
3553 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3554 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3556 static int resp_write_scat(struct scsi_cmnd *scp,
3557 struct sdebug_dev_info *devip)
3559 u8 *cmd = scp->cmnd;
3562 struct sdeb_store_info *sip = devip2sip(devip, true);
3563 rwlock_t *macc_lckp = &sip->macc_lck;
3565 u16 lbdof, num_lrd, k;
3566 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3567 u32 lb_size = sdebug_sector_size;
3572 static const u32 lrd_size = 32; /* + parameter list header size */
3574 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3576 wrprotect = (cmd[10] >> 5) & 0x7;
3577 lbdof = get_unaligned_be16(cmd + 12);
3578 num_lrd = get_unaligned_be16(cmd + 16);
3579 bt_len = get_unaligned_be32(cmd + 28);
3580 } else { /* that leaves WRITE SCATTERED(16) */
3582 wrprotect = (cmd[2] >> 5) & 0x7;
3583 lbdof = get_unaligned_be16(cmd + 4);
3584 num_lrd = get_unaligned_be16(cmd + 8);
3585 bt_len = get_unaligned_be32(cmd + 10);
3586 if (unlikely(have_dif_prot)) {
3587 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3589 mk_sense_invalid_opcode(scp);
3590 return illegal_condition_result;
3592 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3593 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3595 sdev_printk(KERN_ERR, scp->device,
3596 "Unprotected WR to DIF device\n");
3599 if ((num_lrd == 0) || (bt_len == 0))
3600 return 0; /* T10 says these do-nothings are not errors */
3603 sdev_printk(KERN_INFO, scp->device,
3604 "%s: %s: LB Data Offset field bad\n",
3606 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3607 return illegal_condition_result;
3609 lbdof_blen = lbdof * lb_size;
3610 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3612 sdev_printk(KERN_INFO, scp->device,
3613 "%s: %s: LBA range descriptors don't fit\n",
3615 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3616 return illegal_condition_result;
3618 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3620 return SCSI_MLQUEUE_HOST_BUSY;
3622 sdev_printk(KERN_INFO, scp->device,
3623 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3624 my_name, __func__, lbdof_blen);
3625 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3627 ret = DID_ERROR << 16;
3631 write_lock(macc_lckp);
3632 sg_off = lbdof_blen;
3633 /* Spec says Buffer xfer Length field in number of LBs in dout */
3635 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3636 lba = get_unaligned_be64(up + 0);
3637 num = get_unaligned_be32(up + 8);
3639 sdev_printk(KERN_INFO, scp->device,
3640 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3641 my_name, __func__, k, lba, num, sg_off);
3644 ret = check_device_access_params(scp, lba, num, true);
3646 goto err_out_unlock;
3647 num_by = num * lb_size;
3648 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3650 if ((cum_lb + num) > bt_len) {
3652 sdev_printk(KERN_INFO, scp->device,
3653 "%s: %s: sum of blocks > data provided\n",
3655 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3657 ret = illegal_condition_result;
3658 goto err_out_unlock;
3662 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3663 int prot_ret = prot_verify_write(scp, lba, num,
3667 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3669 ret = illegal_condition_result;
3670 goto err_out_unlock;
3674 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3675 /* If ZBC zone then bump its write pointer */
3676 if (sdebug_dev_is_zoned(devip))
3677 zbc_inc_wp(devip, lba, num);
3678 if (unlikely(scsi_debug_lbp()))
3679 map_region(sip, lba, num);
3680 if (unlikely(-1 == ret)) {
3681 ret = DID_ERROR << 16;
3682 goto err_out_unlock;
3683 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3684 sdev_printk(KERN_INFO, scp->device,
3685 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3686 my_name, num_by, ret);
3688 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3689 atomic_read(&sdeb_inject_pending))) {
3690 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3691 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3692 atomic_set(&sdeb_inject_pending, 0);
3693 ret = check_condition_result;
3694 goto err_out_unlock;
3695 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3696 /* Logical block guard check failed */
3697 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3698 atomic_set(&sdeb_inject_pending, 0);
3699 ret = illegal_condition_result;
3700 goto err_out_unlock;
3701 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3702 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3703 atomic_set(&sdeb_inject_pending, 0);
3704 ret = illegal_condition_result;
3705 goto err_out_unlock;
3713 write_unlock(macc_lckp);
3719 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3720 u32 ei_lba, bool unmap, bool ndob)
3722 struct scsi_device *sdp = scp->device;
3723 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3724 unsigned long long i;
3726 u32 lb_size = sdebug_sector_size;
3728 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3729 scp->device->hostdata, true);
3730 rwlock_t *macc_lckp = &sip->macc_lck;
3734 write_lock(macc_lckp);
3736 ret = check_device_access_params(scp, lba, num, true);
3738 write_unlock(macc_lckp);
3742 if (unmap && scsi_debug_lbp()) {
3743 unmap_region(sip, lba, num);
3747 block = do_div(lbaa, sdebug_store_sectors);
3748 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3750 fs1p = fsp + (block * lb_size);
3752 memset(fs1p, 0, lb_size);
3755 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3758 write_unlock(&sip->macc_lck);
3759 return DID_ERROR << 16;
3760 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3761 sdev_printk(KERN_INFO, scp->device,
3762 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3763 my_name, "write same", lb_size, ret);
3765 /* Copy first sector to remaining blocks */
3766 for (i = 1 ; i < num ; i++) {
3768 block = do_div(lbaa, sdebug_store_sectors);
3769 memmove(fsp + (block * lb_size), fs1p, lb_size);
3771 if (scsi_debug_lbp())
3772 map_region(sip, lba, num);
3773 /* If ZBC zone then bump its write pointer */
3774 if (sdebug_dev_is_zoned(devip))
3775 zbc_inc_wp(devip, lba, num);
3777 write_unlock(macc_lckp);
3782 static int resp_write_same_10(struct scsi_cmnd *scp,
3783 struct sdebug_dev_info *devip)
3785 u8 *cmd = scp->cmnd;
3792 if (sdebug_lbpws10 == 0) {
3793 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3794 return check_condition_result;
3798 lba = get_unaligned_be32(cmd + 2);
3799 num = get_unaligned_be16(cmd + 7);
3800 if (num > sdebug_write_same_length) {
3801 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3802 return check_condition_result;
3804 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3807 static int resp_write_same_16(struct scsi_cmnd *scp,
3808 struct sdebug_dev_info *devip)
3810 u8 *cmd = scp->cmnd;
3817 if (cmd[1] & 0x8) { /* UNMAP */
3818 if (sdebug_lbpws == 0) {
3819 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3820 return check_condition_result;
3824 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3826 lba = get_unaligned_be64(cmd + 2);
3827 num = get_unaligned_be32(cmd + 10);
3828 if (num > sdebug_write_same_length) {
3829 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3830 return check_condition_result;
3832 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3835 /* Note the mode field is in the same position as the (lower) service action
3836 * field. For the Report supported operation codes command, SPC-4 suggests
3837 * each mode of this command should be reported separately; for future. */
3838 static int resp_write_buffer(struct scsi_cmnd *scp,
3839 struct sdebug_dev_info *devip)
3841 u8 *cmd = scp->cmnd;
3842 struct scsi_device *sdp = scp->device;
3843 struct sdebug_dev_info *dp;
3846 mode = cmd[1] & 0x1f;
3848 case 0x4: /* download microcode (MC) and activate (ACT) */
3849 /* set UAs on this device only */
3850 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3851 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3853 case 0x5: /* download MC, save and ACT */
3854 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3856 case 0x6: /* download MC with offsets and ACT */
3857 /* set UAs on most devices (LUs) in this target */
3858 list_for_each_entry(dp,
3859 &devip->sdbg_host->dev_info_list,
3861 if (dp->target == sdp->id) {
3862 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3864 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3868 case 0x7: /* download MC with offsets, save, and ACT */
3869 /* set UA on all devices (LUs) in this target */
3870 list_for_each_entry(dp,
3871 &devip->sdbg_host->dev_info_list,
3873 if (dp->target == sdp->id)
3874 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3878 /* do nothing for this command for other mode values */
3884 static int resp_comp_write(struct scsi_cmnd *scp,
3885 struct sdebug_dev_info *devip)
3887 u8 *cmd = scp->cmnd;
3889 struct sdeb_store_info *sip = devip2sip(devip, true);
3890 rwlock_t *macc_lckp = &sip->macc_lck;
3893 u32 lb_size = sdebug_sector_size;
3898 lba = get_unaligned_be64(cmd + 2);
3899 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3901 return 0; /* degenerate case, not an error */
3902 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3904 mk_sense_invalid_opcode(scp);
3905 return check_condition_result;
3907 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3908 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3909 (cmd[1] & 0xe0) == 0)
3910 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3912 ret = check_device_access_params(scp, lba, num, false);
3916 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3918 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3920 return check_condition_result;
3923 write_lock(macc_lckp);
3925 ret = do_dout_fetch(scp, dnum, arr);
3927 retval = DID_ERROR << 16;
3929 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3930 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3931 "indicated=%u, IO sent=%d bytes\n", my_name,
3932 dnum * lb_size, ret);
3933 if (!comp_write_worker(sip, lba, num, arr, false)) {
3934 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3935 retval = check_condition_result;
3938 if (scsi_debug_lbp())
3939 map_region(sip, lba, num);
3941 write_unlock(macc_lckp);
3946 struct unmap_block_desc {
3952 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3955 struct unmap_block_desc *desc;
3956 struct sdeb_store_info *sip = devip2sip(devip, true);
3957 rwlock_t *macc_lckp = &sip->macc_lck;
3958 unsigned int i, payload_len, descriptors;
3961 if (!scsi_debug_lbp())
3962 return 0; /* fib and say it's done */
3963 payload_len = get_unaligned_be16(scp->cmnd + 7);
3964 BUG_ON(scsi_bufflen(scp) != payload_len);
3966 descriptors = (payload_len - 8) / 16;
3967 if (descriptors > sdebug_unmap_max_desc) {
3968 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3969 return check_condition_result;
3972 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3974 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3976 return check_condition_result;
3979 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3981 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3982 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3984 desc = (void *)&buf[8];
3986 write_lock(macc_lckp);
3988 for (i = 0 ; i < descriptors ; i++) {
3989 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3990 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3992 ret = check_device_access_params(scp, lba, num, true);
3996 unmap_region(sip, lba, num);
4002 write_unlock(macc_lckp);
4008 #define SDEBUG_GET_LBA_STATUS_LEN 32
4010 static int resp_get_lba_status(struct scsi_cmnd *scp,
4011 struct sdebug_dev_info *devip)
4013 u8 *cmd = scp->cmnd;
4015 u32 alloc_len, mapped, num;
4017 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4019 lba = get_unaligned_be64(cmd + 2);
4020 alloc_len = get_unaligned_be32(cmd + 10);
4025 ret = check_device_access_params(scp, lba, 1, false);
4029 if (scsi_debug_lbp()) {
4030 struct sdeb_store_info *sip = devip2sip(devip, true);
4032 mapped = map_state(sip, lba, &num);
4035 /* following just in case virtual_gb changed */
4036 sdebug_capacity = get_sdebug_capacity();
4037 if (sdebug_capacity - lba <= 0xffffffff)
4038 num = sdebug_capacity - lba;
4043 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4044 put_unaligned_be32(20, arr); /* Parameter Data Length */
4045 put_unaligned_be64(lba, arr + 8); /* LBA */
4046 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4047 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4049 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4052 static int resp_sync_cache(struct scsi_cmnd *scp,
4053 struct sdebug_dev_info *devip)
4058 u8 *cmd = scp->cmnd;
4060 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4061 lba = get_unaligned_be32(cmd + 2);
4062 num_blocks = get_unaligned_be16(cmd + 7);
4063 } else { /* SYNCHRONIZE_CACHE(16) */
4064 lba = get_unaligned_be64(cmd + 2);
4065 num_blocks = get_unaligned_be32(cmd + 10);
4067 if (lba + num_blocks > sdebug_capacity) {
4068 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4069 return check_condition_result;
4071 if (!write_since_sync || (cmd[1] & 0x2))
4072 res = SDEG_RES_IMMED_MASK;
4073 else /* delay if write_since_sync and IMMED clear */
4074 write_since_sync = false;
4079 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4080 * CONDITION MET if the specified blocks will fit (or already have fitted)
4081 * in the cache, and GOOD status otherwise. This driver models a disk with
4082 * a big cache and so always yields CONDITION MET. It actually tries to
4083 * bring the addressed range of main memory into the cache of the CPU(s).
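/* For instance (illustrative values): PRE-FETCH(10), opcode 0x34, with an
 * LBA of 0x1000 in bytes 2..5 and 8 blocks in bytes 7..8 requests caching
 * of those 8 blocks; the code below prefetches the matching span of the
 * store and returns CONDITION MET.
 */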
4085 static int resp_pre_fetch(struct scsi_cmnd *scp,
4086 struct sdebug_dev_info *devip)
4090 u64 block, rest = 0;
4092 u8 *cmd = scp->cmnd;
4093 struct sdeb_store_info *sip = devip2sip(devip, true);
4094 rwlock_t *macc_lckp = &sip->macc_lck;
4095 u8 *fsp = sip->storep;
4097 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4098 lba = get_unaligned_be32(cmd + 2);
4099 nblks = get_unaligned_be16(cmd + 7);
4100 } else { /* PRE-FETCH(16) */
4101 lba = get_unaligned_be64(cmd + 2);
4102 nblks = get_unaligned_be32(cmd + 10);
4104 if (lba + nblks > sdebug_capacity) {
4105 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4106 return check_condition_result;
4110 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4111 block = do_div(lba, sdebug_store_sectors);
4112 if (block + nblks > sdebug_store_sectors)
4113 rest = block + nblks - sdebug_store_sectors;
4115 /* Try to bring the PRE-FETCH range into CPU's cache */
4116 read_lock(macc_lckp);
4117 prefetch_range(fsp + (sdebug_sector_size * block),
4118 (nblks - rest) * sdebug_sector_size);
4120 prefetch_range(fsp, rest * sdebug_sector_size);
4121 read_unlock(macc_lckp);
4124 res = SDEG_RES_IMMED_MASK;
4125 return res | condition_met_result;
4128 #define RL_BUCKET_ELEMS 8
4130 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4131 * (W-LUN), the normal Linux scanning logic does not associate it with a
4132 * device (e.g. /dev/sg7). The following magic will make that association:
4133 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4134 * where <n> is a host number. If there are multiple targets in a host then
4135 * the above will associate a W-LUN to each target. To only get a W-LUN
4136 * for target 2, then use "echo '- 2 49409' > scan" .
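/* As an aside, 49409 is simply decimal for 0xc101, which is
 * SCSI_W_LUN_REPORT_LUNS, the W-LUN that resp_report_luns() below appends
 * when the SELECT REPORT field requests well known logical units.
 */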
4138 static int resp_report_luns(struct scsi_cmnd *scp,
4139 struct sdebug_dev_info *devip)
4141 unsigned char *cmd = scp->cmnd;
4142 unsigned int alloc_len;
4143 unsigned char select_report;
4145 struct scsi_lun *lun_p;
4146 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4147 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4148 unsigned int wlun_cnt; /* report luns W-LUN count */
4149 unsigned int tlun_cnt; /* total LUN count */
4150 unsigned int rlen; /* response length (in bytes) */
4152 unsigned int off_rsp = 0;
4153 const int sz_lun = sizeof(struct scsi_lun);
4155 clear_luns_changed_on_target(devip);
4157 select_report = cmd[2];
4158 alloc_len = get_unaligned_be32(cmd + 6);
4160 if (alloc_len < 4) {
4161 pr_err("alloc len too small %d\n", alloc_len);
4162 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4163 return check_condition_result;
4166 switch (select_report) {
4167 case 0: /* all LUNs apart from W-LUNs */
4168 lun_cnt = sdebug_max_luns;
4171 case 1: /* only W-LUNs */
4175 case 2: /* all LUNs */
4176 lun_cnt = sdebug_max_luns;
4179 case 0x10: /* only administrative LUs */
4180 case 0x11: /* see SPC-5 */
4181 case 0x12: /* only subsidiary LUs owned by referenced LU */
4183 pr_debug("select report invalid %d\n", select_report);
4184 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4185 return check_condition_result;
4188 if (sdebug_no_lun_0 && (lun_cnt > 0))
4191 tlun_cnt = lun_cnt + wlun_cnt;
4192 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4193 scsi_set_resid(scp, scsi_bufflen(scp));
4194 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4195 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4197 /* loops rely on sizeof response header same as sizeof lun (both 8) */
4198 lun = sdebug_no_lun_0 ? 1 : 0;
4199 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4200 memset(arr, 0, sizeof(arr));
4201 lun_p = (struct scsi_lun *)&arr[0];
4203 put_unaligned_be32(rlen, &arr[0]);
4207 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4208 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4210 int_to_scsilun(lun++, lun_p);
4211 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4212 lun_p->scsi_lun[0] |= 0x40;
4214 if (j < RL_BUCKET_ELEMS)
4217 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4223 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4227 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4231 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4233 bool is_bytchk3 = false;
4236 u32 vnum, a_num, off;
4237 const u32 lb_size = sdebug_sector_size;
4240 u8 *cmd = scp->cmnd;
4241 struct sdeb_store_info *sip = devip2sip(devip, true);
4242 rwlock_t *macc_lckp = &sip->macc_lck;
4244 bytchk = (cmd[1] >> 1) & 0x3;
4246 return 0; /* always claim internal verify okay */
4247 } else if (bytchk == 2) {
4248 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4249 return check_condition_result;
4250 } else if (bytchk == 3) {
4251 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4255 lba = get_unaligned_be64(cmd + 2);
4256 vnum = get_unaligned_be32(cmd + 10);
4258 case VERIFY: /* is VERIFY(10) */
4259 lba = get_unaligned_be32(cmd + 2);
4260 vnum = get_unaligned_be16(cmd + 7);
4263 mk_sense_invalid_opcode(scp);
4264 return check_condition_result;
4267 return 0; /* not an error */
4268 a_num = is_bytchk3 ? 1 : vnum;
4269 /* Treat following check like one for read (i.e. no write) access */
4270 ret = check_device_access_params(scp, lba, a_num, false);
4274 arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4276 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4278 return check_condition_result;
4280 /* Not changing store, so only need read access */
4281 read_lock(macc_lckp);
4283 ret = do_dout_fetch(scp, a_num, arr);
4285 ret = DID_ERROR << 16;
4287 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4288 sdev_printk(KERN_INFO, scp->device,
4289 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4290 my_name, __func__, a_num * lb_size, ret);
4293 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4294 memcpy(arr + off, arr, lb_size);
4297 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4298 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4299 ret = check_condition_result;
4303 read_unlock(macc_lckp);
4308 #define RZONES_DESC_HD 64
4310 /* Report zones depending on start LBA and reporting options */
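/* Sketch of the reporting options handled by the switch below (values as
 * defined by ZBC; assumed here): 0x00 all zones, 0x01 empty, 0x02
 * implicitly open, 0x03 explicitly open, 0x04 closed, 0x05 full,
 * 0x06/0x07/0x10 read-only/offline/reset-WP-recommended (not emulated),
 * 0x11 non-seq resource set, 0x3f conventional (not write pointer) zones.
 */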
4311 static int resp_report_zones(struct scsi_cmnd *scp,
4312 struct sdebug_dev_info *devip)
4314 unsigned int i, max_zones, rep_max_zones, nrz = 0;
4316 u32 alloc_len, rep_opts, rep_len;
4319 u8 *arr = NULL, *desc;
4320 u8 *cmd = scp->cmnd;
4321 struct sdeb_zone_state *zsp;
4322 struct sdeb_store_info *sip = devip2sip(devip, false);
4323 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4325 if (!sdebug_dev_is_zoned(devip)) {
4326 mk_sense_invalid_opcode(scp);
4327 return check_condition_result;
4329 zs_lba = get_unaligned_be64(cmd + 2);
4330 alloc_len = get_unaligned_be32(cmd + 10);
4332 return 0; /* not an error */
4333 rep_opts = cmd[14] & 0x3f;
4334 partial = cmd[14] & 0x80;
4336 if (zs_lba >= sdebug_capacity) {
4337 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4338 return check_condition_result;
4341 max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4342 rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4345 arr = kzalloc(alloc_len, GFP_ATOMIC);
4347 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4349 return check_condition_result;
4352 read_lock(macc_lckp);
4355 for (i = 0; i < max_zones; i++) {
4356 lba = zs_lba + devip->zsize * i;
4357 if (lba > sdebug_capacity)
4359 zsp = zbc_zone(devip, lba);
4366 if (zsp->z_cond != ZC1_EMPTY)
4370 /* Implicit open zones */
4371 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4375 /* Explicit open zones */
4376 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4381 if (zsp->z_cond != ZC4_CLOSED)
4386 if (zsp->z_cond != ZC5_FULL)
4393 * Read-only, offline, reset WP recommended are
4394 * not emulated: no zones to report;
4398 /* non-seq-resource set */
4399 if (!zsp->z_non_seq_resource)
4403 /* Not write pointer (conventional) zones */
4404 if (!zbc_zone_is_conv(zsp))
4408 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4409 INVALID_FIELD_IN_CDB, 0);
4410 ret = check_condition_result;
4414 if (nrz < rep_max_zones) {
4415 /* Fill zone descriptor */
4416 desc[0] = zsp->z_type;
4417 desc[1] = zsp->z_cond << 4;
4418 if (zsp->z_non_seq_resource)
4420 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4421 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4422 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4426 if (partial && nrz >= rep_max_zones)
4433 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4434 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4436 rep_len = (unsigned long)desc - (unsigned long)arr;
4437 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4440 read_unlock(macc_lckp);
4445 /* Logic transplanted from tcmu-runner, file_zbc.c */
4446 static void zbc_open_all(struct sdebug_dev_info *devip)
4448 struct sdeb_zone_state *zsp = &devip->zstate[0];
4451 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4452 if (zsp->z_cond == ZC4_CLOSED)
4453 zbc_open_zone(devip, &devip->zstate[i], true);
4457 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4461 enum sdebug_z_cond zc;
4462 u8 *cmd = scp->cmnd;
4463 struct sdeb_zone_state *zsp;
4464 bool all = cmd[14] & 0x01;
4465 struct sdeb_store_info *sip = devip2sip(devip, false);
4466 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4468 if (!sdebug_dev_is_zoned(devip)) {
4469 mk_sense_invalid_opcode(scp);
4470 return check_condition_result;
4473 write_lock(macc_lckp);
4476 /* Check if all closed zones can be opened */
4477 if (devip->max_open &&
4478 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4479 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4481 res = check_condition_result;
4484 /* Open all closed zones */
4485 zbc_open_all(devip);
4489 /* Open the specified zone */
4490 z_id = get_unaligned_be64(cmd + 2);
4491 if (z_id >= sdebug_capacity) {
4492 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4493 res = check_condition_result;
4497 zsp = zbc_zone(devip, z_id);
4498 if (z_id != zsp->z_start) {
4499 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4500 res = check_condition_result;
4503 if (zbc_zone_is_conv(zsp)) {
4504 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4505 res = check_condition_result;
4510 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4513 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4514 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4516 res = check_condition_result;
4520 zbc_open_zone(devip, zsp, true);
4522 write_unlock(macc_lckp);
4526 static void zbc_close_all(struct sdebug_dev_info *devip)
4530 for (i = 0; i < devip->nr_zones; i++)
4531 zbc_close_zone(devip, &devip->zstate[i]);
4534 static int resp_close_zone(struct scsi_cmnd *scp,
4535 struct sdebug_dev_info *devip)
4539 u8 *cmd = scp->cmnd;
4540 struct sdeb_zone_state *zsp;
4541 bool all = cmd[14] & 0x01;
4542 struct sdeb_store_info *sip = devip2sip(devip, false);
4543 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4545 if (!sdebug_dev_is_zoned(devip)) {
4546 mk_sense_invalid_opcode(scp);
4547 return check_condition_result;
4550 write_lock(macc_lckp);
4553 zbc_close_all(devip);
4557 /* Close specified zone */
4558 z_id = get_unaligned_be64(cmd + 2);
4559 if (z_id >= sdebug_capacity) {
4560 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4561 res = check_condition_result;
4565 zsp = zbc_zone(devip, z_id);
4566 if (z_id != zsp->z_start) {
4567 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4568 res = check_condition_result;
4571 if (zbc_zone_is_conv(zsp)) {
4572 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4573 res = check_condition_result;
4577 zbc_close_zone(devip, zsp);
4579 write_unlock(macc_lckp);
4583 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4584 struct sdeb_zone_state *zsp, bool empty)
4586 enum sdebug_z_cond zc = zsp->z_cond;
4588 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4589 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4590 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4591 zbc_close_zone(devip, zsp);
4592 if (zsp->z_cond == ZC4_CLOSED)
4594 zsp->z_wp = zsp->z_start + zsp->z_size;
4595 zsp->z_cond = ZC5_FULL;
4599 static void zbc_finish_all(struct sdebug_dev_info *devip)
4603 for (i = 0; i < devip->nr_zones; i++)
4604 zbc_finish_zone(devip, &devip->zstate[i], false);
4607 static int resp_finish_zone(struct scsi_cmnd *scp,
4608 struct sdebug_dev_info *devip)
4610 struct sdeb_zone_state *zsp;
4613 u8 *cmd = scp->cmnd;
4614 bool all = cmd[14] & 0x01;
4615 struct sdeb_store_info *sip = devip2sip(devip, false);
4616 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4618 if (!sdebug_dev_is_zoned(devip)) {
4619 mk_sense_invalid_opcode(scp);
4620 return check_condition_result;
4623 write_lock(macc_lckp);
4626 zbc_finish_all(devip);
4630 /* Finish the specified zone */
4631 z_id = get_unaligned_be64(cmd + 2);
4632 if (z_id >= sdebug_capacity) {
4633 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4634 res = check_condition_result;
4638 zsp = zbc_zone(devip, z_id);
4639 if (z_id != zsp->z_start) {
4640 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4641 res = check_condition_result;
4644 if (zbc_zone_is_conv(zsp)) {
4645 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4646 res = check_condition_result;
4650 zbc_finish_zone(devip, zsp, true);
4652 write_unlock(macc_lckp);
4656 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4657 struct sdeb_zone_state *zsp)
4659 enum sdebug_z_cond zc;
4660 struct sdeb_store_info *sip = devip2sip(devip, false);
4662 if (zbc_zone_is_conv(zsp))
4666 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4667 zbc_close_zone(devip, zsp);
4669 if (zsp->z_cond == ZC4_CLOSED)
4672 if (zsp->z_wp > zsp->z_start)
4673 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4674 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4676 zsp->z_non_seq_resource = false;
4677 zsp->z_wp = zsp->z_start;
4678 zsp->z_cond = ZC1_EMPTY;
4681 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4685 for (i = 0; i < devip->nr_zones; i++)
4686 zbc_rwp_zone(devip, &devip->zstate[i]);
4689 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4691 struct sdeb_zone_state *zsp;
4694 u8 *cmd = scp->cmnd;
4695 bool all = cmd[14] & 0x01;
4696 struct sdeb_store_info *sip = devip2sip(devip, false);
4697 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4699 if (!sdebug_dev_is_zoned(devip)) {
4700 mk_sense_invalid_opcode(scp);
4701 return check_condition_result;
4704 write_lock(macc_lckp);
4711 z_id = get_unaligned_be64(cmd + 2);
4712 if (z_id >= sdebug_capacity) {
4713 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4714 res = check_condition_result;
4718 zsp = zbc_zone(devip, z_id);
4719 if (z_id != zsp->z_start) {
4720 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4721 res = check_condition_result;
4724 if (zbc_zone_is_conv(zsp)) {
4725 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4726 res = check_condition_result;
4730 zbc_rwp_zone(devip, zsp);
4732 write_unlock(macc_lckp);
4736 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4739 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4741 hwq = blk_mq_unique_tag_to_hwq(tag);
4743 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4744 if (WARN_ON_ONCE(hwq >= submit_queues))
4747 return sdebug_q_arr + hwq;
4750 static u32 get_tag(struct scsi_cmnd *cmnd)
4752 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4755 /* Queued (deferred) command completions converge here. */
4756 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4758 bool aborted = sd_dp->aborted;
4761 unsigned long iflags;
4762 struct sdebug_queue *sqp;
4763 struct sdebug_queued_cmd *sqcp;
4764 struct scsi_cmnd *scp;
4765 struct sdebug_dev_info *devip;
4767 if (unlikely(aborted))
4768 sd_dp->aborted = false;
4769 qc_idx = sd_dp->qc_idx;
4770 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4771 if (sdebug_statistics) {
4772 atomic_inc(&sdebug_completions);
4773 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4774 atomic_inc(&sdebug_miss_cpus);
4776 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4777 pr_err("wild qc_idx=%d\n", qc_idx);
4780 spin_lock_irqsave(&sqp->qc_lock, iflags);
4781 sd_dp->defer_t = SDEB_DEFER_NONE;
4782 sqcp = &sqp->qc_arr[qc_idx];
4784 if (unlikely(scp == NULL)) {
4785 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4786 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4787 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4790 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4792 atomic_dec(&devip->num_in_q);
4794 pr_err("devip=NULL\n");
4795 if (unlikely(atomic_read(&retired_max_queue) > 0))
4798 sqcp->a_cmnd = NULL;
4799 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4800 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4801 pr_err("Unexpected completion\n");
4805 if (unlikely(retiring)) { /* user has reduced max_queue */
4808 retval = atomic_read(&retired_max_queue);
4809 if (qc_idx >= retval) {
4810 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4811 pr_err("index %d too large\n", retval);
4814 k = find_last_bit(sqp->in_use_bm, retval);
4815 if ((k < sdebug_max_queue) || (k == retval))
4816 atomic_set(&retired_max_queue, 0);
4818 atomic_set(&retired_max_queue, k + 1);
4820 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4821 if (unlikely(aborted)) {
4823 pr_info("bypassing scsi_done() due to aborted cmd\n");
4826 scsi_done(scp); /* callback to mid level */
4829 /* When high resolution timer goes off this function is called. */
4830 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4832 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4834 sdebug_q_cmd_complete(sd_dp);
4835 return HRTIMER_NORESTART;
4838 /* When work queue schedules work, it calls this function. */
4839 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4841 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4843 sdebug_q_cmd_complete(sd_dp);
4846 static bool got_shared_uuid;
4847 static uuid_t shared_uuid;
4849 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4851 struct sdeb_zone_state *zsp;
4852 sector_t capacity = get_sdebug_capacity();
4853 sector_t zstart = 0;
4857 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4858 * a zone size allowing for at least 4 zones on the device. Otherwise,
4859 * use the specified zone size checking that at least 2 zones can be
4860 * created for the device.
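/* Worked example, assuming DEF_ZBC_ZONE_SIZE_MB is 128 and 512 byte
 * logical blocks: zsize starts at (128 * SZ_1M) >> 9 = 262144 blocks, and
 * the loop below then halves it while fewer than 4 such zones would fit
 * within the device capacity.
 */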
4862 if (!sdeb_zbc_zone_size_mb) {
4863 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4864 >> ilog2(sdebug_sector_size);
4865 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4867 if (devip->zsize < 2) {
4868 pr_err("Device capacity too small\n");
4872 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4873 pr_err("Zone size is not a power of 2\n");
4876 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4877 >> ilog2(sdebug_sector_size);
4878 if (devip->zsize >= capacity) {
4879 pr_err("Zone size too large for device capacity\n");
4884 devip->zsize_shift = ilog2(devip->zsize);
4885 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4887 if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4888 pr_err("Number of conventional zones too large\n");
4891 devip->nr_conv_zones = sdeb_zbc_nr_conv;
4893 if (devip->zmodel == BLK_ZONED_HM) {
4894 /* zbc_max_open_zones can be 0, meaning "not reported" */
4895 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4896 devip->max_open = (devip->nr_zones - 1) / 2;
4898 devip->max_open = sdeb_zbc_max_open;
4901 devip->zstate = kcalloc(devip->nr_zones,
4902 sizeof(struct sdeb_zone_state), GFP_KERNEL);
4906 for (i = 0; i < devip->nr_zones; i++) {
4907 zsp = &devip->zstate[i];
4909 zsp->z_start = zstart;
4911 if (i < devip->nr_conv_zones) {
4912 zsp->z_type = ZBC_ZONE_TYPE_CNV;
4913 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4914 zsp->z_wp = (sector_t)-1;
4916 if (devip->zmodel == BLK_ZONED_HM)
4917 zsp->z_type = ZBC_ZONE_TYPE_SWR;
4919 zsp->z_type = ZBC_ZONE_TYPE_SWP;
4920 zsp->z_cond = ZC1_EMPTY;
4921 zsp->z_wp = zsp->z_start;
4924 if (zsp->z_start + devip->zsize < capacity)
4925 zsp->z_size = devip->zsize;
4927 zsp->z_size = capacity - zsp->z_start;
4929 zstart += zsp->z_size;
4935 static struct sdebug_dev_info *sdebug_device_create(
4936 struct sdebug_host_info *sdbg_host, gfp_t flags)
4938 struct sdebug_dev_info *devip;
4940 devip = kzalloc(sizeof(*devip), flags);
4942 if (sdebug_uuid_ctl == 1)
4943 uuid_gen(&devip->lu_name);
4944 else if (sdebug_uuid_ctl == 2) {
4945 if (got_shared_uuid)
4946 devip->lu_name = shared_uuid;
4948 uuid_gen(&shared_uuid);
4949 got_shared_uuid = true;
4950 devip->lu_name = shared_uuid;
4953 devip->sdbg_host = sdbg_host;
4954 if (sdeb_zbc_in_use) {
4955 devip->zmodel = sdeb_zbc_model;
4956 if (sdebug_device_create_zones(devip)) {
4961 devip->zmodel = BLK_ZONED_NONE;
4963 devip->sdbg_host = sdbg_host;
4964 devip->create_ts = ktime_get_boottime();
4965 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4966 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4971 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4973 struct sdebug_host_info *sdbg_host;
4974 struct sdebug_dev_info *open_devip = NULL;
4975 struct sdebug_dev_info *devip;
4977 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4979 pr_err("Host info NULL\n");
4983 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4984 if ((devip->used) && (devip->channel == sdev->channel) &&
4985 (devip->target == sdev->id) &&
4986 (devip->lun == sdev->lun))
4989 if ((!devip->used) && (!open_devip))
4993 if (!open_devip) { /* try and make a new one */
4994 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4996 pr_err("out of memory at line %d\n", __LINE__);
5001 open_devip->channel = sdev->channel;
5002 open_devip->target = sdev->id;
5003 open_devip->lun = sdev->lun;
5004 open_devip->sdbg_host = sdbg_host;
5005 atomic_set(&open_devip->num_in_q, 0);
5006 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
5007 open_devip->used = true;
5011 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5014 pr_info("slave_alloc <%u %u %u %llu>\n",
5015 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5019 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5021 struct sdebug_dev_info *devip =
5022 (struct sdebug_dev_info *)sdp->hostdata;
5025 pr_info("slave_configure <%u %u %u %llu>\n",
5026 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5027 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5028 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5029 if (devip == NULL) {
5030 devip = find_build_dev_info(sdp);
5032 return 1; /* no resources, will be marked offline */
5034 sdp->hostdata = devip;
5036 sdp->no_uld_attach = 1;
5037 config_cdb_len(sdp);
5041 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5043 struct sdebug_dev_info *devip =
5044 (struct sdebug_dev_info *)sdp->hostdata;
5047 pr_info("slave_destroy <%u %u %u %llu>\n",
5048 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5050 /* make this slot available for re-use */
5051 devip->used = false;
5052 sdp->hostdata = NULL;
5056 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5057 enum sdeb_defer_type defer_t)
5061 if (defer_t == SDEB_DEFER_HRT)
5062 hrtimer_cancel(&sd_dp->hrt);
5063 else if (defer_t == SDEB_DEFER_WQ)
5064 cancel_work_sync(&sd_dp->ew.work);
5067 /* If @cmnd is found, delete its timer or work queue and return true; else return false. */
5069 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5071 unsigned long iflags;
5072 int j, k, qmax, r_qmax;
5073 enum sdeb_defer_type l_defer_t;
5074 struct sdebug_queue *sqp;
5075 struct sdebug_queued_cmd *sqcp;
5076 struct sdebug_dev_info *devip;
5077 struct sdebug_defer *sd_dp;
5079 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5080 spin_lock_irqsave(&sqp->qc_lock, iflags);
5081 qmax = sdebug_max_queue;
5082 r_qmax = atomic_read(&retired_max_queue);
5085 for (k = 0; k < qmax; ++k) {
5086 if (test_bit(k, sqp->in_use_bm)) {
5087 sqcp = &sqp->qc_arr[k];
5088 if (cmnd != sqcp->a_cmnd)
5091 devip = (struct sdebug_dev_info *)
5092 cmnd->device->hostdata;
5094 atomic_dec(&devip->num_in_q);
5095 sqcp->a_cmnd = NULL;
5096 sd_dp = sqcp->sd_dp;
5098 l_defer_t = sd_dp->defer_t;
5099 sd_dp->defer_t = SDEB_DEFER_NONE;
5101 l_defer_t = SDEB_DEFER_NONE;
5102 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5103 stop_qc_helper(sd_dp, l_defer_t);
5104 clear_bit(k, sqp->in_use_bm);
5108 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5113 /* Deletes (stops) timers or work queues of all queued commands */
5114 static void stop_all_queued(void)
5116 unsigned long iflags;
5118 enum sdeb_defer_type l_defer_t;
5119 struct sdebug_queue *sqp;
5120 struct sdebug_queued_cmd *sqcp;
5121 struct sdebug_dev_info *devip;
5122 struct sdebug_defer *sd_dp;
5124 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5125 spin_lock_irqsave(&sqp->qc_lock, iflags);
5126 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5127 if (test_bit(k, sqp->in_use_bm)) {
5128 sqcp = &sqp->qc_arr[k];
5129 if (sqcp->a_cmnd == NULL)
5131 devip = (struct sdebug_dev_info *)
5132 sqcp->a_cmnd->device->hostdata;
5134 atomic_dec(&devip->num_in_q);
5135 sqcp->a_cmnd = NULL;
5136 sd_dp = sqcp->sd_dp;
5138 l_defer_t = sd_dp->defer_t;
5139 sd_dp->defer_t = SDEB_DEFER_NONE;
5141 l_defer_t = SDEB_DEFER_NONE;
5142 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5143 stop_qc_helper(sd_dp, l_defer_t);
5144 clear_bit(k, sqp->in_use_bm);
5145 spin_lock_irqsave(&sqp->qc_lock, iflags);
5148 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5152 /* Free queued command memory on heap */
5153 static void free_all_queued(void)
5156 struct sdebug_queue *sqp;
5157 struct sdebug_queued_cmd *sqcp;
5159 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5160 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5161 sqcp = &sqp->qc_arr[k];
5168 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5174 ok = stop_queued_cmnd(SCpnt);
5175 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5176 sdev_printk(KERN_INFO, SCpnt->device,
5177 "%s: command%s found\n", __func__,
5183 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5186 if (SCpnt && SCpnt->device) {
5187 struct scsi_device *sdp = SCpnt->device;
5188 struct sdebug_dev_info *devip =
5189 (struct sdebug_dev_info *)sdp->hostdata;
5191 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5192 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5194 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5199 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5201 struct sdebug_host_info *sdbg_host;
5202 struct sdebug_dev_info *devip;
5203 struct scsi_device *sdp;
5204 struct Scsi_Host *hp;
5207 ++num_target_resets;
5210 sdp = SCpnt->device;
5213 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5214 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5218 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5220 list_for_each_entry(devip,
5221 &sdbg_host->dev_info_list,
5223 if (devip->target == sdp->id) {
5224 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5228 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5229 sdev_printk(KERN_INFO, sdp,
5230 "%s: %d device(s) found in target\n", __func__, k);
5235 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5237 struct sdebug_host_info *sdbg_host;
5238 struct sdebug_dev_info *devip;
5239 struct scsi_device *sdp;
5240 struct Scsi_Host *hp;
5244 if (!(SCpnt && SCpnt->device))
5246 sdp = SCpnt->device;
5247 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5248 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5251 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5253 list_for_each_entry(devip,
5254 &sdbg_host->dev_info_list,
5256 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5261 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5262 sdev_printk(KERN_INFO, sdp,
5263 "%s: %d device(s) found in host\n", __func__, k);
5268 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5270 struct sdebug_host_info *sdbg_host;
5271 struct sdebug_dev_info *devip;
5275 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5276 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5277 spin_lock(&sdebug_host_list_lock);
5278 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5279 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5281 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5285 spin_unlock(&sdebug_host_list_lock);
5287 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5288 sdev_printk(KERN_INFO, SCpnt->device,
5289 "%s: %d device(s) found\n", __func__, k);
5293 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5295 struct msdos_partition *pp;
5296 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5297 int sectors_per_part, num_sectors, k;
5298 int heads_by_sects, start_sec, end_sec;
5300 /* assume partition table already zeroed */
5301 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5303 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5304 sdebug_num_parts = SDEBUG_MAX_PARTS;
5305 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5307 num_sectors = (int)get_sdebug_capacity();
5308 sectors_per_part = (num_sectors - sdebug_sectors_per)
5310 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5311 starts[0] = sdebug_sectors_per;
5312 max_part_secs = sectors_per_part;
5313 for (k = 1; k < sdebug_num_parts; ++k) {
5314 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5316 if (starts[k] - starts[k - 1] < max_part_secs)
5317 max_part_secs = starts[k] - starts[k - 1];
5319 starts[sdebug_num_parts] = num_sectors;
5320 starts[sdebug_num_parts + 1] = 0;
5322 ramp[510] = 0x55; /* magic partition markings */
5324 pp = (struct msdos_partition *)(ramp + 0x1be);
5325 for (k = 0; starts[k + 1]; ++k, ++pp) {
5326 start_sec = starts[k];
5327 end_sec = starts[k] + max_part_secs - 1;
5330 pp->cyl = start_sec / heads_by_sects;
5331 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5332 / sdebug_sectors_per;
5333 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5335 pp->end_cyl = end_sec / heads_by_sects;
5336 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5337 / sdebug_sectors_per;
5338 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5340 pp->start_sect = cpu_to_le32(start_sec);
5341 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5342 pp->sys_ind = 0x83; /* plain Linux partition */
5346 static void block_unblock_all_queues(bool block)
5349 struct sdebug_queue *sqp;
5351 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5352 atomic_set(&sqp->blocked, (int)block);
5355 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5356 * commands will be processed normally before triggers occur.
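/* e.g. with every_nth=100 and sdebug_cmnd_count at 250, the count is
 * rounded down to 200, so 99 more commands complete normally before the
 * next trigger fires on the 100th.
 */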
5358 static void tweak_cmnd_count(void)
5362 modulo = abs(sdebug_every_nth);
5365 block_unblock_all_queues(true);
5366 count = atomic_read(&sdebug_cmnd_count);
5367 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5368 block_unblock_all_queues(false);
5371 static void clear_queue_stats(void)
5373 atomic_set(&sdebug_cmnd_count, 0);
5374 atomic_set(&sdebug_completions, 0);
5375 atomic_set(&sdebug_miss_cpus, 0);
5376 atomic_set(&sdebug_a_tsf, 0);
5379 static bool inject_on_this_cmd(void)
5381 if (sdebug_every_nth == 0)
5383 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5386 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5388 /* Complete the processing of the thread that queued a SCSI command to this
5389 * driver. It either completes the command by calling cmnd_done() or
5390 * schedules a hr timer or work queue then returns 0. Returns
5391 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
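/* In outline: delta_jiff == 0 responds in the submitting thread;
 * delta_jiff > 0 or ndelay > 0 arms a high resolution timer; a negative
 * delta_jiff defers to a work queue. REQ_POLLED requests are instead
 * marked SDEB_DEFER_POLL for the driver's mq_poll handler to complete.
 */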
5393 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5395 int (*pfp)(struct scsi_cmnd *,
5396 struct sdebug_dev_info *),
5397 int delta_jiff, int ndelay)
5400 bool inject = false;
5401 bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5402 int k, num_in_q, qdepth;
5403 unsigned long iflags;
5404 u64 ns_from_boot = 0;
5405 struct sdebug_queue *sqp;
5406 struct sdebug_queued_cmd *sqcp;
5407 struct scsi_device *sdp;
5408 struct sdebug_defer *sd_dp;
5410 if (unlikely(devip == NULL)) {
5411 if (scsi_result == 0)
5412 scsi_result = DID_NO_CONNECT << 16;
5413 goto respond_in_thread;
5417 if (delta_jiff == 0)
5418 goto respond_in_thread;
5420 sqp = get_queue(cmnd);
5421 spin_lock_irqsave(&sqp->qc_lock, iflags);
5422 if (unlikely(atomic_read(&sqp->blocked))) {
5423 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5424 return SCSI_MLQUEUE_HOST_BUSY;
5426 num_in_q = atomic_read(&devip->num_in_q);
5427 qdepth = cmnd->device->queue_depth;
5428 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5430 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5431 goto respond_in_thread;
5433 scsi_result = device_qfull_result;
5434 } else if (unlikely(sdebug_every_nth &&
5435 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5436 (scsi_result == 0))) {
5437 if ((num_in_q == (qdepth - 1)) &&
5438 (atomic_inc_return(&sdebug_a_tsf) >=
5439 abs(sdebug_every_nth))) {
5440 atomic_set(&sdebug_a_tsf, 0);
5442 scsi_result = device_qfull_result;
5446 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5447 if (unlikely(k >= sdebug_max_queue)) {
5448 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5450 goto respond_in_thread;
5451 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5452 scsi_result = device_qfull_result;
5453 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5454 sdev_printk(KERN_INFO, sdp,
5455 "%s: max_queue=%d exceeded, %s\n",
5456 __func__, sdebug_max_queue,
5457 (scsi_result ? "status: TASK SET FULL" :
5458 "report: host busy"));
5460 goto respond_in_thread;
5462 return SCSI_MLQUEUE_HOST_BUSY;
5464 set_bit(k, sqp->in_use_bm);
5465 atomic_inc(&devip->num_in_q);
5466 sqcp = &sqp->qc_arr[k];
5467 sqcp->a_cmnd = cmnd;
5468 cmnd->host_scribble = (unsigned char *)sqcp;
5469 sd_dp = sqcp->sd_dp;
5470 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5473 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5475 atomic_dec(&devip->num_in_q);
5476 clear_bit(k, sqp->in_use_bm);
5477 return SCSI_MLQUEUE_HOST_BUSY;
5484 /* Set the hostwide tag */
5485 if (sdebug_host_max_queue)
5486 sd_dp->hc_idx = get_tag(cmnd);
5489 ns_from_boot = ktime_get_boottime_ns();
5491 /* one of the resp_*() response functions is called here */
5492 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5493 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5494 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5495 delta_jiff = ndelay = 0;
5497 if (cmnd->result == 0 && scsi_result != 0)
5498 cmnd->result = scsi_result;
5499 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5500 if (atomic_read(&sdeb_inject_pending)) {
5501 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5502 atomic_set(&sdeb_inject_pending, 0);
5503 cmnd->result = check_condition_result;
5507 if (unlikely(sdebug_verbose && cmnd->result))
5508 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5509 __func__, cmnd->result);
5511 if (delta_jiff > 0 || ndelay > 0) {
5514 if (delta_jiff > 0) {
5515 u64 ns = jiffies_to_nsecs(delta_jiff);
5517 if (sdebug_random && ns < U32_MAX) {
5518 ns = prandom_u32_max((u32)ns);
5519 } else if (sdebug_random) {
5520 ns >>= 12; /* scale to 4 usec precision */
5521 if (ns < U32_MAX) /* over 4 hours max */
5522 ns = prandom_u32_max((u32)ns);
5525 kt = ns_to_ktime(ns);
5526 } else { /* ndelay has a 4.2 second max */
5527 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5529 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5530 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5532 if (kt <= d) { /* elapsed duration >= kt */
5533 spin_lock_irqsave(&sqp->qc_lock, iflags);
5534 sqcp->a_cmnd = NULL;
5535 atomic_dec(&devip->num_in_q);
5536 clear_bit(k, sqp->in_use_bm);
5537 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5540 /* call scsi_done() from this thread */
5544 /* otherwise reduce kt by elapsed time */
5549 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5550 spin_lock_irqsave(&sqp->qc_lock, iflags);
5551 if (!sd_dp->init_poll) {
5552 sd_dp->init_poll = true;
5553 sqcp->sd_dp = sd_dp;
5554 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5557 sd_dp->defer_t = SDEB_DEFER_POLL;
5558 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5560 if (!sd_dp->init_hrt) {
5561 sd_dp->init_hrt = true;
5562 sqcp->sd_dp = sd_dp;
5563 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5564 HRTIMER_MODE_REL_PINNED);
5565 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5566 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5569 sd_dp->defer_t = SDEB_DEFER_HRT;
5570 /* schedule the invocation of scsi_done() for a later time */
5571 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5573 if (sdebug_statistics)
5574 sd_dp->issuing_cpu = raw_smp_processor_id();
5575 } else { /* jdelay < 0, use work queue */
5576 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5577 atomic_read(&sdeb_inject_pending)))
5578 sd_dp->aborted = true;
5580 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5581 spin_lock_irqsave(&sqp->qc_lock, iflags);
5582 if (!sd_dp->init_poll) {
5583 sd_dp->init_poll = true;
5584 sqcp->sd_dp = sd_dp;
5585 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5588 sd_dp->defer_t = SDEB_DEFER_POLL;
5589 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5591 if (!sd_dp->init_wq) {
5592 sd_dp->init_wq = true;
5593 sqcp->sd_dp = sd_dp;
5594 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5596 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5598 sd_dp->defer_t = SDEB_DEFER_WQ;
5599 schedule_work(&sd_dp->ew.work);
5601 if (sdebug_statistics)
5602 sd_dp->issuing_cpu = raw_smp_processor_id();
5603 if (unlikely(sd_dp->aborted)) {
5604 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5605 scsi_cmd_to_rq(cmnd)->tag);
5606 blk_abort_request(scsi_cmd_to_rq(cmnd));
5607 atomic_set(&sdeb_inject_pending, 0);
5608 sd_dp->aborted = false;
5611 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5612 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5613 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5616 respond_in_thread: /* call back to mid-layer using invocation thread */
5617 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5618 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5619 if (cmnd->result == 0 && scsi_result != 0)
5620 cmnd->result = scsi_result;
5625 /* Note: The following macros create attribute files in the
5626 /sys/module/scsi_debug/parameters directory. Unfortunately this
5627 driver is unaware of a change and cannot trigger auxiliary actions
5628 as it can when the corresponding attribute in the
5629 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
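/* Typical usage (illustrative values):
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=2
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/opts    # noise on
 * Writing to /sys/module/scsi_debug/parameters/opts reaches the same
 * variable but, as noted above, without the auxiliary actions.
 */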
5631 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5632 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5633 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5634 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5635 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5636 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5637 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5638 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5639 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5640 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5641 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5642 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5643 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5644 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5645 module_param_string(inq_product, sdebug_inq_product_id,
5646 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5647 module_param_string(inq_rev, sdebug_inq_product_rev,
5648 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5649 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5650 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5651 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5652 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5653 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5654 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5655 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5656 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5657 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5658 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5659 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5661 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5663 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5664 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5665 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5666 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5667 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5668 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5669 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5670 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5671 module_param_named(per_host_store, sdebug_per_host_store, bool,
5673 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5674 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5675 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5676 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5677 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5678 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5679 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5680 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5681 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5682 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5683 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5684 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5685 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5686 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5687 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5688 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5689 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5690 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5692 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5693 module_param_named(write_same_length, sdebug_write_same_length, int,
5695 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5696 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5697 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5698 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5700 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5701 MODULE_DESCRIPTION("SCSI debug adapter driver");
5702 MODULE_LICENSE("GPL");
5703 MODULE_VERSION(SDEBUG_VERSION);
5705 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5706 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5707 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5708 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5709 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5710 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5711 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5712 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5713 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5714 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5715 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5716 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5717 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5718 MODULE_PARM_DESC(host_max_queue,
5719 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5720 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5721 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5722 SDEBUG_VERSION "\")");
5723 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5724 MODULE_PARM_DESC(lbprz,
5725 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5726 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5727 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5728 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5729 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5730 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1->flat address method");
5731 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5732 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5733 MODULE_PARM_DESC(medium_error_count, "count of sectors that return a follow-on MEDIUM error");
5734 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5735 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5736 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5737 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5738 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5739 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5740 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5741 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5742 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5743 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5744 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5745 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5746 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5747 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5748 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5749 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5750 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5751 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5752 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5753 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5754 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5755 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5756 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5757 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5758 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5759 MODULE_PARM_DESC(uuid_ctl,
5760 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5761 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5762 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5763 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5764 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5765 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5766 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5767 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5768 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5770 #define SDEBUG_INFO_LEN 256
5771 static char sdebug_info[SDEBUG_INFO_LEN];
5773 static const char *scsi_debug_info(struct Scsi_Host *shp)
5777 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5778 my_name, SDEBUG_VERSION, sdebug_version_date);
5779 if (k >= (SDEBUG_INFO_LEN - 1))
5781 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5782 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5783 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5784 "statistics", (int)sdebug_statistics);
5788 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5789 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5794 int minLen = length > 15 ? 15 : length;
5796 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5798 memcpy(arr, buffer, minLen);
5800 if (1 != sscanf(arr, "%d", &opts))
5803 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5804 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5805 if (sdebug_every_nth != 0)
5810 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5811 * same for each scsi_debug host (if more than one). Some of the counters
5812 * shown are not atomic, so they may be inaccurate on a busy system. */
5813 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5816 struct sdebug_queue *sqp;
5817 struct sdebug_host_info *sdhp;
5819 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5820 SDEBUG_VERSION, sdebug_version_date);
5821 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5822 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5823 sdebug_opts, sdebug_every_nth);
5824 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5825 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5826 sdebug_sector_size, "bytes");
5827 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5828 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5830 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5831 num_dev_resets, num_target_resets, num_bus_resets,
5833 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5834 dix_reads, dix_writes, dif_errors);
5835 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5837 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5838 atomic_read(&sdebug_cmnd_count),
5839 atomic_read(&sdebug_completions),
5840 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5841 atomic_read(&sdebug_a_tsf),
5842 atomic_read(&sdeb_mq_poll_count));
5844 seq_printf(m, "submit_queues=%d\n", submit_queues);
5845 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5846 seq_printf(m, " queue %d:\n", j);
5847 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5848 if (f != sdebug_max_queue) {
5849 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5850 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5851 "first,last bits", f, l);
5855 seq_printf(m, "this host_no=%d\n", host->host_no);
5856 if (!xa_empty(per_store_ap)) {
5859 unsigned long l_idx;
5860 struct sdeb_store_info *sip;
5862 seq_puts(m, "\nhost list:\n");
5864 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5866 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5867 sdhp->shost->host_no, idx);
5870 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5871 sdeb_most_recent_idx);
5873 xa_for_each(per_store_ap, l_idx, sip) {
5874 niu = xa_get_mark(per_store_ap, l_idx,
5875 SDEB_XA_NOT_IN_USE);
5877 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5878 (niu ? " not_in_use" : ""));
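/*
 * Illustrative first lines of the output assembled above; the actual
 * values depend on the module parameters in effect:
 *
 *   scsi_debug adapter driver, version <ver> [<date>]
 *   num_tgts=1, shared (ram) size=8 MB, opts=0x0, every_nth=0
 *   delay=1, ndelay=0, max_luns=1, sector_size=512 bytes
 */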
5885 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5887 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5889 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5890 * of delay is jiffies.
5892 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5897 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5899 if (sdebug_jdelay != jdelay) {
5901 struct sdebug_queue *sqp;
5903 block_unblock_all_queues(true);
5904 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5906 k = find_first_bit(sqp->in_use_bm,
5908 if (k != sdebug_max_queue) {
5909 res = -EBUSY; /* queued commands */
5914 sdebug_jdelay = jdelay;
5917 block_unblock_all_queues(false);
5923 static DRIVER_ATTR_RW(delay);
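/*
 * Example (sysfs file created by DRIVER_ATTR_RW(delay) above):
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/delay
 *
 * defers each response by 2 jiffies. The store fails with -EBUSY if any
 * submit queue still has commands in flight while the value is changing.
 */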
5925 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5927 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5929 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5930 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5931 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5936 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5937 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5939 if (sdebug_ndelay != ndelay) {
5941 struct sdebug_queue *sqp;
5943 block_unblock_all_queues(true);
5944 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5946 k = find_first_bit(sqp->in_use_bm,
5948 if (k != sdebug_max_queue) {
5949 res = -EBUSY; /* queued commands */
5954 sdebug_ndelay = ndelay;
5955 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
5956 : DEF_JDELAY;
5958 block_unblock_all_queues(false);
5964 static DRIVER_ATTR_RW(ndelay);
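/*
 * ndelay is in nanoseconds and must be less than 1 second; a non-zero
 * value overrides delay (sdebug_jdelay becomes JDELAY_OVERRIDDEN), e.g.
 *
 *   echo 50000 > /sys/bus/pseudo/drivers/scsi_debug/ndelay
 *
 * asks for roughly 50 microsecond completions.
 */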
5966 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5968 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5971 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5977 if (sscanf(buf, "%10s", work) == 1) {
5978 if (strncasecmp(work, "0x", 2) == 0) {
5979 if (kstrtoint(work + 2, 16, &opts) == 0)
5982 if (kstrtoint(work, 10, &opts) == 0)
5989 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5990 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5994 static DRIVER_ATTR_RW(opts);
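/*
 * opts accepts hex (with a "0x" prefix) or decimal, so these two writes
 * are equivalent, assuming SDEBUG_OPT_NOISE is bit 0 of the option flags:
 *
 *   echo 0x1 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *
 * Writing opts also refreshes sdebug_verbose and the injection flags, as
 * coded above.
 */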
5996 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5998 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6000 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6005 /* Cannot change from or to TYPE_ZBC with sysfs */
6006 if (sdebug_ptype == TYPE_ZBC)
6009 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6017 static DRIVER_ATTR_RW(ptype);
6019 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6021 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6023 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6028 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6034 static DRIVER_ATTR_RW(dsense);
6036 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6038 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6040 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6045 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6046 bool want_store = (n == 0);
6047 struct sdebug_host_info *sdhp;
6050 sdebug_fake_rw = (sdebug_fake_rw > 0);
6051 if (sdebug_fake_rw == n)
6052 return count; /* not transitioning so do nothing */
6054 if (want_store) { /* 1 --> 0 transition, set up store */
6055 if (sdeb_first_idx < 0) {
6056 idx = sdebug_add_store();
6060 idx = sdeb_first_idx;
6061 xa_clear_mark(per_store_ap, idx,
6062 SDEB_XA_NOT_IN_USE);
6064 /* make all hosts use same store */
6065 list_for_each_entry(sdhp, &sdebug_host_list,
6067 if (sdhp->si_idx != idx) {
6068 xa_set_mark(per_store_ap, sdhp->si_idx,
6069 SDEB_XA_NOT_IN_USE);
6073 sdeb_most_recent_idx = idx;
6074 } else { /* 0 --> 1 transition is trigger for shrink */
6075 sdebug_erase_all_stores(true /* apart from first */);
6082 static DRIVER_ATTR_RW(fake_rw);
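/*
 * Transition summary for fake_rw_store() above (sketch):
 *   1 -> 0: a shared store is (re)attached; if none was ever allocated,
 *           sdebug_add_store() creates one, otherwise the first store is
 *           reused and every host is redirected to it.
 *   0 -> 1: all stores apart from the first are erased to free memory.
 * Writing the current value again is a no-op.
 */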
6084 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6086 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6088 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6093 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6094 sdebug_no_lun_0 = n;
6099 static DRIVER_ATTR_RW(no_lun_0);
6101 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6103 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6105 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6110 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6111 sdebug_num_tgts = n;
6112 sdebug_max_tgts_luns();
6117 static DRIVER_ATTR_RW(num_tgts);
6119 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6121 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6123 static DRIVER_ATTR_RO(dev_size_mb);
6125 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6127 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6130 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6135 if (kstrtobool(buf, &v))
6138 sdebug_per_host_store = v;
6141 static DRIVER_ATTR_RW(per_host_store);
6143 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6145 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6147 static DRIVER_ATTR_RO(num_parts);
6149 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6151 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6153 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6159 if (sscanf(buf, "%10s", work) == 1) {
6160 if (strncasecmp(work, "0x", 2) == 0) {
6161 if (kstrtoint(work + 2, 16, &nth) == 0)
6162 goto every_nth_done;
6164 if (kstrtoint(work, 10, &nth) == 0)
6165 goto every_nth_done;
6171 sdebug_every_nth = nth;
6172 if (nth && !sdebug_statistics) {
6173 pr_info("every_nth needs statistics=1, setting it\n");
6174 sdebug_statistics = true;
6179 static DRIVER_ATTR_RW(every_nth);
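/*
 * Example: with an error-injecting bit (e.g. SDEBUG_OPT_TIMEOUT) set in
 * opts,
 *
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 *
 * perturbs roughly every 100th command. Statistics are switched on
 * automatically because the trigger is derived from sdebug_cmnd_count
 * (see fake_timeout() below).
 */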
6181 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6183 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6185 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6191 if (kstrtoint(buf, 0, &n))
6194 if (n > (int)SAM_LUN_AM_FLAT) {
6195 pr_warn("only LUN address methods 0 and 1 are supported\n");
6198 changed = ((int)sdebug_lun_am != n);
6200 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6201 struct sdebug_host_info *sdhp;
6202 struct sdebug_dev_info *dp;
6204 spin_lock(&sdebug_host_list_lock);
6205 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6206 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6207 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6210 spin_unlock(&sdebug_host_list_lock);
6216 static DRIVER_ATTR_RW(lun_format);
6218 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6220 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6222 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6228 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6230 pr_warn("max_luns can be no more than 256\n");
6233 changed = (sdebug_max_luns != n);
6234 sdebug_max_luns = n;
6235 sdebug_max_tgts_luns();
6236 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6237 struct sdebug_host_info *sdhp;
6238 struct sdebug_dev_info *dp;
6240 spin_lock(&sdebug_host_list_lock);
6241 list_for_each_entry(sdhp, &sdebug_host_list,
6243 list_for_each_entry(dp, &sdhp->dev_info_list,
6245 set_bit(SDEBUG_UA_LUNS_CHANGED,
6249 spin_unlock(&sdebug_host_list_lock);
6255 static DRIVER_ATTR_RW(max_luns);
6257 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6259 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6261 /* N.B. max_queue can be changed while there are queued commands. In-flight
6262  * commands beyond the new max_queue will be completed. */
6263 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6267 struct sdebug_queue *sqp;
6269 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6270 (n <= SDEBUG_CANQUEUE) &&
6271 (sdebug_host_max_queue == 0)) {
6272 block_unblock_all_queues(true);
6274 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6276 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6280 sdebug_max_queue = n;
6281 if (k == SDEBUG_CANQUEUE)
6282 atomic_set(&retired_max_queue, 0);
6284 atomic_set(&retired_max_queue, k + 1);
6286 atomic_set(&retired_max_queue, 0);
6287 block_unblock_all_queues(false);
6292 static DRIVER_ATTR_RW(max_queue);
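/*
 * When max_queue is lowered below the highest in-use slot, that slot's
 * index + 1 is parked in retired_max_queue so that commands already
 * queued above the new limit can still complete; the completion paths
 * (see e.g. the retiring handling in sdebug_blk_mq_poll() below) wind it
 * back down to 0 as they drain.
 */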
6294 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6296 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6300 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6301 * in range [0, sdebug_host_max_queue), we can't change it.
6303 static DRIVER_ATTR_RO(host_max_queue);
6305 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6307 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6309 static DRIVER_ATTR_RO(no_uld);
6311 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6313 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6315 static DRIVER_ATTR_RO(scsi_level);
6317 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6319 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6321 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6327 /* Ignore capacity change for ZBC drives for now */
6328 if (sdeb_zbc_in_use)
6331 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6332 changed = (sdebug_virtual_gb != n);
6333 sdebug_virtual_gb = n;
6334 sdebug_capacity = get_sdebug_capacity();
6336 struct sdebug_host_info *sdhp;
6337 struct sdebug_dev_info *dp;
6339 spin_lock(&sdebug_host_list_lock);
6340 list_for_each_entry(sdhp, &sdebug_host_list,
6342 list_for_each_entry(dp, &sdhp->dev_info_list,
6344 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6348 spin_unlock(&sdebug_host_list_lock);
6354 static DRIVER_ATTR_RW(virtual_gb);
6356 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6358 /* absolute number of hosts currently active is what is shown */
6359 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6362 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6367 struct sdeb_store_info *sip;
6368 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6371 if (sscanf(buf, "%d", &delta_hosts) != 1)
6373 if (delta_hosts > 0) {
6377 xa_for_each_marked(per_store_ap, idx, sip,
6378 SDEB_XA_NOT_IN_USE) {
6379 sdeb_most_recent_idx = (int)idx;
6383 if (found) /* re-use case */
6384 sdebug_add_host_helper((int)idx);
6386 sdebug_do_add_host(true);
6388 sdebug_do_add_host(false);
6390 } while (--delta_hosts);
6391 } else if (delta_hosts < 0) {
6393 sdebug_do_remove_host(false);
6394 } while (++delta_hosts);
6398 static DRIVER_ATTR_RW(add_host);
6400 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6402 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6404 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6409 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6410 sdebug_vpd_use_hostno = n;
6415 static DRIVER_ATTR_RW(vpd_use_hostno);
6417 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6419 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6421 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6426 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6428 sdebug_statistics = true;
6430 clear_queue_stats();
6431 sdebug_statistics = false;
6437 static DRIVER_ATTR_RW(statistics);
6439 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6441 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6443 static DRIVER_ATTR_RO(sector_size);
6445 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6447 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6449 static DRIVER_ATTR_RO(submit_queues);
6451 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6453 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6455 static DRIVER_ATTR_RO(dix);
6457 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6459 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6461 static DRIVER_ATTR_RO(dif);
6463 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6465 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6467 static DRIVER_ATTR_RO(guard);
6469 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6471 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6473 static DRIVER_ATTR_RO(ato);
6475 static ssize_t map_show(struct device_driver *ddp, char *buf)
6479 if (!scsi_debug_lbp())
6480 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6481 sdebug_store_sectors);
6483 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6484 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6487 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6488 (int)map_size, sip->map_storep);
6490 buf[count++] = '\n';
6495 static DRIVER_ATTR_RO(map);
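/*
 * The "%*pbl" specifier above renders the provisioning bitmap as a
 * ranged list, so "cat map" produces something like "0-1,64-127"
 * (illustrative values) for the mapped regions.
 */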
6497 static ssize_t random_show(struct device_driver *ddp, char *buf)
6499 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6502 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6507 if (kstrtobool(buf, &v))
6513 static DRIVER_ATTR_RW(random);
6515 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6517 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6519 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6524 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6525 sdebug_removable = (n > 0);
6530 static DRIVER_ATTR_RW(removable);
6532 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6534 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6536 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6537 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6542 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6543 sdebug_host_lock = (n > 0);
6548 static DRIVER_ATTR_RW(host_lock);
6550 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6552 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6554 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6559 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6560 sdebug_strict = (n > 0);
6565 static DRIVER_ATTR_RW(strict);
6567 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6569 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6571 static DRIVER_ATTR_RO(uuid_ctl);
6573 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6575 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6577 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6582 ret = kstrtoint(buf, 0, &n);
6586 all_config_cdb_len();
6589 static DRIVER_ATTR_RW(cdb_len);
6591 static const char * const zbc_model_strs_a[] = {
6592 [BLK_ZONED_NONE] = "none",
6593 [BLK_ZONED_HA] = "host-aware",
6594 [BLK_ZONED_HM] = "host-managed",
6597 static const char * const zbc_model_strs_b[] = {
6598 [BLK_ZONED_NONE] = "no",
6599 [BLK_ZONED_HA] = "aware",
6600 [BLK_ZONED_HM] = "managed",
6603 static const char * const zbc_model_strs_c[] = {
6604 [BLK_ZONED_NONE] = "0",
6605 [BLK_ZONED_HA] = "1",
6606 [BLK_ZONED_HM] = "2",
6609 static int sdeb_zbc_model_str(const char *cp)
6611 int res = sysfs_match_string(zbc_model_strs_a, cp);
6614 res = sysfs_match_string(zbc_model_strs_b, cp);
6616 res = sysfs_match_string(zbc_model_strs_c, cp);
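/*
 * The three tables above allow any spelling of a model, e.g.
 * "host-managed", "managed" and "2" all resolve to BLK_ZONED_HM;
 * sysfs_match_string() returns the matching index, which doubles as the
 * BLK_ZONED_* value.
 */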
6624 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6626 return scnprintf(buf, PAGE_SIZE, "%s\n",
6627 zbc_model_strs_a[sdeb_zbc_model]);
6629 static DRIVER_ATTR_RO(zbc);
6631 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6633 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6635 static DRIVER_ATTR_RO(tur_ms_to_ready);
6637 /* Note: The following array creates attribute files in the
6638  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6639  * files (over those found in the /sys/module/scsi_debug/parameters
6640  * directory) is that auxiliary actions can be triggered when an attribute
6641  * is changed. For example see: add_host_store() above.
6642  */
6644 static struct attribute *sdebug_drv_attrs[] = {
6645 &driver_attr_delay.attr,
6646 &driver_attr_opts.attr,
6647 &driver_attr_ptype.attr,
6648 &driver_attr_dsense.attr,
6649 &driver_attr_fake_rw.attr,
6650 &driver_attr_host_max_queue.attr,
6651 &driver_attr_no_lun_0.attr,
6652 &driver_attr_num_tgts.attr,
6653 &driver_attr_dev_size_mb.attr,
6654 &driver_attr_num_parts.attr,
6655 &driver_attr_every_nth.attr,
6656 &driver_attr_lun_format.attr,
6657 &driver_attr_max_luns.attr,
6658 &driver_attr_max_queue.attr,
6659 &driver_attr_no_uld.attr,
6660 &driver_attr_scsi_level.attr,
6661 &driver_attr_virtual_gb.attr,
6662 &driver_attr_add_host.attr,
6663 &driver_attr_per_host_store.attr,
6664 &driver_attr_vpd_use_hostno.attr,
6665 &driver_attr_sector_size.attr,
6666 &driver_attr_statistics.attr,
6667 &driver_attr_submit_queues.attr,
6668 &driver_attr_dix.attr,
6669 &driver_attr_dif.attr,
6670 &driver_attr_guard.attr,
6671 &driver_attr_ato.attr,
6672 &driver_attr_map.attr,
6673 &driver_attr_random.attr,
6674 &driver_attr_removable.attr,
6675 &driver_attr_host_lock.attr,
6676 &driver_attr_ndelay.attr,
6677 &driver_attr_strict.attr,
6678 &driver_attr_uuid_ctl.attr,
6679 &driver_attr_cdb_len.attr,
6680 &driver_attr_tur_ms_to_ready.attr,
6681 &driver_attr_zbc.attr,
6684 ATTRIBUTE_GROUPS(sdebug_drv);
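/*
 * Each entry above becomes a file named after its attribute, e.g.:
 *
 *   cat /sys/bus/pseudo/drivers/scsi_debug/opts
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/statistics
 *
 * (illustrative; see the corresponding _show/_store pairs earlier in
 * this file).
 */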
6686 static struct device *pseudo_primary;
6688 static int __init scsi_debug_init(void)
6690 bool want_store = (sdebug_fake_rw == 0);
6692 int k, ret, hosts_to_add;
6695 ramdisk_lck_a[0] = &atomic_rw;
6696 ramdisk_lck_a[1] = &atomic_rw2;
6697 atomic_set(&retired_max_queue, 0);
6699 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6700 pr_warn("ndelay must be less than 1 second, ignored\n");
6702 } else if (sdebug_ndelay > 0)
6703 sdebug_jdelay = JDELAY_OVERRIDDEN;
6705 switch (sdebug_sector_size) {
6712 pr_err("invalid sector_size %d\n", sdebug_sector_size);
6716 switch (sdebug_dif) {
6717 case T10_PI_TYPE0_PROTECTION:
6719 case T10_PI_TYPE1_PROTECTION:
6720 case T10_PI_TYPE2_PROTECTION:
6721 case T10_PI_TYPE3_PROTECTION:
6722 have_dif_prot = true;
6726 pr_err("dif must be 0, 1, 2 or 3\n");
6730 if (sdebug_num_tgts < 0) {
6731 pr_err("num_tgts must be >= 0\n");
6735 if (sdebug_guard > 1) {
6736 pr_err("guard must be 0 or 1\n");
6740 if (sdebug_ato > 1) {
6741 pr_err("ato must be 0 or 1\n");
6745 if (sdebug_physblk_exp > 15) {
6746 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6750 sdebug_lun_am = sdebug_lun_am_i;
6751 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6752 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6753 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6756 if (sdebug_max_luns > 256) {
6757 if (sdebug_max_luns > 16384) {
6758 pr_warn("max_luns can be no more than 16384, using default\n");
6759 sdebug_max_luns = DEF_MAX_LUNS;
6761 sdebug_lun_am = SAM_LUN_AM_FLAT;
6764 if (sdebug_lowest_aligned > 0x3fff) {
6765 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6769 if (submit_queues < 1) {
6770 pr_err("submit_queues must be 1 or more\n");
6774 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6775 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6779 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6780 (sdebug_host_max_queue < 0)) {
6781 pr_err("host_max_queue must be in range [0, %d]\n",
6786 if (sdebug_host_max_queue &&
6787 (sdebug_max_queue != sdebug_host_max_queue)) {
6788 sdebug_max_queue = sdebug_host_max_queue;
6789 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6793 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6795 if (sdebug_q_arr == NULL)
6797 for (k = 0; k < submit_queues; ++k)
6798 spin_lock_init(&sdebug_q_arr[k].qc_lock);
6801 * check for host managed zoned block device specified with
6802 * ptype=0x14 or zbc=XXX.
6804 if (sdebug_ptype == TYPE_ZBC) {
6805 sdeb_zbc_model = BLK_ZONED_HM;
6806 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6807 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6813 switch (sdeb_zbc_model) {
6814 case BLK_ZONED_NONE:
6816 sdebug_ptype = TYPE_DISK;
6819 sdebug_ptype = TYPE_ZBC;
6822 pr_err("Invalid ZBC model\n");
6827 if (sdeb_zbc_model != BLK_ZONED_NONE) {
6828 sdeb_zbc_in_use = true;
6829 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6830 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
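/*
 * Illustrative module loads that land in this branch (sketch):
 *   modprobe scsi_debug ptype=0x14      # TYPE_ZBC -> host-managed model
 *   modprobe scsi_debug zbc=managed     # same model via the string form
 */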
6833 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6834 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6835 if (sdebug_dev_size_mb < 1)
6836 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
6837 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6838 sdebug_store_sectors = sz / sdebug_sector_size;
6839 sdebug_capacity = get_sdebug_capacity();
6841 /* play around with geometry, don't waste too much on track 0 */
6843 sdebug_sectors_per = 32;
6844 if (sdebug_dev_size_mb >= 256)
6846 else if (sdebug_dev_size_mb >= 16)
6848 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6849 (sdebug_sectors_per * sdebug_heads);
6850 if (sdebug_cylinders_per >= 1024) {
6851 /* other LLDs do this; implies >= 1GB ram disk ... */
6853 sdebug_sectors_per = 63;
6854 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6855 (sdebug_sectors_per * sdebug_heads);
6857 if (scsi_debug_lbp()) {
6858 sdebug_unmap_max_blocks =
6859 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6861 sdebug_unmap_max_desc =
6862 clamp(sdebug_unmap_max_desc, 0U, 256U);
6864 sdebug_unmap_granularity =
6865 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6867 if (sdebug_unmap_alignment &&
6868 sdebug_unmap_granularity <=
6869 sdebug_unmap_alignment) {
6870 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6875 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6877 idx = sdebug_add_store();
6884 pseudo_primary = root_device_register("pseudo_0");
6885 if (IS_ERR(pseudo_primary)) {
6886 pr_warn("root_device_register() error\n");
6887 ret = PTR_ERR(pseudo_primary);
6890 ret = bus_register(&pseudo_lld_bus);
6892 pr_warn("bus_register error: %d\n", ret);
6895 ret = driver_register(&sdebug_driverfs_driver);
6897 pr_warn("driver_register error: %d\n", ret);
6901 hosts_to_add = sdebug_add_host;
6902 sdebug_add_host = 0;
6904 for (k = 0; k < hosts_to_add; k++) {
6905 if (want_store && k == 0) {
6906 ret = sdebug_add_host_helper(idx);
6908 pr_err("add_host_helper k=%d, error=%d\n",
6913 ret = sdebug_do_add_host(want_store &&
6914 sdebug_per_host_store);
6916 pr_err("add_host k=%d error=%d\n", k, -ret);
6922 pr_info("built %d host(s)\n", sdebug_num_hosts);
6927 bus_unregister(&pseudo_lld_bus);
6929 root_device_unregister(pseudo_primary);
6931 sdebug_erase_store(idx, NULL);
6933 kfree(sdebug_q_arr);
6937 static void __exit scsi_debug_exit(void)
6939 int k = sdebug_num_hosts;
6943 sdebug_do_remove_host(true);
6945 driver_unregister(&sdebug_driverfs_driver);
6946 bus_unregister(&pseudo_lld_bus);
6947 root_device_unregister(pseudo_primary);
6949 sdebug_erase_all_stores(false);
6950 xa_destroy(per_store_ap);
6951 kfree(sdebug_q_arr);
6954 device_initcall(scsi_debug_init);
6955 module_exit(scsi_debug_exit);
6957 static void sdebug_release_adapter(struct device *dev)
6959 struct sdebug_host_info *sdbg_host;
6961 sdbg_host = to_sdebug_host(dev);
6965 /* idx must be valid; if sip is NULL it will be looked up using idx */
6966 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6971 if (xa_empty(per_store_ap))
6973 sip = xa_load(per_store_ap, idx);
6977 vfree(sip->map_storep);
6978 vfree(sip->dif_storep);
6980 xa_erase(per_store_ap, idx);
6984 /* Assume apart_from_first==false only in shutdown case. */
6985 static void sdebug_erase_all_stores(bool apart_from_first)
6988 struct sdeb_store_info *sip = NULL;
6990 xa_for_each(per_store_ap, idx, sip) {
6991 if (apart_from_first)
6992 apart_from_first = false;
6994 sdebug_erase_store(idx, sip);
6996 if (apart_from_first)
6997 sdeb_most_recent_idx = sdeb_first_idx;
7001 * Returns store xarray new element index (idx) if >=0 else negated errno.
7002 * Limit the number of stores to 65536.
7004 static int sdebug_add_store(void)
7008 unsigned long iflags;
7009 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7010 struct sdeb_store_info *sip = NULL;
7011 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7013 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7017 xa_lock_irqsave(per_store_ap, iflags);
7018 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7019 if (unlikely(res < 0)) {
7020 xa_unlock_irqrestore(per_store_ap, iflags);
7022 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7025 sdeb_most_recent_idx = n_idx;
7026 if (sdeb_first_idx < 0)
7027 sdeb_first_idx = n_idx;
7028 xa_unlock_irqrestore(per_store_ap, iflags);
7031 sip->storep = vzalloc(sz);
7033 pr_err("user data oom\n");
7036 if (sdebug_num_parts > 0)
7037 sdebug_build_parts(sip->storep, sz);
7039 /* DIF/DIX: what T10 calls Protection Information (PI) */
7043 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7044 sip->dif_storep = vmalloc(dif_size);
7046 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7049 if (!sip->dif_storep) {
7050 pr_err("DIX oom\n");
7053 memset(sip->dif_storep, 0xff, dif_size);
7055 /* Logical Block Provisioning */
7056 if (scsi_debug_lbp()) {
7057 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7058 sip->map_storep = vmalloc(array_size(sizeof(long),
7059 BITS_TO_LONGS(map_size)));
7061 pr_info("%lu provisioning blocks\n", map_size);
7063 if (!sip->map_storep) {
7064 pr_err("LBP map oom\n");
7068 bitmap_zero(sip->map_storep, map_size);
7070 /* Map first 1KB for partition table */
7071 if (sdebug_num_parts)
7072 map_region(sip, 0, 2);
7075 rwlock_init(&sip->macc_lck);
7078 sdebug_erase_store((int)n_idx, sip);
7079 pr_warn("%s: failed, errno=%d\n", __func__, -res);
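/*
 * Store lifecycle sketch: indexes come from __xa_alloc() within the
 * {0, 1 << 16} limit above; when a host goes away its store is only
 * marked SDEB_XA_NOT_IN_USE (see sdebug_do_remove_host() below), and
 * add_host_store() prefers re-using such marked entries over allocating
 * fresh stores.
 */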
7083 static int sdebug_add_host_helper(int per_host_idx)
7085 int k, devs_per_host, idx;
7086 int error = -ENOMEM;
7087 struct sdebug_host_info *sdbg_host;
7088 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7090 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7093 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7094 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7095 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7096 sdbg_host->si_idx = idx;
7098 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7100 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7101 for (k = 0; k < devs_per_host; k++) {
7102 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7107 spin_lock(&sdebug_host_list_lock);
7108 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7109 spin_unlock(&sdebug_host_list_lock);
7111 sdbg_host->dev.bus = &pseudo_lld_bus;
7112 sdbg_host->dev.parent = pseudo_primary;
7113 sdbg_host->dev.release = &sdebug_release_adapter;
7114 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7116 error = device_register(&sdbg_host->dev);
7124 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7126 list_del(&sdbg_devinfo->dev_list);
7127 kfree(sdbg_devinfo->zstate);
7128 kfree(sdbg_devinfo);
7131 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7135 static int sdebug_do_add_host(bool mk_new_store)
7137 int ph_idx = sdeb_most_recent_idx;
7140 ph_idx = sdebug_add_store();
7144 return sdebug_add_host_helper(ph_idx);
7147 static void sdebug_do_remove_host(bool the_end)
7150 struct sdebug_host_info *sdbg_host = NULL;
7151 struct sdebug_host_info *sdbg_host2;
7153 spin_lock(&sdebug_host_list_lock);
7154 if (!list_empty(&sdebug_host_list)) {
7155 sdbg_host = list_entry(sdebug_host_list.prev,
7156 struct sdebug_host_info, host_list);
7157 idx = sdbg_host->si_idx;
7159 if (!the_end && idx >= 0) {
7162 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7163 if (sdbg_host2 == sdbg_host)
7165 if (idx == sdbg_host2->si_idx) {
7171 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7172 if (idx == sdeb_most_recent_idx)
7173 --sdeb_most_recent_idx;
7177 list_del(&sdbg_host->host_list);
7178 spin_unlock(&sdebug_host_list_lock);
7183 device_unregister(&sdbg_host->dev);
7187 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7190 struct sdebug_dev_info *devip;
7192 block_unblock_all_queues(true);
7193 devip = (struct sdebug_dev_info *)sdev->hostdata;
7194 if (NULL == devip) {
7195 block_unblock_all_queues(false);
7198 num_in_q = atomic_read(&devip->num_in_q);
7200 if (qdepth > SDEBUG_CANQUEUE) {
7201 qdepth = SDEBUG_CANQUEUE;
7202 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7203 qdepth, SDEBUG_CANQUEUE);
7207 if (qdepth != sdev->queue_depth)
7208 scsi_change_queue_depth(sdev, qdepth);
7210 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7211 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7212 __func__, qdepth, num_in_q);
7214 block_unblock_all_queues(false);
7215 return sdev->queue_depth;
7218 static bool fake_timeout(struct scsi_cmnd *scp)
7220 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7221 if (sdebug_every_nth < -1)
7222 sdebug_every_nth = -1;
7223 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7224 return true; /* ignore command causing timeout */
7225 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7226 scsi_medium_access_command(scp))
7227 return true; /* time out reads and writes */
7232 /* Response to TUR or media access command when device stopped */
7233 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7237 ktime_t now_ts = ktime_get_boottime();
7238 struct scsi_device *sdp = scp->device;
7240 stopped_state = atomic_read(&devip->stopped);
7241 if (stopped_state == 2) {
7242 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7243 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7244 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7245 /* tur_ms_to_ready timer extinguished */
7246 atomic_set(&devip->stopped, 0);
7250 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7252 sdev_printk(KERN_INFO, sdp,
7253 "%s: Not ready: in process of becoming ready\n", my_name);
7254 if (scp->cmnd[0] == TEST_UNIT_READY) {
7255 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7257 if (diff_ns <= tur_nanosecs_to_ready)
7258 diff_ns = tur_nanosecs_to_ready - diff_ns;
7260 diff_ns = tur_nanosecs_to_ready;
7261 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7262 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7263 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7265 return check_condition_result;
7268 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7270 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7272 return check_condition_result;
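/*
 * Example: with tur_ms_to_ready=2000, a TEST UNIT READY arriving 500 ms
 * after device creation gets NOT READY sense (LOGICAL_UNIT_NOT_READY,
 * ASCQ 0x1) whose information field holds ~1500, the milliseconds still
 * to wait, per the 20-061r2 reference above.
 */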
7275 static int sdebug_map_queues(struct Scsi_Host *shost)
7279 if (shost->nr_hw_queues == 1)
7282 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7283 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7287 if (i == HCTX_TYPE_DEFAULT)
7288 map->nr_queues = submit_queues - poll_queues;
7289 else if (i == HCTX_TYPE_POLL)
7290 map->nr_queues = poll_queues;
7292 if (!map->nr_queues) {
7293 BUG_ON(i == HCTX_TYPE_DEFAULT);
7297 map->queue_offset = qoff;
7298 blk_mq_map_queues(map);
7300 qoff += map->nr_queues;
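/*
 * Example split, assuming submit_queues=4 and poll_queues=2: the
 * HCTX_TYPE_DEFAULT map gets hardware queues 0-1, HCTX_TYPE_POLL gets
 * 2-3, and any type left with nr_queues == 0 (e.g. HCTX_TYPE_READ) is
 * skipped above.
 */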
7307 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7310 bool retiring = false;
7311 int num_entries = 0;
7312 unsigned int qc_idx = 0;
7313 unsigned long iflags;
7314 ktime_t kt_from_boot = ktime_get_boottime();
7315 struct sdebug_queue *sqp;
7316 struct sdebug_queued_cmd *sqcp;
7317 struct scsi_cmnd *scp;
7318 struct sdebug_dev_info *devip;
7319 struct sdebug_defer *sd_dp;
7321 sqp = sdebug_q_arr + queue_num;
7322 spin_lock_irqsave(&sqp->qc_lock, iflags);
7324 for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
7326 qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7329 qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7331 if (unlikely(qc_idx >= sdebug_max_queue))
7334 sqcp = &sqp->qc_arr[qc_idx];
7335 sd_dp = sqcp->sd_dp;
7336 if (unlikely(!sd_dp))
7339 if (unlikely(scp == NULL)) {
7340 pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7341 queue_num, qc_idx, __func__);
7344 if (sd_dp->defer_t == SDEB_DEFER_POLL) {
7345 if (kt_from_boot < sd_dp->cmpl_ts)
7348 } else /* ignoring non REQ_POLLED requests */
7350 devip = (struct sdebug_dev_info *)scp->device->hostdata;
7352 atomic_dec(&devip->num_in_q);
7354 pr_err("devip=NULL from %s\n", __func__);
7355 if (unlikely(atomic_read(&retired_max_queue) > 0))
7358 sqcp->a_cmnd = NULL;
7359 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7360 pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7361 sqp, queue_num, qc_idx, __func__);
7364 if (unlikely(retiring)) { /* user has reduced max_queue */
7367 retval = atomic_read(&retired_max_queue);
7368 if (qc_idx >= retval) {
7369 pr_err("index %d too large\n", retval);
7372 k = find_last_bit(sqp->in_use_bm, retval);
7373 if ((k < sdebug_max_queue) || (k == retval))
7374 atomic_set(&retired_max_queue, 0);
7376 atomic_set(&retired_max_queue, k + 1);
7378 sd_dp->defer_t = SDEB_DEFER_NONE;
7379 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7380 scsi_done(scp); /* callback to mid level */
7381 spin_lock_irqsave(&sqp->qc_lock, iflags);
7384 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7385 if (num_entries > 0)
7386 atomic_add(num_entries, &sdeb_mq_poll_count);
7390 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7391 struct scsi_cmnd *scp)
7394 struct scsi_device *sdp = scp->device;
7395 const struct opcode_info_t *oip;
7396 const struct opcode_info_t *r_oip;
7397 struct sdebug_dev_info *devip;
7398 u8 *cmd = scp->cmnd;
7399 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7400 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7403 u64 lun_index = sdp->lun & 0x3FFF;
7410 scsi_set_resid(scp, 0);
7411 if (sdebug_statistics) {
7412 atomic_inc(&sdebug_cmnd_count);
7413 inject_now = inject_on_this_cmd();
7417 if (unlikely(sdebug_verbose &&
7418 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7423 sb = (int)sizeof(b);
7425 strcpy(b, "too long, over 32 bytes");
7427 for (k = 0, n = 0; k < len && n < sb; ++k)
7428 n += scnprintf(b + n, sb - n, "%02x ",
7431 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7432 blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7434 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7435 return SCSI_MLQUEUE_HOST_BUSY;
7436 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7437 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7440 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
7441 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
7442 devip = (struct sdebug_dev_info *)sdp->hostdata;
7443 if (unlikely(!devip)) {
7444 devip = find_build_dev_info(sdp);
7448 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7449 atomic_set(&sdeb_inject_pending, 1);
7451 na = oip->num_attached;
7453 if (na) { /* multiple commands with this opcode */
7455 if (FF_SA & r_oip->flags) {
7456 if (F_SA_LOW & oip->flags)
7459 sa = get_unaligned_be16(cmd + 8);
7460 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7461 if (opcode == oip->opcode && sa == oip->sa)
7464 } else { /* since no service action only check opcode */
7465 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7466 if (opcode == oip->opcode)
7471 if (F_SA_LOW & r_oip->flags)
7472 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7473 else if (F_SA_HIGH & r_oip->flags)
7474 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7476 mk_sense_invalid_opcode(scp);
7479 } /* else (when na==0) we assume the oip is a match */
7481 if (unlikely(F_INV_OP & flags)) {
7482 mk_sense_invalid_opcode(scp);
7485 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7487 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7488 my_name, opcode, " supported for wlun");
7489 mk_sense_invalid_opcode(scp);
7492 if (unlikely(sdebug_strict)) { /* check cdb against mask */
7496 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7497 rem = ~oip->len_mask[k] & cmd[k];
7499 for (j = 7; j >= 0; --j, rem <<= 1) {
7503 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7508 if (unlikely(!(F_SKIP_UA & flags) &&
7509 find_first_bit(devip->uas_bm,
7510 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7511 errsts = make_ua(scp, devip);
7515 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7516 atomic_read(&devip->stopped))) {
7517 errsts = resp_not_ready(scp, devip);
7521 if (sdebug_fake_rw && (F_FAKE_RW & flags))
7523 if (unlikely(sdebug_every_nth)) {
7524 if (fake_timeout(scp))
7525 return 0; /* ignore command: make trouble */
7527 if (likely(oip->pfp))
7528 pfp = oip->pfp; /* calls a resp_* function */
7530 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
7533 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
7534 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7535 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7536 sdebug_ndelay > 10000)) {
7538 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7539 * for Start Stop Unit (SSU) want at least 1 second delay and
7540 * if sdebug_jdelay>1 want a long delay of that many seconds.
7541 * For Synchronize Cache want 1/20 of SSU's delay.
7543 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7544 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7546 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7547 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7549 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7552 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7554 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7557 static struct scsi_host_template sdebug_driver_template = {
7558 .show_info = scsi_debug_show_info,
7559 .write_info = scsi_debug_write_info,
7560 .proc_name = sdebug_proc_name,
7561 .name = "SCSI DEBUG",
7562 .info = scsi_debug_info,
7563 .slave_alloc = scsi_debug_slave_alloc,
7564 .slave_configure = scsi_debug_slave_configure,
7565 .slave_destroy = scsi_debug_slave_destroy,
7566 .ioctl = scsi_debug_ioctl,
7567 .queuecommand = scsi_debug_queuecommand,
7568 .change_queue_depth = sdebug_change_qdepth,
7569 .map_queues = sdebug_map_queues,
7570 .mq_poll = sdebug_blk_mq_poll,
7571 .eh_abort_handler = scsi_debug_abort,
7572 .eh_device_reset_handler = scsi_debug_device_reset,
7573 .eh_target_reset_handler = scsi_debug_target_reset,
7574 .eh_bus_reset_handler = scsi_debug_bus_reset,
7575 .eh_host_reset_handler = scsi_debug_host_reset,
7576 .can_queue = SDEBUG_CANQUEUE,
7578 .sg_tablesize = SG_MAX_SEGMENTS,
7579 .cmd_per_lun = DEF_CMD_PER_LUN,
7581 .max_segment_size = -1U,
7582 .module = THIS_MODULE,
7583 .track_queue_depth = 1,
7586 static int sdebug_driver_probe(struct device *dev)
7589 struct sdebug_host_info *sdbg_host;
7590 struct Scsi_Host *hpnt;
7593 sdbg_host = to_sdebug_host(dev);
7595 sdebug_driver_template.can_queue = sdebug_max_queue;
7596 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7597 if (!sdebug_clustering)
7598 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7600 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7602 pr_err("scsi_host_alloc failed\n");
7606 if (submit_queues > nr_cpu_ids) {
7607 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7608 my_name, submit_queues, nr_cpu_ids);
7609 submit_queues = nr_cpu_ids;
7612 * Decide whether to tell scsi subsystem that we want mq. The
7613 * following should give the same answer for each host.
7615 hpnt->nr_hw_queues = submit_queues;
7616 if (sdebug_host_max_queue)
7617 hpnt->host_tagset = 1;
7619 /* poll queues are possible for nr_hw_queues > 1 */
7620 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7621 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7622 my_name, poll_queues, hpnt->nr_hw_queues);
7627 * Poll queues don't need interrupts, but we need at least one I/O queue
7628 * left over for non-polled I/O.
7629 * If condition not met, trim poll_queues to 1 (just for simplicity).
7631 if (poll_queues >= submit_queues) {
7632 if (submit_queues < 3)
7633 pr_warn("%s: trim poll_queues to 1\n", my_name);
7635 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7636 my_name, submit_queues - 1);
7642 sdbg_host->shost = hpnt;
7643 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7644 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7645 hpnt->max_id = sdebug_num_tgts + 1;
7647 hpnt->max_id = sdebug_num_tgts;
7648 /* = sdebug_max_luns; */
7649 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7653 switch (sdebug_dif) {
7655 case T10_PI_TYPE1_PROTECTION:
7656 hprot = SHOST_DIF_TYPE1_PROTECTION;
7658 hprot |= SHOST_DIX_TYPE1_PROTECTION;
7661 case T10_PI_TYPE2_PROTECTION:
7662 hprot = SHOST_DIF_TYPE2_PROTECTION;
7664 hprot |= SHOST_DIX_TYPE2_PROTECTION;
7667 case T10_PI_TYPE3_PROTECTION:
7668 hprot = SHOST_DIF_TYPE3_PROTECTION;
7670 hprot |= SHOST_DIX_TYPE3_PROTECTION;
7675 hprot |= SHOST_DIX_TYPE0_PROTECTION;
7679 scsi_host_set_prot(hpnt, hprot);
7681 if (have_dif_prot || sdebug_dix)
7682 pr_info("host protection%s%s%s%s%s%s%s\n",
7683 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7684 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7685 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7686 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7687 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7688 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7689 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7691 if (sdebug_guard == 1)
7692 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7694 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7696 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7697 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7698 if (sdebug_every_nth) /* need stats counters for every_nth */
7699 sdebug_statistics = true;
7700 error = scsi_add_host(hpnt, &sdbg_host->dev);
7702 pr_err("scsi_add_host failed\n");
7704 scsi_host_put(hpnt);
7706 scsi_scan_host(hpnt);
7712 static void sdebug_driver_remove(struct device *dev)
7714 struct sdebug_host_info *sdbg_host;
7715 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7717 sdbg_host = to_sdebug_host(dev);
7719 scsi_remove_host(sdbg_host->shost);
7721 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7723 list_del(&sdbg_devinfo->dev_list);
7724 kfree(sdbg_devinfo->zstate);
7725 kfree(sdbg_devinfo);
7728 scsi_host_put(sdbg_host->shost);
7731 static int pseudo_lld_bus_match(struct device *dev,
7732 struct device_driver *dev_driver)
7737 static struct bus_type pseudo_lld_bus = {
7739 .match = pseudo_lld_bus_match,
7740 .probe = sdebug_driver_probe,
7741 .remove = sdebug_driver_remove,
7742 .drv_groups = sdebug_drv_groups,