2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * Copyright (C) 2001 - 2018 Douglas Gilbert
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
16 * For documentation see http://sg.danny.cz/sg/sdebug26.html
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
23 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 #include <linux/uuid.h>
45 #include <linux/t10-pi.h>
47 #include <net/checksum.h>
49 #include <asm/unaligned.h>
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsicam.h>
56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h>
58 #include <scsi/scsi_dbg.h>
61 #include "scsi_logging.h"
63 /* make sure inq_product_rev string corresponds to this version */
64 #define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
65 static const char *sdebug_version_date = "20180128";
67 #define MY_NAME "scsi_debug"
69 /* Additional Sense Code (ASC) */
70 #define NO_ADDITIONAL_SENSE 0x0
71 #define LOGICAL_UNIT_NOT_READY 0x4
72 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73 #define UNRECOVERED_READ_ERR 0x11
74 #define PARAMETER_LIST_LENGTH_ERR 0x1a
75 #define INVALID_OPCODE 0x20
76 #define LBA_OUT_OF_RANGE 0x21
77 #define INVALID_FIELD_IN_CDB 0x24
78 #define INVALID_FIELD_IN_PARAM_LIST 0x26
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
98 /* Additional Sense Code Qualifier (ASCQ) */
99 #define ACK_NAK_TO 0x3
101 /* Default values for driver parameters */
102 #define DEF_NUM_HOST 1
103 #define DEF_NUM_TGTS 1
104 #define DEF_MAX_LUNS 1
105 /* With these defaults, this driver will make 1 host with 1 target
106 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
109 #define DEF_CDB_LEN 10
110 #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
111 #define DEF_DEV_SIZE_MB 8
114 #define DEF_D_SENSE 0
115 #define DEF_EVERY_NTH 0
116 #define DEF_FAKE_RW 0
118 #define DEF_HOST_LOCK 0
121 #define DEF_LBPWS10 0
123 #define DEF_LOWEST_ALIGNED 0
124 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
125 #define DEF_NO_LUN_0 0
126 #define DEF_NUM_PARTS 0
128 #define DEF_OPT_BLKS 1024
129 #define DEF_PHYSBLK_EXP 0
130 #define DEF_OPT_XFERLEN_EXP 0
131 #define DEF_PTYPE TYPE_DISK
132 #define DEF_REMOVABLE false
133 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
134 #define DEF_SECTOR_SIZE 512
135 #define DEF_UNMAP_ALIGNMENT 0
136 #define DEF_UNMAP_GRANULARITY 1
137 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
138 #define DEF_UNMAP_MAX_DESC 256
139 #define DEF_VIRTUAL_GB 0
140 #define DEF_VPD_USE_HOSTNO 1
141 #define DEF_WRITESAME_LENGTH 0xFFFF
143 #define DEF_STATISTICS false
144 #define DEF_SUBMIT_QUEUES 1
145 #define DEF_UUID_CTL 0
146 #define JDELAY_OVERRIDDEN -9999
148 #define SDEBUG_LUN_0_VAL 0
150 /* bit mask values for sdebug_opts */
151 #define SDEBUG_OPT_NOISE 1
152 #define SDEBUG_OPT_MEDIUM_ERR 2
153 #define SDEBUG_OPT_TIMEOUT 4
154 #define SDEBUG_OPT_RECOVERED_ERR 8
155 #define SDEBUG_OPT_TRANSPORT_ERR 16
156 #define SDEBUG_OPT_DIF_ERR 32
157 #define SDEBUG_OPT_DIX_ERR 64
158 #define SDEBUG_OPT_MAC_TIMEOUT 128
159 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
160 #define SDEBUG_OPT_Q_NOISE 0x200
161 #define SDEBUG_OPT_ALL_TSF 0x400
162 #define SDEBUG_OPT_RARE_TSF 0x800
163 #define SDEBUG_OPT_N_WCE 0x1000
164 #define SDEBUG_OPT_RESET_NOISE 0x2000
165 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
166 #define SDEBUG_OPT_HOST_BUSY 0x8000
167 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
168 SDEBUG_OPT_RESET_NOISE)
169 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
170 SDEBUG_OPT_TRANSPORT_ERR | \
171 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
172 SDEBUG_OPT_SHORT_TRANSFER | \
173 SDEBUG_OPT_HOST_BUSY)
174 /* When "every_nth" > 0 then modulo "every_nth" commands:
175 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
176 * - a RECOVERED_ERROR is simulated on successful read and write
177 * commands if SDEBUG_OPT_RECOVERED_ERR is set.
178 * - a TRANSPORT_ERROR is simulated on successful read and write
179 * commands if SDEBUG_OPT_TRANSPORT_ERR is set.
181 * When "every_nth" < 0 then after "- every_nth" commands:
182 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
183 * - a RECOVERED_ERROR is simulated on successful read and write
184 * commands if SDEBUG_OPT_RECOVERED_ERR is set.
185 * - a TRANSPORT_ERROR is simulated on successful read and write
 * commands if SDEBUG_OPT_TRANSPORT_ERR is set.
187 * This will continue on every subsequent command until some other action
188 * occurs (e.g. the user * writing a new value (other than -1 or 1) to
189 * every_nth via sysfs).
192 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
193 * priority order. In the subset implemented here lower numbers have higher
194 * priority. The UA numbers should be a sequence starting from 0 with
195 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
196 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
197 #define SDEBUG_UA_BUS_RESET 1
198 #define SDEBUG_UA_MODE_CHANGED 2
199 #define SDEBUG_UA_CAPACITY_CHANGED 3
200 #define SDEBUG_UA_LUNS_CHANGED 4
201 #define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
202 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
203 #define SDEBUG_NUM_UAS 7
205 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
206 * sector on read commands: */
207 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
208 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
210 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
211 * or "peripheral device" addressing (value 0) */
212 #define SAM2_LUN_ADDRESS_METHOD 0
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215 * (for response) per submit queue at one time. Can be reduced by max_queue
216 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219 * but cannot exceed SDEBUG_CANQUEUE .
221 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN 255
227 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
229 #define F_RL_WLUN_OK 0x10
230 #define F_SKIP_UA 0x20
231 #define F_DELAY_OVERR 0x40
232 #define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
233 #define F_SA_HIGH 0x100 /* as used by variable length cdbs */
234 #define F_INV_OP 0x200
235 #define F_FAKE_RW 0x400
236 #define F_M_ACCESS 0x800 /* media access */
237 #define F_LONG_DELAY 0x1000
239 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
240 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
241 #define FF_SA (F_SA_HIGH | F_SA_LOW)
243 #define SDEBUG_MAX_PARTS 4
245 #define SDEBUG_MAX_CMD_LEN 32
248 struct sdebug_dev_info {
249 struct list_head dev_list;
250 unsigned int channel;
254 struct sdebug_host_info *sdbg_host;
255 unsigned long uas_bm[1];
261 struct sdebug_host_info {
262 struct list_head host_list;
263 struct Scsi_Host *shost;
265 struct list_head dev_info_list;
268 #define to_sdebug_host(d) \
269 container_of(d, struct sdebug_host_info, dev)
271 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
274 struct sdebug_defer {
276 struct execute_work ew;
277 int sqa_idx; /* index of sdebug_queue array */
278 int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
282 enum sdeb_defer_type defer_t;
285 struct sdebug_queued_cmd {
286 /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
287 * instance indicates this slot is in use.
289 struct sdebug_defer *sd_dp;
290 struct scsi_cmnd *a_cmnd;
291 unsigned int inj_recovered:1;
292 unsigned int inj_transport:1;
293 unsigned int inj_dif:1;
294 unsigned int inj_dix:1;
295 unsigned int inj_short:1;
296 unsigned int inj_host_busy:1;
299 struct sdebug_queue {
300 struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
301 unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
303 atomic_t blocked; /* to temporarily stop more being queued */
306 static atomic_t sdebug_cmnd_count; /* number of incoming commands */
307 static atomic_t sdebug_completions; /* count of deferred completions */
308 static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
309 static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
311 struct opcode_info_t {
312 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
313 /* for terminating element */
314 u8 opcode; /* if num_attached > 0, preferred */
315 u16 sa; /* service action */
316 u32 flags; /* OR-ed set of SDEB_F_* */
317 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
318 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
319 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
320 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
323 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
324 enum sdeb_opcode_index {
325 SDEB_I_INVALID_OPCODE = 0,
327 SDEB_I_REPORT_LUNS = 2,
328 SDEB_I_REQUEST_SENSE = 3,
329 SDEB_I_TEST_UNIT_READY = 4,
330 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
331 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
332 SDEB_I_LOG_SENSE = 7,
333 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
334 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
335 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
336 SDEB_I_START_STOP = 11,
337 SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
338 SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
339 SDEB_I_MAINT_IN = 14,
340 SDEB_I_MAINT_OUT = 15,
341 SDEB_I_VERIFY = 16, /* 10 only */
342 SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
343 SDEB_I_RESERVE = 18, /* 6, 10 */
344 SDEB_I_RELEASE = 19, /* 6, 10 */
345 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
346 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
347 SDEB_I_ATA_PT = 22, /* 12, 16 */
348 SDEB_I_SEND_DIAG = 23,
350 SDEB_I_XDWRITEREAD = 25, /* 10 only */
351 SDEB_I_WRITE_BUFFER = 26,
352 SDEB_I_WRITE_SAME = 27, /* 10, 16 */
353 SDEB_I_SYNC_CACHE = 28, /* 10, 16 */
354 SDEB_I_COMP_WRITE = 29,
355 SDEB_I_LAST_ELEMENT = 30, /* keep this last (previous + 1) */
359 static const unsigned char opcode_ind_arr[256] = {
360 /* 0x0; 0x0->0x1f: 6 byte cdbs */
361 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
363 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
364 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
366 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
367 SDEB_I_ALLOW_REMOVAL, 0,
368 /* 0x20; 0x20->0x3f: 10 byte cdbs */
369 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
370 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
371 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
372 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
373 /* 0x40; 0x40->0x5f: 10 byte cdbs */
374 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
375 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
376 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
378 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
379 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
380 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
381 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
382 0, SDEB_I_VARIABLE_LEN,
383 /* 0x80; 0x80->0x9f: 16 byte cdbs */
384 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
385 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
386 0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
387 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
388 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
389 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
390 SDEB_I_MAINT_OUT, 0, 0, 0,
391 SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
392 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
393 0, 0, 0, 0, 0, 0, 0, 0,
394 0, 0, 0, 0, 0, 0, 0, 0,
395 /* 0xc0; 0xc0->0xff: vendor specific */
396 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
397 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
398 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
399 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
403 * The following "response" functions return the SCSI mid-level's 4 byte
404 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
405 * command completion, they can mask their return value with
406 * SDEG_RES_IMMED_MASK .
408 #define SDEG_RES_IMMED_MASK 0x40000000
410 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
411 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
412 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
413 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
414 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
415 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
416 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
417 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
418 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
419 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
420 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
421 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
422 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
423 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
424 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
425 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
426 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
427 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
428 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
429 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
430 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
431 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
432 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
435 * The following are overflow arrays for cdbs that "hit" the same index in
436 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
437 * should be placed in opcode_info_arr[], the others should be placed here.
439 static const struct opcode_info_t msense_iarr[] = {
440 {0, 0x1a, 0, F_D_IN, NULL, NULL,
441 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
444 static const struct opcode_info_t mselect_iarr[] = {
445 {0, 0x15, 0, F_D_OUT, NULL, NULL,
446 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
449 static const struct opcode_info_t read_iarr[] = {
450 {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
451 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
453 {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
454 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
455 {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
456 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
460 static const struct opcode_info_t write_iarr[] = {
461 {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
462 NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
464 {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
465 NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
467 {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
468 NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
469 0xbf, 0xc7, 0, 0, 0, 0} },
472 static const struct opcode_info_t sa_in_16_iarr[] = {
473 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
474 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
475 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
478 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
479 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
480 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
481 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
482 {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
483 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
484 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
487 static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
488 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
489 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
490 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
491 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
492 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
493 0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
496 static const struct opcode_info_t write_same_iarr[] = {
497 {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
498 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
499 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
502 static const struct opcode_info_t reserve_iarr[] = {
503 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
504 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
507 static const struct opcode_info_t release_iarr[] = {
508 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
509 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
512 static const struct opcode_info_t sync_cache_iarr[] = {
513 {0, 0x91, 0, F_LONG_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
514 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
515 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
519 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
520 * plus the terminating elements for logic that scans this table such as
521 * REPORT SUPPORTED OPERATION CODES. */
522 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
524 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
525 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
526 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
527 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
528 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
529 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
530 0, 0} }, /* REPORT LUNS */
531 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
532 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
533 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
534 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
536 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
537 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
538 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
539 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
540 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
541 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
542 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
543 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
545 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
546 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
548 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
549 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
550 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
552 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
553 resp_write_dt0, write_iarr, /* WRITE(16) */
554 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
555 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
556 {0, 0x1b, 0, F_LONG_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
557 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
558 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
559 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
560 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
561 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
562 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
563 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
564 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
565 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
566 resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
567 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
568 0xff, 0, 0xc7, 0, 0, 0, 0} },
570 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
571 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
572 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
573 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
575 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
576 resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
577 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
579 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
580 NULL, reserve_iarr, /* RESERVE(10) <no response function> */
581 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
583 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
584 NULL, release_iarr, /* RELEASE(10) <no response function> */
585 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
588 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
589 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
590 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
591 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
592 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
593 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
594 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
595 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
596 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
597 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
599 {0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
600 NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
601 0, 0, 0, 0, 0, 0} }, /* XDWRITEREAD(10) */
602 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
603 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
604 0, 0, 0, 0} }, /* WRITE_BUFFER */
605 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
606 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
607 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
609 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_LONG_DELAY | F_M_ACCESS,
610 resp_sync_cache, sync_cache_iarr,
611 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
612 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
613 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
614 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
615 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
618 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
619 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
622 static int sdebug_add_host = DEF_NUM_HOST;
623 static int sdebug_ato = DEF_ATO;
624 static int sdebug_cdb_len = DEF_CDB_LEN;
625 static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
626 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
627 static int sdebug_dif = DEF_DIF;
628 static int sdebug_dix = DEF_DIX;
629 static int sdebug_dsense = DEF_D_SENSE;
630 static int sdebug_every_nth = DEF_EVERY_NTH;
631 static int sdebug_fake_rw = DEF_FAKE_RW;
632 static unsigned int sdebug_guard = DEF_GUARD;
633 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
634 static int sdebug_max_luns = DEF_MAX_LUNS;
635 static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
636 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
637 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
638 static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
639 static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
640 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
641 static int sdebug_no_uld;
642 static int sdebug_num_parts = DEF_NUM_PARTS;
643 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
644 static int sdebug_opt_blks = DEF_OPT_BLKS;
645 static int sdebug_opts = DEF_OPTS;
646 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
647 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
648 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
649 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
650 static int sdebug_sector_size = DEF_SECTOR_SIZE;
651 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
652 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
653 static unsigned int sdebug_lbpu = DEF_LBPU;
654 static unsigned int sdebug_lbpws = DEF_LBPWS;
655 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
656 static unsigned int sdebug_lbprz = DEF_LBPRZ;
657 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
658 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
659 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
660 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
661 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
662 static int sdebug_uuid_ctl = DEF_UUID_CTL;
663 static bool sdebug_removable = DEF_REMOVABLE;
664 static bool sdebug_clustering;
665 static bool sdebug_host_lock = DEF_HOST_LOCK;
666 static bool sdebug_strict = DEF_STRICT;
667 static bool sdebug_any_injecting_opt;
668 static bool sdebug_verbose;
669 static bool have_dif_prot;
670 static bool sdebug_statistics = DEF_STATISTICS;
672 static unsigned int sdebug_store_sectors;
673 static sector_t sdebug_capacity; /* in sectors */
675 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
676 may still need them */
677 static int sdebug_heads; /* heads per disk */
678 static int sdebug_cylinders_per; /* cylinders per surface */
679 static int sdebug_sectors_per; /* sectors per cylinder */
681 static LIST_HEAD(sdebug_host_list);
682 static DEFINE_SPINLOCK(sdebug_host_list_lock);
684 static unsigned char *fake_storep; /* ramdisk storage */
685 static struct t10_pi_tuple *dif_storep; /* protection info */
686 static void *map_storep; /* provisioning map */
688 static unsigned long map_size;
689 static int num_aborts;
690 static int num_dev_resets;
691 static int num_target_resets;
692 static int num_bus_resets;
693 static int num_host_resets;
694 static int dix_writes;
695 static int dix_reads;
696 static int dif_errors;
698 static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
699 static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
701 static DEFINE_RWLOCK(atomic_rw);
703 static char sdebug_proc_name[] = MY_NAME;
704 static const char *my_name = MY_NAME;
706 static struct bus_type pseudo_lld_bus;
708 static struct device_driver sdebug_driverfs_driver = {
709 .name = sdebug_proc_name,
710 .bus = &pseudo_lld_bus,
713 static const int check_condition_result =
714 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
716 static const int illegal_condition_result =
717 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
719 static const int device_qfull_result =
720 (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
723 /* Only do the extra work involved in logical block provisioning if one or
724 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
725 * real reads and writes (i.e. not skipping them for speed).
727 static inline bool scsi_debug_lbp(void)
729 return 0 == sdebug_fake_rw &&
730 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
733 static void *fake_store(unsigned long long lba)
735 lba = do_div(lba, sdebug_store_sectors);
737 return fake_storep + lba * sdebug_sector_size;
740 static struct t10_pi_tuple *dif_store(sector_t sector)
742 sector = sector_div(sector, sdebug_store_sectors);
744 return dif_storep + sector;
747 static void sdebug_max_tgts_luns(void)
749 struct sdebug_host_info *sdbg_host;
750 struct Scsi_Host *hpnt;
752 spin_lock(&sdebug_host_list_lock);
753 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
754 hpnt = sdbg_host->shost;
755 if ((hpnt->this_id >= 0) &&
756 (sdebug_num_tgts > hpnt->this_id))
757 hpnt->max_id = sdebug_num_tgts + 1;
759 hpnt->max_id = sdebug_num_tgts;
760 /* sdebug_max_luns; */
761 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
763 spin_unlock(&sdebug_host_list_lock);
766 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
768 /* Set in_bit to -1 to indicate no bit position of invalid field */
769 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
770 enum sdeb_cmd_data c_d,
771 int in_byte, int in_bit)
773 unsigned char *sbuff;
777 sbuff = scp->sense_buffer;
779 sdev_printk(KERN_ERR, scp->device,
780 "%s: sense_buffer is NULL\n", __func__);
783 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
784 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
785 scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
786 memset(sks, 0, sizeof(sks));
792 sks[0] |= 0x7 & in_bit;
794 put_unaligned_be16(in_byte, sks + 1);
800 memcpy(sbuff + sl + 4, sks, 3);
802 memcpy(sbuff + 15, sks, 3);
804 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
805 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
806 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
809 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
811 unsigned char *sbuff;
813 sbuff = scp->sense_buffer;
815 sdev_printk(KERN_ERR, scp->device,
816 "%s: sense_buffer is NULL\n", __func__);
819 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
821 scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
824 sdev_printk(KERN_INFO, scp->device,
825 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
826 my_name, key, asc, asq);
829 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
831 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
/* ioctl entry point: when verbose, logs the recognized ioctl numbers
 * (BLKFLSBUF, CDROM_GET_CAPABILITY) or the raw cmd value, then falls
 * through to the "unsupported" return (see the fdisk comment below).
 * NOTE(review): the leading "if (0x1261 == cmd)" test and the trailing
 * return appear to be dropped from this extraction. */
834 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
836 if (sdebug_verbose) {
838 sdev_printk(KERN_INFO, dev,
839 "%s: BLKFLSBUF [0x1261]\n", __func__);
840 else if (0x5331 == cmd)
841 sdev_printk(KERN_INFO, dev,
842 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
845 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
849 /* return -ENOTTY; // correct return but upsets fdisk */
/* Translate the sdebug_cdb_len module parameter into the scsi_device
 * use_10_for_rw / use_16_for_rw / use_10_for_ms hints that steer which
 * CDB sizes the midlayer builds for READ/WRITE and MODE SENSE/SELECT.
 * Unexpected values are coerced to the 10-byte R/W configuration.
 * NOTE(review): the break statements, the "case 16" label and the default
 * label appear to be dropped from this extraction. */
852 static void config_cdb_len(struct scsi_device *sdev)
854 switch (sdebug_cdb_len) {
855 case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
856 sdev->use_10_for_rw = false;
857 sdev->use_16_for_rw = false;
858 sdev->use_10_for_ms = false;
860 case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
861 sdev->use_10_for_rw = true;
862 sdev->use_16_for_rw = false;
863 sdev->use_10_for_ms = false;
865 case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
866 sdev->use_10_for_rw = true;
867 sdev->use_16_for_rw = false;
868 sdev->use_10_for_ms = true;
/* presumably the 16-byte case - its label is missing here; TODO confirm */
871 sdev->use_10_for_rw = false;
872 sdev->use_16_for_rw = true;
873 sdev->use_10_for_ms = true;
875 case 32: /* No knobs to suggest this so same as 16 for now */
876 sdev->use_10_for_rw = false;
877 sdev->use_16_for_rw = true;
878 sdev->use_10_for_ms = true;
/* fallback for unrecognized sdebug_cdb_len values */
881 pr_warn("unexpected cdb_len=%d, force to 10\n",
883 sdev->use_10_for_rw = true;
884 sdev->use_16_for_rw = false;
885 sdev->use_10_for_ms = false;
/* Apply config_cdb_len() to every scsi_device on every sdebug host.
 * Walks the global sdebug_host_list under sdebug_host_list_lock. */
891 static void all_config_cdb_len(void)
893 struct sdebug_host_info *sdbg_host;
894 struct Scsi_Host *shost;
895 struct scsi_device *sdev;
897 spin_lock(&sdebug_host_list_lock);
898 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
899 shost = sdbg_host->shost;
900 shost_for_each_device(sdev, shost) {
901 config_cdb_len(sdev);
904 spin_unlock(&sdebug_host_list_lock);
/* Clear the SDEBUG_UA_LUNS_CHANGED unit-attention bit on every LUN that
 * shares devip's host and target.  Used for SPC-4 semantics where the
 * "reported luns data has changed" UA is reported only once per target. */
907 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
909 struct sdebug_host_info *sdhp;
910 struct sdebug_dev_info *dp;
912 spin_lock(&sdebug_host_list_lock);
913 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
914 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
915 if ((devip->sdbg_host == dp->sdbg_host) &&
916 (devip->target == dp->target))
917 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
920 spin_unlock(&sdebug_host_list_lock);
/* If a unit attention (UA) is pending on devip, build the matching sense
 * data into scp, clear that UA bit, log it, and return
 * check_condition_result; returns 0 when no UA is pending.
 * NOTE(review): the switch statement's opening, several case labels,
 * break statements and ascq arguments appear to be dropped from this
 * extraction - the visible cases are power-on reset, bus reset, mode
 * changed, capacity changed, microcode changed (with and without reset)
 * and luns changed. */
923 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
927 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
928 if (k != SDEBUG_NUM_UAS) {
929 const char *cp = NULL;
933 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
934 POWER_ON_RESET_ASCQ);
936 cp = "power on reset";
938 case SDEBUG_UA_BUS_RESET:
939 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
944 case SDEBUG_UA_MODE_CHANGED:
945 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
948 cp = "mode parameters changed";
950 case SDEBUG_UA_CAPACITY_CHANGED:
951 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
952 CAPACITY_CHANGED_ASCQ);
954 cp = "capacity data changed";
956 case SDEBUG_UA_MICROCODE_CHANGED:
957 mk_sense_buffer(scp, UNIT_ATTENTION,
959 MICROCODE_CHANGED_ASCQ);
961 cp = "microcode has been changed";
963 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
964 mk_sense_buffer(scp, UNIT_ATTENTION,
966 MICROCODE_CHANGED_WO_RESET_ASCQ);
968 cp = "microcode has been changed without reset";
970 case SDEBUG_UA_LUNS_CHANGED:
972 * SPC-3 behavior is to report a UNIT ATTENTION with
973 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
974 * on the target, until a REPORT LUNS command is
975 * received. SPC-4 behavior is to report it only once.
976 * NOTE: sdebug_scsi_level does not use the same
977 * values as struct scsi_device->scsi_level.
979 if (sdebug_scsi_level >= 6) /* SPC-4 and above */
980 clear_luns_changed_on_target(devip);
981 mk_sense_buffer(scp, UNIT_ATTENTION,
985 cp = "reported luns data has changed";
988 pr_warn("unexpected unit attention code=%d\n", k);
/* consume the UA so it is reported only once */
993 clear_bit(k, devip->uas_bm);
995 sdev_printk(KERN_INFO, scp->device,
996 "%s reports: Unit attention: %s\n",
998 return check_condition_result;
1003 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
/* Copies arr into the command's scatter-gather list and sets resid to the
 * amount of the request buffer left unfilled.  Fails (DID_ERROR) when the
 * command's data direction is not from-device (and not bidirectional). */
1004 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1008 struct scsi_data_buffer *sdb = scsi_in(scp);
1012 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
1013 return DID_ERROR << 16;
1015 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1017 sdb->resid = scsi_bufflen(scp) - act_len;
1022 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1023 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1024 * calls, not required to write in ascending offset order. Assumes resid
1025 * set to scsi_bufflen() prior to any calls.
1027 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1028 int arr_len, unsigned int off_dst)
1031 struct scsi_data_buffer *sdb = scsi_in(scp);
1032 off_t skip = off_dst;
/* nothing to do when the destination offset is past the request buffer */
1034 if (sdb->length <= off_dst)
1036 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
1037 return DID_ERROR << 16;
1039 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1040 arr, arr_len, skip);
1041 pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1042 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
/* resid only ever shrinks: keep the smallest remaining count seen so far */
1043 n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
1044 sdb->resid = min(sdb->resid, n);
1048 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1049 * 'arr' or -1 if error.
/* -1 is returned when the data direction is not to-device (and the command
 * is not bidirectional); 0 when the command carries no data. */
1051 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1054 if (!scsi_bufflen(scp))
1056 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
1059 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
/* Fixed INQUIRY identification strings (space padded per SPC) and three
 * locally-assigned NAA-3 components used to fabricate SAS addresses. */
1063 static char sdebug_inq_vendor_id[9] = "Linux ";
1064 static char sdebug_inq_product_id[17] = "scsi_debug ";
1065 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1066 /* Use some locally assigned NAAs for SAS addresses. */
1067 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1068 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1069 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1071 /* Device identification VPD page. Returns number of bytes placed in arr */
/* Emits a sequence of designation descriptors: T10 vendor id, LU id (UUID
 * or NAA-3 depending on sdebug_uuid_ctl), relative target port, target
 * port NAA, target port group, target device NAA and a SCSI name string.
 * NOTE(review): descriptor length bytes and some arr[num++] lines appear
 * to be dropped from this extraction. */
1072 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1073 int target_dev_id, int dev_id_num,
1074 const char *dev_id_str, int dev_id_str_len,
1075 const uuid_t *lu_name)
1080 port_a = target_dev_id + 1;
1081 /* T10 vendor identifier field format (faked) */
1082 arr[0] = 0x2; /* ASCII */
1085 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1086 memcpy(&arr[12], sdebug_inq_product_id, 16);
1087 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1088 num = 8 + 16 + dev_id_str_len;
/* dev_id_num < 0 means a well-known LU: skip the per-LU designators */
1091 if (dev_id_num >= 0) {
1092 if (sdebug_uuid_ctl) {
1093 /* Locally assigned UUID */
1094 arr[num++] = 0x1; /* binary (not necessarily sas) */
1095 arr[num++] = 0xa; /* PIV=0, lu, naa */
1098 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1100 memcpy(arr + num, lu_name, 16);
1103 /* NAA-3, Logical unit identifier (binary) */
1104 arr[num++] = 0x1; /* binary (not necessarily sas) */
1105 arr[num++] = 0x3; /* PIV=0, lu, naa */
1108 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1111 /* Target relative port number */
1112 arr[num++] = 0x61; /* proto=sas, binary */
1113 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1114 arr[num++] = 0x0; /* reserved */
1115 arr[num++] = 0x4; /* length */
1116 arr[num++] = 0x0; /* reserved */
1117 arr[num++] = 0x0; /* reserved */
1119 arr[num++] = 0x1; /* relative port A */
1121 /* NAA-3, Target port identifier */
1122 arr[num++] = 0x61; /* proto=sas, binary */
1123 arr[num++] = 0x93; /* piv=1, target port, naa */
1126 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1128 /* NAA-3, Target port group identifier */
1129 arr[num++] = 0x61; /* proto=sas, binary */
1130 arr[num++] = 0x95; /* piv=1, target port group id */
1135 put_unaligned_be16(port_group_id, arr + num);
1137 /* NAA-3, Target device identifier */
1138 arr[num++] = 0x61; /* proto=sas, binary */
1139 arr[num++] = 0xa3; /* piv=1, target device, naa */
1142 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1144 /* SCSI name string: Target device identifier */
1145 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1146 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1149 memcpy(arr + num, "naa.32222220", 12);
1151 snprintf(b, sizeof(b), "%08X", target_dev_id);
1152 memcpy(arr + num, b, 8);
1154 memset(arr + num, 0, 4);
/* Canned payload for the Software Interface Identification VPD page (0x84);
 * three 6-byte identifiers.  NOTE(review): the array's closing brace line
 * appears dropped from this extraction. */
1159 static unsigned char vpd84_data[] = {
1160 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1161 0x22,0x22,0x22,0x0,0xbb,0x1,
1162 0x22,0x22,0x22,0x0,0xbb,0x2,
1165 /* Software interface identification VPD page */
/* Copies the canned table into arr and returns its length. */
1166 static int inquiry_vpd_84(unsigned char *arr)
1168 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1169 return sizeof(vpd84_data);
1172 /* Management network addresses VPD page */
/* Emits two network-address descriptors (storage-config and logging URLs),
 * each null terminated and padded to a 4-byte multiple; returns the number
 * of bytes placed in arr.  NOTE(review): the olen/plen assignments and the
 * num += plen lines appear dropped from this extraction. */
1173 static int inquiry_vpd_85(unsigned char *arr)
1176 const char *na1 = "https://www.kernel.org/config";
1177 const char *na2 = "http://www.kernel.org/log";
1180 arr[num++] = 0x1; /* lu, storage config */
1181 arr[num++] = 0x0; /* reserved */
/* round the payload length up to the next multiple of 4 */
1186 plen = ((plen / 4) + 1) * 4;
1187 arr[num++] = plen; /* length, null termianted, padded */
1188 memcpy(arr + num, na1, olen);
1189 memset(arr + num + olen, 0, plen - olen);
1192 arr[num++] = 0x4; /* lu, logging */
1193 arr[num++] = 0x0; /* reserved */
1198 plen = ((plen / 4) + 1) * 4;
1199 arr[num++] = plen; /* length, null terminated, padded */
1200 memcpy(arr + num, na2, olen);
1201 memset(arr + num + olen, 0, plen - olen);
1207 /* SCSI ports VPD page */
/* Describes two target ports: relative port 1 (primary) and relative
 * port 2 (secondary), each with an NAA target-port identifier derived
 * from naa3_comp_a.  Returns the number of bytes placed in arr.
 * NOTE(review): some num increments appear dropped from this extraction. */
1208 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1213 port_a = target_dev_id + 1;
1214 port_b = port_a + 1;
1215 arr[num++] = 0x0; /* reserved */
1216 arr[num++] = 0x0; /* reserved */
1218 arr[num++] = 0x1; /* relative port 1 (primary) */
1219 memset(arr + num, 0, 6);
1222 arr[num++] = 12; /* length tp descriptor */
1223 /* naa-5 target port identifier (A) */
1224 arr[num++] = 0x61; /* proto=sas, binary */
1225 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1226 arr[num++] = 0x0; /* reserved */
1227 arr[num++] = 0x8; /* length */
1228 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1230 arr[num++] = 0x0; /* reserved */
1231 arr[num++] = 0x0; /* reserved */
1233 arr[num++] = 0x2; /* relative port 2 (secondary) */
1234 memset(arr + num, 0, 6);
1237 arr[num++] = 12; /* length tp descriptor */
1238 /* naa-5 target port identifier (B) */
1239 arr[num++] = 0x61; /* proto=sas, binary */
1240 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1241 arr[num++] = 0x0; /* reserved */
1242 arr[num++] = 0x8; /* length */
1243 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
/* Canned ATA IDENTIFY-style payload for the ATA Information VPD page (0x89),
 * faking a SAT-translated device.  NOTE(review): the array's closing brace
 * line appears dropped from this extraction. */
1250 static unsigned char vpd89_data[] = {
1251 /* from 4th byte */ 0,0,0,0,
1252 'l','i','n','u','x',' ',' ',' ',
1253 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1255 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1257 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1258 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1259 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1260 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1262 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1264 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1266 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1267 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1268 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1269 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1270 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1271 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1272 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1273 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1274 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1275 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1276 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1277 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1278 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1279 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1280 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1281 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1282 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1283 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1284 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1285 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1286 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1287 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1288 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1289 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1290 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1291 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1294 /* ATA Information VPD page */
/* Copies the canned table into arr and returns its length. */
1295 static int inquiry_vpd_89(unsigned char *arr)
1297 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1298 return sizeof(vpd89_data);
/* Template for the Block Limits VPD page (0xb0); provisioning-dependent
 * fields are filled in by inquiry_vpd_b0() below.  NOTE(review): the
 * array's closing brace line appears dropped from this extraction. */
1302 static unsigned char vpdb0_data[] = {
1303 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1304 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1305 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1306 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1309 /* Block limits VPD page (SBC-3) */
/* Fills arr from the template, then sets transfer-length granularity and,
 * when logical block provisioning is enabled, the UNMAP/WRITE SAME limits.
 * Returns the page length.  NOTE(review): the scsi_debug_lbp() guard
 * around the provisioning fields appears dropped from this extraction. */
1310 static int inquiry_vpd_b0(unsigned char *arr)
1314 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1316 /* Optimal transfer length granularity */
1317 if (sdebug_opt_xferlen_exp != 0 &&
1318 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1319 gran = 1 << sdebug_opt_xferlen_exp;
1321 gran = 1 << sdebug_physblk_exp;
1322 put_unaligned_be16(gran, arr + 2);
1324 /* Maximum Transfer Length */
1325 if (sdebug_store_sectors > 0x400)
1326 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1328 /* Optimal Transfer Length */
1329 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1332 /* Maximum Unmap LBA Count */
1333 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1335 /* Maximum Unmap Block Descriptor Count */
1336 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1339 /* Unmap Granularity Alignment */
1340 if (sdebug_unmap_alignment) {
1341 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1342 arr[28] |= 0x80; /* UGAVALID */
1345 /* Optimal Unmap Granularity */
1346 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1348 /* Maximum WRITE SAME Length */
1349 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1351 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1353 return sizeof(vpdb0_data);
1356 /* Block device characteristics VPD page (SBC-3) */
/* Advertises a non-rotating (SSD-like) medium in a sub-1.8" form factor;
 * returns the fixed page length (0x3c). */
1357 static int inquiry_vpd_b1(unsigned char *arr)
1359 memset(arr, 0, 0x3c);
1361 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1363 arr[3] = 5; /* less than 1.8" */
1368 /* Logical block provisioning VPD page (SBC-4) */
/* Reports the LBPU/LBPRZ provisioning capabilities drawn from the
 * sdebug_lbp*/
1369 static int inquiry_vpd_b2(unsigned char *arr)
1371 memset(arr, 0, 0x4);
1372 arr[0] = 0; /* threshold exponent */
/* 3-bit LBPRZ field at bits 4:2 of byte 1 (sbc4r07 widened it from 1 bit) */
1379 if (sdebug_lbprz && scsi_debug_lbp())
1380 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1381 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1382 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1383 /* threshold_percentage=0 */
1387 #define SDEBUG_LONG_INQ_SZ 96
1388 #define SDEBUG_MAX_INQ_ARR_SZ 584
/* INQUIRY command responder.  Handles the EVPD page set (0x00, 0x80, 0x83,
 * 0x84, 0x85, 0x86, 0x87, 0x88, plus disk-only 0x89/0xb0/0xb1/0xb2) and
 * the standard INQUIRY response, building the reply in a temporary buffer
 * and copying it out with fill_from_dev_buffer().  CMDDT is rejected as an
 * invalid field.  NOTE(review): some declarations, kfree() calls and
 * return statements appear dropped from this extraction. */
1390 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1392 unsigned char pq_pdt;
1394 unsigned char *cmd = scp->cmnd;
1395 int alloc_len, n, ret;
1396 bool have_wlun, is_disk;
1398 alloc_len = get_unaligned_be16(cmd + 3);
1399 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1401 return DID_REQUEUE << 16;
1402 is_disk = (sdebug_ptype == TYPE_DISK);
1403 have_wlun = scsi_is_wlun(scp->device->lun);
1405 pq_pdt = TYPE_WLUN; /* present, wlun */
1406 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1407 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1409 pq_pdt = (sdebug_ptype & 0x1f);
1411 if (0x2 & cmd[1]) { /* CMDDT bit set */
1412 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1414 return check_condition_result;
1415 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1416 int lu_id_num, port_group_id, target_dev_id, len;
1418 int host_no = devip->sdbg_host->shost->host_no;
/* fabricate stable ids from host/channel/target/lun numbers */
1420 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1421 (devip->channel & 0x7f);
1422 if (sdebug_vpd_use_hostno == 0)
1424 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1425 (devip->target * 1000) + devip->lun);
1426 target_dev_id = ((host_no + 1) * 2000) +
1427 (devip->target * 1000) - 3;
1428 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1429 if (0 == cmd[2]) { /* supported vital product data pages */
1430 arr[1] = cmd[2]; /*sanity */
1432 arr[n++] = 0x0; /* this page */
1433 arr[n++] = 0x80; /* unit serial number */
1434 arr[n++] = 0x83; /* device identification */
1435 arr[n++] = 0x84; /* software interface ident. */
1436 arr[n++] = 0x85; /* management network addresses */
1437 arr[n++] = 0x86; /* extended inquiry */
1438 arr[n++] = 0x87; /* mode page policy */
1439 arr[n++] = 0x88; /* SCSI ports */
1440 if (is_disk) { /* SBC only */
1441 arr[n++] = 0x89; /* ATA information */
1442 arr[n++] = 0xb0; /* Block limits */
1443 arr[n++] = 0xb1; /* Block characteristics */
1444 arr[n++] = 0xb2; /* Logical Block Prov */
1446 arr[3] = n - 4; /* number of supported VPD pages */
1447 } else if (0x80 == cmd[2]) { /* unit serial number */
1448 arr[1] = cmd[2]; /*sanity */
1450 memcpy(&arr[4], lu_id_str, len);
1451 } else if (0x83 == cmd[2]) { /* device identification */
1452 arr[1] = cmd[2]; /*sanity */
1453 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1454 target_dev_id, lu_id_num,
1457 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1458 arr[1] = cmd[2]; /*sanity */
1459 arr[3] = inquiry_vpd_84(&arr[4]);
1460 } else if (0x85 == cmd[2]) { /* Management network addresses */
1461 arr[1] = cmd[2]; /*sanity */
1462 arr[3] = inquiry_vpd_85(&arr[4]);
1463 } else if (0x86 == cmd[2]) { /* extended inquiry */
1464 arr[1] = cmd[2]; /*sanity */
1465 arr[3] = 0x3c; /* number of following entries */
1466 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1467 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1468 else if (have_dif_prot)
1469 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1471 arr[4] = 0x0; /* no protection stuff */
1472 arr[5] = 0x7; /* head of q, ordered + simple q's */
1473 } else if (0x87 == cmd[2]) { /* mode page policy */
1474 arr[1] = cmd[2]; /*sanity */
1475 arr[3] = 0x8; /* number of following entries */
1476 arr[4] = 0x2; /* disconnect-reconnect mp */
1477 arr[6] = 0x80; /* mlus, shared */
1478 arr[8] = 0x18; /* protocol specific lu */
1479 arr[10] = 0x82; /* mlus, per initiator port */
1480 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1481 arr[1] = cmd[2]; /*sanity */
1482 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1483 } else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
1484 arr[1] = cmd[2]; /*sanity */
1485 n = inquiry_vpd_89(&arr[4]);
1486 put_unaligned_be16(n, arr + 2);
1487 } else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
1488 arr[1] = cmd[2]; /*sanity */
1489 arr[3] = inquiry_vpd_b0(&arr[4]);
1490 } else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
1491 arr[1] = cmd[2]; /*sanity */
1492 arr[3] = inquiry_vpd_b1(&arr[4]);
1493 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1494 arr[1] = cmd[2]; /*sanity */
1495 arr[3] = inquiry_vpd_b2(&arr[4]);
/* unknown VPD page requested */
1497 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1499 return check_condition_result;
1501 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1502 ret = fill_from_dev_buffer(scp, arr,
1503 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1507 /* drops through here for a standard inquiry */
1508 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1509 arr[2] = sdebug_scsi_level;
1510 arr[3] = 2; /* response_data_format==2 */
1511 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1512 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1513 if (sdebug_vpd_use_hostno == 0)
1514 arr[5] |= 0x10; /* claim: implicit TPGS */
1515 arr[6] = 0x10; /* claim: MultiP */
1516 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1517 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1518 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1519 memcpy(&arr[16], sdebug_inq_product_id, 16);
1520 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1521 /* Use Vendor Specific area to place driver date in ASCII hex */
1522 memcpy(&arr[36], sdebug_version_date, 8);
1523 /* version descriptors (2 bytes each) follow */
1524 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1525 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1527 if (is_disk) { /* SBC-4 no version claimed */
1528 put_unaligned_be16(0x600, arr + n);
1530 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1531 put_unaligned_be16(0x525, arr + n);
1534 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1535 ret = fill_from_dev_buffer(scp, arr,
1536 min(alloc_len, SDEBUG_LONG_INQ_SZ));
/* Informational Exceptions Control mode page current values; byte 2 bit 2
 * (EWASC/TEST related) and the MRIE nibble in byte 3 are consulted by
 * resp_requests() below.  NOTE(review): the initializer's continuation and
 * closing brace appear dropped from this extraction. */
1541 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
/* REQUEST SENSE responder.  When the IE mode page has TEST set with
 * MRIE==6, fabricates a THRESHOLD EXCEEDED response (fixed or descriptor
 * format per the DESC bit in cmd[1]); otherwise returns the current sense
 * buffer, converting between fixed and descriptor formats when the stored
 * format differs from the requested one.  Clears the stored sense
 * afterwards.  NOTE(review): several lines (len computation, format
 * conversion branches) appear dropped from this extraction. */
1544 static int resp_requests(struct scsi_cmnd *scp,
1545 struct sdebug_dev_info *devip)
1547 unsigned char *sbuff;
1548 unsigned char *cmd = scp->cmnd;
1549 unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1553 memset(arr, 0, sizeof(arr));
1554 dsense = !!(cmd[1] & 1);
1555 sbuff = scp->sense_buffer;
1556 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
/* descriptor-format THRESHOLD EXCEEDED reply */
1559 arr[1] = 0x0; /* NO_SENSE in sense_key */
1560 arr[2] = THRESHOLD_EXCEEDED;
1561 arr[3] = 0xff; /* TEST set and MRIE==6 */
/* fixed-format THRESHOLD EXCEEDED reply */
1565 arr[2] = 0x0; /* NO_SENSE in sense_key */
1566 arr[7] = 0xa; /* 18 byte sense buffer */
1567 arr[12] = THRESHOLD_EXCEEDED;
1568 arr[13] = 0xff; /* TEST set and MRIE==6 */
1571 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1572 if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1573 ; /* have sense and formats match */
1574 else if (arr[0] <= 0x70) {
1584 } else if (dsense) {
/* convert stored fixed-format sense to descriptor format */
1587 arr[1] = sbuff[2]; /* sense key */
1588 arr[2] = sbuff[12]; /* asc */
1589 arr[3] = sbuff[13]; /* ascq */
/* REQUEST SENSE clears any pending sense data */
1601 mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1602 return fill_from_dev_buffer(scp, arr, len);
/* START STOP UNIT responder: rejects non-zero power conditions, records
 * the new stopped state, and honors the IMMED bit via the returned mask.
 * NOTE(review): the "if (power_cond)" guard's opening appears dropped
 * from this extraction. */
1605 static int resp_start_stop(struct scsi_cmnd *scp,
1606 struct sdebug_dev_info *devip)
1608 unsigned char *cmd = scp->cmnd;
1609 int power_cond, stop;
1611 power_cond = (cmd[4] & 0xf0) >> 4;
1613 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1614 return check_condition_result;
/* START bit clear means stop the unit */
1616 stop = !(cmd[4] & 1);
1617 atomic_xchg(&devip->stopped, stop);
1618 return (cmd[1] & 0x1) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */
/* Current simulated capacity in sectors: derived from sdebug_virtual_gb
 * when that is set (so it can exceed the backing store), otherwise the
 * actual backing-store sector count. */
1621 static sector_t get_sdebug_capacity(void)
1623 static const unsigned int gibibyte = 1073741824;
1625 if (sdebug_virtual_gb > 0)
1626 return (sector_t)sdebug_virtual_gb *
1627 (gibibyte / sdebug_sector_size);
1629 return sdebug_store_sectors;
1632 #define SDEBUG_READCAP_ARR_SZ 8
/* READ CAPACITY(10) responder: returns last LBA (or 0xffffffff when the
 * capacity exceeds 32 bits, directing the initiator to READ CAPACITY(16))
 * and the logical block size. */
1633 static int resp_readcap(struct scsi_cmnd *scp,
1634 struct sdebug_dev_info *devip)
1636 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1639 /* following just in case virtual_gb changed */
1640 sdebug_capacity = get_sdebug_capacity();
1641 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1642 if (sdebug_capacity < 0xffffffff) {
1643 capac = (unsigned int)sdebug_capacity - 1;
1644 put_unaligned_be32(capac, arr + 0);
1646 put_unaligned_be32(0xffffffff, arr + 0);
1647 put_unaligned_be16(sdebug_sector_size, arr + 6);
1648 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1651 #define SDEBUG_READCAP16_ARR_SZ 32
/* READ CAPACITY(16) responder: 64-bit last LBA, block size, physical block
 * exponent, lowest aligned LBA, LBPME/LBPRZ provisioning bits and T10
 * protection type/enable. */
1652 static int resp_readcap16(struct scsi_cmnd *scp,
1653 struct sdebug_dev_info *devip)
1655 unsigned char *cmd = scp->cmnd;
1656 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1659 alloc_len = get_unaligned_be32(cmd + 10);
1660 /* following just in case virtual_gb changed */
1661 sdebug_capacity = get_sdebug_capacity();
1662 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1663 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1664 put_unaligned_be32(sdebug_sector_size, arr + 8);
1665 arr[13] = sdebug_physblk_exp & 0xf;
1666 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1668 if (scsi_debug_lbp()) {
1669 arr[14] |= 0x80; /* LBPME */
1670 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1671 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1672 * in the wider field maps to 0 in this field.
1674 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1678 arr[15] = sdebug_lowest_aligned & 0xff;
1680 if (have_dif_prot) {
1681 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1682 arr[12] |= 1; /* PROT_EN */
1685 return fill_from_dev_buffer(scp, arr,
1686 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1689 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
/* REPORT TARGET PORT GROUPS responder: advertises two single-port groups
 * matching VPD page 0x88 - group A (usable) and group B (unavailable).
 * The access state of group A cycles with host_no when
 * sdebug_vpd_use_hostno is 0.  NOTE(review): some length bookkeeping and
 * the kfree/return lines appear dropped from this extraction. */
1691 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1692 struct sdebug_dev_info *devip)
1694 unsigned char *cmd = scp->cmnd;
1696 int host_no = devip->sdbg_host->shost->host_no;
1697 int n, ret, alen, rlen;
1698 int port_group_a, port_group_b, port_a, port_b;
1700 alen = get_unaligned_be32(cmd + 6);
1701 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1703 return DID_REQUEUE << 16;
1705 * EVPD page 0x88 states we have two ports, one
1706 * real and a fake port with no device connected.
1707 * So we create two port groups with one port each
1708 * and set the group with port B to unavailable.
1710 port_a = 0x1; /* relative port A */
1711 port_b = 0x2; /* relative port B */
1712 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1713 (devip->channel & 0x7f);
1714 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1715 (devip->channel & 0x7f) + 0x80;
1718 * The asymmetric access state is cycled according to the host_id.
1721 if (sdebug_vpd_use_hostno == 0) {
1722 arr[n++] = host_no % 3; /* Asymm access state */
1723 arr[n++] = 0x0F; /* claim: all states are supported */
1725 arr[n++] = 0x0; /* Active/Optimized path */
1726 arr[n++] = 0x01; /* only support active/optimized paths */
1728 put_unaligned_be16(port_group_a, arr + n);
1730 arr[n++] = 0; /* Reserved */
1731 arr[n++] = 0; /* Status code */
1732 arr[n++] = 0; /* Vendor unique */
1733 arr[n++] = 0x1; /* One port per group */
1734 arr[n++] = 0; /* Reserved */
1735 arr[n++] = 0; /* Reserved */
1736 put_unaligned_be16(port_a, arr + n);
1738 arr[n++] = 3; /* Port unavailable */
1739 arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1740 put_unaligned_be16(port_group_b, arr + n);
1742 arr[n++] = 0; /* Reserved */
1743 arr[n++] = 0; /* Status code */
1744 arr[n++] = 0; /* Vendor unique */
1745 arr[n++] = 0x1; /* One port per group */
1746 arr[n++] = 0; /* Reserved */
1747 arr[n++] = 0; /* Reserved */
1748 put_unaligned_be16(port_b, arr + n);
/* total descriptor length goes in the 4-byte header */
1752 put_unaligned_be32(rlen, arr + 0);
1755 * Return the smallest value of either
1756 * - The allocated length
1757 * - The constructed command length
1758 * - The maximum array size
1761 ret = fill_from_dev_buffer(scp, arr,
1762 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
/* REPORT SUPPORTED OPERATION CODES responder.  reporting_opts selects
 * either the full command list (0), one opcode (1), one opcode+service
 * action (2), or the combined form (3); rctd adds per-command timeout
 * descriptors.  Walks the driver's opcode_info_arr table, including each
 * opcode's attached (service-action) variants.  NOTE(review): various
 * break statements, brace lines and the supported-flag assignments appear
 * dropped from this extraction. */
1767 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1768 struct sdebug_dev_info *devip)
1771 u8 reporting_opts, req_opcode, sdeb_i, supp;
1773 u32 alloc_len, a_len;
1774 int k, offset, len, errsts, count, bump, na;
1775 const struct opcode_info_t *oip;
1776 const struct opcode_info_t *r_oip;
1778 u8 *cmd = scp->cmnd;
1780 rctd = !!(cmd[2] & 0x80);
1781 reporting_opts = cmd[2] & 0x7;
1782 req_opcode = cmd[3];
1783 req_sa = get_unaligned_be16(cmd + 4);
1784 alloc_len = get_unaligned_be32(cmd + 6);
1785 if (alloc_len < 4 || alloc_len > 0xffff) {
1786 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1787 return check_condition_result;
/* cap the working buffer at 8 KiB regardless of the allocation length */
1789 if (alloc_len > 8192)
1793 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1795 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1797 return check_condition_result;
1799 switch (reporting_opts) {
1800 case 0: /* all commands */
1801 /* count number of commands */
1802 for (count = 0, oip = opcode_info_arr;
1803 oip->num_attached != 0xff; ++oip) {
1804 if (F_INV_OP & oip->flags)
1806 count += (oip->num_attached + 1);
1808 bump = rctd ? 20 : 8;
1809 put_unaligned_be32(count * bump, arr);
1810 for (offset = 4, oip = opcode_info_arr;
1811 oip->num_attached != 0xff && offset < a_len; ++oip) {
1812 if (F_INV_OP & oip->flags)
1814 na = oip->num_attached;
1815 arr[offset] = oip->opcode;
1816 put_unaligned_be16(oip->sa, arr + offset + 2);
/* CTDP bit when timeout descriptors requested */
1818 arr[offset + 5] |= 0x2;
1819 if (FF_SA & oip->flags)
1820 arr[offset + 5] |= 0x1;
1821 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1823 put_unaligned_be16(0xa, arr + offset + 8);
/* emit the service-action variants attached to this opcode */
1825 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1826 if (F_INV_OP & oip->flags)
1829 arr[offset] = oip->opcode;
1830 put_unaligned_be16(oip->sa, arr + offset + 2);
1832 arr[offset + 5] |= 0x2;
1833 if (FF_SA & oip->flags)
1834 arr[offset + 5] |= 0x1;
1835 put_unaligned_be16(oip->len_mask[0],
1838 put_unaligned_be16(0xa,
1845 case 1: /* one command: opcode only */
1846 case 2: /* one command: opcode plus service action */
1847 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1848 sdeb_i = opcode_ind_arr[req_opcode];
1849 oip = &opcode_info_arr[sdeb_i];
1850 if (F_INV_OP & oip->flags) {
1854 if (1 == reporting_opts) {
1855 if (FF_SA & oip->flags) {
1856 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1859 return check_condition_result;
1862 } else if (2 == reporting_opts &&
1863 0 == (FF_SA & oip->flags)) {
1864 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1865 kfree(arr); /* point at requested sa */
1866 return check_condition_result;
1868 if (0 == (FF_SA & oip->flags) &&
1869 req_opcode == oip->opcode)
1871 else if (0 == (FF_SA & oip->flags)) {
1872 na = oip->num_attached;
1873 for (k = 0, oip = oip->arrp; k < na;
1875 if (req_opcode == oip->opcode)
/* supp: 1 = not supported, 3 = supported per SPC */
1878 supp = (k >= na) ? 1 : 3;
1879 } else if (req_sa != oip->sa) {
1880 na = oip->num_attached;
1881 for (k = 0, oip = oip->arrp; k < na;
1883 if (req_sa == oip->sa)
1886 supp = (k >= na) ? 1 : 3;
1890 u = oip->len_mask[0];
1891 put_unaligned_be16(u, arr + 2);
1892 arr[4] = oip->opcode;
1893 for (k = 1; k < u; ++k)
1894 arr[4 + k] = (k < 16) ?
1895 oip->len_mask[k] : 0xff;
1900 arr[1] = (rctd ? 0x80 : 0) | supp;
1902 put_unaligned_be16(0xa, arr + offset);
/* invalid reporting_opts value */
1907 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1909 return check_condition_result;
1911 offset = (offset < a_len) ? offset : a_len;
1912 len = (offset < alloc_len) ? offset : alloc_len;
1913 errsts = fill_from_dev_buffer(scp, arr, len);
/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS responder: claims ABORT TASK,
 * ABORT TASK SET, LOGICAL UNIT RESET and ITNRS; repd selects the extended
 * response format.  NOTE(review): the arr declaration and the
 * repd-dependent length selection appear dropped from this extraction. */
1918 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1919 struct sdebug_dev_info *devip)
1924 u8 *cmd = scp->cmnd;
1926 memset(arr, 0, sizeof(arr));
1927 repd = !!(cmd[2] & 0x80);
1928 alloc_len = get_unaligned_be32(cmd + 6);
1929 if (alloc_len < 4) {
1930 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1931 return check_condition_result;
1933 arr[0] = 0xc8; /* ATS | ATSS | LURS */
1934 arr[1] = 0x1; /* ITNRS */
1941 len = (len < alloc_len) ? len : alloc_len;
1942 return fill_from_dev_buffer(scp, arr, len);
1945 /* <<Following mode page info copied from ST318451LW>> */
1947 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
1948 { /* Read-Write Error Recovery page for mode_sense */
1949 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
/* pcontrol selects current/changeable/default/saved values; the visible
 * memset handles the changeable-values case (all zero past the header) */
1952 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1954 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1955 return sizeof(err_recov_pg);
1958 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
1959 { /* Disconnect-Reconnect page for mode_sense */
1960 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1961 0, 0, 0, 0, 0, 0, 0, 0};
/* changeable-values request zeroes everything past the page header */
1963 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1965 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1966 return sizeof(disconnect_pg);
1969 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
1970 { /* Format device page for mode_sense */
1971 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1972 0, 0, 0, 0, 0, 0, 0, 0,
1973 0, 0, 0, 0, 0x40, 0, 0, 0};
/* fill in live geometry: sectors per track and bytes per sector */
1975 memcpy(p, format_pg, sizeof(format_pg));
1976 put_unaligned_be16(sdebug_sectors_per, p + 10);
1977 put_unaligned_be16(sdebug_sector_size, p + 12);
1978 if (sdebug_removable)
1979 p[20] |= 0x20; /* should agree with INQUIRY */
1981 memset(p + 2, 0, sizeof(format_pg) - 2);
1982 return sizeof(format_pg);
/* Caching mode page current values; WCE (bit 2 of byte 2) may be cleared
 * at mode-sense time by the SDEBUG_OPT_N_WCE option. */
1985 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1986 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
1989 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
1990 { /* Caching page for mode_sense */
1991 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1992 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1993 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1994 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1996 if (SDEBUG_OPT_N_WCE & sdebug_opts)
1997 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
1998 memcpy(p, caching_pg, sizeof(caching_pg));
/* pcontrol: 1 = changeable values, 2 = default values */
2000 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2001 else if (2 == pcontrol)
2002 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2003 return sizeof(caching_pg);
2006 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
/*
 * Build MODE SENSE page 0xa (Control) into @p.  Bit 0x4 of byte 2 (D_SENSE)
 * is set or cleared in the shared ctrl_m_pg before copying -- the guarding
 * conditions are sampled out here, presumably keyed off sdebug_dsense.
 * The ATO bit (0x80 in byte 5) is likewise set under a missing condition.
 * pcontrol==1 yields the changeable mask, pcontrol==2 the defaults.
 * Returns the page length.
 */
2009 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2010 { /* Control mode page for mode_sense */
2011 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2013 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2017 		ctrl_m_pg[2] |= 0x4;
2019 		ctrl_m_pg[2] &= ~0x4;
2022 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2024 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2026 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2027 	else if (2 == pcontrol)
2028 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2029 return sizeof(ctrl_m_pg);
/*
 * Build MODE SENSE page 0x1c (Informational Exceptions control) into @p.
 * Copies the shared iec_m_pg (declared outside this extract; also written
 * by resp_mode_select()), then overlays the changeable mask for
 * pcontrol==1 or the defaults for pcontrol==2.  Returns the page length.
 */
2033 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2034 { /* Informational Exceptions control mode page for mode_sense */
2035 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2037 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2040 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2042 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2043 	else if (2 == pcontrol)
2044 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2045 return sizeof(iec_m_pg);
/*
 * Build MODE SENSE page 0x19 (Protocol Specific Port, SAS SSP short
 * format) into @p from a fixed template; returns the page length.
 * The memset presumably serves the changeable-values pcontrol -- its
 * guard line is not in this extract.
 */
2048 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2049 { /* SAS SSP mode page - short format for mode_sense */
2050 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2051 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2053 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2055 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2056 	return sizeof(sas_sf_m_pg);
/*
 * Build mode page 0x19 subpage 0x1 (SAS Phy Control And Discover) into
 * @p.  Two phy descriptors are reported; before copying, the NAA-3 SAS
 * addresses are patched into both descriptors (offsets 16/24 and 64/72)
 * and per-port relative ids derived from @target_dev_id are written at
 * offsets 20 and 68 (p + 48 + 20).  Returns the subpage length.
 * NOTE(review): the changeable-values memset guard is sampled out.
 */
2060 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2062 { /* SAS phy control and discover mode page for mode_sense */
2063 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2064 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2065 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2066 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2067 0x2, 0, 0, 0, 0, 0, 0, 0,
2068 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2069 0, 0, 0, 0, 0, 0, 0, 0,
2070 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2071 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2072 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2073 0x3, 0, 0, 0, 0, 0, 0, 0,
2074 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2075 0, 0, 0, 0, 0, 0, 0, 0,
2079 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2080 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2081 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2082 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2083 port_a = target_dev_id + 1;
2084 port_b = port_a + 1;
2085 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2086 put_unaligned_be32(port_a, p + 20);
2087 put_unaligned_be32(port_b, p + 48 + 20);
2089 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2090 	return sizeof(sas_pcd_m_pg);
/*
 * Build mode page 0x19 subpage 0x2 (SAS Shared Port) into @p from a
 * fixed template; returns the subpage length.  The memset presumably
 * serves the changeable-values pcontrol (guard line sampled out).
 */
2093 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2094 { /* SAS SSP shared protocol specific port mode subpage */
2095 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2096 0, 0, 0, 0, 0, 0, 0, 0,
2099 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2101 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2102 	return sizeof(sas_sha_m_pg);
2105 #define SDEBUG_MAX_MSENSE_SZ 256
/*
 * MODE SENSE(6) / MODE SENSE(10) responder.
 *
 * Decodes DBD, PC (pcontrol), page/subpage codes and allocation length
 * from the CDB, assembles a mode parameter header, optional block
 * descriptor (8-byte short form, or 16-byte long form when LLBAA is set
 * on the 10-byte CDB) and the requested mode page(s) into a local
 * buffer, then returns it via fill_from_dev_buffer().
 *
 * Errors (all CHECK CONDITION):
 *  - pcontrol==3 (saved values)            -> SAVING PARAMETERS NOT SUPPORTED
 *  - unsupported subpage for the page      -> INVALID FIELD IN CDB (byte 3)
 *  - unsupported page code                 -> INVALID FIELD IN CDB (byte 2)
 *
 * Page 0x3f gathers all supported pages; subpage 0xff additionally pulls
 * in the SAS subpages.  NOTE(review): many lines (header assembly, the
 * switch statement head, the is_disk guards around pages 0x1/0x3/0x8 and
 * the bad_pcode paths) are sampled out of this extract.
 */
2107 static int resp_mode_sense(struct scsi_cmnd *scp,
2108 struct sdebug_dev_info *devip)
2110 int pcontrol, pcode, subpcode, bd_len;
2111 unsigned char dev_spec;
2112 int alloc_len, offset, len, target_dev_id;
2113 int target = scp->device->id;
2115 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2116 unsigned char *cmd = scp->cmnd;
2117 bool dbd, llbaa, msense_6, is_disk, bad_pcode;
2119 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2120 pcontrol = (cmd[2] & 0xc0) >> 6;
2121 pcode = cmd[2] & 0x3f;
2123 msense_6 = (MODE_SENSE == cmd[0]);
2124 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2125 is_disk = (sdebug_ptype == TYPE_DISK);
2126 if (is_disk && !dbd)
2127 bd_len = llbaa ? 16 : 8;
2130 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2131 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2132 if (0x3 == pcontrol) { /* Saving values not supported */
2133 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2134 return check_condition_result;
2136 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2137 (devip->target * 1000) - 3;
2138 /* for disks set DPOFUA bit and clear write protect (WP) bit */
2140 		dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2150 		arr[4] = 0x1; /* set LONGLBA bit */
2151 		arr[7] = bd_len; /* assume 255 or less */
2155 	if ((bd_len > 0) && (!sdebug_capacity))
2156 sdebug_capacity = get_sdebug_capacity();
2159 		if (sdebug_capacity > 0xfffffffe)
2160 put_unaligned_be32(0xffffffff, ap + 0);
2162 			put_unaligned_be32(sdebug_capacity, ap + 0);
2163 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2166 	} else if (16 == bd_len) {
2167 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2168 put_unaligned_be32(sdebug_sector_size, ap + 12);
2173 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2174 /* TODO: Control Extension page */
2175 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2176 return check_condition_result;
2181 	case 0x1: /* Read-Write error recovery page, direct access */
2182 len = resp_err_recov_pg(ap, pcontrol, target);
2185 	case 0x2: /* Disconnect-Reconnect page, all devices */
2186 len = resp_disconnect_pg(ap, pcontrol, target);
2189 	case 0x3: /* Format device page, direct access */
2191 			len = resp_format_pg(ap, pcontrol, target);
2196 	case 0x8: /* Caching page, direct access */
2198 			len = resp_caching_pg(ap, pcontrol, target);
2203 	case 0xa: /* Control Mode page, all devices */
2204 len = resp_ctrl_m_pg(ap, pcontrol, target);
2207 	case 0x19: /* if spc==1 then sas phy, control+discover */
2208 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2209 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2210 return check_condition_result;
2213 		if ((0x0 == subpcode) || (0xff == subpcode))
2214 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2215 if ((0x1 == subpcode) || (0xff == subpcode))
2216 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2218 		if ((0x2 == subpcode) || (0xff == subpcode))
2219 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2222 	case 0x1c: /* Informational Exceptions Mode page, all devices */
2223 len = resp_iec_m_pg(ap, pcontrol, target);
2226 	case 0x3f: /* Read all Mode pages */
2227 if ((0 == subpcode) || (0xff == subpcode)) {
2228 len = resp_err_recov_pg(ap, pcontrol, target);
2229 len += resp_disconnect_pg(ap + len, pcontrol, target);
2231 				len += resp_format_pg(ap + len, pcontrol,
2233 				len += resp_caching_pg(ap + len, pcontrol,
2236 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2237 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2238 if (0xff == subpcode) {
2239 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2240 target, target_dev_id);
2241 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2243 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2246 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2247 return check_condition_result;
2255 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2256 return check_condition_result;
2259 		arr[0] = offset - 1;
2261 		put_unaligned_be16((offset - 2), arr + 0);
2262 	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2265 #define SDEBUG_MAX_MSELECT_SZ 512
/*
 * MODE SELECT(6) / MODE SELECT(10) responder.
 *
 * Validates the CDB (PF must be set, SP must be clear, parameter list
 * must fit SDEBUG_MAX_MSELECT_SZ), fetches the parameter list from the
 * data-out buffer, skips the mode parameter header and block descriptors
 * to locate the single mode page, and validates the page's PS bit and
 * length against the parameter list length.  Supported pages -- Caching
 * (0x8), Control (0xa, which also refreshes sdebug_dsense from its
 * D_SENSE bit) and Informational Exceptions (0x1c) -- have their bytes
 * copied into the corresponding file-scope "current values" arrays,
 * after which a MODE PARAMETERS CHANGED unit attention is latched on the
 * device.  Any other page, or a mismatched page length byte, yields
 * CHECK CONDITION.  NOTE(review): several guard lines (pf/sp decode,
 * md_len check, the switch head and fall-out paths) are sampled out of
 * this extract.
 */
2267 static int resp_mode_select(struct scsi_cmnd *scp,
2268 struct sdebug_dev_info *devip)
2270 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2271 int param_len, res, mpage;
2272 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2273 unsigned char *cmd = scp->cmnd;
2274 int mselect6 = (MODE_SELECT == cmd[0]);
2276 memset(arr, 0, sizeof(arr));
2279 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2280 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2281 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2282 return check_condition_result;
2284 	res = fetch_to_dev_buffer(scp, arr, param_len);
2286 		return DID_ERROR << 16;
2287 	else if (sdebug_verbose && (res < param_len))
2288 sdev_printk(KERN_INFO, scp->device,
2289 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2290 __func__, param_len, res);
2291 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2292 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2294 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2295 return check_condition_result;
2297 	off = bd_len + (mselect6 ? 4 : 8);
2298 mpage = arr[off] & 0x3f;
2299 ps = !!(arr[off] & 0x80);
2301 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2302 return check_condition_result;
2304 	spf = !!(arr[off] & 0x40);
2305 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2307 	if ((pg_len + off) > param_len) {
2308 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2309 PARAMETER_LIST_LENGTH_ERR, 0);
2310 return check_condition_result;
2313 	case 0x8: /* Caching Mode page */
2314 if (caching_pg[1] == arr[off + 1]) {
2315 memcpy(caching_pg + 2, arr + off + 2,
2316 sizeof(caching_pg) - 2);
2317 goto set_mode_changed_ua;
2320 	case 0xa: /* Control Mode page */
2321 if (ctrl_m_pg[1] == arr[off + 1]) {
2322 memcpy(ctrl_m_pg + 2, arr + off + 2,
2323 sizeof(ctrl_m_pg) - 2);
2324 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2325 goto set_mode_changed_ua;
2328 	case 0x1c: /* Informational Exceptions Mode page */
2329 if (iec_m_pg[1] == arr[off + 1]) {
2330 memcpy(iec_m_pg + 2, arr + off + 2,
2331 sizeof(iec_m_pg) - 2);
2332 goto set_mode_changed_ua;
2338 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2339 return check_condition_result;
2340 set_mode_changed_ua:
2341 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
/*
 * Fill @arr with the body of the Temperature log page (0xd): two fixed
 * parameters (current temperature 38, reference 65).  Returns the number
 * of bytes written.
 */
2345 static int resp_temp_l_pg(unsigned char *arr)
2347 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2348 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2351 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2352 return sizeof(temp_l_pg);
/*
 * Fill @arr with the body of the Informational Exceptions log page
 * (0x2f).  If the TEST bit is set in the shared IE mode page, the ASC is
 * overridden to THRESHOLD_EXCEEDED (the matching temperature override is
 * sampled out of this extract).  Returns the number of bytes written.
 */
2355 static int resp_ie_l_pg(unsigned char *arr)
2357 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2360 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2361 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2362 arr[4] = THRESHOLD_EXCEEDED;
2365 	return sizeof(ie_l_pg);
2368 #define SDEBUG_MAX_LSENSE_SZ 512
/*
 * LOG SENSE responder.
 *
 * Rejects PPC/SP bits (INVALID FIELD IN CDB, byte 1), then serves:
 *  - subpage 0x00: page 0x0 (supported pages list), 0xd (temperature),
 *    0x2f (informational exceptions);
 *  - subpage 0xff: the corresponding supported page+subpage lists;
 *  - anything else: INVALID FIELD IN CDB.
 * The response length is clamped to both the CDB allocation length and
 * SDEBUG_MAX_INQ_ARR_SZ before fill_from_dev_buffer().
 * NOTE(review): the switch heads, the ppc/sp decode and several
 * list-building lines are sampled out of this extract.
 */
2370 static int resp_log_sense(struct scsi_cmnd *scp,
2371 struct sdebug_dev_info *devip)
2373 int ppc, sp, pcode, subpcode, alloc_len, len, n;
2374 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2375 unsigned char *cmd = scp->cmnd;
2377 memset(arr, 0, sizeof(arr));
2381 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2382 return check_condition_result;
2384 	pcode = cmd[2] & 0x3f;
2385 subpcode = cmd[3] & 0xff;
2386 alloc_len = get_unaligned_be16(cmd + 7);
2388 if (0 == subpcode) {
2390 		case 0x0: /* Supported log pages log page */
2392 			arr[n++] = 0x0; /* this page */
2393 arr[n++] = 0xd; /* Temperature */
2394 arr[n++] = 0x2f; /* Informational exceptions */
2397 		case 0xd: /* Temperature log page */
2398 arr[3] = resp_temp_l_pg(arr + 4);
2400 		case 0x2f: /* Informational exceptions log page */
2401 arr[3] = resp_ie_l_pg(arr + 4);
2404 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2405 return check_condition_result;
2407 	} else if (0xff == subpcode) {
2411 		case 0x0: /* Supported log pages and subpages log page */
2414 			arr[n++] = 0x0; /* 0,0 page */
2416 			arr[n++] = 0xff; /* this page */
2418 			arr[n++] = 0x0; /* Temperature */
2420 			arr[n++] = 0x0; /* Informational exceptions */
2423 		case 0xd: /* Temperature subpages */
2426 			arr[n++] = 0x0; /* Temperature */
2429 		case 0x2f: /* Informational exceptions subpages */
2432 			arr[n++] = 0x0; /* Informational exceptions */
2436 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2437 return check_condition_result;
2440 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2441 return check_condition_result;
2443 	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2444 return fill_from_dev_buffer(scp, arr,
2445 min(len, SDEBUG_MAX_INQ_ARR_SZ));
/*
 * Common LBA/length validation for media access commands.
 * Returns CHECK CONDITION with LBA OUT OF RANGE when lba+num exceeds the
 * simulated capacity, or INVALID FIELD IN CDB when the transfer length
 * exceeds the backing store (the "return 0" success line is sampled out
 * of this extract).
 */
2448 static int check_device_access_params(struct scsi_cmnd *scp,
2449 unsigned long long lba, unsigned int num)
2451 if (lba + num > sdebug_capacity) {
2452 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2453 return check_condition_result;
2455 	/* transfer length excessive (tie in to block limits VPD page) */
2456 if (num > sdebug_store_sectors) {
2457 /* needs work to find which cdb byte 'num' comes from */
2458 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2459 return check_condition_result;
/* Returns number of bytes copied or -1 if error. */
/*
 * Copy @num sectors between the command's scatter-gather list and the
 * fake_storep RAM backing store, starting @sg_skip bytes into the sgl.
 * @do_write selects direction.  @lba is reduced modulo
 * sdebug_store_sectors, and a transfer crossing the end of the store
 * wraps around to its start (the "rest" second copy).  The direction
 * check rejects commands whose sc_data_direction disagrees (bidi
 * excepted); its early-return line is sampled out of this extract.
 */
2465 static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
2466 u32 num, bool do_write)
2469 u64 block, rest = 0;
2470 struct scsi_data_buffer *sdb;
2471 enum dma_data_direction dir;
2474 		sdb = scsi_out(scmd);
2475 dir = DMA_TO_DEVICE;
2477 		sdb = scsi_in(scmd);
2478 dir = DMA_FROM_DEVICE;
2483 	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2486 	block = do_div(lba, sdebug_store_sectors);
2487 if (block + num > sdebug_store_sectors)
2488 rest = block + num - sdebug_store_sectors;
2490 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2491 fake_storep + (block * sdebug_sector_size),
2492 (num - rest) * sdebug_sector_size, sg_skip, do_write);
2493 if (ret != (num - rest) * sdebug_sector_size)
2497 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2498 fake_storep, rest * sdebug_sector_size,
2499 sg_skip + ((num - rest) * sdebug_sector_size),
/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
 * arr into fake_store(lba,num) and return true. If comparison fails then
 * return false. */
/*
 * COMPARE AND WRITE helper: @arr holds 2*num blocks -- the verify data
 * followed by the replacement data.  The LBA is reduced modulo the store
 * size and transfers wrap around the end of fake_storep, mirroring
 * do_device_access().  The compare-failure early returns are sampled out
 * of this extract.
 */
2509 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2512 u64 block, rest = 0;
2513 u32 store_blks = sdebug_store_sectors;
2514 u32 lb_size = sdebug_sector_size;
2516 block = do_div(lba, store_blks);
2517 if (block + num > store_blks)
2518 rest = block + num - store_blks;
2520 	res = !memcmp(fake_storep + (block * lb_size), arr,
2521 (num - rest) * lb_size);
2525 		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2529 	arr += num * lb_size;
2530 memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2532 		memcpy(fake_storep, arr + ((num - rest) * lb_size),
/*
 * Compute the T10 DIF guard tag over one sector: either an IP checksum
 * or a CRC-T10DIF, selected by a condition (presumably sdebug_guard)
 * whose line is sampled out of this extract.  Returns the big-endian tag.
 */
2537 static __be16 dif_compute_csum(const void *buf, int len)
2542 		csum = (__force __be16)ip_compute_csum(buf, len);
2544 		csum = cpu_to_be16(crc_t10dif(buf, len));
/*
 * Verify one protection information tuple @sdt against the sector @data.
 * Checks the guard tag (recomputed checksum), then the reference tag:
 * for Type 1 protection it must equal the low 32 bits of the sector
 * number, for Type 2 the expected initial reference tag @ei_lba.
 * Logs and returns non-zero on mismatch (the specific return values and
 * the success return are sampled out of this extract).
 */
2549 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2550 sector_t sector, u32 ei_lba)
2552 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
2554 if (sdt->guard_tag != csum) {
2555 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2556 (unsigned long)sector,
2557 be16_to_cpu(sdt->guard_tag),
2561 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2562 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2563 pr_err("REF check failed on sector %lu\n",
2564 (unsigned long)sector);
2567 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2568 be32_to_cpu(sdt->ref_tag) != ei_lba) {
2569 pr_err("REF check failed on sector %lu\n",
2570 (unsigned long)sector);
/*
 * Copy protection-information tuples between the command's protection
 * scatter-gather list and the dif_storep backing array for @sectors
 * sectors starting at @sector.  @read selects direction (store -> sgl on
 * read, sgl -> store on write).  Uses an atomic sg mapping iterator;
 * copies that run past the end of dif_storep wrap to its start (the
 * "rest" second memcpy pair).
 */
2576 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2577 unsigned int sectors, bool read)
2581 const void *dif_store_end = dif_storep + sdebug_store_sectors;
2582 struct sg_mapping_iter miter;
2584 /* Bytes of protection data to copy into sgl */
2585 resid = sectors * sizeof(*dif_storep);
2587 sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2588 scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2589 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2591 while (sg_miter_next(&miter) && resid > 0) {
2592 size_t len = min(miter.length, resid);
2593 void *start = dif_store(sector);
2596 		if (dif_store_end < start + len)
2597 rest = start + len - dif_store_end;
2602 			memcpy(paddr, start, len - rest);
2604 			memcpy(start, paddr, len - rest);
2608 				memcpy(paddr + len - rest, dif_storep, rest);
2610 				memcpy(dif_storep, paddr + len - rest, rest);
2613 		sector += len / sizeof(*dif_storep);
2616 	sg_miter_stop(&miter);
/*
 * Verify the stored protection information for a READ: for each of
 * @sectors sectors, run dif_verify() against the stored tuple, skipping
 * sectors whose app tag is the 0xffff escape value.  On success the
 * tuples are copied out to the command's protection sgl via
 * dif_copy_prot().  Non-zero dif_verify() results propagate to the
 * caller (the early-return line is sampled out of this extract).
 */
2619 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2620 unsigned int sectors, u32 ei_lba)
2623 struct t10_pi_tuple *sdt;
2626 for (i = 0; i < sectors; i++, ei_lba++) {
2629 		sector = start_sec + i;
2630 sdt = dif_store(sector);
2632 if (sdt->app_tag == cpu_to_be16(0xffff))
2635 		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2642 	dif_copy_prot(SCpnt, start_sec, sectors, true);
/*
 * READ responder (READ 6/10/12/16/32 and XDWRITEREAD(10)).
 *
 * Decodes LBA, transfer length and (for 32-byte CDBs) the expected
 * initial logical block reference tag, validates protection settings and
 * the LBA range, optionally injects a MEDIUM ERROR inside the configured
 * [sdebug_medium_error_start, +count) window (setting the fixed-format
 * sense information field and Valid bit), then copies data out of the
 * fake store under the atomic_rw read lock.  When DIX is active the
 * stored PI is verified first; a failure aborts with sense key ABORTED
 * COMMAND / asc 0x10.  After a successful transfer the residual is set
 * and any armed error injections (recovered / transport / dif / dix)
 * produce their respective sense data.
 * NOTE(review): the switch head, several case/break lines and the final
 * return are sampled out of this extract.
 */
2648 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2650 u8 *cmd = scp->cmnd;
2651 struct sdebug_queued_cmd *sqcp;
2655 unsigned long iflags;
2662 		lba = get_unaligned_be64(cmd + 2);
2663 num = get_unaligned_be32(cmd + 10);
2668 		lba = get_unaligned_be32(cmd + 2);
2669 num = get_unaligned_be16(cmd + 7);
2674 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2675 (u32)(cmd[1] & 0x1f) << 16;
2676 num = (0 == cmd[4]) ? 256 : cmd[4];
2681 		lba = get_unaligned_be32(cmd + 2);
2682 num = get_unaligned_be32(cmd + 6);
2685 	case XDWRITEREAD_10:
2687 lba = get_unaligned_be32(cmd + 2);
2688 num = get_unaligned_be16(cmd + 7);
2691 	default: /* assume READ(32) */
2692 lba = get_unaligned_be64(cmd + 12);
2693 ei_lba = get_unaligned_be32(cmd + 20);
2694 num = get_unaligned_be32(cmd + 28);
2698 	if (unlikely(have_dif_prot && check_prot)) {
2699 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2701 mk_sense_invalid_opcode(scp);
2702 return check_condition_result;
2704 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
2705 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
2706 (cmd[1] & 0xe0) == 0)
2707 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2710 	if (unlikely(sdebug_any_injecting_opt)) {
2711 sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
2714 if (sqcp->inj_short)
2720 	/* inline check_device_access_params() */
2721 if (unlikely(lba + num > sdebug_capacity)) {
2722 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2723 return check_condition_result;
2725 	/* transfer length excessive (tie in to block limits VPD page) */
2726 if (unlikely(num > sdebug_store_sectors)) {
2727 /* needs work to find which cdb byte 'num' comes from */
2728 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2729 return check_condition_result;
2732 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
2733 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
2734 ((lba + num) > sdebug_medium_error_start))) {
2735 /* claim unrecoverable read error */
2736 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2737 /* set info field and valid bit for fixed descriptor */
2738 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2739 scp->sense_buffer[0] |= 0x80; /* Valid bit */
2740 ret = (lba < OPT_MEDIUM_ERR_ADDR)
2741 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2742 put_unaligned_be32(ret, scp->sense_buffer + 3);
2744 		scsi_set_resid(scp, scsi_bufflen(scp));
2745 return check_condition_result;
2748 	read_lock_irqsave(&atomic_rw, iflags);
2751 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
2752 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2755 			read_unlock_irqrestore(&atomic_rw, iflags);
2756 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2757 return illegal_condition_result;
2761 	ret = do_device_access(scp, 0, lba, num, false);
2762 read_unlock_irqrestore(&atomic_rw, iflags);
2763 if (unlikely(ret == -1))
2764 return DID_ERROR << 16;
2766 	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2768 	if (unlikely(sqcp)) {
2769 if (sqcp->inj_recovered) {
2770 mk_sense_buffer(scp, RECOVERED_ERROR,
2771 THRESHOLD_EXCEEDED, 0);
2772 return check_condition_result;
2773 } else if (sqcp->inj_transport) {
2774 mk_sense_buffer(scp, ABORTED_COMMAND,
2775 TRANSPORT_PROBLEM, ACK_NAK_TO);
2776 return check_condition_result;
2777 } else if (sqcp->inj_dif) {
2778 /* Logical block guard check failed */
2779 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2780 return illegal_condition_result;
2781 } else if (sqcp->inj_dix) {
2782 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2783 return illegal_condition_result;
/*
 * Debug helper: pr_err() a hex/ASCII dump of @buf (@len bytes), 16 bytes
 * per line; printable characters (0x20..0x7d) are rendered literally,
 * others via the fallback scnprintf branch.
 */
2789 static void dump_sector(unsigned char *buf, int len)
2793 pr_err(">>> Sector Dump <<<\n");
2794 for (i = 0 ; i < len ; i += 16) {
2797 		for (j = 0, n = 0; j < 16; j++) {
2798 unsigned char c = buf[i+j];
2800 if (c >= 0x20 && c < 0x7e)
2801 n += scnprintf(b + n, sizeof(b) - n,
2804 				n += scnprintf(b + n, sizeof(b) - n,
2807 		pr_err("%04d: %s\n", i, b);
/*
 * Verify incoming protection information on a WRITE: walk the protection
 * sgl and the data sgl in lockstep (atomic sg mapping iterators), run
 * dif_verify() on every tuple/sector pair, and dump the offending sector
 * on failure.  On success the verified tuples are stored into dif_storep
 * via dif_copy_prot(..., false).  The error path stops both iterators
 * before returning (failure return value lines are sampled out of this
 * extract).
 */
2811 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2812 unsigned int sectors, u32 ei_lba)
2815 struct t10_pi_tuple *sdt;
2817 sector_t sector = start_sec;
2820 struct sg_mapping_iter diter;
2821 struct sg_mapping_iter piter;
2823 BUG_ON(scsi_sg_count(SCpnt) == 0);
2824 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2826 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2827 scsi_prot_sg_count(SCpnt),
2828 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2829 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2830 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2832 /* For each protection page */
2833 while (sg_miter_next(&piter)) {
2835 if (WARN_ON(!sg_miter_next(&diter))) {
2840 		for (ppage_offset = 0; ppage_offset < piter.length;
2841 ppage_offset += sizeof(struct t10_pi_tuple)) {
2842 /* If we're at the end of the current
2843 * data page advance to the next one
2845 if (dpage_offset >= diter.length) {
2846 if (WARN_ON(!sg_miter_next(&diter))) {
2853 			sdt = piter.addr + ppage_offset;
2854 daddr = diter.addr + dpage_offset;
2856 ret = dif_verify(sdt, daddr, sector, ei_lba);
2858 dump_sector(daddr, sdebug_sector_size);
2864 			dpage_offset += sdebug_sector_size;
2866 		diter.consumed = dpage_offset;
2867 sg_miter_stop(&diter);
2869 	sg_miter_stop(&piter);
2871 	dif_copy_prot(SCpnt, start_sec, sectors, false);
2878 	sg_miter_stop(&diter);
2879 sg_miter_stop(&piter);
/*
 * Map an LBA to its index in the provisioning bitmap (map_storep):
 * biases the LBA for a non-zero unmap alignment, then divides by the
 * unmap granularity (the return line is sampled out of this extract).
 */
2883 static unsigned long lba_to_map_index(sector_t lba)
2885 if (sdebug_unmap_alignment)
2886 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2887 sector_div(lba, sdebug_unmap_granularity);
/*
 * Inverse of lba_to_map_index(): convert a provisioning bitmap index
 * back to the first LBA of that unmap granule, undoing the alignment
 * bias (the return line is sampled out of this extract).
 */
2891 static sector_t map_index_to_lba(unsigned long index)
2893 sector_t lba = index * sdebug_unmap_granularity;
2895 if (sdebug_unmap_alignment)
2896 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
/*
 * Report the provisioning state of @lba: returns whether its granule is
 * mapped, and via *@num the count of consecutive LBAs (clamped to the
 * store size) sharing that state -- found by scanning the bitmap for the
 * next bit of opposite value.
 */
2900 static unsigned int map_state(sector_t lba, unsigned int *num)
2903 unsigned int mapped;
2904 unsigned long index;
2907 index = lba_to_map_index(lba);
2908 mapped = test_bit(index, map_storep);
2911 		next = find_next_zero_bit(map_storep, map_size, index);
2913 		next = find_next_bit(map_storep, map_size, index);
2915 	end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
/*
 * Mark every unmap granule touched by [@lba, @lba+@len) as mapped in
 * the provisioning bitmap, bounds-checked against map_size.  The loop
 * advances granule by granule via map_index_to_lba(index + 1).
 */
2920 static void map_region(sector_t lba, unsigned int len)
2922 sector_t end = lba + len;
2925 unsigned long index = lba_to_map_index(lba);
2927 if (index < map_size)
2928 set_bit(index, map_storep);
2930 		lba = map_index_to_lba(index + 1);
/*
 * Clear the mapped bit for every unmap granule *fully* contained in
 * [@lba, @lba+@len) (partial granules are left mapped).  When LBPRZ is
 * configured, the unmapped data region is overwritten -- zeroes for
 * LBPRZ=1, 0xff bytes for LBPRZ=2 -- and the matching protection tuples
 * in dif_storep are poisoned with 0xff (the dif_storep guard line is
 * sampled out of this extract).
 */
2934 static void unmap_region(sector_t lba, unsigned int len)
2936 sector_t end = lba + len;
2939 unsigned long index = lba_to_map_index(lba);
2941 if (lba == map_index_to_lba(index) &&
2942 lba + sdebug_unmap_granularity <= end &&
2944 			clear_bit(index, map_storep);
2945 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
2946 memset(fake_storep +
2947 lba * sdebug_sector_size,
2948 (sdebug_lbprz & 1) ? 0 : 0xff,
2949 sdebug_sector_size *
2950 sdebug_unmap_granularity);
2953 				memset(dif_storep + lba, 0xff,
2954 sizeof(*dif_storep) *
2955 sdebug_unmap_granularity);
2958 		lba = map_index_to_lba(index + 1);
/*
 * WRITE responder (WRITE 6/10/12/16/32 and XDWRITEREAD(10)).
 *
 * Mirrors resp_read_dt0(): decodes LBA/length/ei_lba per CDB form,
 * validates protection settings and the LBA range, then -- under the
 * atomic_rw write lock -- verifies incoming PI when DIX is active
 * (failure: ILLEGAL REQUEST / asc 0x10), copies data into the fake
 * store, and updates the provisioning map when logical block
 * provisioning is enabled.  Short transfers are logged in verbose mode
 * and armed error injections (recovered / dif / dix) produce their
 * respective sense data.
 * NOTE(review): the switch head, some case/break lines and the final
 * return are sampled out of this extract.
 */
2962 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2964 u8 *cmd = scp->cmnd;
2968 unsigned long iflags;
2975 		lba = get_unaligned_be64(cmd + 2);
2976 num = get_unaligned_be32(cmd + 10);
2981 		lba = get_unaligned_be32(cmd + 2);
2982 num = get_unaligned_be16(cmd + 7);
2987 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2988 (u32)(cmd[1] & 0x1f) << 16;
2989 num = (0 == cmd[4]) ? 256 : cmd[4];
2994 		lba = get_unaligned_be32(cmd + 2);
2995 num = get_unaligned_be32(cmd + 6);
2998 	case 0x53: /* XDWRITEREAD(10) */
3000 lba = get_unaligned_be32(cmd + 2);
3001 num = get_unaligned_be16(cmd + 7);
3004 	default: /* assume WRITE(32) */
3005 lba = get_unaligned_be64(cmd + 12);
3006 ei_lba = get_unaligned_be32(cmd + 20);
3007 num = get_unaligned_be32(cmd + 28);
3011 	if (unlikely(have_dif_prot && check_prot)) {
3012 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3014 mk_sense_invalid_opcode(scp);
3015 return check_condition_result;
3017 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3018 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3019 (cmd[1] & 0xe0) == 0)
3020 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3024 	/* inline check_device_access_params() */
3025 if (unlikely(lba + num > sdebug_capacity)) {
3026 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3027 return check_condition_result;
3029 	/* transfer length excessive (tie in to block limits VPD page) */
3030 if (unlikely(num > sdebug_store_sectors)) {
3031 /* needs work to find which cdb byte 'num' comes from */
3032 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3033 return check_condition_result;
3036 	write_lock_irqsave(&atomic_rw, iflags);
3039 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3040 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3043 			write_unlock_irqrestore(&atomic_rw, iflags);
3044 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3045 return illegal_condition_result;
3049 	ret = do_device_access(scp, 0, lba, num, true);
3050 if (unlikely(scsi_debug_lbp()))
3051 map_region(lba, num);
3052 write_unlock_irqrestore(&atomic_rw, iflags);
3053 if (unlikely(-1 == ret))
3054 return DID_ERROR << 16;
3055 else if (unlikely(sdebug_verbose &&
3056 (ret < (num * sdebug_sector_size))))
3057 sdev_printk(KERN_INFO, scp->device,
3058 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3059 my_name, num * sdebug_sector_size, ret);
3061 	if (unlikely(sdebug_any_injecting_opt)) {
3062 struct sdebug_queued_cmd *sqcp =
3063 (struct sdebug_queued_cmd *)scp->host_scribble;
3066 if (sqcp->inj_recovered) {
3067 mk_sense_buffer(scp, RECOVERED_ERROR,
3068 THRESHOLD_EXCEEDED, 0);
3069 return check_condition_result;
3070 } else if (sqcp->inj_dif) {
3071 /* Logical block guard check failed */
3072 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3073 return illegal_condition_result;
3074 } else if (sqcp->inj_dix) {
3075 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3076 return illegal_condition_result;
 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
/*
 * WRITE SCATTERED(16/32) responder.
 *
 * Decodes WRPROTECT, the LB data offset (lbdof, in logical blocks), the
 * LBA-range-descriptor count and the buffer transfer length from either
 * CDB form, then fetches the whole data-out buffer header
 * (lbdof * block size bytes) into a kzalloc'd scratch buffer.  Each
 * 32-byte LBA range descriptor is validated (range check, cumulative
 * block count vs. buffer transfer length) and written to the fake store
 * via do_device_access() at its running sg offset, with optional PI
 * verification and provisioning-map update, all under the atomic_rw
 * write lock.  Error-injection hooks mirror resp_write_dt0().
 * Errors unwind through err_out_unlock, dropping the lock (and,
 * presumably, freeing lrdp on a path sampled out of this extract).
 */
3087 static int resp_write_scat(struct scsi_cmnd *scp,
3088 struct sdebug_dev_info *devip)
3090 u8 *cmd = scp->cmnd;
3094 u16 lbdof, num_lrd, k;
3095 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3096 u32 lb_size = sdebug_sector_size;
3099 unsigned long iflags;
3102 static const u32 lrd_size = 32; /* + parameter list header size */
3104 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3106 wrprotect = (cmd[10] >> 5) & 0x7;
3107 lbdof = get_unaligned_be16(cmd + 12);
3108 num_lrd = get_unaligned_be16(cmd + 16);
3109 bt_len = get_unaligned_be32(cmd + 28);
3110 } else { /* that leaves WRITE SCATTERED(16) */
3112 wrprotect = (cmd[2] >> 5) & 0x7;
3113 lbdof = get_unaligned_be16(cmd + 4);
3114 num_lrd = get_unaligned_be16(cmd + 8);
3115 bt_len = get_unaligned_be32(cmd + 10);
3116 if (unlikely(have_dif_prot)) {
3117 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3119 mk_sense_invalid_opcode(scp);
3120 return illegal_condition_result;
3122 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3123 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3125 			sdev_printk(KERN_ERR, scp->device,
3126 "Unprotected WR to DIF device\n");
3129 	if ((num_lrd == 0) || (bt_len == 0))
3130 return 0; /* T10 says these do-nothings are not errors */
3133 		sdev_printk(KERN_INFO, scp->device,
3134 "%s: %s: LB Data Offset field bad\n",
3136 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3137 return illegal_condition_result;
3139 	lbdof_blen = lbdof * lb_size;
3140 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3142 			sdev_printk(KERN_INFO, scp->device,
3143 "%s: %s: LBA range descriptors don't fit\n",
3145 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3146 return illegal_condition_result;
3148 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3150 		return SCSI_MLQUEUE_HOST_BUSY;
3152 		sdev_printk(KERN_INFO, scp->device,
3153 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3154 my_name, __func__, lbdof_blen);
3155 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3157 		ret = DID_ERROR << 16;
3161 	write_lock_irqsave(&atomic_rw, iflags);
3162 sg_off = lbdof_blen;
3163 /* Spec says Buffer xfer Length field in number of LBs in dout */
3165 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3166 lba = get_unaligned_be64(up + 0);
3167 num = get_unaligned_be32(up + 8);
3169 			sdev_printk(KERN_INFO, scp->device,
3170 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3171 my_name, __func__, k, lba, num, sg_off);
3174 		ret = check_device_access_params(scp, lba, num);
3176 			goto err_out_unlock;
3177 		num_by = num * lb_size;
3178 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3180 if ((cum_lb + num) > bt_len) {
3182 				sdev_printk(KERN_INFO, scp->device,
3183 "%s: %s: sum of blocks > data provided\n",
3185 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3187 			ret = illegal_condition_result;
3188 goto err_out_unlock;
3192 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3193 int prot_ret = prot_verify_write(scp, lba, num,
3197 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3199 				ret = illegal_condition_result;
3200 goto err_out_unlock;
3204 		ret = do_device_access(scp, sg_off, lba, num, true);
3205 if (unlikely(scsi_debug_lbp()))
3206 map_region(lba, num);
3207 if (unlikely(-1 == ret)) {
3208 ret = DID_ERROR << 16;
3209 goto err_out_unlock;
3210 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3211 sdev_printk(KERN_INFO, scp->device,
3212 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3213 my_name, num_by, ret);
3215 		if (unlikely(sdebug_any_injecting_opt)) {
3216 struct sdebug_queued_cmd *sqcp =
3217 (struct sdebug_queued_cmd *)scp->host_scribble;
3220 if (sqcp->inj_recovered) {
3221 mk_sense_buffer(scp, RECOVERED_ERROR,
3222 THRESHOLD_EXCEEDED, 0);
3223 ret = illegal_condition_result;
3224 goto err_out_unlock;
3225 } else if (sqcp->inj_dif) {
3226 /* Logical block guard check failed */
3227 mk_sense_buffer(scp, ABORTED_COMMAND,
3229 ret = illegal_condition_result;
3230 goto err_out_unlock;
3231 } else if (sqcp->inj_dix) {
3232 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3234 ret = illegal_condition_result;
3235 goto err_out_unlock;
3244 	write_unlock_irqrestore(&atomic_rw, iflags);
/*
 * Common WRITE SAME worker for the 10- and 16-byte forms.
 *
 * After range validation, under the atomic_rw write lock:
 *  - with UNMAP set and provisioning enabled, unmaps the range
 *    (presumably returning early on a path sampled out of this extract);
 *  - otherwise materialises the first block -- zeroed when NDOB is set,
 *    else fetched from the data-out buffer -- then replicates it across
 *    the remaining @num-1 blocks and re-marks the range mapped.
 * A failed fetch yields DID_ERROR; short fetches are logged in verbose
 * mode.
 */
3250 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3251 u32 ei_lba, bool unmap, bool ndob)
3253 unsigned long iflags;
3254 unsigned long long i;
3258 ret = check_device_access_params(scp, lba, num);
3262 	write_lock_irqsave(&atomic_rw, iflags);
3264 	if (unmap && scsi_debug_lbp()) {
3265 unmap_region(lba, num);
3269 	lba_off = lba * sdebug_sector_size;
3270 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3272 		memset(fake_storep + lba_off, 0, sdebug_sector_size);
3275 		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
3276 sdebug_sector_size);
3279 			write_unlock_irqrestore(&atomic_rw, iflags);
3280 return DID_ERROR << 16;
3281 } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
3282 sdev_printk(KERN_INFO, scp->device,
3283 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3284 my_name, "write same",
3285 sdebug_sector_size, ret);
3287 	/* Copy first sector to remaining blocks */
3288 for (i = 1 ; i < num ; i++)
3289 memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
3290 fake_storep + lba_off,
3291 sdebug_sector_size);
3293 	if (scsi_debug_lbp())
3294 map_region(lba, num);
3296 	write_unlock_irqrestore(&atomic_rw, iflags);
/*
 * WRITE SAME(10) responder: rejects UNMAP when lbpws10 is not enabled
 * (the UNMAP-bit test guarding that check is sampled out of this
 * extract), decodes the 32-bit LBA and 16-bit block count, enforces the
 * sdebug_write_same_length cap, and delegates to resp_write_same()
 * with ndob=false.
 */
3301 static int resp_write_same_10(struct scsi_cmnd *scp,
3302 struct sdebug_dev_info *devip)
3304 u8 *cmd = scp->cmnd;
3311 		if (sdebug_lbpws10 == 0) {
3312 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3313 return check_condition_result;
3317 	lba = get_unaligned_be32(cmd + 2);
3318 num = get_unaligned_be16(cmd + 7);
3319 if (num > sdebug_write_same_length) {
3320 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3321 return check_condition_result;
3323 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
/* WRITE SAME(16) cdb decode: honours the UNMAP (byte 1 bit 3) and NDOB
 * (byte 1 bit 0) flags, extracts 64-bit LBA (bytes 2-9) and 32-bit
 * count (bytes 10-13), bounds-checks the count, then delegates to
 * resp_write_same().
 */
3326 static int resp_write_same_16(struct scsi_cmnd *scp,
3327 struct sdebug_dev_info *devip)
3329 u8 *cmd = scp->cmnd;
3336 if (cmd[1] & 0x8) { /* UNMAP */
/* UNMAP bit set but WRITE SAME(16) unmap support disabled */
3337 if (sdebug_lbpws == 0) {
3338 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3339 return check_condition_result;
3343 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3345 lba = get_unaligned_be64(cmd + 2);
3346 num = get_unaligned_be32(cmd + 10);
/* tie transfer length to the advertised block-limits maximum */
3347 if (num > sdebug_write_same_length) {
3348 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3349 return check_condition_result;
3351 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3354 /* Note the mode field is in the same position as the (lower) service action
3355 * field. For the Report supported operation codes command, SPC-4 suggests
3356 * each mode of this command should be reported separately; for future. */
/* WRITE BUFFER: only the "download microcode" modes (cdb byte 1, low 5
 * bits) have visible effects here — they raise unit attentions so the
 * initiator learns the (pretend) microcode changed. Modes 4/5 affect
 * only this LU; modes 6/7 walk the host's dev_info_list and flag every
 * LU sharing this target id. Other modes are accepted as no-ops.
 */
3357 static int resp_write_buffer(struct scsi_cmnd *scp,
3358 struct sdebug_dev_info *devip)
3360 u8 *cmd = scp->cmnd;
3361 struct scsi_device *sdp = scp->device;
3362 struct sdebug_dev_info *dp;
3365 mode = cmd[1] & 0x1f;
3367 case 0x4: /* download microcode (MC) and activate (ACT) */
3368 /* set UAs on this device only */
3369 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3370 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3372 case 0x5: /* download MC, save and ACT */
3373 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3375 case 0x6: /* download MC with offsets and ACT */
3376 /* set UAs on most devices (LUs) in this target */
3377 list_for_each_entry(dp,
3378 &devip->sdbg_host->dev_info_list,
3380 if (dp->target == sdp->id) {
3381 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3383 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3387 case 0x7: /* download MC with offsets, save, and ACT */
3388 /* set UA on all devices (LUs) in this target */
3389 list_for_each_entry(dp,
3390 &devip->sdbg_host->dev_info_list,
3392 if (dp->target == sdp->id)
3393 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3397 /* do nothing for this command for other mode values */
/* COMPARE AND WRITE (SBC): the data-out buffer carries 2*num blocks —
 * first the compare data, then the write data. Temporarily repoints
 * fake_storep at a scratch buffer so do_device_access() "writes" both
 * halves into it, then comp_write_worker() compares and conditionally
 * writes; a mismatch raises MISCOMPARE sense. Protected throughout by
 * the atomic_rw write lock. NOTE(review): num==0 early-return, dnum
 * setup and cleanup/unlock tail are elided in this view.
 */
3403 static int resp_comp_write(struct scsi_cmnd *scp,
3404 struct sdebug_dev_info *devip)
3406 u8 *cmd = scp->cmnd;
3408 u8 *fake_storep_hold;
3411 u32 lb_size = sdebug_sector_size;
3413 unsigned long iflags;
3417 lba = get_unaligned_be64(cmd + 2);
3418 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3420 return 0; /* degenerate case, not an error */
/* type 2 protection does not permit COMPARE AND WRITE */
3421 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3423 mk_sense_invalid_opcode(scp);
3424 return check_condition_result;
/* warn (but proceed) if WRPROTECT field is zero with PI enabled */
3426 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3427 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3428 (cmd[1] & 0xe0) == 0)
3429 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3432 /* inline check_device_access_params() */
3433 if (lba + num > sdebug_capacity) {
3434 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3435 return check_condition_result;
3437 /* transfer length excessive (tie in to block limits VPD page) */
3438 if (num > sdebug_store_sectors) {
3439 /* needs work to find which cdb byte 'num' comes from */
3440 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3441 return check_condition_result;
/* scratch buffer for both compare and write halves (dnum blocks) */
3444 arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3446 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3448 return check_condition_result;
3451 write_lock_irqsave(&atomic_rw, iflags);
3453 /* trick do_device_access() to fetch both compare and write buffers
3454 * from data-in into arr. Safe (atomic) since write_lock held. */
3455 fake_storep_hold = fake_storep;
3457 ret = do_device_access(scp, 0, 0, dnum, true);
3458 fake_storep = fake_storep_hold;
3460 retval = DID_ERROR << 16;
3462 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3463 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3464 "indicated=%u, IO sent=%d bytes\n", my_name,
3465 dnum * lb_size, ret);
/* compare half against store; write second half only on full match */
3466 if (!comp_write_worker(lba, num, arr)) {
3467 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3468 retval = check_condition_result;
3471 if (scsi_debug_lbp())
3472 map_region(lba, num);
3474 write_unlock_irqrestore(&atomic_rw, iflags);
3479 struct unmap_block_desc {
/* UNMAP: copies the parameter list out of the scatter-gather buffer,
 * validates header lengths against the cdb's payload length, then under
 * the atomic_rw write lock walks each 16-byte block descriptor and
 * unmaps the (lba, num) range it describes. Descriptor count is capped
 * by the unmap_max_desc module parameter. NOTE(review): kzalloc NULL
 * check, loop break-on-error and kfree tail are elided in this view.
 */
3485 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3488 struct unmap_block_desc *desc;
3489 unsigned int i, payload_len, descriptors;
3491 unsigned long iflags;
/* without logical block provisioning, pretend the unmap happened */
3494 if (!scsi_debug_lbp())
3495 return 0; /* fib and say its done */
3496 payload_len = get_unaligned_be16(scp->cmnd + 7);
3497 BUG_ON(scsi_bufflen(scp) != payload_len);
/* 8-byte header followed by 16-byte block descriptors */
3499 descriptors = (payload_len - 8) / 16;
3500 if (descriptors > sdebug_unmap_max_desc) {
3501 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3502 return check_condition_result;
3505 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3507 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3509 return check_condition_result;
3512 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
/* sanity-check the parameter list's own length fields */
3514 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3515 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3517 desc = (void *)&buf[8];
3519 write_lock_irqsave(&atomic_rw, iflags);
3521 for (i = 0 ; i < descriptors ; i++) {
3522 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3523 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3525 ret = check_device_access_params(scp, lba, num);
3529 unmap_region(lba, num);
3535 write_unlock_irqrestore(&atomic_rw, iflags);
3541 #define SDEBUG_GET_LBA_STATUS_LEN 32
/* GET LBA STATUS: reports whether the block at @lba is mapped and how
 * many following blocks share that state (via map_state() when LBP is
 * enabled). Returns one LBA-status descriptor in a fixed 32-byte
 * response. NOTE(review): small-alloc_len early-return and the non-LBP
 * else-branch are elided in this view.
 */
3543 static int resp_get_lba_status(struct scsi_cmnd *scp,
3544 struct sdebug_dev_info *devip)
3546 u8 *cmd = scp->cmnd;
3548 u32 alloc_len, mapped, num;
3549 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3552 lba = get_unaligned_be64(cmd + 2);
3553 alloc_len = get_unaligned_be32(cmd + 10);
3558 ret = check_device_access_params(scp, lba, 1);
3562 if (scsi_debug_lbp())
3563 mapped = map_state(lba, &num);
3566 /* following just in case virtual_gb changed */
3567 sdebug_capacity = get_sdebug_capacity();
/* descriptor's block count field is 32 bits; clamp at capacity end */
3568 if (sdebug_capacity - lba <= 0xffffffff)
3569 num = sdebug_capacity - lba;
3574 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3575 put_unaligned_be32(20, arr); /* Parameter Data Length */
3576 put_unaligned_be64(lba, arr + 8); /* LBA */
3577 put_unaligned_be32(num, arr + 16); /* Number of blocks */
3578 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
3580 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
/* SYNCHRONIZE CACHE(10/16): nothing to flush in a ramdisk, so this only
 * range-checks the cdb's lba/count against capacity. If the IMMED bit
 * (cdb byte 1, bit 1) is set, SDEG_RES_IMMED_MASK is returned so the
 * caller completes the command without any artificial delay.
 */
3583 static int resp_sync_cache(struct scsi_cmnd *scp,
3584 struct sdebug_dev_info *devip)
3588 u8 *cmd = scp->cmnd;
3590 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
3591 lba = get_unaligned_be32(cmd + 2);
3592 num_blocks = get_unaligned_be16(cmd + 7);
3593 } else { /* SYNCHRONIZE_CACHE(16) */
3594 lba = get_unaligned_be64(cmd + 2);
3595 num_blocks = get_unaligned_be32(cmd + 10);
3597 if (lba + num_blocks > sdebug_capacity) {
3598 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3599 return check_condition_result;
3601 return (cmd[1] & 0x2) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */
3604 #define RL_BUCKET_ELEMS 8
3606 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3607 * (W-LUN), the normal Linux scanning logic does not associate it with a
3608 * device (e.g. /dev/sg7). The following magic will make that association:
3609 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3610 * where <n> is a host number. If there are multiple targets in a host then
3611 * the above will associate a W-LUN to each target. To only get a W-LUN
3612 * for target 2, then use "echo '- 2 49409' > scan" .
/* REPORT LUNS: builds the LUN list for this pseudo target in buckets of
 * RL_BUCKET_ELEMS 8-byte entries so the on-stack array stays small,
 * streaming each bucket to the data-in buffer via
 * p_fill_from_dev_buffer() at increasing offsets. Select_report chooses
 * normal LUNs (0), only the REPORT LUNS W-LUN (1) or both (2).
 * NOTE(review): the per-case lun/wlun count assignments and some loop
 * plumbing are elided in this view.
 */
3614 static int resp_report_luns(struct scsi_cmnd *scp,
3615 struct sdebug_dev_info *devip)
3617 unsigned char *cmd = scp->cmnd;
3618 unsigned int alloc_len;
3619 unsigned char select_report;
3621 struct scsi_lun *lun_p;
3622 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
3623 unsigned int lun_cnt; /* normal LUN count (max: 256) */
3624 unsigned int wlun_cnt; /* report luns W-LUN count */
3625 unsigned int tlun_cnt; /* total LUN count */
3626 unsigned int rlen; /* response length (in bytes) */
3628 unsigned int off_rsp = 0;
3629 const int sz_lun = sizeof(struct scsi_lun);
3631 clear_luns_changed_on_target(devip);
3633 select_report = cmd[2];
3634 alloc_len = get_unaligned_be32(cmd + 6);
/* SPC requires allocation length of at least 4 bytes */
3636 if (alloc_len < 4) {
3637 pr_err("alloc len too small %d\n", alloc_len);
3638 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
3639 return check_condition_result;
3642 switch (select_report) {
3643 case 0: /* all LUNs apart from W-LUNs */
3644 lun_cnt = sdebug_max_luns;
3647 case 1: /* only W-LUNs */
3651 case 2: /* all LUNs */
3652 lun_cnt = sdebug_max_luns;
3655 case 0x10: /* only administrative LUs */
3656 case 0x11: /* see SPC-5 */
3657 case 0x12: /* only subsidiary LUs owned by referenced LU */
3659 pr_debug("select report invalid %d\n", select_report);
3660 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
3661 return check_condition_result;
/* skip LUN 0 when the no_lun_0 module parameter is set */
3664 if (sdebug_no_lun_0 && (lun_cnt > 0))
3667 tlun_cnt = lun_cnt + wlun_cnt;
3668 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
3669 scsi_set_resid(scp, scsi_bufflen(scp));
3670 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
3671 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
3673 /* loops rely on sizeof response header same as sizeof lun (both 8) */
3674 lun = sdebug_no_lun_0 ? 1 : 0;
3675 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
3676 memset(arr, 0, sizeof(arr));
3677 lun_p = (struct scsi_lun *)&arr[0];
/* first bucket starts with the 8-byte response header */
3679 put_unaligned_be32(rlen, &arr[0]);
3683 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
3684 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
3686 int_to_scsilun(lun++, lun_p);
3688 if (j < RL_BUCKET_ELEMS)
3691 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
/* append the well-known REPORT LUNS W-LUN when requested */
3697 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
3701 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
/* XDWRITEREAD helper: XORs the command's data-out payload into the
 * bidirectional data-in buffer in place. Copies the data-out side into
 * a temporary linear buffer, then walks the data-in scatterlist with an
 * atomic sg_miter and XORs byte-by-byte. NOTE(review): kzalloc NULL
 * check, kaddr assignment from the miter and kfree tail are elided.
 */
3705 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3706 unsigned int num, struct sdebug_dev_info *devip)
3709 unsigned char *kaddr, *buf;
3710 unsigned int offset;
3711 struct scsi_data_buffer *sdb = scsi_in(scp);
3712 struct sg_mapping_iter miter;
3714 /* better not to use temporary buffer. */
3715 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3717 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3719 return check_condition_result;
/* linearize the data-out side for easy indexed access */
3722 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3725 sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3726 SG_MITER_ATOMIC | SG_MITER_TO_SG);
3728 while (sg_miter_next(&miter)) {
3730 for (j = 0; j < miter.length; j++)
3731 *(kaddr + j) ^= *(buf + offset + j);
3733 offset += miter.length;
3735 sg_miter_stop(&miter);
/* XDWRITEREAD(10): requires a bidirectional command. Performs the read
 * leg (resp_read_dt0), optionally the write leg unless DISABLE_WRITE
 * (cdb byte 1, bit 2) is set, then XORs data-out into data-in via
 * resp_xdwriteread(). NOTE(review): error-propagation returns between
 * the legs are elided in this view.
 */
3741 static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3742 struct sdebug_dev_info *devip)
3744 u8 *cmd = scp->cmnd;
/* mid-layer must have set up separate in and out buffers */
3749 if (!scsi_bidi_cmnd(scp)) {
3750 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3752 return check_condition_result;
3754 errsts = resp_read_dt0(scp, devip);
3757 if (!(cmd[1] & 0x4)) { /* DISABLE_WRITE is not set */
3758 errsts = resp_write_dt0(scp, devip);
3762 lba = get_unaligned_be32(cmd + 2);
3763 num = get_unaligned_be16(cmd + 7);
3764 return resp_xdwriteread(scp, lba, num, devip);
/* Map a command to its per-hw-queue sdebug_queue via the block-mq
 * unique tag; WARN and (presumably, in the elided branch) clamp if the
 * decoded hwq index exceeds submit_queues.
 */
3767 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3769 u32 tag = blk_mq_unique_tag(cmnd->request);
3770 u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3772 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
3773 if (WARN_ON_ONCE(hwq >= submit_queues))
3775 return sdebug_q_arr + hwq;
3778 /* Queued (deferred) command completions converge here. */
/* Called from hrtimer or workqueue context. Validates the queue slot,
 * updates statistics and per-device in-flight count, clears the slot
 * (a_cmnd and in_use bit) under qc_lock, handles the shrinking-queue
 * bookkeeping when max_queue was reduced, then invokes the mid-layer
 * done callback outside the lock.
 */
3779 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
3783 unsigned long iflags;
3784 struct sdebug_queue *sqp;
3785 struct sdebug_queued_cmd *sqcp;
3786 struct scsi_cmnd *scp;
3787 struct sdebug_dev_info *devip;
3789 sd_dp->defer_t = SDEB_DEFER_NONE;
3790 qc_idx = sd_dp->qc_idx;
3791 sqp = sdebug_q_arr + sd_dp->sqa_idx;
3792 if (sdebug_statistics) {
3793 atomic_inc(&sdebug_completions);
/* completion landed on a different CPU than the one that issued */
3794 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
3795 atomic_inc(&sdebug_miss_cpus);
3797 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
3798 pr_err("wild qc_idx=%d\n", qc_idx);
3801 spin_lock_irqsave(&sqp->qc_lock, iflags);
3802 sqcp = &sqp->qc_arr[qc_idx];
3804 if (unlikely(scp == NULL)) {
3805 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3806 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
3807 sd_dp->sqa_idx, qc_idx);
3810 devip = (struct sdebug_dev_info *)scp->device->hostdata;
3812 atomic_dec(&devip->num_in_q);
3814 pr_err("devip=NULL\n");
/* non-zero retired_max_queue means max_queue was reduced at runtime */
3815 if (unlikely(atomic_read(&retired_max_queue) > 0))
3818 sqcp->a_cmnd = NULL;
3819 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
3820 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3821 pr_err("Unexpected completion\n");
3825 if (unlikely(retiring)) { /* user has reduced max_queue */
3828 retval = atomic_read(&retired_max_queue);
3829 if (qc_idx >= retval) {
3830 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3831 pr_err("index %d too large\n", retval);
/* once all slots above the new limit drain, clear the retire state */
3834 k = find_last_bit(sqp->in_use_bm, retval);
3835 if ((k < sdebug_max_queue) || (k == retval))
3836 atomic_set(&retired_max_queue, 0);
3838 atomic_set(&retired_max_queue, k + 1);
3840 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3841 scp->scsi_done(scp); /* callback to mid level */
3844 /* When high resolution timer goes off this function is called. */
/* Recover the sdebug_defer from its embedded hrtimer and complete the
 * deferred command; the timer is one-shot (no restart).
 */
3845 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3847 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3849 sdebug_q_cmd_complete(sd_dp);
3850 return HRTIMER_NORESTART;
3853 /* When work queue schedules work, it calls this function. */
/* Workqueue counterpart of the hrtimer completion path: recover the
 * sdebug_defer from its embedded work_struct and finish the command.
 */
3854 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3856 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3858 sdebug_q_cmd_complete(sd_dp);
3861 static bool got_shared_uuid;
3862 static uuid_t shared_uuid;
/* Allocate and initialize a pseudo device (LU). uuid_ctl=1 gives each
 * LU its own random designator UUID; uuid_ctl=2 generates one shared
 * UUID lazily on first use and reuses it for all LUs. The new entry is
 * linked onto the owning pseudo host's dev_info_list. Returns the new
 * devip (NULL-on-alloc-failure path elided in this view).
 */
3864 static struct sdebug_dev_info *sdebug_device_create(
3865 struct sdebug_host_info *sdbg_host, gfp_t flags)
3867 struct sdebug_dev_info *devip;
3869 devip = kzalloc(sizeof(*devip), flags);
3871 if (sdebug_uuid_ctl == 1)
3872 uuid_gen(&devip->lu_name);
3873 else if (sdebug_uuid_ctl == 2) {
3874 if (got_shared_uuid)
3875 devip->lu_name = shared_uuid;
/* first LU with uuid_ctl==2: create the shared UUID once */
3877 uuid_gen(&shared_uuid);
3878 got_shared_uuid = true;
3879 devip->lu_name = shared_uuid;
3882 devip->sdbg_host = sdbg_host;
3883 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
/* Find the devip matching sdev's <channel,target,lun>, or reuse a free
 * (unused) entry, or allocate a fresh one. The chosen entry is
 * (re)initialized: address copied from sdev, in-flight count zeroed, a
 * power-on/reset unit attention queued, and marked used. Returns NULL
 * only on allocation failure (return paths partly elided in this view).
 */
3888 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3890 struct sdebug_host_info *sdbg_host;
3891 struct sdebug_dev_info *open_devip = NULL;
3892 struct sdebug_dev_info *devip;
/* host private data holds a pointer to our sdebug_host_info */
3894 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3896 pr_err("Host info NULL\n");
3899 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3900 if ((devip->used) && (devip->channel == sdev->channel) &&
3901 (devip->target == sdev->id) &&
3902 (devip->lun == sdev->lun))
/* remember the first free slot in case no exact match exists */
3905 if ((!devip->used) && (!open_devip))
3909 if (!open_devip) { /* try and make a new one */
3910 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3912 pr_err("out of memory at line %d\n", __LINE__);
3917 open_devip->channel = sdev->channel;
3918 open_devip->target = sdev->id;
3919 open_devip->lun = sdev->lun;
3920 open_devip->sdbg_host = sdbg_host;
3921 atomic_set(&open_devip->num_in_q, 0);
3922 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3923 open_devip->used = true;
/* scsi_host_template ->slave_alloc: log the new device address (under
 * a verbosity check elided in this view) and flag its request queue as
 * supporting bidirectional commands (needed for XDWRITEREAD).
 */
3927 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3930 pr_info("slave_alloc <%u %u %u %llu>\n",
3931 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3932 blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue);
/* scsi_host_template ->slave_configure: bind (or build) the devip for
 * this scsi_device, widen the host's max cdb length, lift the segment
 * size limit, honour the no_uld module parameter and apply the
 * configured cdb length. Returns 1 (offline) if no devip can be made.
 */
3937 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3938 struct sdebug_dev_info *devip =
3939 (struct sdebug_dev_info *)sdp->hostdata;
3942 pr_info("slave_configure <%u %u %u %llu>\n",
3943 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3944 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3945 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3946 if (devip == NULL) {
3947 devip = find_build_dev_info(sdp);
3949 return 1; /* no resources, will be marked offline */
3951 sdp->hostdata = devip;
/* -1U: effectively no per-segment size limit on this queue */
3952 blk_queue_max_segment_size(sdp->request_queue, -1U);
3954 sdp->no_uld_attach = 1;
3955 config_cdb_len(sdp);
/* scsi_host_template ->slave_destroy: release the devip back to the
 * free pool (it stays on the host's list, just marked unused) and
 * detach it from the scsi_device.
 */
3961 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3962 struct sdebug_dev_info *devip =
3963 (struct sdebug_dev_info *)sdp->hostdata
3965 pr_info("slave_destroy <%u %u %u %llu>\n",
3966 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3968 /* make this slot available for re-use */
3969 devip->used = false;
3970 sdp->hostdata = NULL;
/* Cancel the pending deferred completion for one command, depending on
 * how it was deferred: kill the hrtimer or flush/cancel the work item.
 * Must be called without qc_lock held (both cancel calls can sleep/spin
 * waiting for the handler).
 */
3974 static void stop_qc_helper(struct sdebug_defer *sd_dp,
3975 enum sdeb_defer_type defer_t)
3979 if (defer_t == SDEB_DEFER_HRT)
3980 hrtimer_cancel(&sd_dp->hrt);
3981 else if (defer_t == SDEB_DEFER_WQ)
3982 cancel_work_sync(&sd_dp->ew.work);
3985 /* If @cmnd found deletes its timer or work queue and returns true; else
3987 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
3989 unsigned long iflags;
3990 int j, k, qmax, r_qmax;
3991 enum sdeb_defer_type l_defer_t;
3992 struct sdebug_queue *sqp;
3993 struct sdebug_queued_cmd *sqcp;
3994 struct sdebug_dev_info *devip;
3995 struct sdebug_defer *sd_dp;
3997 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3998 spin_lock_irqsave(&sqp->qc_lock, iflags);
3999 qmax = sdebug_max_queue;
4000 r_qmax = atomic_read(&retired_max_queue);
4003 for (k = 0; k < qmax; ++k) {
4004 if (test_bit(k, sqp->in_use_bm)) {
4005 sqcp = &sqp->qc_arr[k];
4006 if (cmnd != sqcp->a_cmnd)
4009 devip = (struct sdebug_dev_info *)
4010 cmnd->device->hostdata;
4012 atomic_dec(&devip->num_in_q);
4013 sqcp->a_cmnd = NULL;
4014 sd_dp = sqcp->sd_dp;
4016 l_defer_t = sd_dp->defer_t;
4017 sd_dp->defer_t = SDEB_DEFER_NONE;
4019 l_defer_t = SDEB_DEFER_NONE;
4020 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4021 stop_qc_helper(sd_dp, l_defer_t);
4022 clear_bit(k, sqp->in_use_bm);
4026 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4031 /* Deletes (stops) timers or work queues of all queued commands */
4032 static void stop_all_queued(void)
4034 unsigned long iflags;
4036 enum sdeb_defer_type l_defer_t;
4037 struct sdebug_queue *sqp;
4038 struct sdebug_queued_cmd *sqcp;
4039 struct sdebug_dev_info *devip;
4040 struct sdebug_defer *sd_dp;
4042 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4043 spin_lock_irqsave(&sqp->qc_lock, iflags);
4044 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4045 if (test_bit(k, sqp->in_use_bm)) {
4046 sqcp = &sqp->qc_arr[k];
4047 if (sqcp->a_cmnd == NULL)
4049 devip = (struct sdebug_dev_info *)
4050 sqcp->a_cmnd->device->hostdata;
4052 atomic_dec(&devip->num_in_q);
4053 sqcp->a_cmnd = NULL;
4054 sd_dp = sqcp->sd_dp;
4056 l_defer_t = sd_dp->defer_t;
4057 sd_dp->defer_t = SDEB_DEFER_NONE;
4059 l_defer_t = SDEB_DEFER_NONE;
4060 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4061 stop_qc_helper(sd_dp, l_defer_t);
4062 clear_bit(k, sqp->in_use_bm);
4063 spin_lock_irqsave(&sqp->qc_lock, iflags);
4066 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4070 /* Free queued command memory on heap */
4071 static void free_all_queued(void)
4074 struct sdebug_queue *sqp;
4075 struct sdebug_queued_cmd *sqcp;
4077 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4078 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4079 sqcp = &sqp->qc_arr[k];
/* Error-handler ->eh_abort_handler: try to cancel the command's pending
 * deferred completion; optionally log whether it was found. (Counter
 * increment and SUCCESS return are elided in this view.)
 */
4086 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4092 ok = stop_queued_cmnd(SCpnt);
4093 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4094 sdev_printk(KERN_INFO, SCpnt->device,
4095 "%s: command%s found\n", __func__,
4101 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
4104 if (SCpnt && SCpnt->device) {
4105 struct scsi_device *sdp = SCpnt->device;
4106 struct sdebug_dev_info *devip =
4107 (struct sdebug_dev_info *)sdp->hostdata;
4109 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4110 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4112 set_bit(SDEBUG_UA_POR, devip->uas_bm);
4117 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
4119 struct sdebug_host_info *sdbg_host;
4120 struct sdebug_dev_info *devip;
4121 struct scsi_device *sdp;
4122 struct Scsi_Host *hp;
4125 ++num_target_resets;
4128 sdp = SCpnt->device;
4131 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4132 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4136 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4138 list_for_each_entry(devip,
4139 &sdbg_host->dev_info_list,
4141 if (devip->target == sdp->id) {
4142 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4146 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4147 sdev_printk(KERN_INFO, sdp,
4148 "%s: %d device(s) found in target\n", __func__, k);
4153 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
4155 struct sdebug_host_info *sdbg_host;
4156 struct sdebug_dev_info *devip;
4157 struct scsi_device *sdp;
4158 struct Scsi_Host *hp;
4162 if (!(SCpnt && SCpnt->device))
4164 sdp = SCpnt->device;
4165 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4166 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4169 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4171 list_for_each_entry(devip,
4172 &sdbg_host->dev_info_list,
4174 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4179 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4180 sdev_printk(KERN_INFO, sdp,
4181 "%s: %d device(s) found in host\n", __func__, k);
4186 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
4188 struct sdebug_host_info *sdbg_host;
4189 struct sdebug_dev_info *devip;
4193 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4194 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
4195 spin_lock(&sdebug_host_list_lock);
4196 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
4197 list_for_each_entry(devip, &sdbg_host->dev_info_list,
4199 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4203 spin_unlock(&sdebug_host_list_lock);
4205 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4206 sdev_printk(KERN_INFO, SCpnt->device,
4207 "%s: %d device(s) found\n", __func__, k);
/* At module init, write a classic MBR partition table into the first
 * sector of the ramdisk: magic 0x55/0xAA at offset 510 and up to
 * SDEBUG_MAX_PARTS entries at 0x1be, each sized as an equal share of
 * the sectors after the first track, with CHS values derived from the
 * simulated heads/sectors-per-track geometry. Skipped for stores under
 * 1 MiB or when num_parts < 1.
 */
4211 static void __init sdebug_build_parts(unsigned char *ramp,
4212 unsigned long store_size)
4214 struct partition *pp;
4215 int starts[SDEBUG_MAX_PARTS + 2];
4216 int sectors_per_part, num_sectors, k;
4217 int heads_by_sects, start_sec, end_sec;
4219 /* assume partition table already zeroed */
4220 if ((sdebug_num_parts < 1) || (store_size < 1048576))
4222 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
4223 sdebug_num_parts = SDEBUG_MAX_PARTS;
4224 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
4226 num_sectors = (int)sdebug_store_sectors;
/* first track is reserved; divide the rest among the partitions */
4227 sectors_per_part = (num_sectors - sdebug_sectors_per)
4229 heads_by_sects = sdebug_heads * sdebug_sectors_per;
4230 starts[0] = sdebug_sectors_per;
/* align each partition start to a cylinder boundary */
4231 for (k = 1; k < sdebug_num_parts; ++k)
4232 starts[k] = ((k * sectors_per_part) / heads_by_sects)
4234 starts[sdebug_num_parts] = num_sectors;
4235 starts[sdebug_num_parts + 1] = 0; /* sentinel terminates the loop below */
4237 ramp[510] = 0x55; /* magic partition markings */
4239 pp = (struct partition *)(ramp + 0x1be);
4240 for (k = 0; starts[k + 1]; ++k, ++pp) {
4241 start_sec = starts[k];
4242 end_sec = starts[k + 1] - 1;
/* CHS encoding: cylinder, head, 1-based sector within track */
4245 pp->cyl = start_sec / heads_by_sects;
4246 pp->head = (start_sec - (pp->cyl * heads_by_sects))
4247 / sdebug_sectors_per;
4248 pp->sector = (start_sec % sdebug_sectors_per) + 1;
4250 pp->end_cyl = end_sec / heads_by_sects;
4251 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
4252 / sdebug_sectors_per;
4253 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
4255 pp->start_sect = cpu_to_le32(start_sec);
4256 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
4257 pp->sys_ind = 0x83; /* plain Linux partition */
/* Atomically set or clear the 'blocked' flag on every submit queue;
 * while blocked, schedule_resp() returns SCSI_MLQUEUE_HOST_BUSY so
 * state changes (e.g. tweak_cmnd_count) happen with no new commands.
 */
4261 static void block_unblock_all_queues(bool block)
4264 struct sdebug_queue *sqp;
4266 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4267 atomic_set(&sqp->blocked, (int)block);
4270 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4271 * commands will be processed normally before triggers occur.
4273 static void tweak_cmnd_count(void)
4277 modulo = abs(sdebug_every_nth);
/* quiesce all queues while rewriting the shared counter */
4280 block_unblock_all_queues(true);
4281 count = atomic_read(&sdebug_cmnd_count);
4282 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4283 block_unblock_all_queues(false);
/* Reset all statistics/trigger counters shown via sysfs/procfs. */
4286 static void clear_queue_stats(void)
4288 atomic_set(&sdebug_cmnd_count, 0);
4289 atomic_set(&sdebug_completions, 0);
4290 atomic_set(&sdebug_miss_cpus, 0);
4291 atomic_set(&sdebug_a_tsf, 0);
/* Arm per-command error-injection flags on @sqcp. Only every Nth
 * command (per sdebug_cmnd_count vs abs(every_nth)) gets the flags
 * derived from the opts bitmask; off-cycle commands with every_nth > 0
 * have their flags cleared (negative every_nth leaves them sticky).
 */
4294 static void setup_inject(struct sdebug_queue *sqp,
4295 struct sdebug_queued_cmd *sqcp)
4297 if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
4298 if (sdebug_every_nth > 0)
4299 sqcp->inj_recovered = sqcp->inj_transport
4301 = sqcp->inj_dix = sqcp->inj_short = 0;
/* on-cycle: mirror each SDEBUG_OPT_* bit into its injection flag */
4304 sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4305 sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4306 sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4307 sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4308 sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4309 sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4312 /* Complete the processing of the thread that queued a SCSI command to this
4313 * driver. It either completes the command by calling cmnd_done() or
4314 * schedules a hr timer or work queue then returns 0. Returns
4315 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
/* @pfp runs the actual command emulation; @scsi_result is a pre-chosen
 * status; @delta_jiff/@ndelay select jiffies- vs nanosecond-resolution
 * delay (0/0 means respond synchronously in the caller's thread).
 */
4317 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4319 int (*pfp)(struct scsi_cmnd *,
4320 struct sdebug_dev_info *),
4321 int delta_jiff, int ndelay)
4323 unsigned long iflags;
4324 int k, num_in_q, qdepth, inject;
4325 struct sdebug_queue *sqp;
4326 struct sdebug_queued_cmd *sqcp;
4327 struct scsi_device *sdp;
4328 struct sdebug_defer *sd_dp;
4330 if (unlikely(devip == NULL)) {
4331 if (scsi_result == 0)
4332 scsi_result = DID_NO_CONNECT << 16;
4333 goto respond_in_thread;
/* zero delay: complete synchronously without queueing */
4337 if (delta_jiff == 0)
4338 goto respond_in_thread;
4340 /* schedule the response at a later time if resources permit */
4341 sqp = get_queue(cmnd);
4342 spin_lock_irqsave(&sqp->qc_lock, iflags);
4343 if (unlikely(atomic_read(&sqp->blocked))) {
4344 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4345 return SCSI_MLQUEUE_HOST_BUSY;
4347 num_in_q = atomic_read(&devip->num_in_q);
4348 qdepth = cmnd->device->queue_depth;
/* device queue full: either TASK SET FULL or (elided branch) retry */
4350 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4352 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4353 goto respond_in_thread;
4355 scsi_result = device_qfull_result;
4356 } else if (unlikely(sdebug_every_nth &&
4357 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4358 (scsi_result == 0))) {
/* RARE_TSF injection: fake TASK SET FULL at the qdepth edge */
4359 if ((num_in_q == (qdepth - 1)) &&
4360 (atomic_inc_return(&sdebug_a_tsf) >=
4361 abs(sdebug_every_nth))) {
4362 atomic_set(&sdebug_a_tsf, 0);
4364 scsi_result = device_qfull_result;
4368 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4369 if (unlikely(k >= sdebug_max_queue)) {
4370 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4372 goto respond_in_thread;
4373 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4374 scsi_result = device_qfull_result;
4375 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4376 sdev_printk(KERN_INFO, sdp,
4377 "%s: max_queue=%d exceeded, %s\n",
4378 __func__, sdebug_max_queue,
4379 (scsi_result ? "status: TASK SET FULL" :
4380 "report: host busy"));
4382 goto respond_in_thread;
4384 return SCSI_MLQUEUE_HOST_BUSY;
/* claim slot k while still holding qc_lock */
4386 __set_bit(k, sqp->in_use_bm);
4387 atomic_inc(&devip->num_in_q);
4388 sqcp = &sqp->qc_arr[k];
4389 sqcp->a_cmnd = cmnd;
4390 cmnd->host_scribble = (unsigned char *)sqcp;
4391 sd_dp = sqcp->sd_dp;
4392 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4393 if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4394 setup_inject(sqp, sqcp);
/* sdebug_defer is allocated lazily, once per slot, then reused */
4395 if (sd_dp == NULL) {
4396 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4398 return SCSI_MLQUEUE_HOST_BUSY;
/* run the emulation now; only the completion is deferred */
4401 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4402 if (cmnd->result & SDEG_RES_IMMED_MASK) {
4404 * This is the F_DELAY_OVERR case. No delay.
4406 cmnd->result &= ~SDEG_RES_IMMED_MASK;
4407 delta_jiff = ndelay = 0;
4409 if (cmnd->result == 0 && scsi_result != 0)
4410 cmnd->result = scsi_result;
4412 if (unlikely(sdebug_verbose && cmnd->result))
4413 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4414 __func__, cmnd->result);
4416 if (delta_jiff > 0 || ndelay > 0) {
4419 if (delta_jiff > 0) {
4420 kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
/* first use of this slot's timer: one-time hrtimer setup */
4423 if (!sd_dp->init_hrt) {
4424 sd_dp->init_hrt = true;
4425 sqcp->sd_dp = sd_dp;
4426 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4427 HRTIMER_MODE_REL_PINNED);
4428 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4429 sd_dp->sqa_idx = sqp - sdebug_q_arr;
4432 if (sdebug_statistics)
4433 sd_dp->issuing_cpu = raw_smp_processor_id();
4434 sd_dp->defer_t = SDEB_DEFER_HRT;
4435 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4436 } else { /* jdelay < 0, use work queue */
4437 if (!sd_dp->init_wq) {
4438 sd_dp->init_wq = true;
4439 sqcp->sd_dp = sd_dp;
4440 sd_dp->sqa_idx = sqp - sdebug_q_arr;
4442 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4444 if (sdebug_statistics)
4445 sd_dp->issuing_cpu = raw_smp_processor_id();
4446 sd_dp->defer_t = SDEB_DEFER_WQ;
4447 schedule_work(&sd_dp->ew.work);
4449 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4450 (scsi_result == device_qfull_result)))
4451 sdev_printk(KERN_INFO, sdp,
4452 "%s: num_in_q=%d +1, %s%s\n", __func__,
4453 num_in_q, (inject ? "<inject> " : ""),
4454 "status: TASK SET FULL");
4457 respond_in_thread: /* call back to mid-layer using invocation thread */
4458 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4459 cmnd->result &= ~SDEG_RES_IMMED_MASK;
4460 if (cmnd->result == 0 && scsi_result != 0)
4461 cmnd->result = scsi_result;
4462 cmnd->scsi_done(cmnd);
4466 /* Note: The following macros create attribute files in the
4467 /sys/module/scsi_debug/parameters directory. Unfortunately this
4468 driver is unaware of a change and cannot trigger auxiliary actions
4469 as it can when the corresponding attribute in the
4470 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
/*
 * Module parameters. Each maps a load-time option (e.g. "delay=") onto a
 * file-scope sdebug_* variable. S_IWUSR entries are also writable at
 * runtime via /sys/module/scsi_debug/parameters/ but, per the note above,
 * such writes do NOT trigger the side effects the driver-attribute
 * (sysfs) store functions below perform.
 */
4472 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
4473 module_param_named(ato, sdebug_ato, int, S_IRUGO);
4474 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
4475 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
4476 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
4477 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
4478 module_param_named(dif, sdebug_dif, int, S_IRUGO);
4479 module_param_named(dix, sdebug_dix, int, S_IRUGO);
4480 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
4481 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
4482 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
4483 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
4484 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
/* INQUIRY strings are fixed-size char arrays; module_param_string bounds them */
4485 module_param_string(inq_vendor, sdebug_inq_vendor_id,
4486 sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
4487 module_param_string(inq_product, sdebug_inq_product_id,
4488 sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
4489 module_param_string(inq_rev, sdebug_inq_product_rev,
4490 sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
4491 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
4492 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
4493 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
4494 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
4495 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
4496 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
4497 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
4498 module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
4499 module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
4500 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
4501 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
4502 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
4503 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
4504 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
4505 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
4506 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
4507 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
4508 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
4509 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
4510 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
4511 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
4512 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
4513 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
4514 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
4515 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
4516 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
4517 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
4518 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
4519 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
4520 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
4521 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
4522 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
4524 module_param_named(write_same_length, sdebug_write_same_length, int,
/* Standard module metadata; version string is shared with INQUIRY data */
4527 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4528 MODULE_DESCRIPTION("SCSI debug adapter driver");
4529 MODULE_LICENSE("GPL");
4530 MODULE_VERSION(SDEBUG_VERSION);
/* Per-parameter help text, shown by modinfo (first half, add_host..no_lun_0) */
4532 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4533 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4534 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
4535 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4536 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4537 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4538 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4539 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4540 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4541 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4542 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4543 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4544 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
4545 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
4546 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
4547 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
4548 SDEBUG_VERSION "\")");
4549 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4550 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4551 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4552 MODULE_PARM_DESC(lbprz,
4553 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
4554 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4555 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
4556 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4557 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
4558 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
4559 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4560 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
/* no_uld prevents upper-level drivers (e.g. sd) from attaching.
 * Fix: description string had an unbalanced trailing ')' -- "(def=0))". */
4561 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
/* Per-parameter help text (second half, num_parts..write_same_length) */
4562 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
4563 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4564 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
4565 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4566 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4567 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
4568 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4569 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4570 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
4571 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4572 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
4573 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4574 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
4575 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4576 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4577 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4578 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4579 MODULE_PARM_DESC(uuid_ctl,
4580 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
4581 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4582 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4583 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4585 #define SDEBUG_INFO_LEN 256
/* Static buffer returned by scsi_debug_info(); not protected by any lock */
4586 static char sdebug_info[SDEBUG_INFO_LEN];
/*
 * .info host-template callback: formats a one/two line driver summary
 * (version, date, key settings) into sdebug_info and returns it.
 * Appends the settings line only when the first scnprintf did not
 * already fill the buffer.
 */
4588 static const char *scsi_debug_info(struct Scsi_Host *shp)
4592 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4593 my_name, SDEBUG_VERSION, sdebug_version_date);
4594 if (k >= (SDEBUG_INFO_LEN - 1))
4596 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4597 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4598 sdebug_dev_size_mb, sdebug_opts, submit_queues,
4599 "statistics", (int)sdebug_statistics);
4603 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
/*
 * .write_info host-template callback. Parses a decimal integer from the
 * user buffer (at most 15 bytes copied into a local array) and applies it
 * to the opts bit mask, refreshing the derived verbose/injecting flags.
 * Requires CAP_SYS_ADMIN and CAP_SYS_RAWIO.
 */
4604 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4609 int minLen = length > 15 ? 15 : length;
4611 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4613 memcpy(arr, buffer, minLen);
4615 if (1 != sscanf(arr, "%d", &opts))
4618 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4619 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4620 if (sdebug_every_nth != 0)
4625 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4626 * same for each scsi_debug host (if more than one). Some of the counters
4627 * output are not atomics so might be inaccurate in a busy system. */
/*
 * .show_info host-template callback: dumps global configuration, reset
 * counters, DIX/DIF counters and, per submit queue, which in_use_bm bits
 * are busy (first and last set bit up to sdebug_max_queue).
 */
4628 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4631 struct sdebug_queue *sqp;
4633 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
4634 SDEBUG_VERSION, sdebug_version_date);
4635 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4636 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
4637 sdebug_opts, sdebug_every_nth);
4638 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4639 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
4640 sdebug_sector_size, "bytes");
4641 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4642 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
4644 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4645 num_dev_resets, num_target_resets, num_bus_resets,
4647 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4648 dix_reads, dix_writes, dif_errors);
4649 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
4651 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4652 atomic_read(&sdebug_cmnd_count),
4653 atomic_read(&sdebug_completions),
4654 "miss_cpus", atomic_read(&sdebug_miss_cpus),
4655 atomic_read(&sdebug_a_tsf));
4657 seq_printf(m, "submit_queues=%d\n", submit_queues);
4658 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4659 seq_printf(m, " queue %d:\n", j);
4660 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
4661 if (f != sdebug_max_queue) {
4662 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
4663 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
4664 "first,last bits", f, l);
/* sysfs driver attribute: current jiffy-based response delay */
4670 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4672 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4674 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4675 * of delay is jiffies.
/*
 * Store a new jiffy delay. All queues are blocked while the per-queue
 * in_use bitmaps are scanned; any queued command aborts the change
 * with -EBUSY.
 */
4677 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4682 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4684 if (sdebug_jdelay != jdelay) {
4686 struct sdebug_queue *sqp;
4688 block_unblock_all_queues(true);
4689 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4691 k = find_first_bit(sqp->in_use_bm,
4693 if (k != sdebug_max_queue) {
4694 res = -EBUSY;	/* queued commands */
4699 sdebug_jdelay = jdelay;
4702 block_unblock_all_queues(false);
4708 static DRIVER_ATTR_RW(delay);
/* sysfs driver attribute: current nanosecond response delay */
4710 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4712 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4714 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4715 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4716 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4721 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4722 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4724 if (sdebug_ndelay != ndelay) {
4726 struct sdebug_queue *sqp;
4728 block_unblock_all_queues(true);
4729 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4731 k = find_first_bit(sqp->in_use_bm,
4733 if (k != sdebug_max_queue) {
4734 res = -EBUSY;	/* queued commands */
4739 sdebug_ndelay = ndelay;
4740 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
4743 block_unblock_all_queues(false);
4749 static DRIVER_ATTR_RW(ndelay);
/* sysfs driver attribute: opts bit mask, shown in hex */
4751 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4753 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
/* Accepts "0x<hex>" or decimal; refreshes verbose/injecting derived flags */
4756 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4762 if (sscanf(buf, "%10s", work) == 1) {
4763 if (strncasecmp(work, "0x", 2) == 0) {
4764 if (kstrtoint(work + 2, 16, &opts) == 0)
4767 if (kstrtoint(work, 10, &opts) == 0)
4774 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4775 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4779 static DRIVER_ATTR_RW(opts);
/* sysfs driver attribute: simulated SCSI peripheral device type */
4781 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4783 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4785 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4790 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4796 static DRIVER_ATTR_RW(ptype);
/* sysfs driver attribute: descriptor (1) vs fixed (0) sense format */
4798 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4800 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4802 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4807 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4813 static DRIVER_ATTR_RW(dsense);
/* sysfs driver attribute: fake (don't copy) reads and writes */
4815 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4817 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
/*
 * Turning fake_rw off may require allocating the ram-disk backing store
 * (fake_storep) now, since it is skipped at init time when fake_rw=1.
 */
4819 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4824 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4826 sdebug_fake_rw = (sdebug_fake_rw > 0);
4827 if (sdebug_fake_rw != n) {
4828 if ((0 == n) && (NULL == fake_storep)) {
4830 (unsigned long)sdebug_dev_size_mb *
4833 fake_storep = vmalloc(sz);
4834 if (NULL == fake_storep) {
4835 pr_err("out of memory, 9\n");
4838 memset(fake_storep, 0, sz);
4846 static DRIVER_ATTR_RW(fake_rw);
/* sysfs driver attribute: suppress LUN 0 (make lowest LUN 1) */
4848 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4850 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4852 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4857 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4858 sdebug_no_lun_0 = n;
4863 static DRIVER_ATTR_RW(no_lun_0);
/* sysfs driver attribute: targets per simulated host */
4865 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4867 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
/* store triggers a rescan via sdebug_max_tgts_luns() */
4869 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4874 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4875 sdebug_num_tgts = n;
4876 sdebug_max_tgts_luns();
4881 static DRIVER_ATTR_RW(num_tgts);
/* read-only: ram-disk size in MiB (fixed at module load) */
4883 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4885 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4887 static DRIVER_ATTR_RO(dev_size_mb);
/* read-only: number of partitions created at init */
4889 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4891 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4893 static DRIVER_ATTR_RO(num_parts);
/* sysfs driver attribute: inject error every nth command */
4895 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4897 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
/* every_nth depends on the command counter, so force statistics on */
4899 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4904 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4905 sdebug_every_nth = nth;
4906 if (nth && !sdebug_statistics) {
4907 pr_info("every_nth needs statistics=1, set it\n");
4908 sdebug_statistics = true;
4915 static DRIVER_ATTR_RW(every_nth);
/* sysfs driver attribute: LUNs per target (capped at 256) */
4917 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4919 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
/*
 * Store a new max_luns value, then (for SPC-3 or later) raise the
 * LUNS_CHANGED unit attention on every known device so initiators
 * learn of the new LUN inventory.
 */
4921 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4927 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4929 pr_warn("max_luns can be no more than 256\n");
4932 changed = (sdebug_max_luns != n);
4933 sdebug_max_luns = n;
4934 sdebug_max_tgts_luns();
4935 if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
4936 struct sdebug_host_info *sdhp;
4937 struct sdebug_dev_info *dp;
4939 spin_lock(&sdebug_host_list_lock);
4940 list_for_each_entry(sdhp, &sdebug_host_list,
4942 list_for_each_entry(dp, &sdhp->dev_info_list,
4944 set_bit(SDEBUG_UA_LUNS_CHANGED,
4948 spin_unlock(&sdebug_host_list_lock);
4954 static DRIVER_ATTR_RW(max_luns);
/* sysfs driver attribute: cap on simultaneously queued commands */
4956 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4958 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4960 /* N.B. max_queue can be changed while there are queued commands. In flight
4961 * commands beyond the new max_queue will be completed. */
/*
 * Lower/raise max_queue. The highest busy bit across all queues decides
 * whether retired_max_queue must track commands above the new limit.
 */
4962 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4966 struct sdebug_queue *sqp;
4968 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4969 (n <= SDEBUG_CANQUEUE)) {
4970 block_unblock_all_queues(true);
4972 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4974 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
4978 sdebug_max_queue = n;
4979 if (k == SDEBUG_CANQUEUE)
4980 atomic_set(&retired_max_queue, 0);
4982 atomic_set(&retired_max_queue, k + 1);
4984 atomic_set(&retired_max_queue, 0);
4985 block_unblock_all_queues(false);
4990 static DRIVER_ATTR_RW(max_queue);
/* read-only: whether upper-level driver attach is suppressed */
4992 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4994 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4996 static DRIVER_ATTR_RO(no_uld);
/* read-only: simulated SCSI standard level */
4998 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
5000 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
5002 static DRIVER_ATTR_RO(scsi_level);
/* sysfs driver attribute: virtual (wrap-around) capacity in GiB */
5004 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
5006 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
/*
 * Store virtual_gb, recompute capacity and, when it changed, raise the
 * CAPACITY_CHANGED unit attention on all known devices.
 */
5008 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
5014 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5015 changed = (sdebug_virtual_gb != n);
5016 sdebug_virtual_gb = n;
5017 sdebug_capacity = get_sdebug_capacity();
5019 struct sdebug_host_info *sdhp;
5020 struct sdebug_dev_info *dp;
5022 spin_lock(&sdebug_host_list_lock);
5023 list_for_each_entry(sdhp, &sdebug_host_list,
5025 list_for_each_entry(dp, &sdhp->dev_info_list,
5027 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
5031 spin_unlock(&sdebug_host_list_lock);
5037 static DRIVER_ATTR_RW(virtual_gb);
/* sysfs driver attribute: number of simulated hosts */
5039 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
5041 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
5044 static int sdebug_add_adapter(void);
5045 static void sdebug_remove_adapter(void);
/*
 * Writing a positive delta adds that many pseudo adapters, a negative
 * delta removes that many (most recently added first).
 */
5047 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
5052 if (sscanf(buf, "%d", &delta_hosts) != 1)
5054 if (delta_hosts > 0) {
5056 sdebug_add_adapter();
5057 } while (--delta_hosts);
5058 } else if (delta_hosts < 0) {
5060 sdebug_remove_adapter();
5061 } while (++delta_hosts);
5065 static DRIVER_ATTR_RW(add_host);
/* sysfs driver attribute: include host number in VPD device ids */
5067 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
5069 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
5071 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
5076 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5077 sdebug_vpd_use_hostno = n;
5082 static DRIVER_ATTR_RW(vpd_use_hostno);
/* sysfs driver attribute: enable/disable statistics gathering */
5084 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
5086 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
/* enabling clears the queue statistics so counts restart from zero */
5088 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
5093 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
5095 sdebug_statistics = true;
5097 clear_queue_stats();
5098 sdebug_statistics = false;
5104 static DRIVER_ATTR_RW(statistics);
/* read-only attributes fixed at module load time */
5106 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
5108 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
5110 static DRIVER_ATTR_RO(sector_size);
5112 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
5114 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
5116 static DRIVER_ATTR_RO(submit_queues);
5118 static ssize_t dix_show(struct device_driver *ddp, char *buf)
5120 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
5122 static DRIVER_ATTR_RO(dix);
5124 static ssize_t dif_show(struct device_driver *ddp, char *buf)
5126 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
5128 static DRIVER_ATTR_RO(dif);
5130 static ssize_t guard_show(struct device_driver *ddp, char *buf)
5132 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
5134 static DRIVER_ATTR_RO(guard);
5136 static ssize_t ato_show(struct device_driver *ddp, char *buf)
5138 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
5140 static DRIVER_ATTR_RO(ato);
/*
 * read-only: dumps the provisioning (mapped LBA) bitmap as a bit list;
 * when LBP is off the whole store is reported as one mapped range.
 */
5142 static ssize_t map_show(struct device_driver *ddp, char *buf)
5146 if (!scsi_debug_lbp())
5147 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
5148 sdebug_store_sectors);
5150 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
5151 (int)map_size, map_storep);
5152 buf[count++] = '\n';
5157 static DRIVER_ATTR_RO(map);
/* sysfs driver attribute: claim removable media */
5159 static ssize_t removable_show(struct device_driver *ddp, char *buf)
5161 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
5163 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
5168 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5169 sdebug_removable = (n > 0);
5174 static DRIVER_ATTR_RW(removable);
/* sysfs driver attribute: host_lock (accepted but has no effect) */
5176 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
5178 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
5180 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
5181 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
5186 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5187 sdebug_host_lock = (n > 0);
5192 static DRIVER_ATTR_RW(host_lock);
/* sysfs driver attribute: strict CDB reserved-field checking */
5194 static ssize_t strict_show(struct device_driver *ddp, char *buf)
5196 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
5198 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
5203 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5204 sdebug_strict = (n > 0);
5209 static DRIVER_ATTR_RW(strict);
/* read-only: whether LU names use UUIDs (set only at load time) */
5211 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
5213 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
5215 static DRIVER_ATTR_RO(uuid_ctl);
/* sysfs driver attribute: CDB length suggested to upper layers */
5217 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5219 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
/* store re-applies the suggested CDB length to all attached devices */
5221 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5226 ret = kstrtoint(buf, 0, &n);
5230 all_config_cdb_len();
5233 static DRIVER_ATTR_RW(cdb_len);
5236 /* Note: The following array creates attribute files in the
5237 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
5238 files (over those found in the /sys/module/scsi_debug/parameters
5239 directory) is that auxiliary actions can be triggered when an attribute
5240 is changed. For example see: sdebug_add_host_store() above.
/* Collects the DRIVER_ATTR_* entries defined above into one sysfs group */
5243 static struct attribute *sdebug_drv_attrs[] = {
5244 &driver_attr_delay.attr,
5245 &driver_attr_opts.attr,
5246 &driver_attr_ptype.attr,
5247 &driver_attr_dsense.attr,
5248 &driver_attr_fake_rw.attr,
5249 &driver_attr_no_lun_0.attr,
5250 &driver_attr_num_tgts.attr,
5251 &driver_attr_dev_size_mb.attr,
5252 &driver_attr_num_parts.attr,
5253 &driver_attr_every_nth.attr,
5254 &driver_attr_max_luns.attr,
5255 &driver_attr_max_queue.attr,
5256 &driver_attr_no_uld.attr,
5257 &driver_attr_scsi_level.attr,
5258 &driver_attr_virtual_gb.attr,
5259 &driver_attr_add_host.attr,
5260 &driver_attr_vpd_use_hostno.attr,
5261 &driver_attr_sector_size.attr,
5262 &driver_attr_statistics.attr,
5263 &driver_attr_submit_queues.attr,
5264 &driver_attr_dix.attr,
5265 &driver_attr_dif.attr,
5266 &driver_attr_guard.attr,
5267 &driver_attr_ato.attr,
5268 &driver_attr_map.attr,
5269 &driver_attr_removable.attr,
5270 &driver_attr_host_lock.attr,
5271 &driver_attr_ndelay.attr,
5272 &driver_attr_strict.attr,
5273 &driver_attr_uuid_ctl.attr,
5274 &driver_attr_cdb_len.attr,
5277 ATTRIBUTE_GROUPS(sdebug_drv);
/* Root of the pseudo device tree all simulated adapters hang off */
5279 static struct device *pseudo_primary;
/*
 * Module init: validate parameters, size and allocate the ram-disk
 * backing store (plus optional DIF and provisioning-map stores), register
 * the pseudo root device / bus / driver, then create the initial hosts.
 * On failure, previously acquired resources are released via the goto
 * cleanup chain at the bottom.
 */
5281 static int __init scsi_debug_init(void)
5288 atomic_set(&retired_max_queue, 0);
/* --- parameter validation --- */
5290 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
5291 pr_warn("ndelay must be less than 1 second, ignored\n");
5293 } else if (sdebug_ndelay > 0)
5294 sdebug_jdelay = JDELAY_OVERRIDDEN;
5296 switch (sdebug_sector_size) {
5303 pr_err("invalid sector_size %d\n", sdebug_sector_size);
5307 switch (sdebug_dif) {
5308 case T10_PI_TYPE0_PROTECTION:
5310 case T10_PI_TYPE1_PROTECTION:
5311 case T10_PI_TYPE2_PROTECTION:
5312 case T10_PI_TYPE3_PROTECTION:
5313 have_dif_prot = true;
5317 pr_err("dif must be 0, 1, 2 or 3\n");
5321 if (sdebug_guard > 1) {
5322 pr_err("guard must be 0 or 1\n");
5326 if (sdebug_ato > 1) {
5327 pr_err("ato must be 0 or 1\n");
5331 if (sdebug_physblk_exp > 15) {
5332 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
5335 if (sdebug_max_luns > 256) {
5336 pr_warn("max_luns can be no more than 256, use default\n");
5337 sdebug_max_luns = DEF_MAX_LUNS;
5340 if (sdebug_lowest_aligned > 0x3fff) {
5341 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
5345 if (submit_queues < 1) {
5346 pr_err("submit_queues must be 1 or more\n");
/* --- per-submit-queue state --- */
5349 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
5351 if (sdebug_q_arr == NULL)
5353 for (k = 0; k < submit_queues; ++k)
5354 spin_lock_init(&sdebug_q_arr[k].qc_lock);
/* --- capacity and fake geometry --- */
5356 if (sdebug_dev_size_mb < 1)
5357 sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
5358 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
5359 sdebug_store_sectors = sz / sdebug_sector_size;
5360 sdebug_capacity = get_sdebug_capacity();
5362 /* play around with geometry, don't waste too much on track 0 */
5364 sdebug_sectors_per = 32;
5365 if (sdebug_dev_size_mb >= 256)
5367 else if (sdebug_dev_size_mb >= 16)
5369 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5370 (sdebug_sectors_per * sdebug_heads);
5371 if (sdebug_cylinders_per >= 1024) {
5372 /* other LLDs do this; implies >= 1GB ram disk ... */
5374 sdebug_sectors_per = 63;
5375 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5376 (sdebug_sectors_per * sdebug_heads);
/* --- ram-disk backing store (skipped when fake_rw is on) --- */
5379 if (sdebug_fake_rw == 0) {
5380 fake_storep = vmalloc(sz);
5381 if (NULL == fake_storep) {
5382 pr_err("out of memory, 1\n");
5386 memset(fake_storep, 0, sz);
5387 if (sdebug_num_parts > 0)
5388 sdebug_build_parts(fake_storep, sz);
/* --- optional DIF (protection information) store, 0xff = unwritten --- */
5394 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
5395 dif_storep = vmalloc(dif_size);
5397 pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
5399 if (dif_storep == NULL) {
5400 pr_err("out of mem. (DIX)\n");
5405 memset(dif_storep, 0xff, dif_size);
5408 /* Logical Block Provisioning */
5409 if (scsi_debug_lbp()) {
5410 sdebug_unmap_max_blocks =
5411 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
5413 sdebug_unmap_max_desc =
5414 clamp(sdebug_unmap_max_desc, 0U, 256U);
5416 sdebug_unmap_granularity =
5417 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
5419 if (sdebug_unmap_alignment &&
5420 sdebug_unmap_granularity <=
5421 sdebug_unmap_alignment) {
5422 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
5427 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
5428 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
5430 pr_info("%lu provisioning blocks\n", map_size);
5432 if (map_storep == NULL) {
5433 pr_err("out of mem. (MAP)\n");
5438 bitmap_zero(map_storep, map_size);
5440 /* Map first 1KB for partition table */
5441 if (sdebug_num_parts)
/* --- register pseudo root device, bus and driver --- */
5445 pseudo_primary = root_device_register("pseudo_0");
5446 if (IS_ERR(pseudo_primary)) {
5447 pr_warn("root_device_register() error\n");
5448 ret = PTR_ERR(pseudo_primary);
5451 ret = bus_register(&pseudo_lld_bus);
5453 pr_warn("bus_register error: %d\n", ret);
5456 ret = driver_register(&sdebug_driverfs_driver);
5458 pr_warn("driver_register error: %d\n", ret);
/* --- create the initial simulated host(s) --- */
5462 host_to_add = sdebug_add_host;
5463 sdebug_add_host = 0;
5465 for (k = 0; k < host_to_add; k++) {
5466 if (sdebug_add_adapter()) {
5467 pr_err("sdebug_add_adapter failed k=%d\n", k);
5473 pr_info("built %d host(s)\n", sdebug_add_host);
/* error unwind labels follow (reverse order of acquisition) */
5478 bus_unregister(&pseudo_lld_bus);
5480 root_device_unregister(pseudo_primary);
5486 kfree(sdebug_q_arr);
/*
 * Module exit: tear down every adapter added, unregister driver/bus/root
 * device and free the per-queue array (backing stores freed in the
 * elided lines).
 */
5490 static void __exit scsi_debug_exit(void)
5492 int k = sdebug_add_host;
5497 sdebug_remove_adapter();
5498 driver_unregister(&sdebug_driverfs_driver);
5499 bus_unregister(&pseudo_lld_bus);
5500 root_device_unregister(pseudo_primary);
5505 kfree(sdebug_q_arr);
/* device_initcall so a built-in scsi_debug initializes at boot */
5508 device_initcall(scsi_debug_init);
5509 module_exit(scsi_debug_exit);
/* device .release callback: frees the containing sdebug_host_info */
5511 static void sdebug_release_adapter(struct device *dev)
5513 struct sdebug_host_info *sdbg_host;
5515 sdbg_host = to_sdebug_host(dev);
/*
 * Create one simulated adapter: allocate the host structure, pre-create
 * num_tgts * max_luns device infos, link the host into the global list,
 * then register it as a device on the pseudo bus (which triggers the
 * driver probe). On error the partially built device list is torn down.
 */
5519 static int sdebug_add_adapter(void)
5521 int k, devs_per_host;
5523 struct sdebug_host_info *sdbg_host;
5524 struct sdebug_dev_info *sdbg_devinfo, *tmp;
5526 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5527 if (sdbg_host == NULL) {
5528 pr_err("out of memory at line %d\n", __LINE__);
5532 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5534 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5535 for (k = 0; k < devs_per_host; k++) {
5536 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5537 if (!sdbg_devinfo) {
5538 pr_err("out of memory at line %d\n", __LINE__);
5544 spin_lock(&sdebug_host_list_lock);
5545 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5546 spin_unlock(&sdebug_host_list_lock);
5548 sdbg_host->dev.bus = &pseudo_lld_bus;
5549 sdbg_host->dev.parent = pseudo_primary;
5550 sdbg_host->dev.release = &sdebug_release_adapter;
5551 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5553 error = device_register(&sdbg_host->dev);
/* error path: free any device infos created so far */
5562 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5564 list_del(&sdbg_devinfo->dev_list);
5565 kfree(sdbg_devinfo);
/*
 * Remove the most recently added simulated adapter: unlink the tail of
 * the host list under the lock, then unregister its device (the release
 * callback frees the host structure). No-op if the list is empty.
 */
5572 static void sdebug_remove_adapter(void)
5574 struct sdebug_host_info *sdbg_host = NULL;
5576 spin_lock(&sdebug_host_list_lock);
5577 if (!list_empty(&sdebug_host_list)) {
5578 sdbg_host = list_entry(sdebug_host_list.prev,
5579 struct sdebug_host_info, host_list);
5580 list_del(&sdbg_host->host_list);
5582 spin_unlock(&sdebug_host_list_lock);
5587 device_unregister(&sdbg_host->dev);
/*
 * .change_queue_depth host-template callback. Clamps the requested depth
 * to SDEBUG_CANQUEUE + 10 (deliberately allowed to exceed the host
 * capacity for testing) and applies it via scsi_change_queue_depth().
 * Queues are blocked for the duration to keep num_in_q consistent.
 */
5591 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5594 struct sdebug_dev_info *devip;
5596 block_unblock_all_queues(true);
5597 devip = (struct sdebug_dev_info *)sdev->hostdata;
5598 if (NULL == devip) {
5599 block_unblock_all_queues(false);
5602 num_in_q = atomic_read(&devip->num_in_q);
5606 /* allow to exceed max host qc_arr elements for testing */
5607 if (qdepth > SDEBUG_CANQUEUE + 10)
5608 qdepth = SDEBUG_CANQUEUE + 10;
5609 scsi_change_queue_depth(sdev, qdepth);
5611 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5612 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5613 __func__, qdepth, num_in_q);
5615 block_unblock_all_queues(false);
5616 return sdev->queue_depth;
/*
 * Error-injection helper: returns true (command should be silently
 * dropped to look like a timeout) on every nth command when the
 * TIMEOUT opt is set, or only for medium-access commands when the
 * MAC_TIMEOUT opt is set.
 */
5619 static bool fake_timeout(struct scsi_cmnd *scp)
5621 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5622 if (sdebug_every_nth < -1)
5623 sdebug_every_nth = -1;
5624 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5625 return true; /* ignore command causing timeout */
5626 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5627 scsi_medium_access_command(scp))
5628 return true; /* time out reads and writes */
/* Error-injection helper: report SCSI_MLQUEUE_HOST_BUSY every nth command */
5633 static bool fake_host_busy(struct scsi_cmnd *scp)
5635 return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
5636 (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
/*
 * queuecommand entry point for this pseudo host adapter (installed as
 * .queuecommand in sdebug_driver_template below).  Decodes the CDB,
 * locates the matching opcode_info_arr entry (including service-action
 * disambiguation), runs a series of sanity/error-injection checks, then
 * hands the chosen resp_* handler to schedule_resp() with the configured
 * delay.
 *
 * NOTE(review): this excerpt is a sampled subset of the original file;
 * braces, several local declarations (b[], len, sb, opcode, sdeb_i, na,
 * flags, errsts, has_wlun_rl, ...) and some error-path statements fall
 * between the visible lines.  Comments describe only what is visible.
 */
5639 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
5640 struct scsi_cmnd *scp)
5643 struct scsi_device *sdp = scp->device;
5644 const struct opcode_info_t *oip;
5645 const struct opcode_info_t *r_oip;
5646 struct sdebug_dev_info *devip;
5647 u8 *cmd = scp->cmnd;
/* r_pfp/pfp: candidate response handlers; pfp is the one finally run */
5648 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5649 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
/* start with no residual; a resp_* handler may adjust it later */
5657 scsi_set_resid(scp, 0);
5658 if (sdebug_statistics)
5659 atomic_inc(&sdebug_cmnd_count);
/* optional CDB hex dump unless NO_CDB_NOISE suppresses it */
5660 if (unlikely(sdebug_verbose &&
5661 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
5666 sb = (int)sizeof(b);
5668 strcpy(b, "too long, over 32 bytes");
5670 for (k = 0, n = 0; k < len && n < sb; ++k)
5671 n += scnprintf(b + n, sb - n, "%02x ",
5674 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
5675 blk_mq_unique_tag(scp->request), b);
/* error injection: pretend the host is busy (see fake_host_busy()) */
5677 if (fake_host_busy(scp))
5678 return SCSI_MLQUEUE_HOST_BUSY;
5679 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
/* out-of-range LUN (REPORT LUNS well-known LUN is exempt) */
5680 if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
/* map opcode -> index into the driver's command dispatch table */
5683 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
5684 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
5685 devip = (struct sdebug_dev_info *)sdp->hostdata;
5686 if (unlikely(!devip)) {
5687 devip = find_build_dev_info(sdp);
5691 na = oip->num_attached;
5693 if (na) { /* multiple commands with this opcode */
/* disambiguate via SERVICE ACTION field when the root entry has one */
5695 if (FF_SA & r_oip->flags) {
5696 if (F_SA_LOW & oip->flags)
5699 sa = get_unaligned_be16(cmd + 8);
5700 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5701 if (opcode == oip->opcode && sa == oip->sa)
5704 } else { /* since no service action only check opcode */
5705 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5706 if (opcode == oip->opcode)
/* no match: point sense data at the failing CDB field */
5711 if (F_SA_LOW & r_oip->flags)
5712 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5713 else if (F_SA_HIGH & r_oip->flags)
5714 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5716 mk_sense_invalid_opcode(scp);
5719 } /* else (when na==0) we assume the oip is a match */
5721 if (unlikely(F_INV_OP & flags)) {
5722 mk_sense_invalid_opcode(scp);
/* only F_RL_WLUN_OK commands are valid on the REPORT LUNS w-lun */
5725 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
5727 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
5728 my_name, opcode, " supported for wlun");
5729 mk_sense_invalid_opcode(scp);
5732 if (unlikely(sdebug_strict)) { /* check cdb against mask */
5736 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5737 rem = ~oip->len_mask[k] & cmd[k];
/* find highest disallowed bit so sense can name bit j of byte k */
5739 for (j = 7; j >= 0; --j, rem <<= 1) {
5743 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
/* pending unit attention? report it first unless F_SKIP_UA */
5748 if (unlikely(!(F_SKIP_UA & flags) &&
5749 find_first_bit(devip->uas_bm,
5750 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
5751 errsts = make_ua(scp, devip);
/* medium-access command on a stopped unit -> NOT READY */
5755 if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
5756 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5758 sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5759 "%s\n", my_name, "initializing command "
5761 errsts = check_condition_result;
5764 if (sdebug_fake_rw && (F_FAKE_RW & flags))
/* every_nth error injection, e.g. timeouts (see fake_timeout()) */
5766 if (unlikely(sdebug_every_nth)) {
5767 if (fake_timeout(scp))
5768 return 0; /* ignore command: make trouble */
5770 if (likely(oip->pfp))
5771 pfp = oip->pfp; /* calls a resp_* function */
5773 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
/* choose delay: override, long (>=1s) delay, or default jdelay */
5776 if (F_DELAY_OVERR & flags)
5777 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
5778 else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) {
5780 * If any delay is active, want F_LONG_DELAY to be at least 1
5781 * second and if sdebug_jdelay>0 want a long delay of that
5784 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
5786 jdelay = mult_frac(USER_HZ * jdelay, HZ, USER_HZ);
5787 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
5789 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
/* check-condition path: complete with sense already set, no handler */
5792 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
/* no device info path: fail the command with DID_NO_CONNECT */
5794 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
/*
 * Host template for the simulated adapter.  Wires the scsi_debug_*
 * callbacks (queuecommand, error handlers, slave lifecycle, procfs
 * show/write) into the SCSI mid-level.  can_queue and use_clustering
 * are adjusted at probe time (see sdebug_driver_probe()).
 * NOTE(review): excerpt is sampled — the closing "};" falls outside the
 * visible lines.
 */
5797 static struct scsi_host_template sdebug_driver_template = {
5798 .show_info = scsi_debug_show_info,
5799 .write_info = scsi_debug_write_info,
5800 .proc_name = sdebug_proc_name,
5801 .name = "SCSI DEBUG",
5802 .info = scsi_debug_info,
5803 .slave_alloc = scsi_debug_slave_alloc,
5804 .slave_configure = scsi_debug_slave_configure,
5805 .slave_destroy = scsi_debug_slave_destroy,
5806 .ioctl = scsi_debug_ioctl,
5807 .queuecommand = scsi_debug_queuecommand,
5808 .change_queue_depth = sdebug_change_qdepth,
5809 .eh_abort_handler = scsi_debug_abort,
5810 .eh_device_reset_handler = scsi_debug_device_reset,
5811 .eh_target_reset_handler = scsi_debug_target_reset,
5812 .eh_bus_reset_handler = scsi_debug_bus_reset,
5813 .eh_host_reset_handler = scsi_debug_host_reset,
5814 .can_queue = SDEBUG_CANQUEUE,
5816 .sg_tablesize = SG_MAX_SEGMENTS,
5817 .cmd_per_lun = DEF_CMD_PER_LUN,
5819 .use_clustering = DISABLE_CLUSTERING,
5820 .module = THIS_MODULE,
5821 .track_queue_depth = 1,
/*
 * Bus probe callback (installed as pseudo_lld_bus.probe): allocates a
 * Scsi_Host from sdebug_driver_template, configures queue counts,
 * target/LUN limits and T10 protection (DIF/DIX) capabilities from the
 * module parameters, then registers and scans the host.
 *
 * NOTE(review): excerpt is sampled — braces, the local declarations
 * (error, hprot), several error-gotos/breaks and some case fall-through
 * structure lie between the visible lines.
 */
5824 static int sdebug_driver_probe(struct device *dev)
5827 struct sdebug_host_info *sdbg_host;
5828 struct Scsi_Host *hpnt;
5831 sdbg_host = to_sdebug_host(dev);
/* per-host queue depth/clustering come from module parameters */
5833 sdebug_driver_template.can_queue = sdebug_max_queue;
5834 if (sdebug_clustering)
5835 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5836 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5838 pr_err("scsi_host_alloc failed\n");
/* cap requested submit queues at the number of possible CPUs */
5842 if (submit_queues > nr_cpu_ids) {
5843 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
5844 my_name, submit_queues, nr_cpu_ids);
5845 submit_queues = nr_cpu_ids;
5847 /* Decide whether to tell scsi subsystem that we want mq */
5848 /* Following should give the same answer for each host */
5849 if (shost_use_blk_mq(hpnt))
5850 hpnt->nr_hw_queues = submit_queues;
5852 sdbg_host->shost = hpnt;
/* stash host info pointer so callbacks can recover it from hostdata */
5853 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
/* keep max_id above this_id so the initiator's own id is excluded */
5854 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5855 hpnt->max_id = sdebug_num_tgts + 1;
5857 hpnt->max_id = sdebug_num_tgts;
5858 /* = sdebug_max_luns; */
/* allow the REPORT LUNS well-known LUN to be addressed */
5859 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
/* translate sdebug_dif module parameter into SHOST_DIF/DIX capability
 * bits; DIX bits are presumably gated on sdebug_dix in the elided
 * lines — TODO confirm against full source */
5863 switch (sdebug_dif) {
5865 case T10_PI_TYPE1_PROTECTION:
5866 hprot = SHOST_DIF_TYPE1_PROTECTION;
5868 hprot |= SHOST_DIX_TYPE1_PROTECTION;
5871 case T10_PI_TYPE2_PROTECTION:
5872 hprot = SHOST_DIF_TYPE2_PROTECTION;
5874 hprot |= SHOST_DIX_TYPE2_PROTECTION;
5877 case T10_PI_TYPE3_PROTECTION:
5878 hprot = SHOST_DIF_TYPE3_PROTECTION;
5880 hprot |= SHOST_DIX_TYPE3_PROTECTION;
5885 hprot |= SHOST_DIX_TYPE0_PROTECTION;
5889 scsi_host_set_prot(hpnt, hprot);
5891 if (have_dif_prot || sdebug_dix)
5892 pr_info("host protection%s%s%s%s%s%s%s\n",
5893 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5894 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5895 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5896 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5897 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5898 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5899 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
/* guard type 1 selects IP checksum, otherwise T10 CRC */
5901 if (sdebug_guard == 1)
5902 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5904 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5906 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5907 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5908 if (sdebug_every_nth) /* need stats counters for every_nth */
5909 sdebug_statistics = true;
5910 error = scsi_add_host(hpnt, &sdbg_host->dev);
5912 pr_err("scsi_add_host failed\n");
/* drop the allocation reference on the add-host failure path */
5914 scsi_host_put(hpnt);
5916 scsi_scan_host(hpnt);
/*
 * Bus remove callback (installed as pseudo_lld_bus.remove): unregisters
 * the Scsi_Host, frees every per-device info node hanging off the host,
 * and drops the final host reference.
 * NOTE(review): excerpt is sampled — braces and return statements fall
 * between the visible lines.
 */
5921 static int sdebug_driver_remove(struct device *dev)
5923 struct sdebug_host_info *sdbg_host;
5924 struct sdebug_dev_info *sdbg_devinfo, *tmp;
5926 sdbg_host = to_sdebug_host(dev);
5929 pr_err("Unable to locate host info\n");
5933 scsi_remove_host(sdbg_host->shost);
/* _safe variant: each node is deleted and freed while iterating */
5935 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5937 list_del(&sdbg_devinfo->dev_list);
5938 kfree(sdbg_devinfo);
5941 scsi_host_put(sdbg_host->shost);
/*
 * Bus match callback for pseudo_lld_bus.  Body is not visible in this
 * sampled excerpt; presumably it matches every device on the pseudo
 * bus unconditionally — TODO confirm against full source.
 */
5945 static int pseudo_lld_bus_match(struct device *dev,
5946 struct device_driver *dev_driver)
5951 static struct bus_type pseudo_lld_bus = {
5953 .match = pseudo_lld_bus_match,
5954 .probe = sdebug_driver_probe,
5955 .remove = sdebug_driver_remove,
5956 .drv_groups = sdebug_drv_groups,