2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * Copyright (C) 2001 - 2016 Douglas Gilbert
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
16 * For documentation see http://sg.danny.cz/sg/sdebug26.html
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
23 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/timer.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
45 #include <net/checksum.h>
47 #include <asm/unaligned.h>
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
59 #include "scsi_logging.h"
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SCSI_DEBUG_VERSION "1.86"
63 static const char *sdebug_version_date = "20160422";
65 #define MY_NAME "scsi_debug"
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define UA_RESET_ASC 0x29
78 #define UA_CHANGED_ASC 0x2a
79 #define TARGET_CHANGED_ASC 0x3f
80 #define LUNS_CHANGED_ASCQ 0x0e
81 #define INSUFF_RES_ASC 0x55
82 #define INSUFF_RES_ASCQ 0x3
83 #define POWER_ON_RESET_ASCQ 0x0
84 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
85 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
86 #define CAPACITY_CHANGED_ASCQ 0x9
87 #define SAVING_PARAMS_UNSUP 0x39
88 #define TRANSPORT_PROBLEM 0x4b
89 #define THRESHOLD_EXCEEDED 0x5d
90 #define LOW_POWER_COND_ON 0x5e
91 #define MISCOMPARE_VERIFY_ASC 0x1d
92 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
93 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
95 /* Additional Sense Code Qualifier (ASCQ) */
96 #define ACK_NAK_TO 0x3
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST 1
101 #define DEF_NUM_TGTS 1
102 #define DEF_MAX_LUNS 1
103 /* With these defaults, this driver will make 1 host with 1 target
104 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
107 #define DEF_DELAY 1 /* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB 8
111 #define DEF_D_SENSE 0
112 #define DEF_EVERY_NTH 0
113 #define DEF_FAKE_RW 0
115 #define DEF_HOST_LOCK 0
118 #define DEF_LBPWS10 0
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0 0
123 #define DEF_NUM_PARTS 0
125 #define DEF_OPT_BLKS 1024
126 #define DEF_PHYSBLK_EXP 0
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_UNMAP_ALIGNMENT 0
132 #define DEF_UNMAP_GRANULARITY 1
133 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
134 #define DEF_UNMAP_MAX_DESC 256
135 #define DEF_VIRTUAL_GB 0
136 #define DEF_VPD_USE_HOSTNO 1
137 #define DEF_WRITESAME_LENGTH 0xFFFF
139 #define DELAY_OVERRIDDEN -9999
141 /* bit mask values for sdebug_opts */
142 #define SDEBUG_OPT_NOISE 1
143 #define SDEBUG_OPT_MEDIUM_ERR 2
144 #define SDEBUG_OPT_TIMEOUT 4
145 #define SDEBUG_OPT_RECOVERED_ERR 8
146 #define SDEBUG_OPT_TRANSPORT_ERR 16
147 #define SDEBUG_OPT_DIF_ERR 32
148 #define SDEBUG_OPT_DIX_ERR 64
149 #define SDEBUG_OPT_MAC_TIMEOUT 128
150 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
151 #define SDEBUG_OPT_Q_NOISE 0x200
152 #define SDEBUG_OPT_ALL_TSF 0x400
153 #define SDEBUG_OPT_RARE_TSF 0x800
154 #define SDEBUG_OPT_N_WCE 0x1000
155 #define SDEBUG_OPT_RESET_NOISE 0x2000
156 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
157 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
158 SDEBUG_OPT_RESET_NOISE)
159 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
160 SDEBUG_OPT_TRANSPORT_ERR | \
161 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
162 SDEBUG_OPT_SHORT_TRANSFER)
163 /* When "every_nth" > 0 then modulo "every_nth" commands:
164 * - a no response is simulated if SDEBUG_OPT_TIMEOUT is set
165 * - a RECOVERED_ERROR is simulated on successful read and write
166 * commands if SDEBUG_OPT_RECOVERED_ERR is set.
167 * - a TRANSPORT_ERROR is simulated on successful read and write
168 * commands if SDEBUG_OPT_TRANSPORT_ERR is set.
170 * When "every_nth" < 0 then after "- every_nth" commands:
171 * - a no response is simulated if SDEBUG_OPT_TIMEOUT is set
172 * - a RECOVERED_ERROR is simulated on successful read and write
173 * commands if SDEBUG_OPT_RECOVERED_ERR is set.
174 * - a TRANSPORT_ERROR is simulated on successful read and write
175 * commands if SDEBUG_OPT_TRANSPORT_ERR is set.
176 * This will continue on every subsequent command until some other action
177 * occurs (e.g. the user writing a new value (other than -1 or 1) to
178 * every_nth via sysfs).
181 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
182 * priority order. In the subset implemented here lower numbers have higher
183 * priority. The UA numbers should be a sequence starting from 0 with
184 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
185 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
186 #define SDEBUG_UA_BUS_RESET 1
187 #define SDEBUG_UA_MODE_CHANGED 2
188 #define SDEBUG_UA_CAPACITY_CHANGED 3
189 #define SDEBUG_UA_LUNS_CHANGED 4
190 #define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
191 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
192 #define SDEBUG_NUM_UAS 7
194 /* for check_readiness() */
195 #define UAS_ONLY 1 /* check for UAs only */
196 #define UAS_TUR 0 /* if no UAs then check if media access possible */
198 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
199 * sector on read commands: */
200 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
201 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
203 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
204 * or "peripheral device" addressing (value 0) */
205 #define SAM2_LUN_ADDRESS_METHOD 0
207 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
208 * (for response) at one time. Can be reduced by max_queue option. Command
209 * responses are not queued when delay=0 and ndelay=0. The per-device
210 * DEF_CMD_PER_LUN can be changed via sysfs:
211 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
212 * SCSI_DEBUG_CANQUEUE. */
213 #define SCSI_DEBUG_CANQUEUE_WORDS 9 /* a WORD is bits in a long */
214 #define SCSI_DEBUG_CANQUEUE (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
215 #define DEF_CMD_PER_LUN 255
217 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
218 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
221 /* SCSI opcodes (first byte of cdb) mapped onto these indexes */
222 enum sdeb_opcode_index {
223 SDEB_I_INVALID_OPCODE = 0,
225 SDEB_I_REPORT_LUNS = 2,
226 SDEB_I_REQUEST_SENSE = 3,
227 SDEB_I_TEST_UNIT_READY = 4,
228 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
229 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
230 SDEB_I_LOG_SENSE = 7,
231 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
232 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
233 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
234 SDEB_I_START_STOP = 11,
235 SDEB_I_SERV_ACT_IN = 12, /* 12, 16 */
236 SDEB_I_SERV_ACT_OUT = 13, /* 12, 16 */
237 SDEB_I_MAINT_IN = 14,
238 SDEB_I_MAINT_OUT = 15,
239 SDEB_I_VERIFY = 16, /* 10 only */
240 SDEB_I_VARIABLE_LEN = 17,
241 SDEB_I_RESERVE = 18, /* 6, 10 */
242 SDEB_I_RELEASE = 19, /* 6, 10 */
243 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
244 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
245 SDEB_I_ATA_PT = 22, /* 12, 16 */
246 SDEB_I_SEND_DIAG = 23,
248 SDEB_I_XDWRITEREAD = 25, /* 10 only */
249 SDEB_I_WRITE_BUFFER = 26,
250 SDEB_I_WRITE_SAME = 27, /* 10, 16 */
251 SDEB_I_SYNC_CACHE = 28, /* 10 only */
252 SDEB_I_COMP_WRITE = 29,
253 SDEB_I_LAST_ELEMENT = 30, /* keep this last */
256 static const unsigned char opcode_ind_arr[256] = {
257 /* 0x0; 0x0->0x1f: 6 byte cdbs */
258 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
260 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
261 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
263 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
264 SDEB_I_ALLOW_REMOVAL, 0,
265 /* 0x20; 0x20->0x3f: 10 byte cdbs */
266 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
267 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
268 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
269 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
270 /* 0x40; 0x40->0x5f: 10 byte cdbs */
271 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
272 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
273 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
275 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
276 /* 0x60; 0x60->0x7d are reserved */
277 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
279 0, SDEB_I_VARIABLE_LEN,
280 /* 0x80; 0x80->0x9f: 16 byte cdbs */
281 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
282 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
283 0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
284 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
285 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
286 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
287 SDEB_I_MAINT_OUT, 0, 0, 0,
288 SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
290 0, 0, 0, 0, 0, 0, 0, 0,
291 0, 0, 0, 0, 0, 0, 0, 0,
292 /* 0xc0; 0xc0->0xff: vendor specific */
293 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
294 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
295 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
296 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
301 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
303 #define F_RL_WLUN_OK 0x10
304 #define F_SKIP_UA 0x20
305 #define F_DELAY_OVERR 0x40
306 #define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
307 #define F_SA_HIGH 0x100 /* as used by variable length cdbs */
308 #define F_INV_OP 0x200
309 #define F_FAKE_RW 0x400
310 #define F_M_ACCESS 0x800 /* media access */
312 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
313 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
314 #define FF_SA (F_SA_HIGH | F_SA_LOW)
316 struct sdebug_dev_info;
317 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
318 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
319 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
320 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
321 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
322 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
323 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
324 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
325 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
326 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
327 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
328 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
329 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
330 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
331 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
332 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
333 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
334 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
335 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
336 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
337 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
339 struct opcode_info_t {
340 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff
341 * for terminating element */
342 u8 opcode; /* if num_attached > 0, preferred */
343 u16 sa; /* service action */
344 u32 flags; /* OR-ed set of SDEB_F_* */
345 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
346 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
347 u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
348 /* ignore cdb bytes after position 15 */
351 static const struct opcode_info_t msense_iarr[1] = {
352 {0, 0x1a, 0, F_D_IN, NULL, NULL,
353 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
356 static const struct opcode_info_t mselect_iarr[1] = {
357 {0, 0x15, 0, F_D_OUT, NULL, NULL,
358 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
361 static const struct opcode_info_t read_iarr[3] = {
362 {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
363 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
365 {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
366 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
367 {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
368 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
372 static const struct opcode_info_t write_iarr[3] = {
373 {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */
374 {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
376 {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */
377 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
378 {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */
379 {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
383 static const struct opcode_info_t sa_in_iarr[1] = {
384 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
385 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
386 0xff, 0xff, 0xff, 0, 0xc7} },
389 static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */
390 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
391 NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
392 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
395 static const struct opcode_info_t maint_in_iarr[2] = {
396 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
397 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
399 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
400 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
404 static const struct opcode_info_t write_same_iarr[1] = {
405 {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
406 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
407 0xff, 0xff, 0xff, 0x1f, 0xc7} },
410 static const struct opcode_info_t reserve_iarr[1] = {
411 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
412 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
415 static const struct opcode_info_t release_iarr[1] = {
416 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
417 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
421 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
422 * plus the terminating elements for logic that scans this table such as
423 * REPORT SUPPORTED OPERATION CODES. */
424 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
426 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
427 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
428 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
429 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
430 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
431 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
433 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
434 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
435 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
436 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
437 {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
438 {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
440 {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
441 {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
442 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
443 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
445 {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
446 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
448 {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
449 {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
450 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
452 {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
453 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
454 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
455 {0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */
456 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
457 {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
458 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
459 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
460 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
461 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
462 {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
463 {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
465 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
466 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
467 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
468 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
470 {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
471 vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
472 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
473 {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
474 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
476 {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
477 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
480 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
481 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
482 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
483 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
484 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
485 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
486 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
487 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
488 {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
489 {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
490 {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
491 NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
493 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
494 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
495 0, 0, 0, 0} }, /* WRITE_BUFFER */
496 {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
497 write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
498 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
499 {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
500 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
502 {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
503 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
504 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */
507 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
508 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
511 struct sdebug_scmd_extra_t {
519 static int sdebug_add_host = DEF_NUM_HOST;
520 static int sdebug_ato = DEF_ATO;
521 static int sdebug_delay = DEF_DELAY;
522 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
523 static int sdebug_dif = DEF_DIF;
524 static int sdebug_dix = DEF_DIX;
525 static int sdebug_dsense = DEF_D_SENSE;
526 static int sdebug_every_nth = DEF_EVERY_NTH;
527 static int sdebug_fake_rw = DEF_FAKE_RW;
528 static unsigned int sdebug_guard = DEF_GUARD;
529 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
530 static int sdebug_max_luns = DEF_MAX_LUNS;
531 static int sdebug_max_queue = SCSI_DEBUG_CANQUEUE;
532 static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
533 static int sdebug_ndelay = DEF_NDELAY;
534 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
535 static int sdebug_no_uld;
536 static int sdebug_num_parts = DEF_NUM_PARTS;
537 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
538 static int sdebug_opt_blks = DEF_OPT_BLKS;
539 static int sdebug_opts = DEF_OPTS;
540 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
541 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
542 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
543 static int sdebug_sector_size = DEF_SECTOR_SIZE;
544 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
545 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
546 static unsigned int sdebug_lbpu = DEF_LBPU;
547 static unsigned int sdebug_lbpws = DEF_LBPWS;
548 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
549 static unsigned int sdebug_lbprz = DEF_LBPRZ;
550 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
551 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
552 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
553 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
554 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
555 static bool sdebug_removable = DEF_REMOVABLE;
556 static bool sdebug_clustering;
557 static bool sdebug_host_lock = DEF_HOST_LOCK;
558 static bool sdebug_strict = DEF_STRICT;
559 static bool sdebug_any_injecting_opt;
560 static bool sdebug_verbose;
562 static atomic_t sdebug_cmnd_count;
563 static atomic_t sdebug_completions;
564 static atomic_t sdebug_a_tsf; /* counter of 'almost' TSFs */
566 #define DEV_READONLY(TGT) (0)
568 static unsigned int sdebug_store_sectors;
569 static sector_t sdebug_capacity; /* in sectors */
571 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
572 may still need them */
573 static int sdebug_heads; /* heads per disk */
574 static int sdebug_cylinders_per; /* cylinders per surface */
575 static int sdebug_sectors_per; /* sectors per cylinder */
577 #define SDEBUG_MAX_PARTS 4
579 #define SCSI_DEBUG_MAX_CMD_LEN 32
581 static unsigned int scsi_debug_lbp(void)
583 return 0 == sdebug_fake_rw &&
584 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
587 struct sdebug_dev_info {
588 struct list_head dev_list;
589 unsigned int channel;
592 struct sdebug_host_info *sdbg_host;
593 unsigned long uas_bm[1];
595 char stopped; /* TODO: should be atomic */
599 struct sdebug_host_info {
600 struct list_head host_list;
601 struct Scsi_Host *shost;
603 struct list_head dev_info_list;
606 #define to_sdebug_host(d) \
607 container_of(d, struct sdebug_host_info, dev)
609 static LIST_HEAD(sdebug_host_list);
610 static DEFINE_SPINLOCK(sdebug_host_list_lock);
613 struct sdebug_hrtimer { /* ... is derived from hrtimer */
614 struct hrtimer hrt; /* must be first element */
618 struct sdebug_queued_cmd {
619 /* in_use flagged by a bit in queued_in_use_bm[] */
620 struct timer_list *cmnd_timerp;
621 struct tasklet_struct *tletp;
622 struct sdebug_hrtimer *sd_hrtp;
623 struct scsi_cmnd * a_cmnd;
625 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
626 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
629 static unsigned char * fake_storep; /* ramdisk storage */
630 static struct sd_dif_tuple *dif_storep; /* protection info */
631 static void *map_storep; /* provisioning map */
633 static unsigned long map_size;
634 static int num_aborts;
635 static int num_dev_resets;
636 static int num_target_resets;
637 static int num_bus_resets;
638 static int num_host_resets;
639 static int dix_writes;
640 static int dix_reads;
641 static int dif_errors;
643 static DEFINE_SPINLOCK(queued_arr_lock);
644 static DEFINE_RWLOCK(atomic_rw);
646 static char sdebug_proc_name[] = MY_NAME;
647 static const char *my_name = MY_NAME;
649 static struct bus_type pseudo_lld_bus;
651 static struct device_driver sdebug_driverfs_driver = {
652 .name = sdebug_proc_name,
653 .bus = &pseudo_lld_bus,
656 static const int check_condition_result =
657 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
659 static const int illegal_condition_result =
660 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
662 static const int device_qfull_result =
663 (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
665 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
666 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
668 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
670 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
673 static void *fake_store(unsigned long long lba)
675 lba = do_div(lba, sdebug_store_sectors);
677 return fake_storep + lba * sdebug_sector_size;
680 static struct sd_dif_tuple *dif_store(sector_t sector)
682 sector = sector_div(sector, sdebug_store_sectors);
684 return dif_storep + sector;
687 static int sdebug_add_adapter(void);
688 static void sdebug_remove_adapter(void);
690 static void sdebug_max_tgts_luns(void)
692 struct sdebug_host_info *sdbg_host;
693 struct Scsi_Host *hpnt;
695 spin_lock(&sdebug_host_list_lock);
696 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
697 hpnt = sdbg_host->shost;
698 if ((hpnt->this_id >= 0) &&
699 (sdebug_num_tgts > hpnt->this_id))
700 hpnt->max_id = sdebug_num_tgts + 1;
702 hpnt->max_id = sdebug_num_tgts;
703 /* sdebug_max_luns; */
704 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
706 spin_unlock(&sdebug_host_list_lock);
/* Tags whether an invalid field was found in the data-out buffer
 * (SDEB_IN_DATA) or in the CDB itself (SDEB_IN_CDB); consumed by
 * mk_sense_invalid_fld() to pick the matching ASC. */
709 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
711 /* Set in_bit to -1 to indicate no bit position of invalid field */
/* Build ILLEGAL REQUEST sense data that names the offending CDB or
 * parameter-list byte (and optionally the bit within it) via the
 * sense-key-specific (SKS) field.
 * NOTE(review): several lines of this listing are missing (e.g. 712,
 * 715, 717-719, 730-734); the surviving code lines are left untouched. */
713 mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
714 int in_byte, int in_bit)
716 unsigned char *sbuff;
720 sbuff = scp->sense_buffer;
/* guard: cannot build sense data without a sense buffer */
722 sdev_printk(KERN_ERR, scp->device,
723 "%s: sense_buffer is NULL\n", __func__);
/* c_d selects "invalid field in CDB" vs "... in parameter list" ASC */
726 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
727 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
728 scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
729 memset(sks, 0, sizeof(sks));
/* low 3 bits of sks[0] carry the bit position of the invalid field */
735 sks[0] |= 0x7 & in_bit;
/* SKS bytes 1..2 carry the big-endian byte offset of the field */
737 put_unaligned_be16(in_byte, sks + 1);
/* descriptor-format sense: SKS goes after the descriptor header;
 * fixed-format sense: SKS lives at bytes 15..17 */
743 memcpy(sbuff + sl + 4, sks, 3);
745 memcpy(sbuff + 15, sks, 3);
747 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
748 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
749 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
752 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
754 unsigned char *sbuff;
756 sbuff = scp->sense_buffer;
758 sdev_printk(KERN_ERR, scp->device,
759 "%s: sense_buffer is NULL\n", __func__);
762 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
764 scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
767 sdev_printk(KERN_INFO, scp->device,
768 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
769 my_name, key, asc, asq);
773 mk_sense_invalid_opcode(struct scsi_cmnd *scp)
775 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
778 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
780 if (sdebug_verbose) {
782 sdev_printk(KERN_INFO, dev,
783 "%s: BLKFLSBUF [0x1261]\n", __func__);
784 else if (0x5331 == cmd)
785 sdev_printk(KERN_INFO, dev,
786 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
789 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
793 /* return -ENOTTY; // correct return but upsets fdisk */
796 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
798 struct sdebug_host_info *sdhp;
799 struct sdebug_dev_info *dp;
801 spin_lock(&sdebug_host_list_lock);
802 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
803 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
804 if ((devip->sdbg_host == dp->sdbg_host) &&
805 (devip->target == dp->target))
806 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
809 spin_unlock(&sdebug_host_list_lock);
/* Scan the per-device unit-attention bitmap and, when a UA is pending,
 * build the matching sense data, consume the UA, and return
 * check_condition_result. With uas_only == UAS_TUR, also fail commands
 * while the device is stopped (NOT READY / initializing cmd required).
 * NOTE(review): this listing is missing several lines (case labels,
 * break statements, closing braces); code lines are left untouched. */
812 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
813 struct sdebug_dev_info * devip)
/* lowest set bit == highest-priority pending unit attention */
817 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
818 if (k != SDEBUG_NUM_UAS) {
819 const char *cp = NULL;
823 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
824 UA_RESET_ASC, POWER_ON_RESET_ASCQ);
826 cp = "power on reset";
828 case SDEBUG_UA_BUS_RESET:
829 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
830 UA_RESET_ASC, BUS_RESET_ASCQ);
834 case SDEBUG_UA_MODE_CHANGED:
835 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
836 UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
838 cp = "mode parameters changed";
840 case SDEBUG_UA_CAPACITY_CHANGED:
841 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
842 UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
844 cp = "capacity data changed";
846 case SDEBUG_UA_MICROCODE_CHANGED:
847 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
848 TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ);
850 cp = "microcode has been changed";
852 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
853 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
855 MICROCODE_CHANGED_WO_RESET_ASCQ);
857 cp = "microcode has been changed without reset";
859 case SDEBUG_UA_LUNS_CHANGED:
861 * SPC-3 behavior is to report a UNIT ATTENTION with
862 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
863 * on the target, until a REPORT LUNS command is
864 * received. SPC-4 behavior is to report it only once.
865 * NOTE: sdebug_scsi_level does not use the same
866 * values as struct scsi_device->scsi_level.
868 if (sdebug_scsi_level >= 6) /* SPC-4 and above */
869 clear_luns_changed_on_target(devip);
870 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
874 cp = "reported luns data has changed";
877 pr_warn("unexpected unit attention code=%d\n", k);
/* consume the UA so it is reported only once */
882 clear_bit(k, devip->uas_bm);
884 sdev_printk(KERN_INFO, SCpnt->device,
885 "%s reports: Unit attention: %s\n",
887 return check_condition_result;
/* no UA pending: a stopped unit still fails media-access checks */
889 if ((UAS_TUR == uas_only) && devip->stopped) {
890 mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
893 sdev_printk(KERN_INFO, SCpnt->device,
894 "%s reports: Not ready: %s\n", my_name,
895 "initializing command required");
896 return check_condition_result;
901 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
902 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
906 struct scsi_data_buffer *sdb = scsi_in(scp);
910 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
911 return DID_ERROR << 16;
913 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
915 sdb->resid = scsi_bufflen(scp) - act_len;
920 /* Returns number of bytes fetched into 'arr' or -1 if error. */
/* NOTE(review): interior lines (returns for the two guard tests,
 * braces) are missing from this extract. */
921 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
/* nothing to fetch when the command carries no data-out buffer */
924 if (!scsi_bufflen(scp))
/* only bidirectional or host-to-device transfers may be fetched */
926 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
929 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
/* Fixed INQUIRY response strings and the NAA-5 identifier bases used
 * when fabricating device/port identifiers in the VPD pages below.
 * inq_product_rev must track SCSI_DEBUG_VERSION (see top of file). */
933 static const char * inq_vendor_id = "Linux ";
934 static const char * inq_product_id = "scsi_debug ";
935 static const char *inq_product_rev = "0186"; /* version less '.' */
936 static const u64 naa5_comp_a = 0x5222222000000000ULL;
937 static const u64 naa5_comp_b = 0x5333333000000000ULL;
938 static const u64 naa5_comp_c = 0x5111111000000000ULL;
940 /* Device identification VPD page. Returns number of bytes placed in arr */
/* NOTE(review): many interior lines of this function are missing from
 * this extract (per-descriptor length bytes, num updates, braces and
 * the final return); treat the visible lines as a skeleton only. */
941 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
942 int target_dev_id, int dev_id_num,
943 const char * dev_id_str,
944 port_a = target_dev_id + 1;
950 /* T10 vendor identifier field format (faked) */
951 arr[0] = 0x2; /* ASCII */
954 memcpy(&arr[4], inq_vendor_id, 8);
955 memcpy(&arr[12], inq_product_id, 16);
956 memcpy(&arr[28], dev_id_str, dev_id_str_len);
957 num = 8 + 16 + dev_id_str_len;
/* a negative dev_id_num (the REPORT LUNS well-known LU) suppresses
 * the logical-unit NAA descriptor */
960 if (dev_id_num >= 0) {
961 /* NAA-5, Logical unit identifier (binary) */
962 arr[num++] = 0x1; /* binary (not necessarily sas) */
963 arr[num++] = 0x3; /* PIV=0, lu, naa */
966 put_unaligned_be64(naa5_comp_b + dev_id_num, arr + num);
968 /* Target relative port number */
969 arr[num++] = 0x61; /* proto=sas, binary */
970 arr[num++] = 0x94; /* PIV=1, target port, rel port */
971 arr[num++] = 0x0; /* reserved */
972 arr[num++] = 0x4; /* length */
973 arr[num++] = 0x0; /* reserved */
974 arr[num++] = 0x0; /* reserved */
976 arr[num++] = 0x1; /* relative port A */
978 /* NAA-5, Target port identifier */
979 arr[num++] = 0x61; /* proto=sas, binary */
980 arr[num++] = 0x93; /* piv=1, target port, naa */
983 put_unaligned_be64(naa5_comp_a + port_a, arr + num);
985 /* NAA-5, Target port group identifier */
986 arr[num++] = 0x61; /* proto=sas, binary */
987 arr[num++] = 0x95; /* piv=1, target port group id */
992 put_unaligned_be16(port_group_id, arr + num);
994 /* NAA-5, Target device identifier */
995 arr[num++] = 0x61; /* proto=sas, binary */
996 arr[num++] = 0xa3; /* piv=1, target device, naa */
999 put_unaligned_be64(naa5_comp_a + target_dev_id, arr + num);
1001 /* SCSI name string: Target device identifier */
1002 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1003 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
/* "naa." prefix must agree with naa5_comp_a above */
1006 memcpy(arr + num, "naa.52222220", 12);
1008 snprintf(b, sizeof(b), "%08X", target_dev_id);
1009 memcpy(arr + num, b, 8);
/* pad the SCSI name string to a multiple of 4 bytes */
1011 memset(arr + num, 0, 4);
/* Payload for the Software Interface Identification VPD page (0x84):
 * three 6-byte EUI-64-style identifiers.  NOTE(review): the closing
 * "};" of this initializer lies outside this extract. */
1016 static unsigned char vpd84_data[] = {
1017 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1018 0x22,0x22,0x22,0x0,0xbb,0x1,
1019 0x22,0x22,0x22,0x0,0xbb,0x2,
1022 /* Software interface identification VPD page */
1023 static int inquiry_evpd_84(unsigned char * arr)
1025 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1026 return sizeof(vpd84_data);
1029 /* Management network addresses VPD page */
/* NOTE(review): interior lines are missing from this extract (olen
 * computations, num advances, braces and the final return). */
1030 static int inquiry_evpd_85(unsigned char * arr)
1033 const char * na1 = "https://www.kernel.org/config";
1034 const char * na2 = "http://www.kernel.org/log";
/* first descriptor: storage-configuration URL for the logical unit */
1037 arr[num++] = 0x1; /* lu, storage config */
1038 arr[num++] = 0x0; /* reserved */
/* round the stored length up to the next multiple of 4 */
1043 plen = ((plen / 4) + 1) * 4;
1044 arr[num++] = plen; /* length, null termianted, padded */
1045 memcpy(arr + num, na1, olen);
1046 memset(arr + num + olen, 0, plen - olen);
/* second descriptor: logging URL for the logical unit */
1049 arr[num++] = 0x4; /* lu, logging */
1050 arr[num++] = 0x0; /* reserved */
1055 plen = ((plen / 4) + 1) * 4;
1056 arr[num++] = plen; /* length, null terminated, padded */
1057 memcpy(arr + num, na2, olen);
1058 memset(arr + num + olen, 0, plen - olen);
1064 /* SCSI ports VPD page */
/* Fabricates two SAS target ports (A = primary, B = secondary).
 * NOTE(review): interior lines (num advances after the 8-byte NAA
 * writes, braces, return) are missing from this extract. */
1065 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
1070 port_a = target_dev_id + 1;
1071 port_b = port_a + 1;
1072 arr[num++] = 0x0; /* reserved */
1073 arr[num++] = 0x0; /* reserved */
1075 arr[num++] = 0x1; /* relative port 1 (primary) */
1076 memset(arr + num, 0, 6);
1079 arr[num++] = 12; /* length tp descriptor */
1080 /* naa-5 target port identifier (A) */
1081 arr[num++] = 0x61; /* proto=sas, binary */
1082 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1083 arr[num++] = 0x0; /* reserved */
1084 arr[num++] = 0x8; /* length */
1085 put_unaligned_be64(naa5_comp_a + port_a, arr + num);
1087 arr[num++] = 0x0; /* reserved */
1088 arr[num++] = 0x0; /* reserved */
1090 arr[num++] = 0x2; /* relative port 2 (secondary) */
1091 memset(arr + num, 0, 6);
1094 arr[num++] = 12; /* length tp descriptor */
1095 /* naa-5 target port identifier (B) */
1096 arr[num++] = 0x61; /* proto=sas, binary */
1097 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1098 arr[num++] = 0x0; /* reserved */
1099 arr[num++] = 0x8; /* length */
1100 put_unaligned_be64(naa5_comp_a + port_b, arr + num);
/* Canned payload for the ATA Information VPD page (0x89): SAT layer
 * strings followed by a 512-byte ATA IDENTIFY DEVICE image.
 * NOTE(review): some data lines and the closing "};" of this
 * initializer lie outside this extract. */
1107 static unsigned char vpd89_data[] = {
1108 /* from 4th byte */ 0,0,0,0,
1109 'l','i','n','u','x',' ',' ',' ',
1110 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1112 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1114 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1115 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1116 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1117 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1119 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1121 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1123 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1124 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1125 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1126 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1127 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1128 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1129 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1130 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1131 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1132 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1133 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1134 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1135 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1136 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1137 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1138 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1139 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1140 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1141 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1142 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1143 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1144 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1145 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1146 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1147 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1148 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1151 /* ATA Information VPD page */
1152 static int inquiry_evpd_89(unsigned char * arr)
1154 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1155 return sizeof(vpd89_data);
/* Default payload for the Block Limits VPD page (0xb0); several fields
 * are overwritten at runtime by inquiry_evpd_b0() below.  NOTE(review):
 * the closing "};" of this initializer lies outside this extract. */
1159 static unsigned char vpdb0_data[] = {
1160 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1161 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1162 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1163 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1166 /* Block limits VPD page (SBC-3) */
/* NOTE(review): braces and the scsi_debug_lbp() guard around the
 * provisioning fields are missing from this extract; the two return
 * statements belong to different branches of that guard. */
1167 static int inquiry_evpd_b0(unsigned char * arr)
1171 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1173 /* Optimal transfer length granularity */
/* physical-block exponent converts to a granularity in logical blocks */
1174 gran = 1 << sdebug_physblk_exp;
1175 put_unaligned_be16(gran, arr + 2);
1177 /* Maximum Transfer Length */
1178 if (sdebug_store_sectors > 0x400)
1179 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1181 /* Optimal Transfer Length */
1182 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1185 /* Maximum Unmap LBA Count */
1186 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1188 /* Maximum Unmap Block Descriptor Count */
1189 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1192 /* Unmap Granularity Alignment */
1193 if (sdebug_unmap_alignment) {
1194 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1195 arr[28] |= 0x80; /* UGAVALID */
1198 /* Optimal Unmap Granularity */
1199 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1201 /* Maximum WRITE SAME Length */
1202 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1204 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1206 return sizeof(vpdb0_data);
1209 /* Block device characteristics VPD page (SBC-3) */
/* NOTE(review): lines setting arr[0]/arr[2] and the return statement
 * are missing from this extract. */
1210 static int inquiry_evpd_b1(unsigned char *arr)
1212 memset(arr, 0, 0x3c);
1214 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1216 arr[3] = 5; /* less than 1.8" */
1221 /* Logical block provisioning VPD page (SBC-3) */
/* NOTE(review): the lines setting the LBPU/LBPWS/provisioning-type
 * bits and the return statement are missing from this extract. */
1222 static int inquiry_evpd_b2(unsigned char *arr)
1224 memset(arr, 0, 0x4);
1225 arr[0] = 0; /* threshold exponent */
1242 #define SDEBUG_LONG_INQ_SZ 96
1243 #define SDEBUG_MAX_INQ_ARR_SZ 584
/* Respond to the INQUIRY command: standard data or, when the EVPD bit
 * is set, one of the supported Vital Product Data pages.
 * NOTE(review): numerous interior lines (locals, braces, kfree calls,
 * else branches) are missing from this extract. */
1245 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1247 unsigned char pq_pdt;
1248 unsigned char * arr;
1249 unsigned char *cmd = scp->cmnd;
1250 int alloc_len, n, ret;
1253 alloc_len = get_unaligned_be16(cmd + 3);
1254 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
/* allocation failure: ask mid-layer to retry the command */
1256 return DID_REQUEUE << 16;
1257 have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS);
1259 pq_pdt = 0x1e; /* present, wlun */
1260 else if (sdebug_no_lun_0 && (0 == devip->lun))
1261 pq_pdt = 0x7f; /* not present, no device type */
1263 pq_pdt = (sdebug_ptype & 0x1f);
1265 if (0x2 & cmd[1]) { /* CMDDT bit set */
1266 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1268 return check_condition_result;
1269 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1270 int lu_id_num, port_group_id, target_dev_id, len;
1272 int host_no = devip->sdbg_host->shost->host_no;
/* fabricate a port group id from host number and channel */
1274 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1275 (devip->channel & 0x7f);
1276 if (0 == sdebug_vpd_use_hostno)
1278 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1279 (devip->target * 1000) + devip->lun);
1280 target_dev_id = ((host_no + 1) * 2000) +
1281 (devip->target * 1000) - 3;
1282 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1283 if (0 == cmd[2]) { /* supported vital product data pages */
1284 arr[1] = cmd[2]; /*sanity */
1286 arr[n++] = 0x0; /* this page */
1287 arr[n++] = 0x80; /* unit serial number */
1288 arr[n++] = 0x83; /* device identification */
1289 arr[n++] = 0x84; /* software interface ident. */
1290 arr[n++] = 0x85; /* management network addresses */
1291 arr[n++] = 0x86; /* extended inquiry */
1292 arr[n++] = 0x87; /* mode page policy */
1293 arr[n++] = 0x88; /* SCSI ports */
1294 arr[n++] = 0x89; /* ATA information */
1295 arr[n++] = 0xb0; /* Block limits (SBC) */
1296 arr[n++] = 0xb1; /* Block characteristics (SBC) */
1297 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1299 arr[3] = n - 4; /* number of supported VPD pages */
1300 } else if (0x80 == cmd[2]) { /* unit serial number */
1301 arr[1] = cmd[2]; /*sanity */
1303 memcpy(&arr[4], lu_id_str, len);
1304 } else if (0x83 == cmd[2]) { /* device identification */
1305 arr[1] = cmd[2]; /*sanity */
1306 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
1307 target_dev_id, lu_id_num,
1309 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1310 arr[1] = cmd[2]; /*sanity */
1311 arr[3] = inquiry_evpd_84(&arr[4]);
1312 } else if (0x85 == cmd[2]) { /* Management network addresses */
1313 arr[1] = cmd[2]; /*sanity */
1314 arr[3] = inquiry_evpd_85(&arr[4]);
1315 } else if (0x86 == cmd[2]) { /* extended inquiry */
1316 arr[1] = cmd[2]; /*sanity */
1317 arr[3] = 0x3c; /* number of following entries */
1318 if (sdebug_dif == SD_DIF_TYPE3_PROTECTION)
1319 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1320 else if (sdebug_dif)
1321 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1323 arr[4] = 0x0; /* no protection stuff */
1324 arr[5] = 0x7; /* head of q, ordered + simple q's */
1325 } else if (0x87 == cmd[2]) { /* mode page policy */
1326 arr[1] = cmd[2]; /*sanity */
1327 arr[3] = 0x8; /* number of following entries */
1328 arr[4] = 0x2; /* disconnect-reconnect mp */
1329 arr[6] = 0x80; /* mlus, shared */
1330 arr[8] = 0x18; /* protocol specific lu */
1331 arr[10] = 0x82; /* mlus, per initiator port */
1332 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1333 arr[1] = cmd[2]; /*sanity */
1334 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1335 } else if (0x89 == cmd[2]) { /* ATA information */
1336 arr[1] = cmd[2]; /*sanity */
1337 n = inquiry_evpd_89(&arr[4]);
1338 put_unaligned_be16(n, arr + 2);
1339 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1340 arr[1] = cmd[2]; /*sanity */
1341 arr[3] = inquiry_evpd_b0(&arr[4]);
1342 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1343 arr[1] = cmd[2]; /*sanity */
1344 arr[3] = inquiry_evpd_b1(&arr[4]);
1345 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1346 arr[1] = cmd[2]; /*sanity */
1347 arr[3] = inquiry_evpd_b2(&arr[4]);
/* unsupported VPD page code: report invalid field in CDB byte 2 */
1349 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1351 return check_condition_result;
1353 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1354 ret = fill_from_dev_buffer(scp, arr,
1355 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1359 /* drops through here for a standard inquiry */
1360 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1361 arr[2] = sdebug_scsi_level;
1362 arr[3] = 2; /* response_data_format==2 */
1363 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1364 arr[5] = sdebug_dif ? 1 : 0; /* PROTECT bit */
1365 if (0 == sdebug_vpd_use_hostno)
1366 arr[5] = 0x10; /* claim: implicit TGPS */
1367 arr[6] = 0x10; /* claim: MultiP */
1368 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1369 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1370 memcpy(&arr[8], inq_vendor_id, 8);
1371 memcpy(&arr[16], inq_product_id, 16);
1372 memcpy(&arr[32], inq_product_rev, 4);
1373 /* version descriptors (2 bytes each) follow */
1374 arr[58] = 0x0; arr[59] = 0xa2; /* SAM-5 rev 4 */
1375 arr[60] = 0x4; arr[61] = 0x68; /* SPC-4 rev 37 */
1377 if (sdebug_ptype == 0) {
1378 arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1379 } else if (sdebug_ptype == 1) {
1380 arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1382 arr[n++] = 0x20; arr[n++] = 0xe6; /* SPL-3 rev 7 */
1383 ret = fill_from_dev_buffer(scp, arr,
1384 min(alloc_len, SDEBUG_LONG_INQ_SZ));
/* Respond to REQUEST SENSE, honouring the descriptor-format (dsense)
 * bit and any pending informational-exception condition set up via
 * the IEC mode page (MRIE==6 + TEST).  NOTE(review): interior lines
 * (locals, braces, format-conversion bodies, len setup) are missing
 * from this extract. */
1389 static int resp_requests(struct scsi_cmnd * scp,
1390 struct sdebug_dev_info * devip)
1392 unsigned char * sbuff;
1393 unsigned char *cmd = scp->cmnd;
1394 unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1398 memset(arr, 0, sizeof(arr));
1399 dsense = !!(cmd[1] & 1);
1400 sbuff = scp->sense_buffer;
/* IEC mode page armed with TEST bit and MRIE==6: synthesize a
 * THRESHOLD EXCEEDED informational exception */
1401 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1404 arr[1] = 0x0; /* NO_SENSE in sense_key */
1405 arr[2] = THRESHOLD_EXCEEDED;
1406 arr[3] = 0xff; /* TEST set and MRIE==6 */
1410 arr[2] = 0x0; /* NO_SENSE in sense_key */
1411 arr[7] = 0xa; /* 18 byte sense buffer */
1412 arr[12] = THRESHOLD_EXCEEDED;
1413 arr[13] = 0xff; /* TEST set and MRIE==6 */
1416 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1417 if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1418 ; /* have sense and formats match */
1419 else if (arr[0] <= 0x70) {
1429 } else if (dsense) {
/* convert fixed-format sense to descriptor format */
1432 arr[1] = sbuff[2]; /* sense key */
1433 arr[2] = sbuff[12]; /* asc */
1434 arr[3] = sbuff[13]; /* ascq */
/* clear the (now reported) sense for subsequent commands */
1446 mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1447 return fill_from_dev_buffer(scp, arr, len);
/* Respond to START STOP UNIT: toggles the per-device 'stopped' flag.
 * NOTE(review): the lines extracting the start bit, validating
 * power_cond and the final return are missing from this extract. */
1450 static int resp_start_stop(struct scsi_cmnd * scp,
1451 struct sdebug_dev_info * devip)
1453 unsigned char *cmd = scp->cmnd;
1454 int power_cond, start;
1456 power_cond = (cmd[4] & 0xf0) >> 4;
/* non-zero POWER CONDITION is not supported: invalid field in CDB */
1458 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1459 return check_condition_result;
1462 if (start == devip->stopped)
1463 devip->stopped = !start;
1467 static sector_t get_sdebug_capacity(void)
1469 static const unsigned int gibibyte = 1073741824;
1471 if (sdebug_virtual_gb > 0)
1472 return (sector_t)sdebug_virtual_gb *
1473 (gibibyte / sdebug_sector_size);
1475 return sdebug_store_sectors;
1478 #define SDEBUG_READCAP_ARR_SZ 8
/* Respond to READ CAPACITY(10): last LBA (or 0xffffffff when the
 * capacity exceeds 32 bits, telling the initiator to use
 * READ CAPACITY(16)) plus the logical block size.  NOTE(review):
 * the opening brace, 'capac' declaration and else keyword are
 * missing from this extract. */
1479 static int resp_readcap(struct scsi_cmnd * scp,
1480 struct sdebug_dev_info * devip)
1482 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1485 /* following just in case virtual_gb changed */
1486 sdebug_capacity = get_sdebug_capacity();
1487 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1488 if (sdebug_capacity < 0xffffffff) {
1489 capac = (unsigned int)sdebug_capacity - 1;
1490 put_unaligned_be32(capac, arr + 0);
1492 put_unaligned_be32(0xffffffff, arr + 0);
1493 put_unaligned_be16(sdebug_sector_size, arr + 6);
1494 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1497 #define SDEBUG_READCAP16_ARR_SZ 32
/* Respond to READ CAPACITY(16): 64-bit last LBA, block size, physical
 * block exponent, alignment, provisioning (LBPME/LBPRZ) and
 * protection (P_TYPE/PROT_EN) fields.  NOTE(review): braces, the
 * alloc_len declaration and the sdebug_dif guard around arr[12] are
 * missing from this extract. */
1498 static int resp_readcap16(struct scsi_cmnd * scp,
1499 struct sdebug_dev_info * devip)
1501 unsigned char *cmd = scp->cmnd;
1502 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1505 alloc_len = get_unaligned_be32(cmd + 10);
1506 /* following just in case virtual_gb changed */
1507 sdebug_capacity = get_sdebug_capacity();
1508 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1509 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1510 put_unaligned_be32(sdebug_sector_size, arr + 8);
1511 arr[13] = sdebug_physblk_exp & 0xf;
1512 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1514 if (scsi_debug_lbp()) {
1515 arr[14] |= 0x80; /* LBPME */
1517 arr[14] |= 0x40; /* LBPRZ */
1520 arr[15] = sdebug_lowest_aligned & 0xff;
1523 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1524 arr[12] |= 1; /* PROT_EN */
1527 return fill_from_dev_buffer(scp, arr,
1528 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1531 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
/* Respond to REPORT TARGET PORT GROUPS (maintenance-in).
 * NOTE(review): interior lines (n advances after the be16 writes,
 * rlen computation, kfree and return) are missing from this extract. */
1533 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1534 struct sdebug_dev_info * devip)
1536 unsigned char *cmd = scp->cmnd;
1537 unsigned char * arr;
1538 int host_no = devip->sdbg_host->shost->host_no;
1539 int n, ret, alen, rlen;
1540 int port_group_a, port_group_b, port_a, port_b;
1542 alen = get_unaligned_be32(cmd + 6);
1543 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
/* allocation failure: ask mid-layer to retry */
1545 return DID_REQUEUE << 16;
1547 * EVPD page 0x88 states we have two ports, one
1548 * real and a fake port with no device connected.
1549 * So we create two port groups with one port each
1550 * and set the group with port B to unavailable.
1552 port_a = 0x1; /* relative port A */
1553 port_b = 0x2; /* relative port B */
1554 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1555 (devip->channel & 0x7f);
1556 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1557 (devip->channel & 0x7f) + 0x80;
1560 * The asymmetric access state is cycled according to the host_id.
1563 if (0 == sdebug_vpd_use_hostno) {
1564 arr[n++] = host_no % 3; /* Asymm access state */
1565 arr[n++] = 0x0F; /* claim: all states are supported */
1567 arr[n++] = 0x0; /* Active/Optimized path */
1568 arr[n++] = 0x01; /* only support active/optimized paths */
1570 put_unaligned_be16(port_group_a, arr + n);
1572 arr[n++] = 0; /* Reserved */
1573 arr[n++] = 0; /* Status code */
1574 arr[n++] = 0; /* Vendor unique */
1575 arr[n++] = 0x1; /* One port per group */
1576 arr[n++] = 0; /* Reserved */
1577 arr[n++] = 0; /* Reserved */
1578 put_unaligned_be16(port_a, arr + n);
/* second descriptor: port group B is reported unavailable */
1580 arr[n++] = 3; /* Port unavailable */
1581 arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1582 put_unaligned_be16(port_group_b, arr + n);
1584 arr[n++] = 0; /* Reserved */
1585 arr[n++] = 0; /* Status code */
1586 arr[n++] = 0; /* Vendor unique */
1587 arr[n++] = 0x1; /* One port per group */
1588 arr[n++] = 0; /* Reserved */
1589 arr[n++] = 0; /* Reserved */
1590 put_unaligned_be16(port_b, arr + n);
1594 put_unaligned_be32(rlen, arr + 0);
1597 * Return the smallest value of either
1598 * - The allocated length
1599 * - The constructed command length
1600 * - The maximum array size
1603 ret = fill_from_dev_buffer(scp, arr,
1604 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
/* Respond to REPORT SUPPORTED OPERATION CODES (maintenance-in,
 * MI_REPORT_SUPPORTED_OPERATION_CODES).  Walks opcode_info_arr to
 * list all commands (reporting_opts 0) or to describe one command
 * (reporting_opts 1/2/3).  rctd adds a command-timeouts descriptor
 * per entry.  NOTE(review): interior lines (locals, braces, offset
 * bumps, kfree calls) are missing from this extract. */
1610 resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1613 u8 reporting_opts, req_opcode, sdeb_i, supp;
1615 u32 alloc_len, a_len;
1616 int k, offset, len, errsts, count, bump, na;
1617 const struct opcode_info_t *oip;
1618 const struct opcode_info_t *r_oip;
1620 u8 *cmd = scp->cmnd;
1622 rctd = !!(cmd[2] & 0x80);
1623 reporting_opts = cmd[2] & 0x7;
1624 req_opcode = cmd[3];
1625 req_sa = get_unaligned_be16(cmd + 4);
1626 alloc_len = get_unaligned_be32(cmd + 6);
1627 if (alloc_len < 4 || alloc_len > 0xffff) {
1628 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1629 return check_condition_result;
1631 if (alloc_len > 8192)
1635 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1637 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1639 return check_condition_result;
1641 switch (reporting_opts) {
1642 case 0: /* all commands */
1643 /* count number of commands */
1644 for (count = 0, oip = opcode_info_arr;
1645 oip->num_attached != 0xff; ++oip) {
1646 if (F_INV_OP & oip->flags)
1648 count += (oip->num_attached + 1);
1650 bump = rctd ? 20 : 8;
1651 put_unaligned_be32(count * bump, arr);
1652 for (offset = 4, oip = opcode_info_arr;
1653 oip->num_attached != 0xff && offset < a_len; ++oip) {
1654 if (F_INV_OP & oip->flags)
1656 na = oip->num_attached;
1657 arr[offset] = oip->opcode;
1658 put_unaligned_be16(oip->sa, arr + offset + 2);
/* CTDP bit when timeout descriptors are requested */
1660 arr[offset + 5] |= 0x2;
1661 if (FF_SA & oip->flags)
1662 arr[offset + 5] |= 0x1;
1663 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1665 put_unaligned_be16(0xa, arr + offset + 8);
/* then each command attached to this primary opcode */
1667 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1668 if (F_INV_OP & oip->flags)
1671 arr[offset] = oip->opcode;
1672 put_unaligned_be16(oip->sa, arr + offset + 2);
1674 arr[offset + 5] |= 0x2;
1675 if (FF_SA & oip->flags)
1676 arr[offset + 5] |= 0x1;
1677 put_unaligned_be16(oip->len_mask[0],
1680 put_unaligned_be16(0xa,
1687 case 1: /* one command: opcode only */
1688 case 2: /* one command: opcode plus service action */
1689 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1690 sdeb_i = opcode_ind_arr[req_opcode];
1691 oip = &opcode_info_arr[sdeb_i];
1692 if (F_INV_OP & oip->flags) {
1696 if (1 == reporting_opts) {
1697 if (FF_SA & oip->flags) {
/* opcode requires a service action; byte 2 was wrong */
1698 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1701 return check_condition_result;
1704 } else if (2 == reporting_opts &&
1705 0 == (FF_SA & oip->flags)) {
1706 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1707 kfree(arr); /* point at requested sa */
1708 return check_condition_result;
1710 if (0 == (FF_SA & oip->flags) &&
1711 req_opcode == oip->opcode)
1713 else if (0 == (FF_SA & oip->flags)) {
1714 na = oip->num_attached;
1715 for (k = 0, oip = oip->arrp; k < na;
1717 if (req_opcode == oip->opcode)
1720 supp = (k >= na) ? 1 : 3;
1721 } else if (req_sa != oip->sa) {
1722 na = oip->num_attached;
1723 for (k = 0, oip = oip->arrp; k < na;
1725 if (req_sa == oip->sa)
1728 supp = (k >= na) ? 1 : 3;
/* supported: emit the CDB usage bitmap for this command */
1732 u = oip->len_mask[0];
1733 put_unaligned_be16(u, arr + 2);
1734 arr[4] = oip->opcode;
1735 for (k = 1; k < u; ++k)
1736 arr[4 + k] = (k < 16) ?
1737 oip->len_mask[k] : 0xff;
1742 arr[1] = (rctd ? 0x80 : 0) | supp;
1744 put_unaligned_be16(0xa, arr + offset);
/* invalid reporting_opts value */
1749 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1751 return check_condition_result;
1753 offset = (offset < a_len) ? offset : a_len;
1754 len = (offset < alloc_len) ? offset : alloc_len;
1755 errsts = fill_from_dev_buffer(scp, arr, len);
/* Respond to REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS.
 * NOTE(review): locals, braces and the extended (repd) length lines
 * are missing from this extract. */
1761 resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1766 u8 *cmd = scp->cmnd;
1768 memset(arr, 0, sizeof(arr));
1769 repd = !!(cmd[2] & 0x80);
1770 alloc_len = get_unaligned_be32(cmd + 6);
1771 if (alloc_len < 4) {
1772 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1773 return check_condition_result;
/* claim support for abort task (set), LU reset, and I_T nexus reset */
1775 arr[0] = 0xc8; /* ATS | ATSS | LURS */
1776 arr[1] = 0x1; /* ITNRS */
1783 len = (len < alloc_len) ? len : alloc_len;
1784 return fill_from_dev_buffer(scp, arr, len);
/* <<Following mode page info copied from ST318451LW>> */

static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page for MODE SENSE */
	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
					5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)	/* changeable values: none are changeable */
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page for MODE SENSE */
	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)	/* changeable values: none are changeable */
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
/* Format Device mode page for MODE SENSE; sectors-per-track and
 * sector size are filled in from the module parameters at runtime.
 * NOTE(review): the opening brace and the pcontrol==1 guard before the
 * memset are missing from this extract. */
1811 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1812 { /* Format device page for mode_sense */
1813 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1814 0, 0, 0, 0, 0, 0, 0, 0,
1815 0, 0, 0, 0, 0x40, 0, 0, 0};
1817 memcpy(p, format_pg, sizeof(format_pg));
1818 put_unaligned_be16(sdebug_sectors_per, p + 10);
1819 put_unaligned_be16(sdebug_sector_size, p + 12);
1820 if (sdebug_removable)
1821 p[20] |= 0x20; /* should agree with INQUIRY */
1823 memset(p + 2, 0, sizeof(format_pg) - 2);
1824 return sizeof(format_pg);
/* Caching mode page for MODE SENSE.  NOTE(review): the file-scope
 * 'caching_pg' current-values array referenced below is defined on
 * lines missing from this extract; ch_caching_pg lists which bits are
 * changeable and d_caching_pg holds the default values. */
1827 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1828 { /* Caching page for mode_sense */
1829 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1830 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1831 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1832 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
/* module option SDEBUG_OPT_N_WCE forces write-cache-enable off */
1834 if (SDEBUG_OPT_N_WCE & sdebug_opts)
1835 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
1836 memcpy(p, caching_pg, sizeof(caching_pg));
1838 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1839 else if (2 == pcontrol)
1840 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1841 return sizeof(caching_pg);
/* Control mode page for MODE SENSE.  NOTE(review): the file-scope
 * 'ctrl_m_pg' current-values array and the sdebug_dsense/sdebug_ato
 * guards around the bit updates below are on lines missing from this
 * extract. */
1844 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1845 { /* Control mode page for mode_sense */
1846 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1848 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
/* D_SENSE bit tracks the dsense module parameter */
1852 ctrl_m_pg[2] |= 0x4;
1854 ctrl_m_pg[2] &= ~0x4;
1857 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1859 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1861 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1862 else if (2 == pcontrol)
1863 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1864 return sizeof(ctrl_m_pg);
/* Informational Exceptions Control mode page for MODE SENSE; the
 * file-scope 'iec_m_pg' current-values array (defined on lines missing
 * from this extract) is also consulted by resp_requests() above. */
1868 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1869 { /* Informational Exceptions control mode page for mode_sense */
1870 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1872 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1875 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1877 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1878 else if (2 == pcontrol)
1879 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1880 return sizeof(iec_m_pg);
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for MODE SENSE */
	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (1 == pcontrol)	/* changeable values: none are changeable */
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
/* SAS Phy Control And Discover mode subpage (0x19/0x1) for MODE SENSE;
 * two phy descriptors whose SAS addresses and phy identifiers are
 * patched in at runtime.  NOTE(review): the parameter list tail
 * (target_dev_id), locals, braces, the pcontrol==1 guard and return
 * keyword context are on lines missing from this extract. */
1895 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1897 { /* SAS phy control and discover mode page for mode_sense */
1898 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1899 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1900 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1901 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1902 0x2, 0, 0, 0, 0, 0, 0, 0,
1903 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1904 0, 0, 0, 0, 0, 0, 0, 0,
1905 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1906 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1907 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1908 0x3, 0, 0, 0, 0, 0, 0, 0,
1909 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1910 0, 0, 0, 0, 0, 0, 0, 0,
/* patch in the fabricated device/attached-device SAS addresses */
1914 put_unaligned_be64(naa5_comp_a, sas_pcd_m_pg + 16);
1915 put_unaligned_be64(naa5_comp_c + 1, sas_pcd_m_pg + 24);
1916 put_unaligned_be64(naa5_comp_a, sas_pcd_m_pg + 64);
1917 put_unaligned_be64(naa5_comp_c + 1, sas_pcd_m_pg + 72);
1918 port_a = target_dev_id + 1;
1919 port_b = port_a + 1;
1920 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1921 put_unaligned_be32(port_a, p + 20);
1922 put_unaligned_be32(port_b, p + 48 + 20);
1924 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1925 return sizeof(sas_pcd_m_pg);
/* SAS SSP shared protocol-specific port mode subpage (0x19/0x2) for
 * MODE SENSE.  NOTE(review): the tail of the initializer, the opening
 * brace and the pcontrol==1 guard are on lines missing from this
 * extract. */
1928 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1929 { /* SAS SSP shared protocol specific port mode subpage */
1930 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1931 0, 0, 0, 0, 0, 0, 0, 0,
1934 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1936 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1937 return sizeof(sas_sha_m_pg);
1940 #define SDEBUG_MAX_MSENSE_SZ 256
/* Respond to MODE SENSE(6) and MODE SENSE(10): builds the mode
 * parameter header, optional block descriptor(s) and the requested
 * page(s) via the resp_*_pg helpers above.  NOTE(review): interior
 * lines (locals, braces, header/bd_len setup, offset bumps, breaks)
 * are missing from this extract. */
1943 resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1945 unsigned char dbd, llbaa;
1946 int pcontrol, pcode, subpcode, bd_len;
1947 unsigned char dev_spec;
1948 int alloc_len, msense_6, offset, len, target_dev_id;
1949 int target = scp->device->id;
1951 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1952 unsigned char *cmd = scp->cmnd;
1954 dbd = !!(cmd[1] & 0x8);
1955 pcontrol = (cmd[2] & 0xc0) >> 6;
1956 pcode = cmd[2] & 0x3f;
1958 msense_6 = (MODE_SENSE == cmd[0]);
/* LLBAA (16-byte block descriptor) only exists in the 10-byte CDB */
1959 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1960 if ((0 == sdebug_ptype) && (0 == dbd))
1961 bd_len = llbaa ? 16 : 8;
1964 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
1965 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1966 if (0x3 == pcontrol) { /* Saving values not supported */
1967 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1968 return check_condition_result;
1970 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1971 (devip->target * 1000) - 3;
1972 /* set DPOFUA bit for disks */
1973 if (0 == sdebug_ptype)
1974 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1984 arr[4] = 0x1; /* set LONGLBA bit */
1985 arr[7] = bd_len; /* assume 255 or less */
1989 if ((bd_len > 0) && (!sdebug_capacity))
1990 sdebug_capacity = get_sdebug_capacity();
/* 8-byte block descriptor: 32-bit block count + block length */
1993 if (sdebug_capacity > 0xfffffffe)
1994 put_unaligned_be32(0xffffffff, ap + 0)
1996 put_unaligned_be32(sdebug_capacity, ap + 0);
1997 put_unaligned_be16(sdebug_sector_size, ap + 6);
2000 } else if (16 == bd_len) {
2001 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2002 put_unaligned_be32(sdebug_sector_size, ap + 12);
/* only page 0x19 supports subpages here */
2007 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2008 /* TODO: Control Extension page */
2009 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2010 return check_condition_result;
2013 case 0x1: /* Read-Write error recovery page, direct access */
2014 len = resp_err_recov_pg(ap, pcontrol, target);
2017 case 0x2: /* Disconnect-Reconnect page, all devices */
2018 len = resp_disconnect_pg(ap, pcontrol, target);
2021 case 0x3: /* Format device page, direct access */
2022 len = resp_format_pg(ap, pcontrol, target);
2025 case 0x8: /* Caching page, direct access */
2026 len = resp_caching_pg(ap, pcontrol, target);
2029 case 0xa: /* Control Mode page, all devices */
2030 len = resp_ctrl_m_pg(ap, pcontrol, target);
2033 case 0x19: /* if spc==1 then sas phy, control+discover */
2034 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2035 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2036 return check_condition_result;
2039 if ((0x0 == subpcode) || (0xff == subpcode))
2040 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2041 if ((0x1 == subpcode) || (0xff == subpcode))
2042 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2044 if ((0x2 == subpcode) || (0xff == subpcode))
2045 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2048 case 0x1c: /* Informational Exceptions Mode page, all devices */
2049 len = resp_iec_m_pg(ap, pcontrol, target);
2052 case 0x3f: /* Read all Mode pages */
2053 if ((0 == subpcode) || (0xff == subpcode)) {
2054 len = resp_err_recov_pg(ap, pcontrol, target);
2055 len += resp_disconnect_pg(ap + len, pcontrol, target);
2056 len += resp_format_pg(ap + len, pcontrol, target);
2057 len += resp_caching_pg(ap + len, pcontrol, target);
2058 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2059 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2060 if (0xff == subpcode) {
2061 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2062 target, target_dev_id);
2063 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2065 len += resp_iec_m_pg(ap + len, pcontrol, target);
2067 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2068 return check_condition_result;
/* unknown page code */
2073 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2074 return check_condition_result;
/* mode data length: excludes itself (1 byte for MS6, 2 for MS10) */
2077 arr[0] = offset - 1;
2079 put_unaligned_be16((offset - 2), arr + 0);
2080 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2083 #define SDEBUG_MAX_MSELECT_SZ 512
/*
 * MODE SELECT (6/10) handler. Fetches the parameter list from the
 * data-out buffer, validates header, block-descriptor and page lengths,
 * then copies the changeable bytes of the Caching (0x8), Control (0xa)
 * or Informational Exceptions (0x1c) mode page into the driver's saved
 * page and raises a MODE PARAMETERS CHANGED unit attention.
 * NOTE(review): several original lines (braces, a few statements) are
 * elided from this extraction; comments annotate only what is visible.
 */
2086 resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2088 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2089 int param_len, res, mpage;
2090 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2091 unsigned char *cmd = scp->cmnd;
/* MODE SELECT(6) vs MODE SELECT(10) changes field offsets below */
2092 int mselect6 = (MODE_SELECT == cmd[0]);
2094 memset(arr, 0, sizeof(arr));
2097 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
/* PF must be set, SP (save pages) is not supported here */
2098 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2099 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2100 return check_condition_result;
2102 res = fetch_to_dev_buffer(scp, arr, param_len);
2104 return DID_ERROR << 16;
2105 else if (sdebug_verbose && (res < param_len))
2106 sdev_printk(KERN_INFO, scp->device,
2107 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2108 __func__, param_len, res);
/* mode data length and block descriptor length from parameter header */
2109 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2110 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2112 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2113 return check_condition_result;
/* off = start of the first mode page within the parameter list */
2115 off = bd_len + (mselect6 ? 4 : 8);
2116 mpage = arr[off] & 0x3f;
2117 ps = !!(arr[off] & 0x80);
2119 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2120 return check_condition_result;
/* SPF bit selects the long (sub-page) page header format */
2122 spf = !!(arr[off] & 0x40);
2123 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2125 if ((pg_len + off) > param_len) {
2126 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2127 PARAMETER_LIST_LENGTH_ERR, 0);
2128 return check_condition_result;
2131 case 0x8: /* Caching Mode page */
/* page length byte must match our template before accepting it */
2132 if (caching_pg[1] == arr[off + 1]) {
2133 memcpy(caching_pg + 2, arr + off + 2,
2134 sizeof(caching_pg) - 2);
2135 goto set_mode_changed_ua;
2138 case 0xa: /* Control Mode page */
2139 if (ctrl_m_pg[1] == arr[off + 1]) {
2140 memcpy(ctrl_m_pg + 2, arr + off + 2,
2141 sizeof(ctrl_m_pg) - 2);
/* track D_SENSE (descriptor sense) bit from the new page */
2142 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2143 goto set_mode_changed_ua;
2146 case 0x1c: /* Informational Exceptions Mode page */
2147 if (iec_m_pg[1] == arr[off + 1]) {
2148 memcpy(iec_m_pg + 2, arr + off + 2,
2149 sizeof(iec_m_pg) - 2);
2150 goto set_mode_changed_ua;
2156 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2157 return check_condition_result;
2158 set_mode_changed_ua:
/* queue a unit attention so initiators learn the mode changed */
2159 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
/*
 * Build the Temperature log page (0xd) payload into arr.
 * Parameter 0x0 reports current temperature (38 C), parameter 0x1 the
 * reference temperature (65 C). Returns the number of bytes written.
 */
2163 static int resp_temp_l_pg(unsigned char * arr)
2165 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2166 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2169 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2170 return sizeof(temp_l_pg);
/*
 * Build the Informational Exceptions log page (0x2f) payload into arr.
 * If the TEST bit of the saved IEC mode page is set, report a
 * THRESHOLD EXCEEDED additional sense code so smartd-style pollers see
 * a simulated warning. Returns the number of bytes written.
 */
2173 static int resp_ie_l_pg(unsigned char * arr)
2175 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2178 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2179 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2180 arr[4] = THRESHOLD_EXCEEDED;
2183 return sizeof(ie_l_pg);
2186 #define SDEBUG_MAX_LSENSE_SZ 512
/*
 * LOG SENSE handler. Supports subpage 0 (Supported log pages,
 * Temperature 0xd, Informational Exceptions 0x2f) and subpage 0xff
 * (supported pages-and-subpages listings); anything else draws an
 * ILLEGAL REQUEST. Response is truncated to the CDB allocation length.
 */
2188 static int resp_log_sense(struct scsi_cmnd * scp,
2189 struct sdebug_dev_info * devip)
2191 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2192 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2193 unsigned char *cmd = scp->cmnd;
2195 memset(arr, 0, sizeof(arr));
/* PPC and SP bits are not supported; point sense at whichever is set */
2199 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2200 return check_condition_result;
2202 pcontrol = (cmd[2] & 0xc0) >> 6;
2203 pcode = cmd[2] & 0x3f;
2204 subpcode = cmd[3] & 0xff;
2205 alloc_len = get_unaligned_be16(cmd + 7);
2207 if (0 == subpcode) {
2209 case 0x0: /* Supported log pages log page */
2211 arr[n++] = 0x0; /* this page */
2212 arr[n++] = 0xd; /* Temperature */
2213 arr[n++] = 0x2f; /* Informational exceptions */
2216 case 0xd: /* Temperature log page */
2217 arr[3] = resp_temp_l_pg(arr + 4);
2219 case 0x2f: /* Informational exceptions log page */
2220 arr[3] = resp_ie_l_pg(arr + 4);
2223 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2224 return check_condition_result;
2226 } else if (0xff == subpcode) {
2230 case 0x0: /* Supported log pages and subpages log page */
2233 arr[n++] = 0x0; /* 0,0 page */
2235 arr[n++] = 0xff; /* this page */
2237 arr[n++] = 0x0; /* Temperature */
2239 arr[n++] = 0x0; /* Informational exceptions */
2242 case 0xd: /* Temperature subpages */
2245 arr[n++] = 0x0; /* Temperature */
2248 case 0x2f: /* Informational exceptions subpages */
2251 arr[n++] = 0x0; /* Informational exceptions */
2255 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2256 return check_condition_result;
2259 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2260 return check_condition_result;
/* clamp to both the allocation length and our buffer limit */
2262 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2263 return fill_from_dev_buffer(scp, arr,
2264 min(len, SDEBUG_MAX_INQ_ARR_SZ));
/*
 * Validate an LBA range against the simulated medium: the range must
 * lie within sdebug_capacity and the transfer length must not exceed
 * the backing store size. Returns 0 on success, else a CHECK CONDITION
 * result with appropriate sense already set on scp.
 */
2267 static int check_device_access_params(struct scsi_cmnd *scp,
2268 unsigned long long lba, unsigned int num)
2270 if (lba + num > sdebug_capacity) {
2271 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2272 return check_condition_result;
2274 /* transfer length excessive (tie in to block limits VPD page) */
2275 if (num > sdebug_store_sectors) {
2276 /* needs work to find which cdb byte 'num' comes from */
2277 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2278 return check_condition_result;
2283 /* Returns number of bytes copied or -1 if error. */
/*
 * Copy 'num' sectors between the command's scatter-gather list and the
 * fake_storep RAM backing store, starting at 'lba'. The store is
 * treated as a ring: lba is reduced modulo sdebug_store_sectors and a
 * transfer that runs past the end wraps around to the start ('rest'
 * sectors). Direction is chosen by do_write.
 */
2285 do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
2288 u64 block, rest = 0;
2289 struct scsi_data_buffer *sdb;
2290 enum dma_data_direction dir;
2293 sdb = scsi_out(scmd);
2294 dir = DMA_TO_DEVICE;
2296 sdb = scsi_in(scmd);
2297 dir = DMA_FROM_DEVICE;
/* reject if the command's data direction disagrees (bidi is allowed) */
2302 if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2305 block = do_div(lba, sdebug_store_sectors);
2306 if (block + num > sdebug_store_sectors)
2307 rest = block + num - sdebug_store_sectors;
2309 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2310 fake_storep + (block * sdebug_sector_size),
2311 (num - rest) * sdebug_sector_size, 0, do_write);
2312 if (ret != (num - rest) * sdebug_sector_size)
/* wrapped tail: copy the remaining sectors from the store's start */
2316 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2317 fake_storep, rest * sdebug_sector_size,
2318 (num - rest) * sdebug_sector_size, do_write);
2324 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2325 * arr into fake_store(lba,num) and return true. If comparison fails then
/*
 * COMPARE AND WRITE worker: arr holds 2*num blocks (compare data then
 * write data). Like do_device_access(), the store is addressed modulo
 * sdebug_store_sectors, so both the memcmp and the memcpy are split
 * into an in-range part and a wrapped 'rest' part.
 */
2328 comp_write_worker(u64 lba, u32 num, const u8 *arr)
2331 u64 block, rest = 0;
2332 u32 store_blks = sdebug_store_sectors;
2333 u32 lb_size = sdebug_sector_size;
2335 block = do_div(lba, store_blks);
2336 if (block + num > store_blks)
2337 rest = block + num - store_blks;
2339 res = !memcmp(fake_storep + (block * lb_size), arr,
2340 (num - rest) * lb_size);
2344 res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
/* compare matched: write the second half of arr into the store */
2348 arr += num * lb_size;
2349 memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2351 memcpy(fake_storep, arr + ((num - rest) * lb_size),
/*
 * Compute the DIF guard tag for a buffer: either an IP checksum or a
 * CRC-T10DIF, selected by a condition elided from this extraction
 * (presumably the sdebug_guard module parameter — TODO confirm).
 * Returns the big-endian guard value.
 */
2356 static __be16 dif_compute_csum(const void *buf, int len)
2361 csum = (__force __be16)ip_compute_csum(buf, len);
2363 csum = cpu_to_be16(crc_t10dif(buf, len));
/*
 * Verify one protection-information tuple against a sector of data:
 * guard tag (checksum of the data), and for Type 1 the reference tag
 * must equal the low 32 bits of the sector number, for Type 2 it must
 * equal the expected initial LBA (ei_lba). Returns 0 on success; on
 * failure logs the mismatch and returns a nonzero code (value elided
 * from this extraction).
 */
2368 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2369 sector_t sector, u32 ei_lba)
2371 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
2373 if (sdt->guard_tag != csum) {
2374 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2375 (unsigned long)sector,
2376 be16_to_cpu(sdt->guard_tag),
2380 if (sdebug_dif == SD_DIF_TYPE1_PROTECTION &&
2381 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2382 pr_err("REF check failed on sector %lu\n",
2383 (unsigned long)sector);
2386 if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
2387 be32_to_cpu(sdt->ref_tag) != ei_lba) {
2388 pr_err("REF check failed on sector %lu\n",
2389 (unsigned long)sector);
/*
 * Copy protection-information tuples between the command's protection
 * scatter-gather list and dif_storep, for 'sectors' sectors starting at
 * 'sector'. read=true copies store->sgl, read=false copies sgl->store.
 * Like the data store, dif_storep wraps at sdebug_store_sectors, so
 * each miter segment may be split into a direct part and a wrapped
 * 'rest' copied from/to the start of the store.
 */
2395 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2396 unsigned int sectors, bool read)
2400 const void *dif_store_end = dif_storep + sdebug_store_sectors;
2401 struct sg_mapping_iter miter;
2403 /* Bytes of protection data to copy into sgl */
2404 resid = sectors * sizeof(*dif_storep);
2406 sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2407 scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2408 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2410 while (sg_miter_next(&miter) && resid > 0) {
2411 size_t len = min(miter.length, resid);
2412 void *start = dif_store(sector);
/* split the copy if it would run off the end of dif_storep */
2415 if (dif_store_end < start + len)
2416 rest = start + len - dif_store_end;
2421 memcpy(paddr, start, len - rest);
2423 memcpy(start, paddr, len - rest);
2427 memcpy(paddr + len - rest, dif_storep, rest);
2429 memcpy(dif_storep, paddr + len - rest, rest);
2432 sector += len / sizeof(*dif_storep);
2435 sg_miter_stop(&miter);
/*
 * Verify protection information for a READ: check every sector's
 * stored DIF tuple with dif_verify(), skipping tuples whose app tag is
 * 0xffff (the "don't check" escape), then copy the tuples to the
 * command's protection sglist. Returns 0 on success or dif_verify()'s
 * error code on the first failing sector.
 */
2438 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2439 unsigned int sectors, u32 ei_lba)
2442 struct sd_dif_tuple *sdt;
2445 for (i = 0; i < sectors; i++, ei_lba++) {
2448 sector = start_sec + i;
2449 sdt = dif_store(sector);
2451 if (sdt->app_tag == cpu_to_be16(0xffff))
2454 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2461 dif_copy_prot(SCpnt, start_sec, sectors, true);
/*
 * READ command handler (READ 6/10/12/16/32 and the read half of
 * XDWRITEREAD(10)). Decodes lba/num per opcode, validates DIF/DIX
 * protection settings and the LBA range, optionally injects a medium
 * error or other fault (per sdebug_opts / per-command injection flags),
 * then copies data from the fake store under the atomic_rw read lock.
 */
2468 resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2470 u8 *cmd = scp->cmnd;
2474 unsigned long iflags;
2481 lba = get_unaligned_be64(cmd + 2);
2482 num = get_unaligned_be32(cmd + 10);
2487 lba = get_unaligned_be32(cmd + 2);
2488 num = get_unaligned_be16(cmd + 7);
/* READ(6): 21-bit LBA split across bytes 1..3; count 0 means 256 */
2493 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2494 (u32)(cmd[1] & 0x1f) << 16;
2495 num = (0 == cmd[4]) ? 256 : cmd[4];
2500 lba = get_unaligned_be32(cmd + 2);
2501 num = get_unaligned_be32(cmd + 6);
2504 case XDWRITEREAD_10:
2506 lba = get_unaligned_be32(cmd + 2);
2507 num = get_unaligned_be16(cmd + 7);
2510 default: /* assume READ(32) */
2511 lba = get_unaligned_be64(cmd + 12);
2512 ei_lba = get_unaligned_be32(cmd + 20);
2513 num = get_unaligned_be32(cmd + 28);
/* Type 2 protection requires the 32-byte CDB variants */
2518 if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
2520 mk_sense_invalid_opcode(scp);
2521 return check_condition_result;
2523 if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
2524 sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
2525 (cmd[1] & 0xe0) == 0)
2526 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2529 if (sdebug_any_injecting_opt) {
2530 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2536 /* inline check_device_access_params() */
2537 if (lba + num > sdebug_capacity) {
2538 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2539 return check_condition_result;
2541 /* transfer length excessive (tie in to block limits VPD page) */
2542 if (num > sdebug_store_sectors) {
2543 /* needs work to find which cdb byte 'num' comes from */
2544 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2545 return check_condition_result;
/* simulated unrecoverable read error on a fixed address window */
2548 if ((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
2549 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2550 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2551 /* claim unrecoverable read error */
2552 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2553 /* set info field and valid bit for fixed descriptor */
2554 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2555 scp->sense_buffer[0] |= 0x80; /* Valid bit */
2556 ret = (lba < OPT_MEDIUM_ERR_ADDR)
2557 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2558 put_unaligned_be32(ret, scp->sense_buffer + 3);
2560 scsi_set_resid(scp, scsi_bufflen(scp));
2561 return check_condition_result;
2564 read_lock_irqsave(&atomic_rw, iflags);
/* DIX: verify stored protection info before handing data back */
2567 if (sdebug_dix && scsi_prot_sg_count(scp)) {
2568 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2571 read_unlock_irqrestore(&atomic_rw, iflags);
2572 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2573 return illegal_condition_result;
2577 ret = do_device_access(scp, lba, num, false);
2578 read_unlock_irqrestore(&atomic_rw, iflags);
2580 return DID_ERROR << 16;
2582 scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
/* optional per-command fault injection after a successful read */
2584 if (sdebug_any_injecting_opt) {
2585 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2587 if (ep->inj_recovered) {
2588 mk_sense_buffer(scp, RECOVERED_ERROR,
2589 THRESHOLD_EXCEEDED, 0);
2590 return check_condition_result;
2591 } else if (ep->inj_transport) {
2592 mk_sense_buffer(scp, ABORTED_COMMAND,
2593 TRANSPORT_PROBLEM, ACK_NAK_TO);
2594 return check_condition_result;
2595 } else if (ep->inj_dif) {
2596 /* Logical block guard check failed */
2597 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2598 return illegal_condition_result;
2599 } else if (ep->inj_dix) {
2600 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2601 return illegal_condition_result;
/*
 * Debug helper: hex/ASCII dump a sector to the kernel log, 16 bytes
 * per line, printable characters shown as-is.
 */
2607 static void dump_sector(unsigned char *buf, int len)
2611 pr_err(">>> Sector Dump <<<\n");
2612 for (i = 0 ; i < len ; i += 16) {
2615 for (j = 0, n = 0; j < 16; j++) {
2616 unsigned char c = buf[i+j];
2618 if (c >= 0x20 && c < 0x7e)
2619 n += scnprintf(b + n, sizeof(b) - n,
2622 n += scnprintf(b + n, sizeof(b) - n,
2625 pr_err("%04d: %s\n", i, b);
/*
 * Verify protection information for a WRITE: walk the protection sglist
 * (piter) and the data sglist (diter) in lockstep, checking each DIF
 * tuple against its data sector with dif_verify(); on a mismatch the
 * offending sector is dumped and the error path (elided here) unwinds
 * both iterators. On success the tuples are saved into dif_storep.
 */
2629 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2630 unsigned int sectors, u32 ei_lba)
2633 struct sd_dif_tuple *sdt;
2635 sector_t sector = start_sec;
2638 struct sg_mapping_iter diter;
2639 struct sg_mapping_iter piter;
2641 BUG_ON(scsi_sg_count(SCpnt) == 0);
2642 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2644 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2645 scsi_prot_sg_count(SCpnt),
2646 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2647 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2648 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2650 /* For each protection page */
2651 while (sg_miter_next(&piter)) {
2653 if (WARN_ON(!sg_miter_next(&diter))) {
2658 for (ppage_offset = 0; ppage_offset < piter.length;
2659 ppage_offset += sizeof(struct sd_dif_tuple)) {
2660 /* If we're at the end of the current
2661 * data page advance to the next one
2663 if (dpage_offset >= diter.length) {
2664 if (WARN_ON(!sg_miter_next(&diter))) {
2671 sdt = piter.addr + ppage_offset;
2672 daddr = diter.addr + dpage_offset;
2674 ret = dif_verify(sdt, daddr, sector, ei_lba);
2676 dump_sector(daddr, sdebug_sector_size);
2682 dpage_offset += sdebug_sector_size;
2684 diter.consumed = dpage_offset;
2685 sg_miter_stop(&diter);
2687 sg_miter_stop(&piter);
/* all tuples verified: persist them alongside the data */
2689 dif_copy_prot(SCpnt, start_sec, sectors, false);
/* error path: stop both iterators before returning */
2696 sg_miter_stop(&diter);
2697 sg_miter_stop(&piter);
/*
 * Convert an LBA to its index in the provisioning bitmap (map_storep),
 * accounting for the unmap alignment offset before dividing by the
 * unmap granularity.
 */
2701 static unsigned long lba_to_map_index(sector_t lba)
2703 if (sdebug_unmap_alignment)
2704 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2705 sector_div(lba, sdebug_unmap_granularity);
/*
 * Inverse of lba_to_map_index(): first LBA covered by a provisioning
 * bitmap index, undoing the alignment offset.
 */
2709 static sector_t map_index_to_lba(unsigned long index)
2711 sector_t lba = index * sdebug_unmap_granularity;
2713 if (sdebug_unmap_alignment)
2714 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
/*
 * Report the mapped/unmapped state of the block containing 'lba' and,
 * via *num, the length of the contiguous run of blocks sharing that
 * state (found by scanning the bitmap for the next opposite bit,
 * clamped to the store size). Returns 1 if mapped, 0 if not.
 */
2718 static unsigned int map_state(sector_t lba, unsigned int *num)
2721 unsigned int mapped;
2722 unsigned long index;
2725 index = lba_to_map_index(lba);
2726 mapped = test_bit(index, map_storep);
2729 next = find_next_zero_bit(map_storep, map_size, index);
2731 next = find_next_bit(map_storep, map_size, index);
2733 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
/*
 * Mark every provisioning-granularity chunk touched by [lba, lba+len)
 * as mapped in map_storep. Called after writes when logical block
 * provisioning is enabled.
 */
2738 static void map_region(sector_t lba, unsigned int len)
2740 sector_t end = lba + len;
2743 unsigned long index = lba_to_map_index(lba);
2745 if (index < map_size)
2746 set_bit(index, map_storep);
2748 lba = map_index_to_lba(index + 1);
/*
 * Clear the mapped bit for each provisioning chunk fully contained in
 * [lba, lba+len) — partial chunks at the edges stay mapped. Depending
 * on configuration (conditions partly elided), the backing data is
 * zeroed and any protection info is set to 0xff (the DIF "unwritten"
 * escape pattern).
 */
2752 static void unmap_region(sector_t lba, unsigned int len)
2754 sector_t end = lba + len;
2757 unsigned long index = lba_to_map_index(lba);
/* only whole, aligned granularity units are actually unmapped */
2759 if (lba == map_index_to_lba(index) &&
2760 lba + sdebug_unmap_granularity <= end &&
2762 clear_bit(index, map_storep);
2764 memset(fake_storep +
2765 lba * sdebug_sector_size, 0,
2766 sdebug_sector_size *
2767 sdebug_unmap_granularity);
2770 memset(dif_storep + lba, 0xff,
2771 sizeof(*dif_storep) *
2772 sdebug_unmap_granularity);
2775 lba = map_index_to_lba(index + 1);
/*
 * WRITE command handler (WRITE 6/10/12/16/32 and the write half of
 * XDWRITEREAD(10)). Mirrors resp_read_dt0: decode lba/num per opcode,
 * enforce DIF/DIX rules, bounds-check, then copy data into the fake
 * store under the atomic_rw write lock, marking the region mapped when
 * logical block provisioning is active. Supports the same per-command
 * fault injection as the read path.
 */
2780 resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2782 u8 *cmd = scp->cmnd;
2786 unsigned long iflags;
2793 lba = get_unaligned_be64(cmd + 2);
2794 num = get_unaligned_be32(cmd + 10);
2799 lba = get_unaligned_be32(cmd + 2);
2800 num = get_unaligned_be16(cmd + 7);
/* WRITE(6): 21-bit LBA; transfer length 0 means 256 blocks */
2805 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2806 (u32)(cmd[1] & 0x1f) << 16;
2807 num = (0 == cmd[4]) ? 256 : cmd[4];
2812 lba = get_unaligned_be32(cmd + 2);
2813 num = get_unaligned_be32(cmd + 6);
2816 case 0x53: /* XDWRITEREAD(10) */
2818 lba = get_unaligned_be32(cmd + 2);
2819 num = get_unaligned_be16(cmd + 7);
2822 default: /* assume WRITE(32) */
2823 lba = get_unaligned_be64(cmd + 12);
2824 ei_lba = get_unaligned_be32(cmd + 20);
2825 num = get_unaligned_be32(cmd + 28);
2830 if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
2832 mk_sense_invalid_opcode(scp);
2833 return check_condition_result;
2835 if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
2836 sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
2837 (cmd[1] & 0xe0) == 0)
2838 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2842 /* inline check_device_access_params() */
2843 if (lba + num > sdebug_capacity) {
2844 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2845 return check_condition_result;
2847 /* transfer length excessive (tie in to block limits VPD page) */
2848 if (num > sdebug_store_sectors) {
2849 /* needs work to find which cdb byte 'num' comes from */
2850 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2851 return check_condition_result;
2854 write_lock_irqsave(&atomic_rw, iflags);
/* DIX: verify incoming protection info before committing the data */
2857 if (sdebug_dix && scsi_prot_sg_count(scp)) {
2858 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2861 write_unlock_irqrestore(&atomic_rw, iflags);
2862 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2863 return illegal_condition_result;
2867 ret = do_device_access(scp, lba, num, true);
2868 if (scsi_debug_lbp())
2869 map_region(lba, num);
2870 write_unlock_irqrestore(&atomic_rw, iflags);
2872 return DID_ERROR << 16;
2873 else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
2874 sdev_printk(KERN_INFO, scp->device,
2875 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2876 my_name, num * sdebug_sector_size, ret);
/* optional per-command fault injection after a successful write */
2878 if (sdebug_any_injecting_opt) {
2879 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2881 if (ep->inj_recovered) {
2882 mk_sense_buffer(scp, RECOVERED_ERROR,
2883 THRESHOLD_EXCEEDED, 0);
2884 return check_condition_result;
2885 } else if (ep->inj_dif) {
2886 /* Logical block guard check failed */
2887 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2888 return illegal_condition_result;
2889 } else if (ep->inj_dix) {
2890 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2891 return illegal_condition_result;
/*
 * Common WRITE SAME worker used by the 10- and 16-byte variants.
 * With UNMAP set (and LBP enabled) the range is deallocated; otherwise
 * one logical block is obtained — zeroed if NDOB, else fetched from the
 * data-out buffer — and replicated across all 'num' blocks, after which
 * the region is marked mapped. Runs under the atomic_rw write lock.
 */
2898 resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
2899 bool unmap, bool ndob)
2901 unsigned long iflags;
2902 unsigned long long i;
2906 ret = check_device_access_params(scp, lba, num);
2910 write_lock_irqsave(&atomic_rw, iflags);
2912 if (unmap && scsi_debug_lbp()) {
2913 unmap_region(lba, num);
2917 lba_off = lba * sdebug_sector_size;
2918 /* if ndob then zero 1 logical block, else fetch 1 logical block */
2920 memset(fake_storep + lba_off, 0, sdebug_sector_size);
2923 ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
2924 sdebug_sector_size);
2927 write_unlock_irqrestore(&atomic_rw, iflags);
2928 return DID_ERROR << 16;
2929 } else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
2930 sdev_printk(KERN_INFO, scp->device,
2931 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2932 my_name, "write same",
2933 num * sdebug_sector_size, ret);
2935 /* Copy first sector to remaining blocks */
2936 for (i = 1 ; i < num ; i++)
2937 memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
2938 fake_storep + lba_off,
2939 sdebug_sector_size);
2941 if (scsi_debug_lbp())
2942 map_region(lba, num);
2944 write_unlock_irqrestore(&atomic_rw, iflags);
/*
 * WRITE SAME(10): decode lba/num, reject UNMAP when the lbpws10
 * capability is off and transfer lengths above the advertised maximum,
 * then delegate to resp_write_same() (NDOB not available in the
 * 10-byte CDB).
 */
2950 resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2952 u8 *cmd = scp->cmnd;
2959 if (sdebug_lbpws10 == 0) {
2960 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2961 return check_condition_result;
2965 lba = get_unaligned_be32(cmd + 2);
2966 num = get_unaligned_be16(cmd + 7);
2967 if (num > sdebug_write_same_length) {
2968 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
2969 return check_condition_result;
2971 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
/*
 * WRITE SAME(16): like the 10-byte variant but with 64-bit LBA,
 * 32-bit count, and support for the NDOB bit (no data-out buffer:
 * blocks are written as zeroes). UNMAP requires the lbpws capability.
 */
2975 resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2977 u8 *cmd = scp->cmnd;
2984 if (cmd[1] & 0x8) { /* UNMAP */
2985 if (sdebug_lbpws == 0) {
2986 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2987 return check_condition_result;
2991 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
2993 lba = get_unaligned_be64(cmd + 2);
2994 num = get_unaligned_be32(cmd + 10);
2995 if (num > sdebug_write_same_length) {
2996 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
2997 return check_condition_result;
2999 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3002 /* Note the mode field is in the same position as the (lower) service action
3003 * field. For the Report supported operation codes command, SPC-4 suggests
3004 * each mode of this command should be reported separately; for future. */
/*
 * WRITE BUFFER handler. The only observable effect simulated is the
 * unit attention(s) that a real microcode download would raise: modes
 * 4/5 set UAs on this LU only; modes 6/7 walk the host's device list
 * and set UAs on the other LUs of the same target. Other modes are
 * accepted and ignored.
 */
3006 resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3008 u8 *cmd = scp->cmnd;
3009 struct scsi_device *sdp = scp->device;
3010 struct sdebug_dev_info *dp;
3013 mode = cmd[1] & 0x1f;
3015 case 0x4: /* download microcode (MC) and activate (ACT) */
3016 /* set UAs on this device only */
3017 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3018 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3020 case 0x5: /* download MC, save and ACT */
3021 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3023 case 0x6: /* download MC with offsets and ACT */
3024 /* set UAs on most devices (LUs) in this target */
3025 list_for_each_entry(dp,
3026 &devip->sdbg_host->dev_info_list,
3028 if (dp->target == sdp->id) {
3029 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3031 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3035 case 0x7: /* download MC with offsets, save, and ACT */
3036 /* set UA on all devices (LUs) in this target */
3037 list_for_each_entry(dp,
3038 &devip->sdbg_host->dev_info_list,
3040 if (dp->target == sdp->id)
3041 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3045 /* do nothing for this command for other mode values */
/*
 * COMPARE AND WRITE handler. The data-out buffer holds 2*num blocks:
 * the compare data followed by the write data. To fetch both halves it
 * temporarily repoints fake_storep at a scratch buffer and reuses
 * do_device_access() — safe only because the atomic_rw write lock is
 * held throughout. comp_write_worker() then performs the atomic
 * compare-and-write against the real store; a mismatch yields
 * MISCOMPARE sense.
 */
3052 resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3054 u8 *cmd = scp->cmnd;
3056 u8 *fake_storep_hold;
3059 u32 lb_size = sdebug_sector_size;
3061 unsigned long iflags;
3065 lba = get_unaligned_be64(cmd + 2);
3066 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3068 return 0; /* degenerate case, not an error */
3069 if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
3071 mk_sense_invalid_opcode(scp);
3072 return check_condition_result;
3074 if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
3075 sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
3076 (cmd[1] & 0xe0) == 0)
3077 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3080 /* inline check_device_access_params() */
3081 if (lba + num > sdebug_capacity) {
3082 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3083 return check_condition_result;
3085 /* transfer length excessive (tie in to block limits VPD page) */
3086 if (num > sdebug_store_sectors) {
3087 /* needs work to find which cdb byte 'num' comes from */
3088 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3089 return check_condition_result;
/* scratch buffer for both compare and write halves (dnum blocks) */
3092 arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3094 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3096 return check_condition_result;
3099 write_lock_irqsave(&atomic_rw, iflags);
3101 /* trick do_device_access() to fetch both compare and write buffers
3102 * from data-in into arr. Safe (atomic) since write_lock held. */
3103 fake_storep_hold = fake_storep;
3105 ret = do_device_access(scp, 0, dnum, true);
3106 fake_storep = fake_storep_hold;
3108 retval = DID_ERROR << 16;
3110 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3111 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3112 "indicated=%u, IO sent=%d bytes\n", my_name,
3113 dnum * lb_size, ret);
3114 if (!comp_write_worker(lba, num, arr)) {
3115 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3116 retval = check_condition_result;
3119 if (scsi_debug_lbp())
3120 map_region(lba, num);
3122 write_unlock_irqrestore(&atomic_rw, iflags);
3127 struct unmap_block_desc {
/*
 * UNMAP handler. Copies the parameter list out of the command buffer,
 * sanity-checks the header lengths (BUG_ON — this is a test driver),
 * caps the descriptor count at sdebug_unmap_max_desc, then unmaps each
 * (lba, blocks) descriptor under the atomic_rw write lock. A no-LBP
 * configuration simply claims success.
 */
3134 resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3137 struct unmap_block_desc *desc;
3138 unsigned int i, payload_len, descriptors;
3140 unsigned long iflags;
3143 if (!scsi_debug_lbp())
3144 return 0; /* fib and say its done */
3145 payload_len = get_unaligned_be16(scp->cmnd + 7);
3146 BUG_ON(scsi_bufflen(scp) != payload_len);
/* 8-byte parameter header, 16 bytes per block descriptor */
3148 descriptors = (payload_len - 8) / 16;
3149 if (descriptors > sdebug_unmap_max_desc) {
3150 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3151 return check_condition_result;
3154 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3156 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3158 return check_condition_result;
3161 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3163 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3164 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3166 desc = (void *)&buf[8];
3168 write_lock_irqsave(&atomic_rw, iflags);
3170 for (i = 0 ; i < descriptors ; i++) {
3171 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3172 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3174 ret = check_device_access_params(scp, lba, num);
3178 unmap_region(lba, num);
3184 write_unlock_irqrestore(&atomic_rw, iflags);
3190 #define SDEBUG_GET_LBA_STATUS_LEN 32
/*
 * GET LBA STATUS handler. Returns a single LBA status descriptor for
 * the run of blocks starting at the requested LBA: with LBP enabled the
 * mapped/deallocated state and run length come from the provisioning
 * bitmap (map_state()); otherwise everything up to capacity is reported
 * as mapped.
 */
3193 resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3195 u8 *cmd = scp->cmnd;
3197 u32 alloc_len, mapped, num;
3198 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3201 lba = get_unaligned_be64(cmd + 2);
3202 alloc_len = get_unaligned_be32(cmd + 10);
3207 ret = check_device_access_params(scp, lba, 1);
3211 if (scsi_debug_lbp())
3212 mapped = map_state(lba, &num);
3215 /* following just in case virtual_gb changed */
3216 sdebug_capacity = get_sdebug_capacity();
3217 if (sdebug_capacity - lba <= 0xffffffff)
3218 num = sdebug_capacity - lba;
3223 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3224 put_unaligned_be32(20, arr); /* Parameter Data Length */
3225 put_unaligned_be64(lba, arr + 8); /* LBA */
3226 put_unaligned_be32(num, arr + 16); /* Number of blocks */
3227 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
3229 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3232 #define SDEBUG_RLUN_ARR_SZ 256
/*
 * REPORT LUNS handler. Builds a LUN list for this target using the
 * SAM-2 flat addressing method, honoring sdebug_no_lun_0 (start at
 * LUN 1) and select_report (1 = well-known LUNs only; >0 also appends
 * the REPORT LUNS well-known LUN). Output is capped by both the CDB
 * allocation length and the fixed SDEBUG_RLUN_ARR_SZ buffer.
 */
3234 static int resp_report_luns(struct scsi_cmnd * scp,
3235 struct sdebug_dev_info * devip)
3237 unsigned int alloc_len;
3238 int lun_cnt, i, upper, num, n, want_wlun, shortish;
3240 unsigned char *cmd = scp->cmnd;
3241 int select_report = (int)cmd[2];
3242 struct scsi_lun *one_lun;
3243 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
3244 unsigned char * max_addr;
/* a pending "LUNs changed" UA is consumed by reporting the LUNs */
3246 clear_luns_changed_on_target(devip);
3247 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
3248 shortish = (alloc_len < 4);
3249 if (shortish || (select_report > 2)) {
3250 mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
3251 return check_condition_result;
3253 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
3254 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
3255 lun_cnt = sdebug_max_luns;
3256 if (1 == select_report)
3258 else if (sdebug_no_lun_0 && (lun_cnt > 0))
3260 want_wlun = (select_report > 0) ? 1 : 0;
3261 num = lun_cnt + want_wlun;
/* LUN list length, in bytes, goes in the response header */
3262 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
3263 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
3264 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
3265 sizeof(struct scsi_lun)), num);
3270 one_lun = (struct scsi_lun *) &arr[8];
3271 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
3272 for (i = 0, lun = (sdebug_no_lun_0 ? 1 : 0);
3273 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
3275 upper = (lun >> 8) & 0x3f;
3277 one_lun[i].scsi_lun[0] =
3278 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
3279 one_lun[i].scsi_lun[1] = lun & 0xff;
/* append the REPORT LUNS well-known LUN when requested */
3282 one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff;
3283 one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff;
3286 alloc_len = (unsigned char *)(one_lun + i) - arr;
3287 return fill_from_dev_buffer(scp, arr,
3288 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
/*
 * XOR step of XDWRITEREAD(10): copy the data-out buffer into a
 * temporary, then XOR it element-by-element into the data-in
 * scatter-gather list (which already holds the read-back data).
 */
3291 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3292 unsigned int num, struct sdebug_dev_info *devip)
3295 unsigned char *kaddr, *buf;
3296 unsigned int offset;
3297 struct scsi_data_buffer *sdb = scsi_in(scp);
3298 struct sg_mapping_iter miter;
3300 /* better not to use temporary buffer. */
3301 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3303 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3305 return check_condition_result;
3308 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3311 sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3312 SG_MITER_ATOMIC | SG_MITER_TO_SG);
3314 while (sg_miter_next(&miter)) {
3316 for (j = 0; j < miter.length; j++)
3317 *(kaddr + j) ^= *(buf + offset + j);
3319 offset += miter.length;
3321 sg_miter_stop(&miter);
/*
 * XDWRITEREAD(10) entry point: requires a bidirectional command, then
 * performs read, optional write (unless DISABLE_WRITE is set in the
 * CDB), and finally the XOR pass via resp_xdwriteread().
 */
3328 resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3330 u8 *cmd = scp->cmnd;
3335 if (!scsi_bidi_cmnd(scp)) {
3336 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3338 return check_condition_result;
3340 errsts = resp_read_dt0(scp, devip);
3343 if (!(cmd[1] & 0x4)) { /* DISABLE_WRITE is not set */
3344 errsts = resp_write_dt0(scp, devip);
3348 lba = get_unaligned_be32(cmd + 2);
3349 num = get_unaligned_be16(cmd + 7);
3350 return resp_xdwriteread(scp, lba, num, devip);
3353 /* When timer or tasklet goes off this function is called. */
/*
 * Deferred-completion callback (timer/tasklet path). Looks up the
 * queued command by index, validates it, drops the per-device in-flight
 * count, clears the in-use bit, shrinks retired_max_queue if the user
 * lowered max_queue, and finally invokes the mid-layer's scsi_done().
 */
3354 static void sdebug_q_cmd_complete(unsigned long indx)
3358 unsigned long iflags;
3359 struct sdebug_queued_cmd *sqcp;
3360 struct scsi_cmnd *scp;
3361 struct sdebug_dev_info *devip;
3363 atomic_inc(&sdebug_completions);
3365 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3366 pr_err("wild qa_indx=%d\n", qa_indx);
3369 spin_lock_irqsave(&queued_arr_lock, iflags);
3370 sqcp = &queued_arr[qa_indx];
3373 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3374 pr_err("scp is NULL\n");
3377 devip = (struct sdebug_dev_info *)scp->device->hostdata;
3379 atomic_dec(&devip->num_in_q);
3381 pr_err("devip=NULL\n");
3382 if (atomic_read(&retired_max_queue) > 0)
3385 sqcp->a_cmnd = NULL;
3386 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3387 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3388 pr_err("Unexpected completion\n");
3392 if (unlikely(retiring)) { /* user has reduced max_queue */
3395 retval = atomic_read(&retired_max_queue);
3396 if (qa_indx >= retval) {
3397 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3398 pr_err("index %d too large\n", retval);
/* shrink retired_max_queue down to the highest in-use slot */
3401 k = find_last_bit(queued_in_use_bm, retval);
3402 if ((k < sdebug_max_queue) || (k == retval))
3403 atomic_set(&retired_max_queue, 0);
3405 atomic_set(&retired_max_queue, k + 1);
3407 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3408 scp->scsi_done(scp); /* callback to mid level */
3411 /* When high resolution timer goes off this function is called. */
/*
 * High-resolution-timer completion callback. Logic mirrors
 * sdebug_q_cmd_complete() — validate the queue slot, release it,
 * maintain retired_max_queue, call scsi_done() — but the slot index
 * comes from the embedding sdebug_hrtimer and HRTIMER_NORESTART is
 * returned so the timer does not re-arm.
 */
3412 static enum hrtimer_restart
3413 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3417 unsigned long iflags;
3418 struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
3419 struct sdebug_queued_cmd *sqcp;
3420 struct scsi_cmnd *scp;
3421 struct sdebug_dev_info *devip;
3423 atomic_inc(&sdebug_completions);
3424 qa_indx = sd_hrtp->qa_indx;
3425 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3426 pr_err("wild qa_indx=%d\n", qa_indx);
3429 spin_lock_irqsave(&queued_arr_lock, iflags);
3430 sqcp = &queued_arr[qa_indx];
3433 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3434 pr_err("scp is NULL\n");
3437 devip = (struct sdebug_dev_info *)scp->device->hostdata;
3439 atomic_dec(&devip->num_in_q);
3441 pr_err("devip=NULL\n");
3442 if (atomic_read(&retired_max_queue) > 0)
3445 sqcp->a_cmnd = NULL;
3446 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3447 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3448 pr_err("Unexpected completion\n");
3452 if (unlikely(retiring)) { /* user has reduced max_queue */
3455 retval = atomic_read(&retired_max_queue);
3456 if (qa_indx >= retval) {
3457 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3458 pr_err("index %d too large\n", retval);
/* shrink retired_max_queue down to the highest in-use slot */
3461 k = find_last_bit(queued_in_use_bm, retval);
3462 if ((k < sdebug_max_queue) || (k == retval))
3463 atomic_set(&retired_max_queue, 0);
3465 atomic_set(&retired_max_queue, k + 1);
3467 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3468 scp->scsi_done(scp); /* callback to mid level */
3470 return HRTIMER_NORESTART;
/*
 * sdebug_device_create - allocate a zeroed per-LUN sdebug_dev_info and
 * link it onto the owning pseudo-host's dev_info_list.  (The kzalloc
 * NULL check and return statement are elided in this excerpt.)
 */
3473 static struct sdebug_dev_info *
3474 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
3476 struct sdebug_dev_info *devip;
3478 devip = kzalloc(sizeof(*devip), flags);
3480 devip->sdbg_host = sdbg_host;
3481 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
/*
 * devInfoReg - find (or lazily create) the sdebug_dev_info matching a
 * scsi_device's <channel, target, lun>.  A previously used-but-freed
 * entry is preferred for re-use; otherwise a fresh one is allocated
 * with GFP_ATOMIC.  The chosen entry is (re)initialised, marked used,
 * and gets a power-on unit attention queued.  (Early-return paths are
 * elided in this excerpt.)
 */
3486 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
3488 struct sdebug_host_info * sdbg_host;
3489 struct sdebug_dev_info * open_devip = NULL;
3490 struct sdebug_dev_info * devip =
3491 (struct sdebug_dev_info *)sdev->hostdata;
/* host private data holds a pointer to our sdebug_host_info */
3495 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3497 pr_err("Host info NULL\n");
3500 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3501 if ((devip->used) && (devip->channel == sdev->channel) &&
3502 (devip->target == sdev->id) &&
3503 (devip->lun == sdev->lun))
/* remember the first unused slot as a re-use candidate */
3506 if ((!devip->used) && (!open_devip))
3510 if (!open_devip) { /* try and make a new one */
3511 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3513 pr_err("out of memory at line %d\n", __LINE__);
3518 open_devip->channel = sdev->channel;
3519 open_devip->target = sdev->id;
3520 open_devip->lun = sdev->lun;
3521 open_devip->sdbg_host = sdbg_host;
3522 atomic_set(&open_devip->num_in_q, 0);
/* fresh (re)binding: report power-on/reset unit attention first */
3523 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3524 open_devip->used = true;
/*
 * scsi_debug_slave_alloc - mid-layer hook at device discovery time;
 * enables bidirectional commands on the device's request queue.
 */
3528 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3531 pr_info("slave_alloc <%u %u %u %llu>\n",
3532 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3533 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
/*
 * scsi_debug_slave_configure - bind a sdebug_dev_info to the device
 * via devInfoReg() and tune host/queue limits.  Returning 1 tells the
 * mid layer to mark the device offline (no per-device state).
 */
3537 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3539 struct sdebug_dev_info *devip;
3542 pr_info("slave_configure <%u %u %u %llu>\n",
3543 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3544 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3545 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
3546 devip = devInfoReg(sdp);
3548 return 1; /* no resources, will be marked offline */
3549 sdp->hostdata = devip;
3550 blk_queue_max_segment_size(sdp->request_queue, -1U);
/* optionally stop upper-level drivers (e.g. sd) from attaching */
3552 sdp->no_uld_attach = 1;
/*
 * scsi_debug_slave_destroy - release the device's claim on its
 * sdebug_dev_info so the slot can be re-used by a later device.
 */
3556 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3558 struct sdebug_dev_info *devip =
3559 (struct sdebug_dev_info *)sdp->hostdata;
3562 pr_info("slave_destroy <%u %u %u %llu>\n",
3563 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3565 /* make this slot available for re-use */
3566 devip->used = false;
3567 sdp->hostdata = NULL;
3571 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
/*
 * stop_queued_cmnd - cancel the deferred completion of one command.
 * Scans queued_arr[] under queued_arr_lock for a slot whose a_cmnd
 * matches, undoes its in-flight accounting, then (with the lock
 * dropped) cancels whichever mechanism was armed: hrtimer (ndelay>0),
 * timer (delay>0) or tasklet (delay<0).  Some cancel calls are elided
 * in this excerpt.
 */
3572 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
3574 unsigned long iflags;
3575 int k, qmax, r_qmax;
3576 struct sdebug_queued_cmd *sqcp;
3577 struct sdebug_dev_info *devip;
3579 spin_lock_irqsave(&queued_arr_lock, iflags);
3580 qmax = sdebug_max_queue;
/* also consider the retired region if max_queue was recently reduced */
3581 r_qmax = atomic_read(&retired_max_queue);
3584 for (k = 0; k < qmax; ++k) {
3585 if (test_bit(k, queued_in_use_bm)) {
3586 sqcp = &queued_arr[k];
3587 if (cmnd == sqcp->a_cmnd) {
3588 devip = (struct sdebug_dev_info *)
3589 cmnd->device->hostdata;
3591 atomic_dec(&devip->num_in_q);
3592 sqcp->a_cmnd = NULL;
/* drop the lock before cancelling; callback may be running */
3593 spin_unlock_irqrestore(&queued_arr_lock,
3595 if (sdebug_ndelay > 0) {
3598 &sqcp->sd_hrtp->hrt);
3599 } else if (sdebug_delay > 0) {
3600 if (sqcp->cmnd_timerp)
3603 } else if (sdebug_delay < 0) {
3605 tasklet_kill(sqcp->tletp);
3607 clear_bit(k, queued_in_use_bm);
3612 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3616 /* Deletes (stops) timers or tasklets of all queued commands */
/*
 * stop_all_queued - cancel every pending deferred completion.  For each
 * busy slot the lock is dropped while the timer/hrtimer/tasklet is
 * cancelled, then re-taken to continue the scan (hence the re-lock at
 * the bottom of the loop body).  Some cancel calls are elided in this
 * excerpt.
 */
3617 static void stop_all_queued(void)
3619 unsigned long iflags;
3621 struct sdebug_queued_cmd *sqcp;
3622 struct sdebug_dev_info *devip;
3624 spin_lock_irqsave(&queued_arr_lock, iflags);
3625 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3626 if (test_bit(k, queued_in_use_bm)) {
3627 sqcp = &queued_arr[k];
3629 devip = (struct sdebug_dev_info *)
3630 sqcp->a_cmnd->device->hostdata;
3632 atomic_dec(&devip->num_in_q);
3633 sqcp->a_cmnd = NULL;
/* cancel callbacks with the lock dropped; they may be running now */
3634 spin_unlock_irqrestore(&queued_arr_lock,
3636 if (sdebug_ndelay > 0) {
3639 &sqcp->sd_hrtp->hrt);
3640 } else if (sdebug_delay > 0) {
3641 if (sqcp->cmnd_timerp)
3644 } else if (sdebug_delay < 0) {
3646 tasklet_kill(sqcp->tletp);
3648 clear_bit(k, queued_in_use_bm);
/* re-acquire before the next bitmap probe */
3649 spin_lock_irqsave(&queued_arr_lock, iflags);
3653 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3656 /* Free queued command memory on heap */
/*
 * free_all_queued - teardown helper: release the heap-allocated timer,
 * hrtimer and tasklet objects of every queue slot.  Presumably called
 * after stop_all_queued() so none of them is still armed — TODO
 * confirm against the (not visible) caller.
 */
3657 static void free_all_queued(void)
3659 unsigned long iflags;
3661 struct sdebug_queued_cmd *sqcp;
3663 spin_lock_irqsave(&queued_arr_lock, iflags);
3664 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3665 sqcp = &queued_arr[k];
3666 kfree(sqcp->cmnd_timerp);
3667 sqcp->cmnd_timerp = NULL;
3670 kfree(sqcp->sd_hrtp);
3671 sqcp->sd_hrtp = NULL;
3673 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/*
 * scsi_debug_abort - SCSI EH abort handler: cancel the command's
 * deferred completion if it is still queued.
 */
3676 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3680 if (SCpnt->device &&
3681 (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3682 sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
3684 stop_queued_cmnd(SCpnt);
/*
 * scsi_debug_device_reset - SCSI EH LU reset handler: queue a
 * power-on/reset unit attention on the device's sdebug_dev_info.
 */
3689 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3691 struct sdebug_dev_info * devip;
3694 if (SCpnt && SCpnt->device) {
3695 struct scsi_device *sdp = SCpnt->device;
3697 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3698 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3699 devip = devInfoReg(sdp);
3701 set_bit(SDEBUG_UA_POR, devip->uas_bm);
/*
 * scsi_debug_target_reset - SCSI EH target reset handler: raise a
 * bus-reset unit attention on every LU sharing the command's target
 * id.  (Guard clauses and the host lookup are elided here.)
 */
3706 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3708 struct sdebug_host_info *sdbg_host;
3709 struct sdebug_dev_info *devip;
3710 struct scsi_device *sdp;
3711 struct Scsi_Host *hp;
3714 ++num_target_resets;
3717 sdp = SCpnt->device;
3720 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3721 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3725 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3727 list_for_each_entry(devip,
3728 &sdbg_host->dev_info_list,
3730 if (devip->target == sdp->id) {
3731 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3735 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3736 sdev_printk(KERN_INFO, sdp,
3737 "%s: %d device(s) found in target\n", __func__, k);
/*
 * scsi_debug_bus_reset - SCSI EH bus reset handler: raise a bus-reset
 * unit attention on every LU of the command's pseudo-host.
 */
3742 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3744 struct sdebug_host_info *sdbg_host;
3745 struct sdebug_dev_info *devip;
3746 struct scsi_device * sdp;
3747 struct Scsi_Host * hp;
3751 if (!(SCpnt && SCpnt->device))
3753 sdp = SCpnt->device;
3754 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3755 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3758 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3760 list_for_each_entry(devip,
3761 &sdbg_host->dev_info_list,
3763 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3768 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3769 sdev_printk(KERN_INFO, sdp,
3770 "%s: %d device(s) found in host\n", __func__, k);
/*
 * scsi_debug_host_reset - SCSI EH host reset handler: walk every
 * simulated host (under sdebug_host_list_lock) and flag a bus-reset
 * unit attention on all of their devices.
 */
3775 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3777 struct sdebug_host_info * sdbg_host;
3778 struct sdebug_dev_info *devip;
3782 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3783 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3784 spin_lock(&sdebug_host_list_lock);
3785 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3786 list_for_each_entry(devip, &sdbg_host->dev_info_list,
3788 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3792 spin_unlock(&sdebug_host_list_lock);
3794 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3795 sdev_printk(KERN_INFO, SCpnt->device,
3796 "%s: %d device(s) found\n", __func__, k);
/*
 * sdebug_build_parts - write an MS-DOS style partition table into the
 * first sector of the simulated ram store.  Partitions are laid out on
 * "cylinder" boundaries (heads * sectors-per-track), both CHS and LBA
 * fields are filled in, and each entry is typed 0x83 (Linux).  No-op
 * for stores under 1 MiB or when no partitions were requested.
 */
3800 static void __init sdebug_build_parts(unsigned char *ramp,
3801 unsigned long store_size)
3803 struct partition * pp;
3804 int starts[SDEBUG_MAX_PARTS + 2];
3805 int sectors_per_part, num_sectors, k;
3806 int heads_by_sects, start_sec, end_sec;
3808 /* assume partition table already zeroed */
3809 if ((sdebug_num_parts < 1) || (store_size < 1048576))
3811 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
3812 sdebug_num_parts = SDEBUG_MAX_PARTS;
3813 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
3815 num_sectors = (int)sdebug_store_sectors;
/* first track is reserved for the partition table itself */
3816 sectors_per_part = (num_sectors - sdebug_sectors_per)
3818 heads_by_sects = sdebug_heads * sdebug_sectors_per;
3819 starts[0] = sdebug_sectors_per;
/* start each partition on a cylinder boundary; 0 terminates the list */
3820 for (k = 1; k < sdebug_num_parts; ++k)
3821 starts[k] = ((k * sectors_per_part) / heads_by_sects)
3823 starts[sdebug_num_parts] = num_sectors;
3824 starts[sdebug_num_parts + 1] = 0;
3826 ramp[510] = 0x55; /* magic partition markings */
/* partition entries begin at offset 0x1be of the MBR */
3828 pp = (struct partition *)(ramp + 0x1be);
3829 for (k = 0; starts[k + 1]; ++k, ++pp) {
3830 start_sec = starts[k];
3831 end_sec = starts[k + 1] - 1;
/* CHS address of the partition's first sector (sector is 1-based) */
3834 pp->cyl = start_sec / heads_by_sects;
3835 pp->head = (start_sec - (pp->cyl * heads_by_sects))
3836 / sdebug_sectors_per;
3837 pp->sector = (start_sec % sdebug_sectors_per) + 1;
/* CHS address of the partition's last sector */
3839 pp->end_cyl = end_sec / heads_by_sects;
3840 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3841 / sdebug_sectors_per;
3842 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
/* LBA start/length, stored little-endian on disk */
3844 pp->start_sect = cpu_to_le32(start_sec);
3845 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3846 pp->sys_ind = 0x83; /* plain Linux partition */
/*
 * schedule_resp - arrange delivery of a command's response.
 *   delta_jiff == 0 : respond inline in the caller's thread;
 *   delta_jiff  > 0 : arm a kernel timer for that many jiffies;
 *   otherwise ndelay > 0 selects an hrtimer, and a negative delay a
 *   tasklet (hi-priority when delta_jiff == -1).
 * Also simulates TASK SET FULL / host-busy when the per-device queue
 * or the global queued_arr[] is full.  Returns 0 on success or
 * SCSI_MLQUEUE_HOST_BUSY.  NOTE(review): several lines are elided in
 * this excerpt (return type line, 'inject' initialisation, some
 * else-branches), so comments cover only what is visible.
 */
3851 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3852 int scsi_result, int delta_jiff)
3854 unsigned long iflags;
3855 int k, num_in_q, qdepth, inject;
3856 struct sdebug_queued_cmd *sqcp = NULL;
3857 struct scsi_device *sdp;
3859 /* this should never happen */
3861 return SCSI_MLQUEUE_HOST_BUSY;
3863 if (NULL == devip) {
3864 pr_warn("called devip == NULL\n");
3865 /* no particularly good error to report back */
3866 return SCSI_MLQUEUE_HOST_BUSY;
3871 if (sdebug_verbose && scsi_result)
3872 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3873 __func__, scsi_result);
/* immediate mode: complete in the submitting thread */
3874 if (delta_jiff == 0)
3875 goto respond_in_thread;
3877 /* schedule the response at a later time if resources permit */
3878 spin_lock_irqsave(&queued_arr_lock, iflags);
3879 num_in_q = atomic_read(&devip->num_in_q);
3880 qdepth = cmnd->device->queue_depth;
/* per-device queue full -> TASK SET FULL (or inline response) */
3882 if ((qdepth > 0) && (num_in_q >= qdepth)) {
3884 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3885 goto respond_in_thread;
3887 scsi_result = device_qfull_result;
/* optional rare TASK SET FULL injection on every_nth commands */
3888 } else if ((sdebug_every_nth != 0) &&
3889 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
3890 (scsi_result == 0)) {
3891 if ((num_in_q == (qdepth - 1)) &&
3892 (atomic_inc_return(&sdebug_a_tsf) >=
3893 abs(sdebug_every_nth))) {
3894 atomic_set(&sdebug_a_tsf, 0);
3896 scsi_result = device_qfull_result;
/* claim a free slot in the global queued command array */
3900 k = find_first_zero_bit(queued_in_use_bm, sdebug_max_queue);
3901 if (k >= sdebug_max_queue) {
3902 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3904 goto respond_in_thread;
3905 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
3906 scsi_result = device_qfull_result;
3907 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
3908 sdev_printk(KERN_INFO, sdp,
3909 "%s: max_queue=%d exceeded, %s\n",
3910 __func__, sdebug_max_queue,
3911 (scsi_result ? "status: TASK SET FULL" :
3912 "report: host busy"));
3914 goto respond_in_thread;
3916 return SCSI_MLQUEUE_HOST_BUSY;
3918 __set_bit(k, queued_in_use_bm);
3919 atomic_inc(&devip->num_in_q);
3920 sqcp = &queued_arr[k];
3921 sqcp->a_cmnd = cmnd;
3922 cmnd->result = scsi_result;
3923 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/* jiffies-granular delay: lazily allocate a timer for this slot */
3924 if (delta_jiff > 0) {
3925 if (NULL == sqcp->cmnd_timerp) {
3926 sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
3928 if (NULL == sqcp->cmnd_timerp)
3929 return SCSI_MLQUEUE_HOST_BUSY;
3930 init_timer(sqcp->cmnd_timerp);
3932 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
3933 sqcp->cmnd_timerp->data = k;
3934 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
3935 add_timer(sqcp->cmnd_timerp);
/* nanosecond-granular delay: lazily allocate an hrtimer */
3936 } else if (sdebug_ndelay > 0) {
3937 ktime_t kt = ktime_set(0, sdebug_ndelay);
3938 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
3940 if (NULL == sd_hp) {
3941 sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
3943 return SCSI_MLQUEUE_HOST_BUSY;
3944 sqcp->sd_hrtp = sd_hp;
3945 hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
3947 sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
3950 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
3951 } else { /* delay < 0 */
3952 if (NULL == sqcp->tletp) {
3953 sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
3955 if (NULL == sqcp->tletp)
3956 return SCSI_MLQUEUE_HOST_BUSY;
3957 tasklet_init(sqcp->tletp,
3958 sdebug_q_cmd_complete, k);
/* -1 requests the high-priority tasklet queue */
3960 if (-1 == delta_jiff)
3961 tasklet_hi_schedule(sqcp->tletp);
3963 tasklet_schedule(sqcp->tletp);
3965 if ((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
3966 (scsi_result == device_qfull_result))
3967 sdev_printk(KERN_INFO, sdp,
3968 "%s: num_in_q=%d +1, %s%s\n", __func__,
3969 num_in_q, (inject ? "<inject> " : ""),
3970 "status: TASK SET FULL");
3973 respond_in_thread: /* call back to mid-layer using invocation thread */
3974 cmnd->result = scsi_result;
3975 cmnd->scsi_done(cmnd);
3979 /* Note: The following macros create attribute files in the
3980 /sys/module/scsi_debug/parameters directory. Unfortunately this
3981 driver is unaware of a change and cannot trigger auxiliary actions
3982 as it can when the corresponding attribute in the
3983 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
3985 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
3986 module_param_named(ato, sdebug_ato, int, S_IRUGO);
3987 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
3988 module_param_named(delay, sdebug_delay, int, S_IRUGO | S_IWUSR);
3989 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
3990 module_param_named(dif, sdebug_dif, int, S_IRUGO);
3991 module_param_named(dix, sdebug_dix, int, S_IRUGO);
3992 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
3993 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
3994 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
3995 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
3996 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
3997 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
3998 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
3999 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
4000 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
4001 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
4002 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
4003 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
4004 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
4005 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
4006 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
4007 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
4008 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
4009 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
4010 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
4011 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
4012 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
4013 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
4014 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
4015 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
4016 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
4017 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
4018 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
4019 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
4020 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
4021 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
4022 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
4024 module_param_named(write_same_length, sdebug_write_same_length, int,
4027 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4028 MODULE_DESCRIPTION("SCSI debug adapter driver");
4029 MODULE_LICENSE("GPL");
4030 MODULE_VERSION(SCSI_DEBUG_VERSION);
4032 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4033 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4034 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4035 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4036 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4037 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4038 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4039 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4040 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4041 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4042 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4043 MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
4044 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4045 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4046 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4047 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
4048 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4049 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
4050 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4051 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4052 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4053 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
4054 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
4055 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4056 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
4057 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4058 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4059 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4060 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4061 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
4062 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4063 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4064 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4065 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4066 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4067 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4068 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4069 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4070 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
/* shared buffer for scsi_debug_info(); concurrent callers share it */
4072 static char sdebug_info[256];
/*
 * scsi_debug_info - host template .info hook: one-line driver summary
 * (version, date, store size, opts) built into the static buffer.
 */
4074 static const char * scsi_debug_info(struct Scsi_Host * shp)
4076 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
4077 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
4078 sdebug_version_date, sdebug_dev_size_mb, sdebug_opts);
4082 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
/*
 * scsi_debug_write_info - parse a decimal opts value from the user
 * buffer (requires CAP_SYS_ADMIN and CAP_SYS_RAWIO) and refresh the
 * cached flags derived from it.  (Buffer declaration and returns are
 * elided in this excerpt.)
 */
4083 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
4087 int minLen = length > 15 ? 15 : length;
4089 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4091 memcpy(arr, buffer, minLen)
4093 if (1 != sscanf(arr, "%d", &opts))
/* derive cached booleans from the freshly-written opts bits */
4096 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4097 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4098 if (sdebug_every_nth != 0)
4099 atomic_set(&sdebug_cmnd_count, 0);
4103 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4104 * same for each scsi_debug host (if more than one). Some of the counters
4105 * output are not atomics so might be inaccurate in a busy system. */
4106 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
/* show which counter every_nth is currently comparing against */
4111 if (sdebug_every_nth > 0)
4112 snprintf(b, sizeof(b), " (curr:%d)",
4113 ((SDEBUG_OPT_RARE_TSF & sdebug_opts) ?
4114 atomic_read(&sdebug_a_tsf) :
4115 atomic_read(&sdebug_cmnd_count)));
/* dump configuration, geometry and per-event statistics */
4119 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
4120 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
4122 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
4123 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
4124 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
4125 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
4126 "usec_in_jiffy=%lu\n",
4127 SCSI_DEBUG_VERSION, sdebug_version_date,
4128 sdebug_num_tgts, sdebug_dev_size_mb, sdebug_opts,
4129 sdebug_every_nth, b, sdebug_delay, sdebug_ndelay,
4130 sdebug_max_luns, atomic_read(&sdebug_completions),
4131 sdebug_sector_size, sdebug_cylinders_per, sdebug_heads,
4132 sdebug_sectors_per, num_aborts, num_dev_resets,
4133 num_target_resets, num_bus_resets, num_host_resets,
4134 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
/* report the busy range of the queued-command bitmap, if any */
4136 f = find_first_bit(queued_in_use_bm, sdebug_max_queue);
4137 if (f != sdebug_max_queue) {
4138 l = find_last_bit(queued_in_use_bm, sdebug_max_queue);
4139 seq_printf(m, " %s BUSY: first,last bits set: %d,%d\n",
4140 "queued_in_use_bm", f, l);
/* sysfs 'delay' attribute: response delay in jiffies */
4145 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4147 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_delay);
4149 /* Returns -EBUSY if delay is being changed and commands are queued */
4150 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4155 if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
4157 if (sdebug_delay != delay) {
4158 unsigned long iflags;
/* only safe to switch delay mode when nothing is in flight */
4161 spin_lock_irqsave(&queued_arr_lock, iflags);
4162 k = find_first_bit(queued_in_use_bm, sdebug_max_queue);
4163 if (k != sdebug_max_queue)
4164 res = -EBUSY; /* have queued commands */
4166 sdebug_delay = delay;
4169 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4175 static DRIVER_ATTR_RW(delay);
/* sysfs 'ndelay' attribute: response delay in nanoseconds */
4177 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4179 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4181 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4182 /* If > 0 and accepted then sdebug_delay is set to DELAY_OVERRIDDEN */
4183 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4186 unsigned long iflags;
/* accept 0 <= ndelay < 1 second only */
4189 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4190 (ndelay >= 0) && (ndelay < 1000000000)) {
4192 if (sdebug_ndelay != ndelay) {
4193 spin_lock_irqsave(&queued_arr_lock, iflags);
4194 k = find_first_bit(queued_in_use_bm, sdebug_max_queue);
4195 if (k != sdebug_max_queue)
4196 res = -EBUSY; /* have queued commands */
4198 sdebug_ndelay = ndelay;
/* a non-zero ndelay takes precedence over delay */
4199 sdebug_delay = ndelay ? DELAY_OVERRIDDEN
4202 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4208 static DRIVER_ATTR_RW(ndelay);
/* sysfs 'opts' attribute: option bit mask, accepts 0x-hex or decimal */
4210 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4212 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4215 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4221 if (1 == sscanf(buf, "%10s", work)) {
4222 if (0 == strncasecmp(work,"0x", 2)) {
4223 if (1 == sscanf(&work[2], "%x", &opts))
4226 if (1 == sscanf(work, "%d", &opts))
/* refresh cached flags and restart the injection counters */
4233 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4234 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4235 atomic_set(&sdebug_cmnd_count, 0);
4236 atomic_set(&sdebug_a_tsf, 0);
4239 static DRIVER_ATTR_RW(opts);
/* sysfs 'ptype' attribute: simulated SCSI peripheral device type */
4241 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4243 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4245 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4250 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4256 static DRIVER_ATTR_RW(ptype);
/* sysfs 'dsense' attribute: descriptor (vs fixed) sense format */
4258 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4260 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4262 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4267 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4273 static DRIVER_ATTR_RW(dsense);
/* sysfs 'fake_rw' attribute: skip copying data on reads/writes */
4275 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4277 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4279 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4284 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
/* normalise current value to 0/1 before comparing */
4286 sdebug_fake_rw = (sdebug_fake_rw > 0);
4287 if (sdebug_fake_rw != n) {
/* turning fake_rw off needs a real backing store: allocate lazily */
4288 if ((0 == n) && (NULL == fake_storep)) {
4290 (unsigned long)sdebug_dev_size_mb *
4293 fake_storep = vmalloc(sz);
4294 if (NULL == fake_storep) {
4295 pr_err("out of memory, 9\n");
4298 memset(fake_storep, 0, sz);
4306 static DRIVER_ATTR_RW(fake_rw);
/* sysfs 'no_lun_0' attribute: hide LUN 0 when set */
4308 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4310 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4312 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4317 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4318 sdebug_no_lun_0 = n;
4323 static DRIVER_ATTR_RW(no_lun_0);
/* sysfs 'num_tgts' attribute: targets per host; re-derives topology */
4325 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4327 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4329 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4334 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4335 sdebug_num_tgts = n;
4336 sdebug_max_tgts_luns();
4341 static DRIVER_ATTR_RW(num_tgts);
/* read-only attributes: fixed at module load time */
4343 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4345 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4347 static DRIVER_ATTR_RO(dev_size_mb);
4349 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4351 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4353 static DRIVER_ATTR_RO(num_parts);
/* sysfs 'every_nth' attribute: error-injection period; resets counter */
4355 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4357 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4359 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4364 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4365 sdebug_every_nth = nth;
4366 atomic_set(&sdebug_cmnd_count, 0);
4371 static DRIVER_ATTR_RW(every_nth);
/* sysfs 'max_luns' attribute: LUNs per target; may queue a
 * REPORTED LUNS DATA HAS CHANGED UA on SPC-3 and later */
4373 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4375 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4377 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4383 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4384 changed = (sdebug_max_luns != n);
4385 sdebug_max_luns = n;
4386 sdebug_max_tgts_luns();
4387 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
4388 struct sdebug_host_info *sdhp;
4389 struct sdebug_dev_info *dp;
/* tell every simulated LU that the LUN inventory changed */
4391 spin_lock(&sdebug_host_list_lock);
4392 list_for_each_entry(sdhp, &sdebug_host_list,
4394 list_for_each_entry(dp, &sdhp->dev_info_list,
4396 set_bit(SDEBUG_UA_LUNS_CHANGED,
4400 spin_unlock(&sdebug_host_list_lock);
4406 static DRIVER_ATTR_RW(max_luns);
/* sysfs 'max_queue' attribute */
4408 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4410 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4412 /* N.B. max_queue can be changed while there are queued commands. In flight
4413 * commands beyond the new max_queue will be completed. */
4414 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4417 unsigned long iflags;
4420 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4421 (n <= SCSI_DEBUG_CANQUEUE)) {
4422 spin_lock_irqsave(&queued_arr_lock, iflags);
/* highest busy slot decides whether a retired region remains */
4423 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
4424 sdebug_max_queue = n;
4425 if (SCSI_DEBUG_CANQUEUE == k)
4426 atomic_set(&retired_max_queue, 0);
4428 atomic_set(&retired_max_queue, k + 1);
4430 atomic_set(&retired_max_queue, 0);
4431 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4436 static DRIVER_ATTR_RW(max_queue);
/* read-only attributes: fixed at module load time */
4438 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4440 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4442 static DRIVER_ATTR_RO(no_uld);
4444 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4446 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4448 static DRIVER_ATTR_RO(scsi_level);
/* sysfs 'virtual_gb' attribute: changes the advertised capacity and
 * queues a CAPACITY DATA HAS CHANGED UA on every LU when it changes */
4450 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4452 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4454 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4460 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4461 changed = (sdebug_virtual_gb != n);
4462 sdebug_virtual_gb = n;
4463 sdebug_capacity = get_sdebug_capacity();
4465 struct sdebug_host_info *sdhp;
4466 struct sdebug_dev_info *dp;
4468 spin_lock(&sdebug_host_list_lock);
4469 list_for_each_entry(sdhp, &sdebug_host_list,
4471 list_for_each_entry(dp, &sdhp->dev_info_list,
4473 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4477 spin_unlock(&sdebug_host_list_lock);
4483 static DRIVER_ATTR_RW(virtual_gb);
/* sysfs 'add_host' attribute: positive delta adds simulated adapters,
 * negative removes them */
4485 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4487 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4490 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4495 if (sscanf(buf, "%d", &delta_hosts) != 1)
4497 if (delta_hosts > 0) {
4499 sdebug_add_adapter();
4500 } while (--delta_hosts);
4501 } else if (delta_hosts < 0) {
4503 sdebug_remove_adapter();
4504 } while (++delta_hosts);
4508 static DRIVER_ATTR_RW(add_host);
4510 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4512 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
4514 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4519 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4520 sdebug_vpd_use_hostno = n;
4525 static DRIVER_ATTR_RW(vpd_use_hostno);
4527 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4529 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
4531 static DRIVER_ATTR_RO(sector_size);
4533 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4535 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
4537 static DRIVER_ATTR_RO(dix);
4539 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4541 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
4543 static DRIVER_ATTR_RO(dif);
4545 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4547 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
4549 static DRIVER_ATTR_RO(guard);
4551 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4553 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
4555 static DRIVER_ATTR_RO(ato);
4557 static ssize_t map_show(struct device_driver *ddp, char *buf)
/*
 * Show the logical block provisioning map as a bit-list ("%*pbl").
 * When LBP is disabled the whole backing store is reported mapped.
 */
4561 if (!scsi_debug_lbp())
4562 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4563 sdebug_store_sectors);
/* PAGE_SIZE - 1 leaves room for the trailing newline added below */
4565 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
4566 (int)map_size, map_storep);
4567 buf[count++] = '\n';
4572 static DRIVER_ATTR_RO(map);
4574 static ssize_t removable_show(struct device_driver *ddp, char *buf)
/* Show removable-medium emulation flag normalized to 0/1. */
4576 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
4578 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
/* Any positive value enables removable-medium emulation. */
4583 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4584 sdebug_removable = (n > 0);
4589 static DRIVER_ATTR_RW(removable);
4591 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
/* Show whether queuecommand runs under shost->host_lock (0/1). */
4593 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
4595 /* Returns -EBUSY if host_lock is being changed and commands are queued */
4596 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4601 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4602 bool new_host_lock = (n > 0);
4605 if (new_host_lock != sdebug_host_lock) {
4606 unsigned long iflags;
/*
 * Only flip the setting when the in-use bitmap is empty; a
 * non-empty bitmap means commands are queued, so bail with
 * -EBUSY rather than change locking mode mid-flight.
 */
4609 spin_lock_irqsave(&queued_arr_lock, iflags);
4610 k = find_first_bit(queued_in_use_bm,
4612 if (k != sdebug_max_queue)
4613 res = -EBUSY; /* have queued commands */
4615 sdebug_host_lock = new_host_lock;
4616 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4622 static DRIVER_ATTR_RW(host_lock);
4624 static ssize_t strict_show(struct device_driver *ddp, char *buf)
/* Show strict CDB checking flag normalized to 0/1. */
4626 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
4628 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
/* Any positive value enables strict CDB mask checking. */
4633 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4634 sdebug_strict = (n > 0);
4639 static DRIVER_ATTR_RW(strict);
4642 /* Note: The following array creates attribute files in the
4643 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4644 files (over those found in the /sys/module/scsi_debug/parameters
4645 directory) is that auxiliary actions can be triggered when an attribute
4646 is changed. For example see: add_host_store() above.
/* Driver attribute files created in /sys/bus/pseudo/drivers/scsi_debug */
4649 static struct attribute *sdebug_drv_attrs[] = {
4650 &driver_attr_delay.attr,
4651 &driver_attr_opts.attr,
4652 &driver_attr_ptype.attr,
4653 &driver_attr_dsense.attr,
4654 &driver_attr_fake_rw.attr,
4655 &driver_attr_no_lun_0.attr,
4656 &driver_attr_num_tgts.attr,
4657 &driver_attr_dev_size_mb.attr,
4658 &driver_attr_num_parts.attr,
4659 &driver_attr_every_nth.attr,
4660 &driver_attr_max_luns.attr,
4661 &driver_attr_max_queue.attr,
4662 &driver_attr_no_uld.attr,
4663 &driver_attr_scsi_level.attr,
4664 &driver_attr_virtual_gb.attr,
4665 &driver_attr_add_host.attr,
4666 &driver_attr_vpd_use_hostno.attr,
4667 &driver_attr_sector_size.attr,
4668 &driver_attr_dix.attr,
4669 &driver_attr_dif.attr,
4670 &driver_attr_guard.attr,
4671 &driver_attr_ato.attr,
4672 &driver_attr_map.attr,
4673 &driver_attr_removable.attr,
4674 &driver_attr_host_lock.attr,
4675 &driver_attr_ndelay.attr,
4676 &driver_attr_strict.attr,
4679 ATTRIBUTE_GROUPS(sdebug_drv);
/* root device that parents every simulated adapter */
4681 static struct device *pseudo_primary;
4683 static int __init scsi_debug_init(void)
/*
 * Module init: validate module parameters, size and allocate the
 * ramdisk backing store (plus optional DIF and provisioning stores),
 * register the pseudo root device, bus and driver, then create the
 * initially requested number of simulated adapters.
 * NOTE(review): intermediate lines (declarations, braces, labels)
 * are elided in this listing.
 */
4690 atomic_set(&sdebug_cmnd_count, 0);
4691 atomic_set(&sdebug_completions, 0);
4692 atomic_set(&retired_max_queue, 0);
/* ndelay in (0, 1e9) ns overrides the jiffies-based delay parameter */
4694 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
4695 pr_warn("ndelay must be less than 1 second, ignored\n");
4697 } else if (sdebug_ndelay > 0)
4698 sdebug_delay = DELAY_OVERRIDDEN;
/* only a fixed set of sector sizes is accepted */
4700 switch (sdebug_sector_size) {
4707 pr_err("invalid sector_size %d\n", sdebug_sector_size);
/* validate protection-information related parameters */
4711 switch (sdebug_dif) {
4713 case SD_DIF_TYPE0_PROTECTION:
4714 case SD_DIF_TYPE1_PROTECTION:
4715 case SD_DIF_TYPE2_PROTECTION:
4716 case SD_DIF_TYPE3_PROTECTION:
4720 pr_err("dif must be 0, 1, 2 or 3\n");
4724 if (sdebug_guard > 1) {
4725 pr_err("guard must be 0 or 1\n");
4729 if (sdebug_ato > 1) {
4730 pr_err("ato must be 0 or 1\n");
4734 if (sdebug_physblk_exp > 15) {
4735 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
4739 if (sdebug_lowest_aligned > 0x3fff) {
4740 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
4744 if (sdebug_dev_size_mb < 1)
4745 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
4746 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
4747 sdebug_store_sectors = sz / sdebug_sector_size;
4748 sdebug_capacity = get_sdebug_capacity();
4750 /* play around with geometry, don't waste too much on track 0 */
4752 sdebug_sectors_per = 32;
4753 if (sdebug_dev_size_mb >= 256)
4755 else if (sdebug_dev_size_mb >= 16)
4757 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4758 (sdebug_sectors_per * sdebug_heads);
4759 if (sdebug_cylinders_per >= 1024) {
4760 /* other LLDs do this; implies >= 1GB ram disk ... */
4762 sdebug_sectors_per = 63;
4763 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4764 (sdebug_sectors_per * sdebug_heads);
/* back the virtual disk with zeroed vmalloc memory unless fake_rw */
4767 if (0 == sdebug_fake_rw) {
4768 fake_storep = vmalloc(sz);
4769 if (NULL == fake_storep) {
4770 pr_err("out of memory, 1\n");
4773 memset(fake_storep, 0, sz);
4774 if (sdebug_num_parts > 0)
4775 sdebug_build_parts(fake_storep, sz);
/* optional DIF tuple store, preset to 0xff (all-escape pattern) */
4781 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4782 dif_storep = vmalloc(dif_size);
4784 pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
4786 if (dif_storep == NULL) {
4787 pr_err("out of mem. (DIX)\n");
4792 memset(dif_storep, 0xff, dif_size);
4795 /* Logical Block Provisioning */
4796 if (scsi_debug_lbp()) {
4797 sdebug_unmap_max_blocks =
4798 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
4800 sdebug_unmap_max_desc =
4801 clamp(sdebug_unmap_max_desc, 0U, 256U);
4803 sdebug_unmap_granularity =
4804 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
4806 if (sdebug_unmap_alignment &&
4807 sdebug_unmap_granularity <=
4808 sdebug_unmap_alignment) {
4809 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
/* one bit per provisioning block in map_storep */
4813 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4814 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4816 pr_info("%lu provisioning blocks\n", map_size);
4818 if (map_storep == NULL) {
4819 pr_err("out of mem. (MAP)\n");
4824 bitmap_zero(map_storep, map_size);
4826 /* Map first 1KB for partition table */
4827 if (sdebug_num_parts)
/* register pseudo root device, bus and driver, in that order */
4831 pseudo_primary = root_device_register("pseudo_0");
4832 if (IS_ERR(pseudo_primary)) {
4833 pr_warn("root_device_register() error\n");
4834 ret = PTR_ERR(pseudo_primary);
4837 ret = bus_register(&pseudo_lld_bus);
4839 pr_warn("bus_register error: %d\n", ret);
4842 ret = driver_register(&sdebug_driverfs_driver);
4844 pr_warn("driver_register error: %d\n", ret);
/* add_host is zeroed first; sdebug_add_adapter() re-increments it */
4848 host_to_add = sdebug_add_host;
4849 sdebug_add_host = 0;
4851 for (k = 0; k < host_to_add; k++) {
4852 if (sdebug_add_adapter()) {
4853 pr_err("sdebug_add_adapter failed k=%d\n", k);
4859 pr_info("built %d host(s)\n", sdebug_add_host);
/* error unwind (labels elided): undo registrations in reverse order */
4864 bus_unregister(&pseudo_lld_bus);
4866 root_device_unregister(pseudo_primary);
4875 static void __exit scsi_debug_exit(void)
/*
 * Module exit: tear down every remaining adapter, then unregister
 * the driver, bus and root device in reverse of init order.
 * NOTE(review): the loop around sdebug_remove_adapter() and the
 * store-freeing lines are elided in this listing.
 */
4877 int k = sdebug_add_host;
4882 sdebug_remove_adapter();
4883 driver_unregister(&sdebug_driverfs_driver);
4884 bus_unregister(&pseudo_lld_bus);
4885 root_device_unregister(pseudo_primary);
/* register init/exit entry points with the kernel module machinery */
4891 device_initcall(scsi_debug_init);
4892 module_exit(scsi_debug_exit);
4894 static void sdebug_release_adapter(struct device * dev)
/*
 * Device-model release callback for a simulated adapter; frees the
 * containing sdebug_host_info (kfree line elided in this listing).
 */
4896 struct sdebug_host_info *sdbg_host;
4898 sdbg_host = to_sdebug_host(dev);
4902 static int sdebug_add_adapter(void)
/*
 * Create one simulated adapter: allocate its host info, populate its
 * per-device list (num_tgts * max_luns entries), link it into the
 * global host list and register it with the driver core. Returns 0 on
 * success; on failure the clean-up path below frees the device list.
 * NOTE(review): intermediate lines (error variable, braces) elided.
 */
4904 int k, devs_per_host;
4906 struct sdebug_host_info *sdbg_host;
4907 struct sdebug_dev_info *sdbg_devinfo, *tmp;
4909 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
4910 if (NULL == sdbg_host) {
4911 pr_err("out of memory at line %d\n", __LINE__);
4915 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
4917 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
4918 for (k = 0; k < devs_per_host; k++) {
4919 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
4920 if (!sdbg_devinfo) {
4921 pr_err("out of memory at line %d\n", __LINE__);
/* publish the new host on the global list under its spinlock */
4927 spin_lock(&sdebug_host_list_lock);
4928 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
4929 spin_unlock(&sdebug_host_list_lock);
4931 sdbg_host->dev.bus = &pseudo_lld_bus;
4932 sdbg_host->dev.parent = pseudo_primary;
4933 sdbg_host->dev.release = &sdebug_release_adapter;
4934 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
4936 error = device_register(&sdbg_host->dev);
/* error path: free every device info created above */
4945 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4947 list_del(&sdbg_devinfo->dev_list);
4948 kfree(sdbg_devinfo);
4955 static void sdebug_remove_adapter(void)
/*
 * Remove the most recently added adapter: detach the tail of the
 * global host list under its lock, then unregister the device
 * (release callback frees it). No-op when the list is empty.
 */
4957 struct sdebug_host_info * sdbg_host = NULL;
4959 spin_lock(&sdebug_host_list_lock);
4960 if (!list_empty(&sdebug_host_list)) {
4961 sdbg_host = list_entry(sdebug_host_list.prev,
4962 struct sdebug_host_info, host_list);
4963 list_del(&sdbg_host->host_list);
4965 spin_unlock(&sdebug_host_list_lock);
4970 device_unregister(&sdbg_host->dev);
/*
 * change_queue_depth host-template callback: clamp and apply a new
 * queue depth for sdev, returning the resulting depth. Fails with
 * the device missing its per-device info (return value elided).
 * NOTE(review): the return-type line and clamp of qdepth's lower
 * bound are elided in this listing.
 */
4975 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4978 unsigned long iflags;
4979 struct sdebug_dev_info *devip;
/* snapshot num_in_q under the queued-array lock for the log below */
4981 spin_lock_irqsave(&queued_arr_lock, iflags);
4982 devip = (struct sdebug_dev_info *)sdev->hostdata;
4983 if (NULL == devip) {
4984 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4987 num_in_q = atomic_read(&devip->num_in_q);
4988 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4992 /* allow to exceed max host queued_arr elements for testing */
4993 if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4994 qdepth = SCSI_DEBUG_CANQUEUE + 10;
4995 scsi_change_queue_depth(sdev, qdepth);
4997 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
4998 sdev_printk(KERN_INFO, sdev,
4999 "%s: qdepth=%d, num_in_q=%d\n",
5000 __func__, qdepth, num_in_q);
5002 return sdev->queue_depth;
/*
 * Per-command error-injection hook, consulted when sdebug_every_nth
 * is non-zero. Every |every_nth|-th command either gets dropped
 * (simulated timeout, return 1) or has injection flags set in its
 * per-command extra data (return elided in this listing).
 * NOTE(review): the return-type line is elided above.
 */
5006 check_inject(struct scsi_cmnd *scp)
5008 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
/* start each command with a clean injection record */
5010 memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
/* counter wraps at |every_nth|; the wrapping command gets injected */
5012 if (atomic_inc_return(&sdebug_cmnd_count) >= abs(sdebug_every_nth)) {
5013 atomic_set(&sdebug_cmnd_count, 0);
/* every_nth < -1 is one-shot: collapse to -1 after triggering */
5014 if (sdebug_every_nth < -1)
5015 sdebug_every_nth = -1;
5016 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5017 return 1; /* ignore command causing timeout */
5018 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5019 scsi_medium_access_command(scp))
5020 return 1; /* time out reads and writes */
/* otherwise mark the command for error injection downstream */
5021 if (sdebug_any_injecting_opt) {
5022 if (SDEBUG_OPT_RECOVERED_ERR & sdebug_opts)
5023 ep->inj_recovered = true;
5024 if (SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts)
5025 ep->inj_transport = true;
5026 if (SDEBUG_OPT_DIF_ERR & sdebug_opts)
5028 if (SDEBUG_OPT_DIX_ERR & sdebug_opts)
5030 if (SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts)
5031 ep->inj_short = true;
/*
 * Main command dispatch: decode the CDB, locate its opcode-table
 * entry (with service-action disambiguation), run per-command checks
 * (LUN range, unit attention, stopped state, strict CDB mask,
 * error injection) and call the matching resp_* handler, finally
 * queuing the response through schedule_resp().
 * NOTE(review): the return-type line, several local declarations and
 * closing braces are elided in this listing.
 */
5038 scsi_debug_queuecommand(struct scsi_cmnd *scp)
5041 struct scsi_device *sdp = scp->device;
5042 const struct opcode_info_t *oip;
5043 const struct opcode_info_t *r_oip;
5044 struct sdebug_dev_info *devip;
5045 u8 *cmd = scp->cmnd;
5046 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5054 scsi_set_resid(scp, 0);
/* optional CDB hex dump for verbose operation */
5055 if (sdebug_verbose && !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts)) {
5060 sb = (int)sizeof(b);
5062 strcpy(b, "too long, over 32 bytes");
5064 for (k = 0, n = 0; k < len && n < sb; ++k)
5065 n += scnprintf(b + n, sb - n, "%02x ",
5068 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
/* out-of-range LUNs (except the REPORT LUNS well-known LUN) fail fast */
5070 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
5071 if ((sdp->lun >= sdebug_max_luns) && !has_wlun_rl)
5072 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
5074 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
5075 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
5076 devip = (struct sdebug_dev_info *)sdp->hostdata;
5078 devip = devInfoReg(sdp);
5080 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16,
5083 na = oip->num_attached;
/* several entries may share an opcode; pick by service action */
5085 if (na) { /* multiple commands with this opcode */
5087 if (FF_SA & r_oip->flags) {
5088 if (F_SA_LOW & oip->flags)
5091 sa = get_unaligned_be16(cmd + 8);
5092 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5093 if (opcode == oip->opcode && sa == oip->sa)
5096 } else { /* since no service action only check opcode */
5097 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5098 if (opcode == oip->opcode)
/* no match: flag the service-action field or the opcode itself */
5103 if (F_SA_LOW & r_oip->flags)
5104 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5105 else if (F_SA_HIGH & r_oip->flags)
5106 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5108 mk_sense_invalid_opcode(scp);
5111 } /* else (when na==0) we assume the oip is a match */
5113 if (F_INV_OP & flags) {
5114 mk_sense_invalid_opcode(scp);
5117 if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
5119 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
5120 my_name, opcode, " supported for wlun");
5121 mk_sense_invalid_opcode(scp);
/* strict mode: reject CDB bits outside the per-opcode mask */
5124 if (sdebug_strict) { /* check cdb against mask */
5128 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5129 rem = ~oip->len_mask[k] & cmd[k];
5131 for (j = 7; j >= 0; --j, rem <<= 1) {
5135 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
/* report pending unit attentions unless the opcode skips them */
5140 if (!(F_SKIP_UA & flags) &&
5141 SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
5142 errsts = check_readiness(scp, UAS_ONLY, devip);
5146 if ((F_M_ACCESS & flags) && devip->stopped) {
5147 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5149 sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5150 "%s\n", my_name, "initializing command "
5152 errsts = check_condition_result;
5155 if (sdebug_fake_rw && (F_FAKE_RW & flags))
5157 if (sdebug_every_nth) {
5158 if (check_inject(scp))
5159 return 0; /* ignore command: make trouble */
5161 if (oip->pfp) /* if this command has a resp_* function, call it */
5162 errsts = oip->pfp(scp, devip);
5163 else if (r_pfp) /* if leaf function ptr NULL, try the root's */
5164 errsts = r_pfp(scp, devip);
5167 return schedule_resp(scp, devip, errsts,
5168 ((F_DELAY_OVERR & flags) ? 0 : sdebug_delay));
5170 return schedule_resp(scp, devip, check_condition_result, 0);
/*
 * queuecommand host-template entry: optionally serialize under
 * shost->host_lock (legacy behavior) before dispatching to
 * scsi_debug_queuecommand(). Return-type line elided in listing.
 */
5174 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
5176 if (sdebug_host_lock) {
5177 unsigned long iflags;
5180 spin_lock_irqsave(shost->host_lock, iflags);
5181 rc = scsi_debug_queuecommand(cmd);
5182 spin_unlock_irqrestore(shost->host_lock, iflags);
5185 return scsi_debug_queuecommand(cmd);
/* SCSI host template: wires this driver's callbacks into the midlayer */
5188 static struct scsi_host_template sdebug_driver_template = {
5189 .show_info = scsi_debug_show_info,
5190 .write_info = scsi_debug_write_info,
5191 .proc_name = sdebug_proc_name,
5192 .name = "SCSI DEBUG",
5193 .info = scsi_debug_info,
5194 .slave_alloc = scsi_debug_slave_alloc,
5195 .slave_configure = scsi_debug_slave_configure,
5196 .slave_destroy = scsi_debug_slave_destroy,
5197 .ioctl = scsi_debug_ioctl,
5198 .queuecommand = sdebug_queuecommand_lock_or_not,
5199 .change_queue_depth = sdebug_change_qdepth,
5200 .eh_abort_handler = scsi_debug_abort,
5201 .eh_device_reset_handler = scsi_debug_device_reset,
5202 .eh_target_reset_handler = scsi_debug_target_reset,
5203 .eh_bus_reset_handler = scsi_debug_bus_reset,
5204 .eh_host_reset_handler = scsi_debug_host_reset,
5205 .can_queue = SCSI_DEBUG_CANQUEUE,
5207 .sg_tablesize = SG_MAX_SEGMENTS,
5208 .cmd_per_lun = DEF_CMD_PER_LUN,
/* clustering may be flipped to ENABLE_CLUSTERING in probe */
5210 .use_clustering = DISABLE_CLUSTERING,
5211 .module = THIS_MODULE,
5212 .track_queue_depth = 1,
5213 .cmd_size = sizeof(struct sdebug_scmd_extra_t),
5216 static int sdebug_driver_probe(struct device * dev)
/*
 * Bus probe callback: allocate and configure a Scsi_Host for one
 * simulated adapter, set its DIF/DIX protection capabilities from
 * module parameters, then add and scan the host.
 * NOTE(review): error variable declarations, some case bodies and
 * closing braces are elided in this listing.
 */
5219 struct sdebug_host_info *sdbg_host;
5220 struct Scsi_Host *hpnt;
5223 sdbg_host = to_sdebug_host(dev);
/* template fields are patched per-load before host allocation */
5225 sdebug_driver_template.can_queue = sdebug_max_queue;
5226 if (sdebug_clustering)
5227 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5228 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5230 pr_err("scsi_host_alloc failed\n");
/* link host info and Scsi_Host both ways */
5235 sdbg_host->shost = hpnt;
5236 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5237 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5238 hpnt->max_id = sdebug_num_tgts + 1;
5240 hpnt->max_id = sdebug_num_tgts;
5241 /* = sdebug_max_luns; */
5242 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
/* translate sdebug_dif/dix parameters into SHOST_* protection bits */
5246 switch (sdebug_dif) {
5248 case SD_DIF_TYPE1_PROTECTION:
5249 host_prot = SHOST_DIF_TYPE1_PROTECTION;
5251 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
5254 case SD_DIF_TYPE2_PROTECTION:
5255 host_prot = SHOST_DIF_TYPE2_PROTECTION;
5257 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
5260 case SD_DIF_TYPE3_PROTECTION:
5261 host_prot = SHOST_DIF_TYPE3_PROTECTION;
5263 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
5268 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
5272 scsi_host_set_prot(hpnt, host_prot);
5274 pr_info("host protection%s%s%s%s%s%s%s\n",
5275 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5276 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5277 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5278 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5279 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5280 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5281 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5283 if (sdebug_guard == 1)
5284 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5286 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
/* cache opts-derived flags for the hot queuecommand path */
5288 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5289 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5290 error = scsi_add_host(hpnt, &sdbg_host->dev);
5292 pr_err("scsi_add_host failed\n");
5294 scsi_host_put(hpnt);
5296 scsi_scan_host(hpnt);
5301 static int sdebug_driver_remove(struct device * dev)
/*
 * Bus remove callback: detach the Scsi_Host from the midlayer, free
 * every per-device info on the host's list, then drop the final
 * host reference.
 */
5303 struct sdebug_host_info *sdbg_host;
5304 struct sdebug_dev_info *sdbg_devinfo, *tmp;
5306 sdbg_host = to_sdebug_host(dev);
5309 pr_err("Unable to locate host info\n");
5313 scsi_remove_host(sdbg_host->shost);
5315 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5317 list_del(&sdbg_devinfo->dev_list);
5318 kfree(sdbg_devinfo);
5321 scsi_host_put(sdbg_host->shost);
/*
 * Bus match callback. NOTE(review): the body is elided in this
 * listing; presumably it matches every device to every driver on
 * this pseudo bus — confirm against the full source.
 */
5325 static int pseudo_lld_bus_match(struct device *dev,
5326 struct device_driver *dev_driver)
5331 static struct bus_type pseudo_lld_bus = {
5333 .match = pseudo_lld_bus_match,
5334 .probe = sdebug_driver_probe,
5335 .remove = sdebug_driver_remove,
5336 .drv_groups = sdebug_drv_groups,