[linux-2.6-block.git] / drivers / scsi / scsi_debug.c
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2016 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19
20
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22
23 #include <linux/module.h>
24
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/timer.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44
45 #include <net/checksum.h>
46
47 #include <asm/unaligned.h>
48
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57
58 #include "sd.h"
59 #include "scsi_logging.h"
60
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SCSI_DEBUG_VERSION "1.86"
63 static const char *sdebug_version_date = "20160422";
64
65 #define MY_NAME "scsi_debug"
66
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define UA_RESET_ASC 0x29
78 #define UA_CHANGED_ASC 0x2a
79 #define TARGET_CHANGED_ASC 0x3f
80 #define LUNS_CHANGED_ASCQ 0x0e
81 #define INSUFF_RES_ASC 0x55
82 #define INSUFF_RES_ASCQ 0x3
83 #define POWER_ON_RESET_ASCQ 0x0
84 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
85 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
86 #define CAPACITY_CHANGED_ASCQ 0x9
87 #define SAVING_PARAMS_UNSUP 0x39
88 #define TRANSPORT_PROBLEM 0x4b
89 #define THRESHOLD_EXCEEDED 0x5d
90 #define LOW_POWER_COND_ON 0x5e
91 #define MISCOMPARE_VERIFY_ASC 0x1d
92 #define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
93 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
94
95 /* Additional Sense Code Qualifier (ASCQ) */
96 #define ACK_NAK_TO 0x3
97
98
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST   1
101 #define DEF_NUM_TGTS   1
102 #define DEF_MAX_LUNS   1
103 /* With these defaults, this driver will make 1 host with 1 target
104  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
105  */
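/* Illustrative only (not part of the original source): most of these defaults
 * can be overridden at load time via the like-named module parameters
 * (dev_size_mb, num_tgts, max_luns; the host count is "add_host"), e.g.:
 *     modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 add_host=2
 * which should yield 2 hosts, each with 2 targets of 4 LUNs, all LUNs backed
 * by the same 256 MiB ramdisk image.
 */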
106 #define DEF_ATO 1
107 #define DEF_DELAY   1           /* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB   8
109 #define DEF_DIF 0
110 #define DEF_DIX 0
111 #define DEF_D_SENSE   0
112 #define DEF_EVERY_NTH   0
113 #define DEF_FAKE_RW     0
114 #define DEF_GUARD 0
115 #define DEF_HOST_LOCK 0
116 #define DEF_LBPU 0
117 #define DEF_LBPWS 0
118 #define DEF_LBPWS10 0
119 #define DEF_LBPRZ 1
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0   0
123 #define DEF_NUM_PARTS   0
124 #define DEF_OPTS   0
125 #define DEF_OPT_BLKS 1024
126 #define DEF_PHYSBLK_EXP 0
127 #define DEF_PTYPE   0
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_UNMAP_ALIGNMENT 0
132 #define DEF_UNMAP_GRANULARITY 1
133 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
134 #define DEF_UNMAP_MAX_DESC 256
135 #define DEF_VIRTUAL_GB   0
136 #define DEF_VPD_USE_HOSTNO 1
137 #define DEF_WRITESAME_LENGTH 0xFFFF
138 #define DEF_STRICT 0
139 #define DELAY_OVERRIDDEN -9999
140
141 /* bit mask values for sdebug_opts */
142 #define SDEBUG_OPT_NOISE                1
143 #define SDEBUG_OPT_MEDIUM_ERR           2
144 #define SDEBUG_OPT_TIMEOUT              4
145 #define SDEBUG_OPT_RECOVERED_ERR        8
146 #define SDEBUG_OPT_TRANSPORT_ERR        16
147 #define SDEBUG_OPT_DIF_ERR              32
148 #define SDEBUG_OPT_DIX_ERR              64
149 #define SDEBUG_OPT_MAC_TIMEOUT          128
150 #define SDEBUG_OPT_SHORT_TRANSFER       0x100
151 #define SDEBUG_OPT_Q_NOISE              0x200
152 #define SDEBUG_OPT_ALL_TSF              0x400
153 #define SDEBUG_OPT_RARE_TSF             0x800
154 #define SDEBUG_OPT_N_WCE                0x1000
155 #define SDEBUG_OPT_RESET_NOISE          0x2000
156 #define SDEBUG_OPT_NO_CDB_NOISE         0x4000
157 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
158                               SDEBUG_OPT_RESET_NOISE)
159 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
160                                   SDEBUG_OPT_TRANSPORT_ERR | \
161                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
162                                   SDEBUG_OPT_SHORT_TRANSFER)
163 /* When "every_nth" > 0 then modulo "every_nth" commands:
164  *   - no response is simulated (the command times out) if SDEBUG_OPT_TIMEOUT is set
165  *   - a RECOVERED_ERROR is simulated on successful read and write
166  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
167  *   - a TRANSPORT_ERROR is simulated on successful read and write
168  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
169  *
170  * When "every_nth" < 0 then after "- every_nth" commands:
171  *   - no response is simulated (the command times out) if SDEBUG_OPT_TIMEOUT is set
172  *   - a RECOVERED_ERROR is simulated on successful read and write
173  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
174  *   - a TRANSPORT_ERROR is simulated on successful read and write
175  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
176  * This will continue on every subsequent command until some other action
177  * occurs (e.g. the user writing a new value (other than -1 or 1) to
178  * every_nth via sysfs).
179  */
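/* Rough usage sketch (an addition, assuming the usual "every_nth" and "opts"
 * module parameters):
 *     modprobe scsi_debug every_nth=100 opts=8
 * would report a RECOVERED ERROR on every 100th read or write command
 * (opts=8 sets SDEBUG_OPT_RECOVERED_ERR); both values can also be changed
 * afterwards through the driver's sysfs attributes.
 */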
180
181 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
182  * priority order. In the subset implemented here lower numbers have higher
183  * priority. The UA numbers should be a sequence starting from 0 with
184  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
185 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
186 #define SDEBUG_UA_BUS_RESET 1
187 #define SDEBUG_UA_MODE_CHANGED 2
188 #define SDEBUG_UA_CAPACITY_CHANGED 3
189 #define SDEBUG_UA_LUNS_CHANGED 4
190 #define SDEBUG_UA_MICROCODE_CHANGED 5   /* simulate firmware change */
191 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
192 #define SDEBUG_NUM_UAS 7
193
194 /* for check_readiness() */
195 #define UAS_ONLY 1      /* check for UAs only */
196 #define UAS_TUR 0       /* if no UAs then check if media access possible */
197
198 /* When SDEBUG_OPT_MEDIUM_ERR is set in sdebug_opts, a medium error is
199  * simulated at this sector on read commands: */
200 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
201 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
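/* Worked example (illustrative): with opts=2 (SDEBUG_OPT_MEDIUM_ERR), a read
 * covering LBA 0x1234 (4660 decimal) fails with MEDIUM ERROR / UNRECOVERED
 * READ ERROR sense, and so do the following 9 sectors (10 consecutive), e.g.:
 *     dd if=/dev/sdX of=/dev/null bs=512 skip=4660 count=1
 * (/dev/sdX standing in for whatever device node scsi_debug was given).
 */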
202
203 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
204  * or "peripheral device" addressing (value 0) */
205 #define SAM2_LUN_ADDRESS_METHOD 0
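/* Illustrative encoding (SAM-2 detail, added here): with flat space
 * addressing, LUN 258 is reported as 0x41 0x02 in the first two bytes of the
 * 8-byte LUN field (address method 01b in the top bits); peripheral device
 * addressing (method 00b) can only express LUNs 0-255 directly, e.g.
 * LUN 5 -> 0x00 0x05.
 */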
206
207 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
208  * (for response) at one time. Can be reduced by max_queue option. Command
209  * responses are not queued when delay=0 and ndelay=0. The per-device
210  * DEF_CMD_PER_LUN can be changed via sysfs:
211  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
212  * SCSI_DEBUG_CANQUEUE. */
213 #define SCSI_DEBUG_CANQUEUE_WORDS  9    /* a WORD is BITS_PER_LONG bits */
214 #define SCSI_DEBUG_CANQUEUE  (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
215 #define DEF_CMD_PER_LUN  255
216
217 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
218 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
219 #endif
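/* Example of the sysfs knob mentioned above (h:c:t:l values are hypothetical):
 *     echo 64 > /sys/class/scsi_device/2:0:0:0/device/queue_depth
 * lowers that LUN's queue depth from DEF_CMD_PER_LUN (255) to 64; as noted
 * above, the value cannot exceed SCSI_DEBUG_CANQUEUE.
 */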
220
221 /* SCSI opcodes (first byte of cdb) mapped onto these indexes */
222 enum sdeb_opcode_index {
223         SDEB_I_INVALID_OPCODE = 0,
224         SDEB_I_INQUIRY = 1,
225         SDEB_I_REPORT_LUNS = 2,
226         SDEB_I_REQUEST_SENSE = 3,
227         SDEB_I_TEST_UNIT_READY = 4,
228         SDEB_I_MODE_SENSE = 5,          /* 6, 10 */
229         SDEB_I_MODE_SELECT = 6,         /* 6, 10 */
230         SDEB_I_LOG_SENSE = 7,
231         SDEB_I_READ_CAPACITY = 8,       /* 10; 16 is in SA_IN(16) */
232         SDEB_I_READ = 9,                /* 6, 10, 12, 16 */
233         SDEB_I_WRITE = 10,              /* 6, 10, 12, 16 */
234         SDEB_I_START_STOP = 11,
235         SDEB_I_SERV_ACT_IN = 12,        /* 12, 16 */
236         SDEB_I_SERV_ACT_OUT = 13,       /* 12, 16 */
237         SDEB_I_MAINT_IN = 14,
238         SDEB_I_MAINT_OUT = 15,
239         SDEB_I_VERIFY = 16,             /* 10 only */
240         SDEB_I_VARIABLE_LEN = 17,
241         SDEB_I_RESERVE = 18,            /* 6, 10 */
242         SDEB_I_RELEASE = 19,            /* 6, 10 */
243         SDEB_I_ALLOW_REMOVAL = 20,      /* PREVENT ALLOW MEDIUM REMOVAL */
244         SDEB_I_REZERO_UNIT = 21,        /* REWIND in SSC */
245         SDEB_I_ATA_PT = 22,             /* 12, 16 */
246         SDEB_I_SEND_DIAG = 23,
247         SDEB_I_UNMAP = 24,
248         SDEB_I_XDWRITEREAD = 25,        /* 10 only */
249         SDEB_I_WRITE_BUFFER = 26,
250         SDEB_I_WRITE_SAME = 27,         /* 10, 16 */
251         SDEB_I_SYNC_CACHE = 28,         /* 10 only */
252         SDEB_I_COMP_WRITE = 29,
253         SDEB_I_LAST_ELEMENT = 30,       /* keep this last */
254 };
255
256 static const unsigned char opcode_ind_arr[256] = {
257 /* 0x0; 0x0->0x1f: 6 byte cdbs */
258         SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
259             0, 0, 0, 0,
260         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
261         0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
262             SDEB_I_RELEASE,
263         0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
264             SDEB_I_ALLOW_REMOVAL, 0,
265 /* 0x20; 0x20->0x3f: 10 byte cdbs */
266         0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
267         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
268         0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
269         0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
270 /* 0x40; 0x40->0x5f: 10 byte cdbs */
271         0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
272         0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
273         0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
274             SDEB_I_RELEASE,
275         0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
276 /* 0x60; 0x60->0x7d are reserved */
277         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
279         0, SDEB_I_VARIABLE_LEN,
280 /* 0x80; 0x80->0x9f: 16 byte cdbs */
281         0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
282         SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
283         0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
284         0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
285 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
286         SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
287              SDEB_I_MAINT_OUT, 0, 0, 0,
288         SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
289              0, 0, 0, 0,
290         0, 0, 0, 0, 0, 0, 0, 0,
291         0, 0, 0, 0, 0, 0, 0, 0,
292 /* 0xc0; 0xc0->0xff: vendor specific */
293         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
294         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
295         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
296         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
297 };
298
299 #define F_D_IN                  1
300 #define F_D_OUT                 2
301 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
302 #define F_D_UNKN                8
303 #define F_RL_WLUN_OK            0x10
304 #define F_SKIP_UA               0x20
305 #define F_DELAY_OVERR           0x40
306 #define F_SA_LOW                0x80    /* cdb byte 1, bits 4 to 0 */
307 #define F_SA_HIGH               0x100   /* as used by variable length cdbs */
308 #define F_INV_OP                0x200
309 #define F_FAKE_RW               0x400
310 #define F_M_ACCESS              0x800   /* media access */
311
312 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
313 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
314 #define FF_SA (F_SA_HIGH | F_SA_LOW)
315
316 struct sdebug_dev_info;
317 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
318 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
319 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
320 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
321 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
322 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
323 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
324 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
325 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
326 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
327 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
328 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
329 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
330 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
331 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
332 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
333 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
334 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
335 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
336 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
337 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
338
339 struct opcode_info_t {
340         u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff
341                                  * for terminating element */
342         u8 opcode;              /* if num_attached > 0, preferred */
343         u16 sa;                 /* service action */
344         u32 flags;              /* OR-ed set of SDEB_F_* */
345         int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
346         const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
347         u8 len_mask[16];        /* len=len_mask[0], then mask for cdb[1]... */
348                                 /* ignore cdb bytes after position 15 */
349 };
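/* Worked example (added for illustration): the INQUIRY entry in
 * opcode_info_arr below has len_mask {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, ...},
 * so len_mask[0] says the cdb is 6 bytes long and len_mask[1] = 0xe3 means
 * only bits 7, 6, 5, 1 and 0 of cdb[1] may be set; with the "strict" option
 * enabled, a set bit outside a byte's mask yields INVALID FIELD IN CDB.
 */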
350
351 static const struct opcode_info_t msense_iarr[1] = {
352         {0, 0x1a, 0, F_D_IN, NULL, NULL,
353             {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
354 };
355
356 static const struct opcode_info_t mselect_iarr[1] = {
357         {0, 0x15, 0, F_D_OUT, NULL, NULL,
358             {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
359 };
360
361 static const struct opcode_info_t read_iarr[3] = {
362         {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
363             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
364              0, 0, 0, 0} },
365         {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
366             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
367         {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
368             {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
369              0xc7, 0, 0, 0, 0} },
370 };
371
372 static const struct opcode_info_t write_iarr[3] = {
373         {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
374             {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
375              0, 0, 0, 0} },
376         {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
377             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
378         {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
379             {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
380              0xc7, 0, 0, 0, 0} },
381 };
382
383 static const struct opcode_info_t sa_in_iarr[1] = {
384         {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
385             {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
386              0xff, 0xff, 0xff, 0, 0xc7} },
387 };
388
389 static const struct opcode_info_t vl_iarr[1] = {        /* VARIABLE LENGTH */
390         {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
391             NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
392                    0, 0xff, 0xff, 0xff, 0xff} },        /* WRITE(32) */
393 };
394
395 static const struct opcode_info_t maint_in_iarr[2] = {
396         {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
397             {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
398              0xc7, 0, 0, 0, 0} },
399         {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
400             {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
401              0, 0} },
402 };
403
404 static const struct opcode_info_t write_same_iarr[1] = {
405         {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
406             {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
407              0xff, 0xff, 0xff, 0x1f, 0xc7} },
408 };
409
410 static const struct opcode_info_t reserve_iarr[1] = {
411         {0, 0x16, 0, F_D_OUT, NULL, NULL,       /* RESERVE(6) */
412             {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
413 };
414
415 static const struct opcode_info_t release_iarr[1] = {
416         {0, 0x17, 0, F_D_OUT, NULL, NULL,       /* RELEASE(6) */
417             {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
418 };
419
420
421 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
422  * plus the terminating elements for logic that scans this table such as
423  * REPORT SUPPORTED OPERATION CODES. */
424 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
425 /* 0 */
426         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
427             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
428         {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
429             {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
430         {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
431             {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
432              0, 0} },
433         {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
434             {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
435         {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
436             {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
437         {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
438             {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
439              0} },
440         {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
441             {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
442         {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
443             {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
444              0, 0, 0} },
445         {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
446             {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
447              0, 0} },
448         {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
449             {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
450              0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* READ(16) */
451 /* 10 */
452         {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
453             {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
454              0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* WRITE(16) */
455         {0, 0x1b, 0, 0, resp_start_stop, NULL,          /* START STOP UNIT */
456             {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
457         {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
458             {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
459              0xff, 0xff, 0xff, 0x1, 0xc7} },    /* READ CAPACITY(16) */
460         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
461             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
462         {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
463             {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
464              0} },
465         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
466             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
467         {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
468             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
469              0, 0, 0, 0, 0, 0} },
470         {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
471             vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
472                       0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
473         {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
474             {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
475              0} },
476         {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
477             {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
478              0} },
479 /* 20 */
480         {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
481             {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
482         {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
483             {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
484         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
485             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
486         {0, 0x1d, 0, F_D_OUT, NULL, NULL,       /* SEND DIAGNOSTIC */
487             {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
488         {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
489             {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
490         {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
491             NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
492                    0, 0, 0, 0, 0, 0} },
493         {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
494             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
495              0, 0, 0, 0} },                     /* WRITE_BUFFER */
496         {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
497             write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
498                               0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
499         {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
500             {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
501              0, 0, 0, 0} },
502         {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
503             {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
504              0, 0xff, 0x1f, 0xc7} },            /* COMPARE AND WRITE */
505
506 /* 30 */
507         {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
508             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
509 };
510
511 struct sdebug_scmd_extra_t {
512         bool inj_recovered;
513         bool inj_transport;
514         bool inj_dif;
515         bool inj_dix;
516         bool inj_short;
517 };
518
519 static int sdebug_add_host = DEF_NUM_HOST;
520 static int sdebug_ato = DEF_ATO;
521 static int sdebug_delay = DEF_DELAY;
522 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
523 static int sdebug_dif = DEF_DIF;
524 static int sdebug_dix = DEF_DIX;
525 static int sdebug_dsense = DEF_D_SENSE;
526 static int sdebug_every_nth = DEF_EVERY_NTH;
527 static int sdebug_fake_rw = DEF_FAKE_RW;
528 static unsigned int sdebug_guard = DEF_GUARD;
529 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
530 static int sdebug_max_luns = DEF_MAX_LUNS;
531 static int sdebug_max_queue = SCSI_DEBUG_CANQUEUE;
532 static atomic_t retired_max_queue;      /* if > 0 then was prior max_queue */
533 static int sdebug_ndelay = DEF_NDELAY;
534 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
535 static int sdebug_no_uld;
536 static int sdebug_num_parts = DEF_NUM_PARTS;
537 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
538 static int sdebug_opt_blks = DEF_OPT_BLKS;
539 static int sdebug_opts = DEF_OPTS;
540 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
541 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
542 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
543 static int sdebug_sector_size = DEF_SECTOR_SIZE;
544 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
545 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
546 static unsigned int sdebug_lbpu = DEF_LBPU;
547 static unsigned int sdebug_lbpws = DEF_LBPWS;
548 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
549 static unsigned int sdebug_lbprz = DEF_LBPRZ;
550 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
551 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
552 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
553 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
554 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
555 static bool sdebug_removable = DEF_REMOVABLE;
556 static bool sdebug_clustering;
557 static bool sdebug_host_lock = DEF_HOST_LOCK;
558 static bool sdebug_strict = DEF_STRICT;
559 static bool sdebug_any_injecting_opt;
560 static bool sdebug_verbose;
561
562 static atomic_t sdebug_cmnd_count;
563 static atomic_t sdebug_completions;
564 static atomic_t sdebug_a_tsf;           /* counter of 'almost' TSFs */
565
566 #define DEV_READONLY(TGT)      (0)
567
568 static unsigned int sdebug_store_sectors;
569 static sector_t sdebug_capacity;        /* in sectors */
570
571 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
572    may still need them */
573 static int sdebug_heads;                /* heads per disk */
574 static int sdebug_cylinders_per;        /* cylinders per surface */
575 static int sdebug_sectors_per;          /* sectors per cylinder */
576
577 #define SDEBUG_MAX_PARTS 4
578
579 #define SCSI_DEBUG_MAX_CMD_LEN 32
580
581 static unsigned int scsi_debug_lbp(void)
582 {
583         return 0 == sdebug_fake_rw &&
584                 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
585 }
586
587 struct sdebug_dev_info {
588         struct list_head dev_list;
589         unsigned int channel;
590         unsigned int target;
591         u64 lun;
592         struct sdebug_host_info *sdbg_host;
593         unsigned long uas_bm[1];
594         atomic_t num_in_q;
595         char stopped;           /* TODO: should be atomic */
596         bool used;
597 };
598
599 struct sdebug_host_info {
600         struct list_head host_list;
601         struct Scsi_Host *shost;
602         struct device dev;
603         struct list_head dev_info_list;
604 };
605
606 #define to_sdebug_host(d)       \
607         container_of(d, struct sdebug_host_info, dev)
608
609 static LIST_HEAD(sdebug_host_list);
610 static DEFINE_SPINLOCK(sdebug_host_list_lock);
611
612
613 struct sdebug_hrtimer {         /* ... is derived from hrtimer */
614         struct hrtimer hrt;     /* must be first element */
615         int qa_indx;
616 };
617
618 struct sdebug_queued_cmd {
619         /* in_use flagged by a bit in queued_in_use_bm[] */
620         struct timer_list *cmnd_timerp;
621         struct tasklet_struct *tletp;
622         struct sdebug_hrtimer *sd_hrtp;
623         struct scsi_cmnd * a_cmnd;
624 };
625 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
626 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
627
628
629 static unsigned char * fake_storep;     /* ramdisk storage */
630 static struct sd_dif_tuple *dif_storep; /* protection info */
631 static void *map_storep;                /* provisioning map */
632
633 static unsigned long map_size;
634 static int num_aborts;
635 static int num_dev_resets;
636 static int num_target_resets;
637 static int num_bus_resets;
638 static int num_host_resets;
639 static int dix_writes;
640 static int dix_reads;
641 static int dif_errors;
642
643 static DEFINE_SPINLOCK(queued_arr_lock);
644 static DEFINE_RWLOCK(atomic_rw);
645
646 static char sdebug_proc_name[] = MY_NAME;
647 static const char *my_name = MY_NAME;
648
649 static struct bus_type pseudo_lld_bus;
650
651 static struct device_driver sdebug_driverfs_driver = {
652         .name           = sdebug_proc_name,
653         .bus            = &pseudo_lld_bus,
654 };
655
656 static const int check_condition_result =
657                 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
658
659 static const int illegal_condition_result =
660         (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
661
662 static const int device_qfull_result =
663         (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
664
665 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
666                                      0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
667                                      0, 0, 0, 0};
668 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
669                                     0, 0, 0x2, 0x4b};
670 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
671                                    0, 0, 0x0, 0x0};
672
673 static void *fake_store(unsigned long long lba)
674 {
675         lba = do_div(lba, sdebug_store_sectors);
676
677         return fake_storep + lba * sdebug_sector_size;
678 }
679
680 static struct sd_dif_tuple *dif_store(sector_t sector)
681 {
682         sector = sector_div(sector, sdebug_store_sectors);
683
684         return dif_storep + sector;
685 }
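/* Worked example (added, assuming the store ends up holding
 * dev_size_mb * 1048576 / sector_size sectors): with the defaults of
 * dev_size_mb=8 and sector_size=512 that is 16384 sectors, so
 * fake_store(20000) wraps to sector 20000 % 16384 = 3616, i.e. byte offset
 * 3616 * 512 = 1851392 into fake_storep. This wrap-around is what lets
 * virtual_gb advertise a capacity larger than the real ramdisk.
 */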
686
687 static int sdebug_add_adapter(void);
688 static void sdebug_remove_adapter(void);
689
690 static void sdebug_max_tgts_luns(void)
691 {
692         struct sdebug_host_info *sdbg_host;
693         struct Scsi_Host *hpnt;
694
695         spin_lock(&sdebug_host_list_lock);
696         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
697                 hpnt = sdbg_host->shost;
698                 if ((hpnt->this_id >= 0) &&
699                     (sdebug_num_tgts > hpnt->this_id))
700                         hpnt->max_id = sdebug_num_tgts + 1;
701                 else
702                         hpnt->max_id = sdebug_num_tgts;
703                 /* sdebug_max_luns; */
704                 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
705         }
706         spin_unlock(&sdebug_host_list_lock);
707 }
708
709 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
710
711 /* Set in_bit to -1 to indicate no bit position of invalid field */
712 static void
713 mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
714                      int in_byte, int in_bit)
715 {
716         unsigned char *sbuff;
717         u8 sks[4];
718         int sl, asc;
719
720         sbuff = scp->sense_buffer;
721         if (!sbuff) {
722                 sdev_printk(KERN_ERR, scp->device,
723                             "%s: sense_buffer is NULL\n", __func__);
724                 return;
725         }
726         asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
727         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
728         scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
729         memset(sks, 0, sizeof(sks));
730         sks[0] = 0x80;
731         if (c_d)
732                 sks[0] |= 0x40;
733         if (in_bit >= 0) {
734                 sks[0] |= 0x8;
735                 sks[0] |= 0x7 & in_bit;
736         }
737         put_unaligned_be16(in_byte, sks + 1);
738         if (sdebug_dsense) {
739                 sl = sbuff[7] + 8;
740                 sbuff[7] = sl;
741                 sbuff[sl] = 0x2;
742                 sbuff[sl + 1] = 0x6;
743                 memcpy(sbuff + sl + 4, sks, 3);
744         } else
745                 memcpy(sbuff + 15, sks, 3);
746         if (sdebug_verbose)
747                 sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
748                             "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
749                             my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
750 }
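/* Worked example (illustrative): mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1)
 * builds the SKS bytes 0xc9 0x00 0x01: 0x80 (SKSV) | 0x40 (C/D, error is in
 * the CDB) | 0x08 (BPV) | 0x01 (bit 1), with the field pointer naming byte 1.
 */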
751
752 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
753 {
754         unsigned char *sbuff;
755
756         sbuff = scp->sense_buffer;
757         if (!sbuff) {
758                 sdev_printk(KERN_ERR, scp->device,
759                             "%s: sense_buffer is NULL\n", __func__);
760                 return;
761         }
762         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
763
764         scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
765
766         if (sdebug_verbose)
767                 sdev_printk(KERN_INFO, scp->device,
768                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
769                             my_name, key, asc, asq);
770 }
771
772 static void
773 mk_sense_invalid_opcode(struct scsi_cmnd *scp)
774 {
775         mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
776 }
777
778 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
779 {
780         if (sdebug_verbose) {
781                 if (0x1261 == cmd)
782                         sdev_printk(KERN_INFO, dev,
783                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
784                 else if (0x5331 == cmd)
785                         sdev_printk(KERN_INFO, dev,
786                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
787                                     __func__);
788                 else
789                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
790                                     __func__, cmd);
791         }
792         return -EINVAL;
793         /* return -ENOTTY; // correct return but upsets fdisk */
794 }
795
796 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
797 {
798         struct sdebug_host_info *sdhp;
799         struct sdebug_dev_info *dp;
800
801         spin_lock(&sdebug_host_list_lock);
802         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
803                 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
804                         if ((devip->sdbg_host == dp->sdbg_host) &&
805                             (devip->target == dp->target))
806                                 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
807                 }
808         }
809         spin_unlock(&sdebug_host_list_lock);
810 }
811
812 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
813                            struct sdebug_dev_info * devip)
814 {
815         int k;
816
817         k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
818         if (k != SDEBUG_NUM_UAS) {
819                 const char *cp = NULL;
820
821                 switch (k) {
822                 case SDEBUG_UA_POR:
823                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
824                                         UA_RESET_ASC, POWER_ON_RESET_ASCQ);
825                         if (sdebug_verbose)
826                                 cp = "power on reset";
827                         break;
828                 case SDEBUG_UA_BUS_RESET:
829                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
830                                         UA_RESET_ASC, BUS_RESET_ASCQ);
831                         if (sdebug_verbose)
832                                 cp = "bus reset";
833                         break;
834                 case SDEBUG_UA_MODE_CHANGED:
835                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
836                                         UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
837                         if (sdebug_verbose)
838                                 cp = "mode parameters changed";
839                         break;
840                 case SDEBUG_UA_CAPACITY_CHANGED:
841                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
842                                         UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
843                         if (sdebug_verbose)
844                                 cp = "capacity data changed";
845                         break;
846                 case SDEBUG_UA_MICROCODE_CHANGED:
847                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
848                                  TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ);
849                         if (sdebug_verbose)
850                                 cp = "microcode has been changed";
851                         break;
852                 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
853                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
854                                         TARGET_CHANGED_ASC,
855                                         MICROCODE_CHANGED_WO_RESET_ASCQ);
856                         if (sdebug_verbose)
857                                 cp = "microcode has been changed without reset";
858                         break;
859                 case SDEBUG_UA_LUNS_CHANGED:
860                         /*
861                          * SPC-3 behavior is to report a UNIT ATTENTION with
862                          * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
863                          * on the target, until a REPORT LUNS command is
864                          * received.  SPC-4 behavior is to report it only once.
865                          * NOTE:  sdebug_scsi_level does not use the same
866                          * values as struct scsi_device->scsi_level.
867                          */
868                         if (sdebug_scsi_level >= 6)     /* SPC-4 and above */
869                                 clear_luns_changed_on_target(devip);
870                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
871                                         TARGET_CHANGED_ASC,
872                                         LUNS_CHANGED_ASCQ);
873                         if (sdebug_verbose)
874                                 cp = "reported luns data has changed";
875                         break;
876                 default:
877                         pr_warn("unexpected unit attention code=%d\n", k);
878                         if (sdebug_verbose)
879                                 cp = "unknown";
880                         break;
881                 }
882                 clear_bit(k, devip->uas_bm);
883                 if (sdebug_verbose)
884                         sdev_printk(KERN_INFO, SCpnt->device,
885                                    "%s reports: Unit attention: %s\n",
886                                    my_name, cp);
887                 return check_condition_result;
888         }
889         if ((UAS_TUR == uas_only) && devip->stopped) {
890                 mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
891                                 0x2);
892                 if (sdebug_verbose)
893                         sdev_printk(KERN_INFO, SCpnt->device,
894                                     "%s reports: Not ready: %s\n", my_name,
895                                     "initializing command required");
896                 return check_condition_result;
897         }
898         return 0;
899 }
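/* Example of the above (added for illustration): right after a scsi_debug
 * device is created, SDEBUG_UA_POR is pending, so the first command that does
 * not skip UA checking (INQUIRY, REPORT LUNS and REQUEST SENSE carry
 * F_SKIP_UA and are exempt) gets CHECK CONDITION with sense key UNIT
 * ATTENTION and asc/ascq 0x29/0x00 ("power on, reset, or bus device reset");
 * the bit is then cleared, so a retry of the same command succeeds.
 */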
900
901 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
902 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
903                                 int arr_len)
904 {
905         int act_len;
906         struct scsi_data_buffer *sdb = scsi_in(scp);
907
908         if (!sdb->length)
909                 return 0;
910         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
911                 return DID_ERROR << 16;
912
913         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
914                                       arr, arr_len);
915         sdb->resid = scsi_bufflen(scp) - act_len;
916
917         return 0;
918 }
919
920 /* Returns number of bytes fetched into 'arr' or -1 if error. */
921 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
922                                int arr_len)
923 {
924         if (!scsi_bufflen(scp))
925                 return 0;
926         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
927                 return -1;
928
929         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
930 }
931
932
933 static const char * inq_vendor_id = "Linux   ";
934 static const char * inq_product_id = "scsi_debug      ";
935 static const char *inq_product_rev = "0186";    /* version less '.' */
936 static const u64 naa5_comp_a = 0x5222222000000000ULL;
937 static const u64 naa5_comp_b = 0x5333333000000000ULL;
938 static const u64 naa5_comp_c = 0x5111111000000000ULL;
939
940 /* Device identification VPD page. Returns number of bytes placed in arr */
941 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
942                            int target_dev_id, int dev_id_num,
943                            const char * dev_id_str,
944                            int dev_id_str_len)
945 {
946         int num, port_a;
947         char b[32];
948
949         port_a = target_dev_id + 1;
950         /* T10 vendor identifier field format (faked) */
951         arr[0] = 0x2;   /* ASCII */
952         arr[1] = 0x1;
953         arr[2] = 0x0;
954         memcpy(&arr[4], inq_vendor_id, 8);
955         memcpy(&arr[12], inq_product_id, 16);
956         memcpy(&arr[28], dev_id_str, dev_id_str_len);
957         num = 8 + 16 + dev_id_str_len;
958         arr[3] = num;
959         num += 4;
960         if (dev_id_num >= 0) {
961                 /* NAA-5, Logical unit identifier (binary) */
962                 arr[num++] = 0x1;       /* binary (not necessarily sas) */
963                 arr[num++] = 0x3;       /* PIV=0, lu, naa */
964                 arr[num++] = 0x0;
965                 arr[num++] = 0x8;
966                 put_unaligned_be64(naa5_comp_b + dev_id_num, arr + num);
967                 num += 8;
968                 /* Target relative port number */
969                 arr[num++] = 0x61;      /* proto=sas, binary */
970                 arr[num++] = 0x94;      /* PIV=1, target port, rel port */
971                 arr[num++] = 0x0;       /* reserved */
972                 arr[num++] = 0x4;       /* length */
973                 arr[num++] = 0x0;       /* reserved */
974                 arr[num++] = 0x0;       /* reserved */
975                 arr[num++] = 0x0;
976                 arr[num++] = 0x1;       /* relative port A */
977         }
978         /* NAA-5, Target port identifier */
979         arr[num++] = 0x61;      /* proto=sas, binary */
980         arr[num++] = 0x93;      /* piv=1, target port, naa */
981         arr[num++] = 0x0;
982         arr[num++] = 0x8;
983         put_unaligned_be64(naa5_comp_a + port_a, arr + num);
984         num += 8;
985         /* NAA-5, Target port group identifier */
986         arr[num++] = 0x61;      /* proto=sas, binary */
987         arr[num++] = 0x95;      /* piv=1, target port group id */
988         arr[num++] = 0x0;
989         arr[num++] = 0x4;
990         arr[num++] = 0;
991         arr[num++] = 0;
992         put_unaligned_be16(port_group_id, arr + num);
993         num += 2;
994         /* NAA-5, Target device identifier */
995         arr[num++] = 0x61;      /* proto=sas, binary */
996         arr[num++] = 0xa3;      /* piv=1, target device, naa */
997         arr[num++] = 0x0;
998         arr[num++] = 0x8;
999         put_unaligned_be64(naa5_comp_a + target_dev_id, arr + num);
1000         num += 8;
1001         /* SCSI name string: Target device identifier */
1002         arr[num++] = 0x63;      /* proto=sas, UTF-8 */
1003         arr[num++] = 0xa8;      /* piv=1, target device, SCSI name string */
1004         arr[num++] = 0x0;
1005         arr[num++] = 24;
1006         memcpy(arr + num, "naa.52222220", 12);
1007         num += 12;
1008         snprintf(b, sizeof(b), "%08X", target_dev_id);
1009         memcpy(arr + num, b, 8);
1010         num += 8;
1011         memset(arr + num, 0, 4);
1012         num += 4;
1013         return num;
1014 }
1015
1016 static unsigned char vpd84_data[] = {
1017 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1018     0x22,0x22,0x22,0x0,0xbb,0x1,
1019     0x22,0x22,0x22,0x0,0xbb,0x2,
1020 };
1021
1022 /*  Software interface identification VPD page */
1023 static int inquiry_evpd_84(unsigned char * arr)
1024 {
1025         memcpy(arr, vpd84_data, sizeof(vpd84_data));
1026         return sizeof(vpd84_data);
1027 }
1028
1029 /* Management network addresses VPD page */
1030 static int inquiry_evpd_85(unsigned char * arr)
1031 {
1032         int num = 0;
1033         const char * na1 = "https://www.kernel.org/config";
1034         const char * na2 = "http://www.kernel.org/log";
1035         int plen, olen;
1036
1037         arr[num++] = 0x1;       /* lu, storage config */
1038         arr[num++] = 0x0;       /* reserved */
1039         arr[num++] = 0x0;
1040         olen = strlen(na1);
1041         plen = olen + 1;
1042         if (plen % 4)
1043                 plen = ((plen / 4) + 1) * 4;
1044         arr[num++] = plen;      /* length, null terminated, padded */
1045         memcpy(arr + num, na1, olen);
1046         memset(arr + num + olen, 0, plen - olen);
1047         num += plen;
1048
1049         arr[num++] = 0x4;       /* lu, logging */
1050         arr[num++] = 0x0;       /* reserved */
1051         arr[num++] = 0x0;
1052         olen = strlen(na2);
1053         plen = olen + 1;
1054         if (plen % 4)
1055                 plen = ((plen / 4) + 1) * 4;
1056         arr[num++] = plen;      /* length, null terminated, padded */
1057         memcpy(arr + num, na2, olen);
1058         memset(arr + num + olen, 0, plen - olen);
1059         num += plen;
1060
1061         return num;
1062 }
1063
1064 /* SCSI ports VPD page */
1065 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
1066 {
1067         int num = 0;
1068         int port_a, port_b;
1069
1070         port_a = target_dev_id + 1;
1071         port_b = port_a + 1;
1072         arr[num++] = 0x0;       /* reserved */
1073         arr[num++] = 0x0;       /* reserved */
1074         arr[num++] = 0x0;
1075         arr[num++] = 0x1;       /* relative port 1 (primary) */
1076         memset(arr + num, 0, 6);
1077         num += 6;
1078         arr[num++] = 0x0;
1079         arr[num++] = 12;        /* length tp descriptor */
1080         /* naa-5 target port identifier (A) */
1081         arr[num++] = 0x61;      /* proto=sas, binary */
1082         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1083         arr[num++] = 0x0;       /* reserved */
1084         arr[num++] = 0x8;       /* length */
1085         put_unaligned_be64(naa5_comp_a + port_a, arr + num);
1086         num += 8;
1087         arr[num++] = 0x0;       /* reserved */
1088         arr[num++] = 0x0;       /* reserved */
1089         arr[num++] = 0x0;
1090         arr[num++] = 0x2;       /* relative port 2 (secondary) */
1091         memset(arr + num, 0, 6);
1092         num += 6;
1093         arr[num++] = 0x0;
1094         arr[num++] = 12;        /* length tp descriptor */
1095         /* naa-5 target port identifier (B) */
1096         arr[num++] = 0x61;      /* proto=sas, binary */
1097         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1098         arr[num++] = 0x0;       /* reserved */
1099         arr[num++] = 0x8;       /* length */
1100         put_unaligned_be64(naa5_comp_a + port_b, arr + num);
1101         num += 8;
1102
1103         return num;
1104 }
1105
1106
1107 static unsigned char vpd89_data[] = {
1108 /* from 4th byte */ 0,0,0,0,
1109 'l','i','n','u','x',' ',' ',' ',
1110 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1111 '1','2','3','4',
1112 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1113 0xec,0,0,0,
1114 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1115 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1116 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1117 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1118 0x53,0x41,
1119 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1120 0x20,0x20,
1121 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1122 0x10,0x80,
1123 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1124 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1125 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1126 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1127 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1128 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1129 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1130 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1131 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1132 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1133 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1134 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1135 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1136 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1137 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1138 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1139 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1140 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1141 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1142 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1143 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1144 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1145 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1146 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1147 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1148 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1149 };
1150
1151 /* ATA Information VPD page */
1152 static int inquiry_evpd_89(unsigned char * arr)
1153 {
1154         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1155         return sizeof(vpd89_data);
1156 }
1157
1158
1159 static unsigned char vpdb0_data[] = {
1160         /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1161         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1162         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1163         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1164 };
1165
1166 /* Block limits VPD page (SBC-3) */
1167 static int inquiry_evpd_b0(unsigned char * arr)
1168 {
1169         unsigned int gran;
1170
1171         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1172
1173         /* Optimal transfer length granularity */
1174         gran = 1 << sdebug_physblk_exp;
1175         put_unaligned_be16(gran, arr + 2);
1176
1177         /* Maximum Transfer Length */
1178         if (sdebug_store_sectors > 0x400)
1179                 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1180
1181         /* Optimal Transfer Length */
1182         put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1183
1184         if (sdebug_lbpu) {
1185                 /* Maximum Unmap LBA Count */
1186                 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1187
1188                 /* Maximum Unmap Block Descriptor Count */
1189                 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1190         }
1191
1192         /* Unmap Granularity Alignment */
1193         if (sdebug_unmap_alignment) {
1194                 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1195                 arr[28] |= 0x80; /* UGAVALID */
1196         }
1197
1198         /* Optimal Unmap Granularity */
1199         put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1200
1201         /* Maximum WRITE SAME Length */
1202         put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1203
1204         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1207 }
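/* Quick way to inspect this page (illustrative, assumes sg3_utils is
 * installed):
 *     sg_vpd --page=bl /dev/sdX
 * should show an optimal transfer length granularity of 1 << physblk_exp and
 * an optimal transfer length of opt_blks (1024 blocks by default).
 */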
1208
1209 /* Block device characteristics VPD page (SBC-3) */
1210 static int inquiry_evpd_b1(unsigned char *arr)
1211 {
1212         memset(arr, 0, 0x3c);
1213         arr[0] = 0;
1214         arr[1] = 1;     /* non rotating medium (e.g. solid state) */
1215         arr[2] = 0;
1216         arr[3] = 5;     /* less than 1.8" */
1217
1218         return 0x3c;
1219 }
1220
1221 /* Logical block provisioning VPD page (SBC-3) */
1222 static int inquiry_evpd_b2(unsigned char *arr)
1223 {
1224         memset(arr, 0, 0x4);
1225         arr[0] = 0;                     /* threshold exponent */
1226
1227         if (sdebug_lbpu)
1228                 arr[1] = 1 << 7;
1229
1230         if (sdebug_lbpws)
1231                 arr[1] |= 1 << 6;
1232
1233         if (sdebug_lbpws10)
1234                 arr[1] |= 1 << 5;
1235
1236         if (sdebug_lbprz)
1237                 arr[1] |= 1 << 2;
1238
1239         return 0x4;
1240 }
1241
1242 #define SDEBUG_LONG_INQ_SZ 96
1243 #define SDEBUG_MAX_INQ_ARR_SZ 584
1244
1245 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1246 {
1247         unsigned char pq_pdt;
1248         unsigned char * arr;
1249         unsigned char *cmd = scp->cmnd;
1250         int alloc_len, n, ret;
1251         bool have_wlun;
1252
1253         alloc_len = get_unaligned_be16(cmd + 3);
1254         arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1255         if (! arr)
1256                 return DID_REQUEUE << 16;
1257         have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS);
1258         if (have_wlun)
1259                 pq_pdt = 0x1e;  /* present, wlun */
1260         else if (sdebug_no_lun_0 && (0 == devip->lun))
1261                 pq_pdt = 0x7f;  /* not present, no device type */
1262         else
1263                 pq_pdt = (sdebug_ptype & 0x1f);
1264         arr[0] = pq_pdt;
1265         if (0x2 & cmd[1]) {  /* CMDDT bit set */
1266                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1267                 kfree(arr);
1268                 return check_condition_result;
1269         } else if (0x1 & cmd[1]) {  /* EVPD bit set */
1270                 int lu_id_num, port_group_id, target_dev_id, len;
1271                 char lu_id_str[6];
1272                 int host_no = devip->sdbg_host->shost->host_no;
1273                 
1274                 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1275                     (devip->channel & 0x7f);
1276                 if (0 == sdebug_vpd_use_hostno)
1277                         host_no = 0;
1278                 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1279                             (devip->target * 1000) + devip->lun);
1280                 target_dev_id = ((host_no + 1) * 2000) +
1281                                  (devip->target * 1000) - 3;
1282                 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1283                 if (0 == cmd[2]) { /* supported vital product data pages */
1284                         arr[1] = cmd[2];        /*sanity */
1285                         n = 4;
1286                         arr[n++] = 0x0;   /* this page */
1287                         arr[n++] = 0x80;  /* unit serial number */
1288                         arr[n++] = 0x83;  /* device identification */
1289                         arr[n++] = 0x84;  /* software interface ident. */
1290                         arr[n++] = 0x85;  /* management network addresses */
1291                         arr[n++] = 0x86;  /* extended inquiry */
1292                         arr[n++] = 0x87;  /* mode page policy */
1293                         arr[n++] = 0x88;  /* SCSI ports */
1294                         arr[n++] = 0x89;  /* ATA information */
1295                         arr[n++] = 0xb0;  /* Block limits (SBC) */
1296                         arr[n++] = 0xb1;  /* Block characteristics (SBC) */
1297                         if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1298                                 arr[n++] = 0xb2;
1299                         arr[3] = n - 4;   /* number of supported VPD pages */
1300                 } else if (0x80 == cmd[2]) { /* unit serial number */
1301                         arr[1] = cmd[2];        /*sanity */
1302                         arr[3] = len;
1303                         memcpy(&arr[4], lu_id_str, len);
1304                 } else if (0x83 == cmd[2]) { /* device identification */
1305                         arr[1] = cmd[2];        /*sanity */
1306                         arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
1307                                                  target_dev_id, lu_id_num,
1308                                                  lu_id_str, len);
1309                 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1310                         arr[1] = cmd[2];        /*sanity */
1311                         arr[3] = inquiry_evpd_84(&arr[4]);
1312                 } else if (0x85 == cmd[2]) { /* Management network addresses */
1313                         arr[1] = cmd[2];        /*sanity */
1314                         arr[3] = inquiry_evpd_85(&arr[4]);
1315                 } else if (0x86 == cmd[2]) { /* extended inquiry */
1316                         arr[1] = cmd[2];        /*sanity */
1317                         arr[3] = 0x3c;  /* number of following entries */
1318                         if (sdebug_dif == SD_DIF_TYPE3_PROTECTION)
1319                                 arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
1320                         else if (sdebug_dif)
1321                                 arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1322                         else
1323                                 arr[4] = 0x0;   /* no protection stuff */
1324                         arr[5] = 0x7;   /* head of q, ordered + simple q's */
1325                 } else if (0x87 == cmd[2]) { /* mode page policy */
1326                         arr[1] = cmd[2];        /*sanity */
1327                         arr[3] = 0x8;   /* number of following entries */
1328                         arr[4] = 0x2;   /* disconnect-reconnect mp */
1329                         arr[6] = 0x80;  /* mlus, shared */
1330                         arr[8] = 0x18;   /* protocol specific lu */
1331                         arr[10] = 0x82;  /* mlus, per initiator port */
1332                 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1333                         arr[1] = cmd[2];        /*sanity */
1334                         arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1335                 } else if (0x89 == cmd[2]) { /* ATA information */
1336                         arr[1] = cmd[2];        /*sanity */
1337                         n = inquiry_evpd_89(&arr[4]);
1338                         put_unaligned_be16(n, arr + 2);
1339                 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1340                         arr[1] = cmd[2];        /*sanity */
1341                         arr[3] = inquiry_evpd_b0(&arr[4]);
1342                 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1343                         arr[1] = cmd[2];        /*sanity */
1344                         arr[3] = inquiry_evpd_b1(&arr[4]);
1345                 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1346                         arr[1] = cmd[2];        /*sanity */
1347                         arr[3] = inquiry_evpd_b2(&arr[4]);
1348                 } else {
1349                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1350                         kfree(arr);
1351                         return check_condition_result;
1352                 }
1353                 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1354                 ret = fill_from_dev_buffer(scp, arr,
1355                             min(len, SDEBUG_MAX_INQ_ARR_SZ));
1356                 kfree(arr);
1357                 return ret;
1358         }
1359         /* drops through here for a standard inquiry */
1360         arr[1] = sdebug_removable ? 0x80 : 0;   /* Removable disk */
1361         arr[2] = sdebug_scsi_level;
1362         arr[3] = 2;    /* response_data_format==2 */
1363         arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1364         arr[5] = sdebug_dif ? 1 : 0; /* PROTECT bit */
1365         if (0 == sdebug_vpd_use_hostno)
1366                 arr[5] |= 0x10; /* claim: implicit TGPS */
1367         arr[6] = 0x10; /* claim: MultiP */
1368         /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1369         arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1370         memcpy(&arr[8], inq_vendor_id, 8);
1371         memcpy(&arr[16], inq_product_id, 16);
1372         memcpy(&arr[32], inq_product_rev, 4);
1373         /* version descriptors (2 bytes each) follow */
1374         arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
1375         arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
1376         n = 62;
1377         if (sdebug_ptype == 0) {
1378                 arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1379         } else if (sdebug_ptype == 1) {
1380                 arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1381         }
1382         arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
1383         ret = fill_from_dev_buffer(scp, arr,
1384                             min(alloc_len, SDEBUG_LONG_INQ_SZ));
1385         kfree(arr);
1386         return ret;
1387 }
1388
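/* Respond to REQUEST SENSE. If the Informational Exceptions mode page has
 * TEST=1 and MRIE=6, a THRESHOLD EXCEEDED condition is fabricated;
 * otherwise the current sense buffer is returned, converted between fixed
 * (0x70) and descriptor (0x72) format according to the DESC bit. */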
1389 static int resp_requests(struct scsi_cmnd * scp,
1390                          struct sdebug_dev_info * devip)
1391 {
1392         unsigned char * sbuff;
1393         unsigned char *cmd = scp->cmnd;
1394         unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1395         bool dsense;
1396         int len = 18;
1397
1398         memset(arr, 0, sizeof(arr));
1399         dsense = !!(cmd[1] & 1);
1400         sbuff = scp->sense_buffer;
1401         if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1402                 if (dsense) {
1403                         arr[0] = 0x72;
1404                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
1405                         arr[2] = THRESHOLD_EXCEEDED;
1406                         arr[3] = 0xff;          /* TEST set and MRIE==6 */
1407                         len = 8;
1408                 } else {
1409                         arr[0] = 0x70;
1410                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
1411                         arr[7] = 0xa;           /* 18 byte sense buffer */
1412                         arr[12] = THRESHOLD_EXCEEDED;
1413                         arr[13] = 0xff;         /* TEST set and MRIE==6 */
1414                 }
1415         } else {
1416                 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1417                 if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1418                         ;       /* have sense and formats match */
1419                 else if (arr[0] <= 0x70) {
1420                         if (dsense) {
1421                                 memset(arr, 0, 8);
1422                                 arr[0] = 0x72;
1423                                 len = 8;
1424                         } else {
1425                                 memset(arr, 0, 18);
1426                                 arr[0] = 0x70;
1427                                 arr[7] = 0xa;
1428                         }
1429                 } else if (dsense) {
1430                         memset(arr, 0, 8);
1431                         arr[0] = 0x72;
1432                         arr[1] = sbuff[2];     /* sense key */
1433                         arr[2] = sbuff[12];    /* asc */
1434                         arr[3] = sbuff[13];    /* ascq */
1435                         len = 8;
1436                 } else {
1437                         memset(arr, 0, 18);
1438                         arr[0] = 0x70;
1439                         arr[2] = sbuff[1];     /* sense key */
1440                         arr[7] = 0xa;
1441                         arr[12] = sbuff[2];    /* asc */
1442                         arr[13] = sbuff[3];    /* ascq */
1443                 }
1444
1445         }
1446         mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1447         return fill_from_dev_buffer(scp, arr, len);
1448 }
1449
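/* Respond to START STOP UNIT. Only power condition 0 is accepted; the
 * START bit in byte 4 toggles the device's simulated stopped state. */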
1450 static int resp_start_stop(struct scsi_cmnd * scp,
1451                            struct sdebug_dev_info * devip)
1452 {
1453         unsigned char *cmd = scp->cmnd;
1454         int power_cond, start;
1455
1456         power_cond = (cmd[4] & 0xf0) >> 4;
1457         if (power_cond) {
1458                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1459                 return check_condition_result;
1460         }
1461         start = cmd[4] & 1;
1462         if (start == devip->stopped)
1463                 devip->stopped = !start;
1464         return 0;
1465 }
1466
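/* Capacity presented to the initiator: either the size implied by the
 * virtual_gb parameter or the number of sectors actually backed by the
 * fake store. */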
1467 static sector_t get_sdebug_capacity(void)
1468 {
1469         static const unsigned int gibibyte = 1073741824;
1470
1471         if (sdebug_virtual_gb > 0)
1472                 return (sector_t)sdebug_virtual_gb *
1473                         (gibibyte / sdebug_sector_size);
1474         else
1475                 return sdebug_store_sectors;
1476 }
1477
1478 #define SDEBUG_READCAP_ARR_SZ 8
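/* Respond to READ CAPACITY(10). If the capacity does not fit in 32 bits,
 * the returned LBA field is set to 0xffffffff so the initiator falls back
 * to READ CAPACITY(16). */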
1479 static int resp_readcap(struct scsi_cmnd * scp,
1480                         struct sdebug_dev_info * devip)
1481 {
1482         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1483         unsigned int capac;
1484
1485         /* following just in case virtual_gb changed */
1486         sdebug_capacity = get_sdebug_capacity();
1487         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1488         if (sdebug_capacity < 0xffffffff) {
1489                 capac = (unsigned int)sdebug_capacity - 1;
1490                 put_unaligned_be32(capac, arr + 0);
1491         } else
1492                 put_unaligned_be32(0xffffffff, arr + 0);
1493         put_unaligned_be16(sdebug_sector_size, arr + 6);
1494         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1495 }
1496
1497 #define SDEBUG_READCAP16_ARR_SZ 32
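/* Respond to READ CAPACITY(16): reports the highest LBA, logical block
 * size, physical block exponent and lowest aligned LBA, and advertises
 * logical block provisioning (LBPME/LBPRZ) and protection (P_TYPE/PROT_EN)
 * when the corresponding module parameters are set. */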
1498 static int resp_readcap16(struct scsi_cmnd * scp,
1499                           struct sdebug_dev_info * devip)
1500 {
1501         unsigned char *cmd = scp->cmnd;
1502         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1503         int alloc_len;
1504
1505         alloc_len = get_unaligned_be32(cmd + 10);
1506         /* following just in case virtual_gb changed */
1507         sdebug_capacity = get_sdebug_capacity();
1508         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1509         put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1510         put_unaligned_be32(sdebug_sector_size, arr + 8);
1511         arr[13] = sdebug_physblk_exp & 0xf;
1512         arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1513
1514         if (scsi_debug_lbp()) {
1515                 arr[14] |= 0x80; /* LBPME */
1516                 if (sdebug_lbprz)
1517                         arr[14] |= 0x40; /* LBPRZ */
1518         }
1519
1520         arr[15] = sdebug_lowest_aligned & 0xff;
1521
1522         if (sdebug_dif) {
1523                 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1524                 arr[12] |= 1; /* PROT_EN */
1525         }
1526
1527         return fill_from_dev_buffer(scp, arr,
1528                                     min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1529 }
1530
1531 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1532
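/* Respond to REPORT TARGET PORT GROUPS. Two groups of one port each are
 * reported, matching the two-port claim of VPD page 0x88; the group
 * containing port B is reported as unavailable. */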
1533 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1534                               struct sdebug_dev_info * devip)
1535 {
1536         unsigned char *cmd = scp->cmnd;
1537         unsigned char * arr;
1538         int host_no = devip->sdbg_host->shost->host_no;
1539         int n, ret, alen, rlen;
1540         int port_group_a, port_group_b, port_a, port_b;
1541
1542         alen = get_unaligned_be32(cmd + 6);
1543         arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1544         if (!arr)
1545                 return DID_REQUEUE << 16;
1546         /*
1547          * EVPD page 0x88 states we have two ports, one
1548          * real and a fake port with no device connected.
1549          * So we create two port groups with one port each
1550          * and set the group with port B to unavailable.
1551          */
1552         port_a = 0x1; /* relative port A */
1553         port_b = 0x2; /* relative port B */
1554         port_group_a = (((host_no + 1) & 0x7f) << 8) +
1555                         (devip->channel & 0x7f);
1556         port_group_b = (((host_no + 1) & 0x7f) << 8) +
1557                         (devip->channel & 0x7f) + 0x80;
1558
1559         /*
1560          * The asymmetric access state is cycled according to the host_no.
1561          */
1562         n = 4;
1563         if (0 == sdebug_vpd_use_hostno) {
1564                 arr[n++] = host_no % 3; /* Asymm access state */
1565                 arr[n++] = 0x0F; /* claim: all states are supported */
1566         } else {
1567                 arr[n++] = 0x0; /* Active/Optimized path */
1568                 arr[n++] = 0x01; /* only support active/optimized paths */
1569         }
1570         put_unaligned_be16(port_group_a, arr + n);
1571         n += 2;
1572         arr[n++] = 0;    /* Reserved */
1573         arr[n++] = 0;    /* Status code */
1574         arr[n++] = 0;    /* Vendor unique */
1575         arr[n++] = 0x1;  /* One port per group */
1576         arr[n++] = 0;    /* Reserved */
1577         arr[n++] = 0;    /* Reserved */
1578         put_unaligned_be16(port_a, arr + n);
1579         n += 2;
1580         arr[n++] = 3;    /* Port unavailable */
1581         arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1582         put_unaligned_be16(port_group_b, arr + n);
1583         n += 2;
1584         arr[n++] = 0;    /* Reserved */
1585         arr[n++] = 0;    /* Status code */
1586         arr[n++] = 0;    /* Vendor unique */
1587         arr[n++] = 0x1;  /* One port per group */
1588         arr[n++] = 0;    /* Reserved */
1589         arr[n++] = 0;    /* Reserved */
1590         put_unaligned_be16(port_b, arr + n);
1591         n += 2;
1592
1593         rlen = n - 4;
1594         put_unaligned_be32(rlen, arr + 0);
1595
1596         /*
1597          * Return the smallest value of either
1598          * - The allocated length
1599          * - The constructed command length
1600          * - The maximum array size
1601          */
1602         rlen = min(alen, n);
1603         ret = fill_from_dev_buffer(scp, arr,
1604                                    min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1605         kfree(arr);
1606         return ret;
1607 }
1608
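/* Respond to REPORT SUPPORTED OPERATION CODES. Reporting option 0 lists
 * every supported opcode (plus attached service actions) from
 * opcode_info_arr; options 1 to 3 describe a single requested opcode, with
 * the CDB usage bitmap taken from len_mask[]. When RCTD is set a command
 * timeouts descriptor is appended to each entry. */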
1609 static int
1610 resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1611 {
1612         bool rctd;
1613         u8 reporting_opts, req_opcode, sdeb_i, supp;
1614         u16 req_sa, u;
1615         u32 alloc_len, a_len;
1616         int k, offset, len, errsts, count, bump, na;
1617         const struct opcode_info_t *oip;
1618         const struct opcode_info_t *r_oip;
1619         u8 *arr;
1620         u8 *cmd = scp->cmnd;
1621
1622         rctd = !!(cmd[2] & 0x80);
1623         reporting_opts = cmd[2] & 0x7;
1624         req_opcode = cmd[3];
1625         req_sa = get_unaligned_be16(cmd + 4);
1626         alloc_len = get_unaligned_be32(cmd + 6);
1627         if (alloc_len < 4 || alloc_len > 0xffff) {
1628                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1629                 return check_condition_result;
1630         }
1631         if (alloc_len > 8192)
1632                 a_len = 8192;
1633         else
1634                 a_len = alloc_len;
1635         arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1636         if (NULL == arr) {
1637                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1638                                 INSUFF_RES_ASCQ);
1639                 return check_condition_result;
1640         }
1641         switch (reporting_opts) {
1642         case 0: /* all commands */
1643                 /* count number of commands */
1644                 for (count = 0, oip = opcode_info_arr;
1645                      oip->num_attached != 0xff; ++oip) {
1646                         if (F_INV_OP & oip->flags)
1647                                 continue;
1648                         count += (oip->num_attached + 1);
1649                 }
1650                 bump = rctd ? 20 : 8;
1651                 put_unaligned_be32(count * bump, arr);
1652                 for (offset = 4, oip = opcode_info_arr;
1653                      oip->num_attached != 0xff && offset < a_len; ++oip) {
1654                         if (F_INV_OP & oip->flags)
1655                                 continue;
1656                         na = oip->num_attached;
1657                         arr[offset] = oip->opcode;
1658                         put_unaligned_be16(oip->sa, arr + offset + 2);
1659                         if (rctd)
1660                                 arr[offset + 5] |= 0x2;
1661                         if (FF_SA & oip->flags)
1662                                 arr[offset + 5] |= 0x1;
1663                         put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1664                         if (rctd)
1665                                 put_unaligned_be16(0xa, arr + offset + 8);
1666                         r_oip = oip;
1667                         for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1668                                 if (F_INV_OP & oip->flags)
1669                                         continue;
1670                                 offset += bump;
1671                                 arr[offset] = oip->opcode;
1672                                 put_unaligned_be16(oip->sa, arr + offset + 2);
1673                                 if (rctd)
1674                                         arr[offset + 5] |= 0x2;
1675                                 if (FF_SA & oip->flags)
1676                                         arr[offset + 5] |= 0x1;
1677                                 put_unaligned_be16(oip->len_mask[0],
1678                                                    arr + offset + 6);
1679                                 if (rctd)
1680                                         put_unaligned_be16(0xa,
1681                                                            arr + offset + 8);
1682                         }
1683                         oip = r_oip;
1684                         offset += bump;
1685                 }
1686                 break;
1687         case 1: /* one command: opcode only */
1688         case 2: /* one command: opcode plus service action */
1689         case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1690                 sdeb_i = opcode_ind_arr[req_opcode];
1691                 oip = &opcode_info_arr[sdeb_i];
1692                 if (F_INV_OP & oip->flags) {
1693                         supp = 1;
1694                         offset = 4;
1695                 } else {
1696                         if (1 == reporting_opts) {
1697                                 if (FF_SA & oip->flags) {
1698                                         mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1699                                                              2, 2);
1700                                         kfree(arr);
1701                                         return check_condition_result;
1702                                 }
1703                                 req_sa = 0;
1704                         } else if (2 == reporting_opts &&
1705                                    0 == (FF_SA & oip->flags)) {
1706                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1707                                 kfree(arr);     /* point at requested sa */
1708                                 return check_condition_result;
1709                         }
1710                         if (0 == (FF_SA & oip->flags) &&
1711                             req_opcode == oip->opcode)
1712                                 supp = 3;
1713                         else if (0 == (FF_SA & oip->flags)) {
1714                                 na = oip->num_attached;
1715                                 for (k = 0, oip = oip->arrp; k < na;
1716                                      ++k, ++oip) {
1717                                         if (req_opcode == oip->opcode)
1718                                                 break;
1719                                 }
1720                                 supp = (k >= na) ? 1 : 3;
1721                         } else if (req_sa != oip->sa) {
1722                                 na = oip->num_attached;
1723                                 for (k = 0, oip = oip->arrp; k < na;
1724                                      ++k, ++oip) {
1725                                         if (req_sa == oip->sa)
1726                                                 break;
1727                                 }
1728                                 supp = (k >= na) ? 1 : 3;
1729                         } else
1730                                 supp = 3;
1731                         if (3 == supp) {
1732                                 u = oip->len_mask[0];
1733                                 put_unaligned_be16(u, arr + 2);
1734                                 arr[4] = oip->opcode;
1735                                 for (k = 1; k < u; ++k)
1736                                         arr[4 + k] = (k < 16) ?
1737                                                  oip->len_mask[k] : 0xff;
1738                                 offset = 4 + u;
1739                         } else
1740                                 offset = 4;
1741                 }
1742                 arr[1] = (rctd ? 0x80 : 0) | supp;
1743                 if (rctd) {
1744                         put_unaligned_be16(0xa, arr + offset);
1745                         offset += 12;
1746                 }
1747                 break;
1748         default:
1749                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1750                 kfree(arr);
1751                 return check_condition_result;
1752         }
1753         offset = (offset < a_len) ? offset : a_len;
1754         len = (offset < alloc_len) ? offset : alloc_len;
1755         errsts = fill_from_dev_buffer(scp, arr, len);
1756         kfree(arr);
1757         return errsts;
1758 }
1759
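/* Respond to REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS; claims support
 * for ABORT TASK, ABORT TASK SET, LOGICAL UNIT RESET and I_T NEXUS RESET. */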
1760 static int
1761 resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1762 {
1763         bool repd;
1764         u32 alloc_len, len;
1765         u8 arr[16];
1766         u8 *cmd = scp->cmnd;
1767
1768         memset(arr, 0, sizeof(arr));
1769         repd = !!(cmd[2] & 0x80);
1770         alloc_len = get_unaligned_be32(cmd + 6);
1771         if (alloc_len < 4) {
1772                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1773                 return check_condition_result;
1774         }
1775         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
1776         arr[1] = 0x1;           /* ITNRS */
1777         if (repd) {
1778                 arr[3] = 0xc;
1779                 len = 16;
1780         } else
1781                 len = 4;
1782
1783         len = (len < alloc_len) ? len : alloc_len;
1784         return fill_from_dev_buffer(scp, arr, len);
1785 }
1786
1787 /* <<Following mode page info copied from ST318451LW>> */
1788
1789 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1790 {       /* Read-Write Error Recovery page for mode_sense */
1791         unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1792                                         5, 0, 0xff, 0xff};
1793
1794         memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1795         if (1 == pcontrol)
1796                 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1797         return sizeof(err_recov_pg);
1798 }
1799
1800 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1801 {       /* Disconnect-Reconnect page for mode_sense */
1802         unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1803                                          0, 0, 0, 0, 0, 0, 0, 0};
1804
1805         memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1806         if (1 == pcontrol)
1807                 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1808         return sizeof(disconnect_pg);
1809 }
1810
1811 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1812 {       /* Format device page for mode_sense */
1813         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1814                                      0, 0, 0, 0, 0, 0, 0, 0,
1815                                      0, 0, 0, 0, 0x40, 0, 0, 0};
1816
1817         memcpy(p, format_pg, sizeof(format_pg));
1818         put_unaligned_be16(sdebug_sectors_per, p + 10);
1819         put_unaligned_be16(sdebug_sector_size, p + 12);
1820         if (sdebug_removable)
1821                 p[20] |= 0x20; /* should agree with INQUIRY */
1822         if (1 == pcontrol)
1823                 memset(p + 2, 0, sizeof(format_pg) - 2);
1824         return sizeof(format_pg);
1825 }
1826
1827 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1828 {       /* Caching page for mode_sense */
1829         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1830                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1831         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1832                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1833
1834         if (SDEBUG_OPT_N_WCE & sdebug_opts)
1835                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
1836         memcpy(p, caching_pg, sizeof(caching_pg));
1837         if (1 == pcontrol)
1838                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1839         else if (2 == pcontrol)
1840                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1841         return sizeof(caching_pg);
1842 }
1843
1844 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1845 {       /* Control mode page for mode_sense */
1846         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1847                                         0, 0, 0, 0};
1848         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1849                                      0, 0, 0x2, 0x4b};
1850
1851         if (sdebug_dsense)
1852                 ctrl_m_pg[2] |= 0x4;
1853         else
1854                 ctrl_m_pg[2] &= ~0x4;
1855
1856         if (sdebug_ato)
1857                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1858
1859         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1860         if (1 == pcontrol)
1861                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1862         else if (2 == pcontrol)
1863                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1864         return sizeof(ctrl_m_pg);
1865 }
1866
1867
1868 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1869 {       /* Informational Exceptions control mode page for mode_sense */
1870         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1871                                        0, 0, 0x0, 0x0};
1872         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1873                                       0, 0, 0x0, 0x0};
1874
1875         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1876         if (1 == pcontrol)
1877                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1878         else if (2 == pcontrol)
1879                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1880         return sizeof(iec_m_pg);
1881 }
1882
1883 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1884 {       /* SAS SSP mode page - short format for mode_sense */
1885         unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1886                 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1887
1888         memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1889         if (1 == pcontrol)
1890                 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1891         return sizeof(sas_sf_m_pg);
1892 }
1893
1894
1895 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1896                               int target_dev_id)
1897 {       /* SAS phy control and discover mode page for mode_sense */
1898         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1899                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1900                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
1901                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
1902                     0x2, 0, 0, 0, 0, 0, 0, 0,
1903                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1904                     0, 0, 0, 0, 0, 0, 0, 0,
1905                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1906                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
1907                     0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
1908                     0x3, 0, 0, 0, 0, 0, 0, 0,
1909                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1910                     0, 0, 0, 0, 0, 0, 0, 0,
1911                 };
1912         int port_a, port_b;
1913
1914         put_unaligned_be64(naa5_comp_a, sas_pcd_m_pg + 16);
1915         put_unaligned_be64(naa5_comp_c + 1, sas_pcd_m_pg + 24);
1916         put_unaligned_be64(naa5_comp_a, sas_pcd_m_pg + 64);
1917         put_unaligned_be64(naa5_comp_c + 1, sas_pcd_m_pg + 72);
1918         port_a = target_dev_id + 1;
1919         port_b = port_a + 1;
1920         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1921         put_unaligned_be32(port_a, p + 20);
1922         put_unaligned_be32(port_b, p + 48 + 20);
1923         if (1 == pcontrol)
1924                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1925         return sizeof(sas_pcd_m_pg);
1926 }
1927
1928 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1929 {       /* SAS SSP shared protocol specific port mode subpage */
1930         unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1931                     0, 0, 0, 0, 0, 0, 0, 0,
1932                 };
1933
1934         memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1935         if (1 == pcontrol)
1936                 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1937         return sizeof(sas_sha_m_pg);
1938 }
1939
1940 #define SDEBUG_MAX_MSENSE_SZ 256
1941
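/* Respond to MODE SENSE(6) and MODE SENSE(10). Builds the mode parameter
 * header, an optional block descriptor (8 or 16 bytes, suppressed by DBD
 * and only emitted for the disk peripheral type) and the requested mode
 * page(s); page code 0x3f returns all pages. The PC field selects current
 * (0), changeable (1) or default (2) values; saved values (3) are not
 * supported. */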
1942 static int
1943 resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1944 {
1945         unsigned char dbd, llbaa;
1946         int pcontrol, pcode, subpcode, bd_len;
1947         unsigned char dev_spec;
1948         int alloc_len, msense_6, offset, len, target_dev_id;
1949         int target = scp->device->id;
1950         unsigned char * ap;
1951         unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1952         unsigned char *cmd = scp->cmnd;
1953
1954         dbd = !!(cmd[1] & 0x8);
1955         pcontrol = (cmd[2] & 0xc0) >> 6;
1956         pcode = cmd[2] & 0x3f;
1957         subpcode = cmd[3];
1958         msense_6 = (MODE_SENSE == cmd[0]);
1959         llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1960         if ((0 == sdebug_ptype) && (0 == dbd))
1961                 bd_len = llbaa ? 16 : 8;
1962         else
1963                 bd_len = 0;
1964         alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
1965         memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1966         if (0x3 == pcontrol) {  /* Saving values not supported */
1967                 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1968                 return check_condition_result;
1969         }
1970         target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1971                         (devip->target * 1000) - 3;
1972         /* set DPOFUA bit for disks */
1973         if (0 == sdebug_ptype)
1974                 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1975         else
1976                 dev_spec = 0x0;
1977         if (msense_6) {
1978                 arr[2] = dev_spec;
1979                 arr[3] = bd_len;
1980                 offset = 4;
1981         } else {
1982                 arr[3] = dev_spec;
1983                 if (16 == bd_len)
1984                         arr[4] = 0x1;   /* set LONGLBA bit */
1985                 arr[7] = bd_len;        /* assume 255 or less */
1986                 offset = 8;
1987         }
1988         ap = arr + offset;
1989         if ((bd_len > 0) && (!sdebug_capacity))
1990                 sdebug_capacity = get_sdebug_capacity();
1991
1992         if (8 == bd_len) {
1993                 if (sdebug_capacity > 0xfffffffe)
1994                         put_unaligned_be32(0xffffffff, ap + 0);
1995                 else
1996                         put_unaligned_be32(sdebug_capacity, ap + 0);
1997                 put_unaligned_be16(sdebug_sector_size, ap + 6);
1998                 offset += bd_len;
1999                 ap = arr + offset;
2000         } else if (16 == bd_len) {
2001                 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2002                 put_unaligned_be32(sdebug_sector_size, ap + 12);
2003                 offset += bd_len;
2004                 ap = arr + offset;
2005         }
2006
2007         if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2008                 /* TODO: Control Extension page */
2009                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2010                 return check_condition_result;
2011         }
2012         switch (pcode) {
2013         case 0x1:       /* Read-Write error recovery page, direct access */
2014                 len = resp_err_recov_pg(ap, pcontrol, target);
2015                 offset += len;
2016                 break;
2017         case 0x2:       /* Disconnect-Reconnect page, all devices */
2018                 len = resp_disconnect_pg(ap, pcontrol, target);
2019                 offset += len;
2020                 break;
2021         case 0x3:       /* Format device page, direct access */
2022                 len = resp_format_pg(ap, pcontrol, target);
2023                 offset += len;
2024                 break;
2025         case 0x8:       /* Caching page, direct access */
2026                 len = resp_caching_pg(ap, pcontrol, target);
2027                 offset += len;
2028                 break;
2029         case 0xa:       /* Control Mode page, all devices */
2030                 len = resp_ctrl_m_pg(ap, pcontrol, target);
2031                 offset += len;
2032                 break;
2033         case 0x19:      /* if spc==1 then sas phy, control+discover */
2034                 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2035                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2036                         return check_condition_result;
2037                 }
2038                 len = 0;
2039                 if ((0x0 == subpcode) || (0xff == subpcode))
2040                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2041                 if ((0x1 == subpcode) || (0xff == subpcode))
2042                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2043                                                   target_dev_id);
2044                 if ((0x2 == subpcode) || (0xff == subpcode))
2045                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
2046                 offset += len;
2047                 break;
2048         case 0x1c:      /* Informational Exceptions Mode page, all devices */
2049                 len = resp_iec_m_pg(ap, pcontrol, target);
2050                 offset += len;
2051                 break;
2052         case 0x3f:      /* Read all Mode pages */
2053                 if ((0 == subpcode) || (0xff == subpcode)) {
2054                         len = resp_err_recov_pg(ap, pcontrol, target);
2055                         len += resp_disconnect_pg(ap + len, pcontrol, target);
2056                         len += resp_format_pg(ap + len, pcontrol, target);
2057                         len += resp_caching_pg(ap + len, pcontrol, target);
2058                         len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2059                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2060                         if (0xff == subpcode) {
2061                                 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2062                                                   target, target_dev_id);
2063                                 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2064                         }
2065                         len += resp_iec_m_pg(ap + len, pcontrol, target);
2066                 } else {
2067                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2068                         return check_condition_result;
2069                 }
2070                 offset += len;
2071                 break;
2072         default:
2073                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2074                 return check_condition_result;
2075         }
2076         if (msense_6)
2077                 arr[0] = offset - 1;
2078         else
2079                 put_unaligned_be16((offset - 2), arr + 0);
2080         return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2081 }
2082
2083 #define SDEBUG_MAX_MSELECT_SZ 512
2084
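/* Respond to MODE SELECT(6)/(10). Only the Caching, Control and
 * Informational Exceptions mode pages may be changed; a successful change
 * raises a MODE PARAMETERS CHANGED unit attention on the device. */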
2085 static int
2086 resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2087 {
2088         int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2089         int param_len, res, mpage;
2090         unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2091         unsigned char *cmd = scp->cmnd;
2092         int mselect6 = (MODE_SELECT == cmd[0]);
2093
2094         memset(arr, 0, sizeof(arr));
2095         pf = cmd[1] & 0x10;
2096         sp = cmd[1] & 0x1;
2097         param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2098         if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2099                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2100                 return check_condition_result;
2101         }
2102         res = fetch_to_dev_buffer(scp, arr, param_len);
2103         if (-1 == res)
2104                 return DID_ERROR << 16;
2105         else if (sdebug_verbose && (res < param_len))
2106                 sdev_printk(KERN_INFO, scp->device,
2107                             "%s: cdb indicated=%d, IO sent=%d bytes\n",
2108                             __func__, param_len, res);
2109         md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2110         bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2111         if (md_len > 2) {
2112                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2113                 return check_condition_result;
2114         }
2115         off = bd_len + (mselect6 ? 4 : 8);
2116         mpage = arr[off] & 0x3f;
2117         ps = !!(arr[off] & 0x80);
2118         if (ps) {
2119                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2120                 return check_condition_result;
2121         }
2122         spf = !!(arr[off] & 0x40);
2123         pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2124                        (arr[off + 1] + 2);
2125         if ((pg_len + off) > param_len) {
2126                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2127                                 PARAMETER_LIST_LENGTH_ERR, 0);
2128                 return check_condition_result;
2129         }
2130         switch (mpage) {
2131         case 0x8:      /* Caching Mode page */
2132                 if (caching_pg[1] == arr[off + 1]) {
2133                         memcpy(caching_pg + 2, arr + off + 2,
2134                                sizeof(caching_pg) - 2);
2135                         goto set_mode_changed_ua;
2136                 }
2137                 break;
2138         case 0xa:      /* Control Mode page */
2139                 if (ctrl_m_pg[1] == arr[off + 1]) {
2140                         memcpy(ctrl_m_pg + 2, arr + off + 2,
2141                                sizeof(ctrl_m_pg) - 2);
2142                         sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2143                         goto set_mode_changed_ua;
2144                 }
2145                 break;
2146         case 0x1c:      /* Informational Exceptions Mode page */
2147                 if (iec_m_pg[1] == arr[off + 1]) {
2148                         memcpy(iec_m_pg + 2, arr + off + 2,
2149                                sizeof(iec_m_pg) - 2);
2150                         goto set_mode_changed_ua;
2151                 }
2152                 break;
2153         default:
2154                 break;
2155         }
2156         mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2157         return check_condition_result;
2158 set_mode_changed_ua:
2159         set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2160         return 0;
2161 }
2162
2163 static int resp_temp_l_pg(unsigned char * arr)
2164 {
2165         unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2166                                      0x0, 0x1, 0x3, 0x2, 0x0, 65,
2167                 };
2168
2169         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2170         return sizeof(temp_l_pg);
2171 }
2172
2173 static int resp_ie_l_pg(unsigned char * arr)
2174 {
2175         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2176                 };
2177
2178         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2179         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2180                 arr[4] = THRESHOLD_EXCEEDED;
2181                 arr[5] = 0xff;
2182         }
2183         return sizeof(ie_l_pg);
2184 }
2185
2186 #define SDEBUG_MAX_LSENSE_SZ 512
2187
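/* Respond to LOG SENSE. Supports the supported-pages (0x0), temperature
 * (0xd) and informational exceptions (0x2f) log pages, plus the matching
 * supported-subpages lists when subpage 0xff is requested. */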
2188 static int resp_log_sense(struct scsi_cmnd * scp,
2189                           struct sdebug_dev_info * devip)
2190 {
2191         int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2192         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2193         unsigned char *cmd = scp->cmnd;
2194
2195         memset(arr, 0, sizeof(arr));
2196         ppc = cmd[1] & 0x2;
2197         sp = cmd[1] & 0x1;
2198         if (ppc || sp) {
2199                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2200                 return check_condition_result;
2201         }
2202         pcontrol = (cmd[2] & 0xc0) >> 6;
2203         pcode = cmd[2] & 0x3f;
2204         subpcode = cmd[3] & 0xff;
2205         alloc_len = get_unaligned_be16(cmd + 7);
2206         arr[0] = pcode;
2207         if (0 == subpcode) {
2208                 switch (pcode) {
2209                 case 0x0:       /* Supported log pages log page */
2210                         n = 4;
2211                         arr[n++] = 0x0;         /* this page */
2212                         arr[n++] = 0xd;         /* Temperature */
2213                         arr[n++] = 0x2f;        /* Informational exceptions */
2214                         arr[3] = n - 4;
2215                         break;
2216                 case 0xd:       /* Temperature log page */
2217                         arr[3] = resp_temp_l_pg(arr + 4);
2218                         break;
2219                 case 0x2f:      /* Informational exceptions log page */
2220                         arr[3] = resp_ie_l_pg(arr + 4);
2221                         break;
2222                 default:
2223                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2224                         return check_condition_result;
2225                 }
2226         } else if (0xff == subpcode) {
2227                 arr[0] |= 0x40;
2228                 arr[1] = subpcode;
2229                 switch (pcode) {
2230                 case 0x0:       /* Supported log pages and subpages log page */
2231                         n = 4;
2232                         arr[n++] = 0x0;
2233                         arr[n++] = 0x0;         /* 0,0 page */
2234                         arr[n++] = 0x0;
2235                         arr[n++] = 0xff;        /* this page */
2236                         arr[n++] = 0xd;
2237                         arr[n++] = 0x0;         /* Temperature */
2238                         arr[n++] = 0x2f;
2239                         arr[n++] = 0x0; /* Informational exceptions */
2240                         arr[3] = n - 4;
2241                         break;
2242                 case 0xd:       /* Temperature subpages */
2243                         n = 4;
2244                         arr[n++] = 0xd;
2245                         arr[n++] = 0x0;         /* Temperature */
2246                         arr[3] = n - 4;
2247                         break;
2248                 case 0x2f:      /* Informational exceptions subpages */
2249                         n = 4;
2250                         arr[n++] = 0x2f;
2251                         arr[n++] = 0x0;         /* Informational exceptions */
2252                         arr[3] = n - 4;
2253                         break;
2254                 default:
2255                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2256                         return check_condition_result;
2257                 }
2258         } else {
2259                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2260                 return check_condition_result;
2261         }
2262         len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2263         return fill_from_dev_buffer(scp, arr,
2264                     min(len, SDEBUG_MAX_LSENSE_SZ));
2265 }
2266
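/* Common LBA range and transfer length validation used by the data access
 * commands; returns a CHECK CONDITION result with ILLEGAL REQUEST sense
 * when the range exceeds the capacity or the transfer length exceeds the
 * backing store. */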
2267 static int check_device_access_params(struct scsi_cmnd *scp,
2268                                       unsigned long long lba, unsigned int num)
2269 {
2270         if (lba + num > sdebug_capacity) {
2271                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2272                 return check_condition_result;
2273         }
2274         /* transfer length excessive (tie in to block limits VPD page) */
2275         if (num > sdebug_store_sectors) {
2276                 /* needs work to find which cdb byte 'num' comes from */
2277                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2278                 return check_condition_result;
2279         }
2280         return 0;
2281 }
2282
2283 /* Returns number of bytes copied or -1 if error. */
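/* Note: the fake store may be smaller than the advertised capacity (see
 * virtual_gb), so the access is taken modulo sdebug_store_sectors and may
 * wrap around to the start of the store. */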
2284 static int
2285 do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
2286 {
2287         int ret;
2288         u64 block, rest = 0;
2289         struct scsi_data_buffer *sdb;
2290         enum dma_data_direction dir;
2291
2292         if (do_write) {
2293                 sdb = scsi_out(scmd);
2294                 dir = DMA_TO_DEVICE;
2295         } else {
2296                 sdb = scsi_in(scmd);
2297                 dir = DMA_FROM_DEVICE;
2298         }
2299
2300         if (!sdb->length)
2301                 return 0;
2302         if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2303                 return -1;
2304
2305         block = do_div(lba, sdebug_store_sectors);
2306         if (block + num > sdebug_store_sectors)
2307                 rest = block + num - sdebug_store_sectors;
2308
2309         ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2310                    fake_storep + (block * sdebug_sector_size),
2311                    (num - rest) * sdebug_sector_size, 0, do_write);
2312         if (ret != (num - rest) * sdebug_sector_size)
2313                 return ret;
2314
2315         if (rest) {
2316                 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2317                             fake_storep, rest * sdebug_sector_size,
2318                             (num - rest) * sdebug_sector_size, do_write);
2319         }
2320
2321         return ret;
2322 }
2323
2324 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2325  * arr into fake_store(lba,num) and return true. If comparison fails then
2326  * return false. */
2327 static bool
2328 comp_write_worker(u64 lba, u32 num, const u8 *arr)
2329 {
2330         bool res;
2331         u64 block, rest = 0;
2332         u32 store_blks = sdebug_store_sectors;
2333         u32 lb_size = sdebug_sector_size;
2334
2335         block = do_div(lba, store_blks);
2336         if (block + num > store_blks)
2337                 rest = block + num - store_blks;
2338
2339         res = !memcmp(fake_storep + (block * lb_size), arr,
2340                       (num - rest) * lb_size);
2341         if (!res)
2342                 return res;
2343         if (rest)
2344                 res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2345                               rest * lb_size);
2346         if (!res)
2347                 return res;
2348         arr += num * lb_size;
2349         memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2350         if (rest)
2351                 memcpy(fake_storep, arr + ((num - rest) * lb_size),
2352                        rest * lb_size);
2353         return res;
2354 }
2355
2356 static __be16 dif_compute_csum(const void *buf, int len)
2357 {
2358         __be16 csum;
2359
2360         if (sdebug_guard)
2361                 csum = (__force __be16)ip_compute_csum(buf, len);
2362         else
2363                 csum = cpu_to_be16(crc_t10dif(buf, len));
2364
2365         return csum;
2366 }
2367
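/* Verify one protection information tuple against the sector data: the
 * guard tag must match the computed checksum (IP checksum or CRC-T10DIF
 * depending on the guard parameter); for type 1 protection the reference
 * tag must equal the low 32 bits of the LBA and for type 2 it must equal
 * the expected initial LBA. Returns 0 on success, else 0x01 (guard) or
 * 0x03 (reference), which callers use as the ASCQ with ASC 0x10. */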
2368 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2369                       sector_t sector, u32 ei_lba)
2370 {
2371         __be16 csum = dif_compute_csum(data, sdebug_sector_size);
2372
2373         if (sdt->guard_tag != csum) {
2374                 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2375                         (unsigned long)sector,
2376                         be16_to_cpu(sdt->guard_tag),
2377                         be16_to_cpu(csum));
2378                 return 0x01;
2379         }
2380         if (sdebug_dif == SD_DIF_TYPE1_PROTECTION &&
2381             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2382                 pr_err("REF check failed on sector %lu\n",
2383                         (unsigned long)sector);
2384                 return 0x03;
2385         }
2386         if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
2387             be32_to_cpu(sdt->ref_tag) != ei_lba) {
2388                 pr_err("REF check failed on sector %lu\n",
2389                         (unsigned long)sector);
2390                 return 0x03;
2391         }
2392         return 0;
2393 }
2394
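/* Copy protection information between dif_storep and the command's
 * protection scatter-gather list; 'read' selects the direction. Like the
 * data store, dif_storep wraps at sdebug_store_sectors. */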
2395 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2396                           unsigned int sectors, bool read)
2397 {
2398         size_t resid;
2399         void *paddr;
2400         const void *dif_store_end = dif_storep + sdebug_store_sectors;
2401         struct sg_mapping_iter miter;
2402
2403         /* Bytes of protection data to copy into sgl */
2404         resid = sectors * sizeof(*dif_storep);
2405
2406         sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2407                         scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2408                         (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2409
2410         while (sg_miter_next(&miter) && resid > 0) {
2411                 size_t len = min(miter.length, resid);
2412                 void *start = dif_store(sector);
2413                 size_t rest = 0;
2414
2415                 if (dif_store_end < start + len)
2416                         rest = start + len - dif_store_end;
2417
2418                 paddr = miter.addr;
2419
2420                 if (read)
2421                         memcpy(paddr, start, len - rest);
2422                 else
2423                         memcpy(start, paddr, len - rest);
2424
2425                 if (rest) {
2426                         if (read)
2427                                 memcpy(paddr + len - rest, dif_storep, rest);
2428                         else
2429                                 memcpy(dif_storep, paddr + len - rest, rest);
2430                 }
2431
2432                 sector += len / sizeof(*dif_storep);
2433                 resid -= len;
2434         }
2435         sg_miter_stop(&miter);
2436 }
2437
2438 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2439                             unsigned int sectors, u32 ei_lba)
2440 {
2441         unsigned int i;
2442         struct sd_dif_tuple *sdt;
2443         sector_t sector;
2444
2445         for (i = 0; i < sectors; i++, ei_lba++) {
2446                 int ret;
2447
2448                 sector = start_sec + i;
2449                 sdt = dif_store(sector);
2450
2451                 if (sdt->app_tag == cpu_to_be16(0xffff))
2452                         continue;
2453
2454                 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2455                 if (ret) {
2456                         dif_errors++;
2457                         return ret;
2458                 }
2459         }
2460
2461         dif_copy_prot(SCpnt, start_sec, sectors, true);
2462         dix_reads++;
2463
2464         return 0;
2465 }
2466
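/* Respond to the READ family of commands (READ 6/10/12/16/32 and the read
 * half of XDWRITEREAD 10). Applies any configured error injection, honours
 * the simulated unrecoverable read error window, verifies protection
 * information when DIX is active, then copies data from the fake store. */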
2467 static int
2468 resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2469 {
2470         u8 *cmd = scp->cmnd;
2471         u64 lba;
2472         u32 num;
2473         u32 ei_lba;
2474         unsigned long iflags;
2475         int ret;
2476         bool check_prot;
2477
2478         switch (cmd[0]) {
2479         case READ_16:
2480                 ei_lba = 0;
2481                 lba = get_unaligned_be64(cmd + 2);
2482                 num = get_unaligned_be32(cmd + 10);
2483                 check_prot = true;
2484                 break;
2485         case READ_10:
2486                 ei_lba = 0;
2487                 lba = get_unaligned_be32(cmd + 2);
2488                 num = get_unaligned_be16(cmd + 7);
2489                 check_prot = true;
2490                 break;
2491         case READ_6:
2492                 ei_lba = 0;
2493                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2494                       (u32)(cmd[1] & 0x1f) << 16;
2495                 num = (0 == cmd[4]) ? 256 : cmd[4];
2496                 check_prot = true;
2497                 break;
2498         case READ_12:
2499                 ei_lba = 0;
2500                 lba = get_unaligned_be32(cmd + 2);
2501                 num = get_unaligned_be32(cmd + 6);
2502                 check_prot = true;
2503                 break;
2504         case XDWRITEREAD_10:
2505                 ei_lba = 0;
2506                 lba = get_unaligned_be32(cmd + 2);
2507                 num = get_unaligned_be16(cmd + 7);
2508                 check_prot = false;
2509                 break;
2510         default:        /* assume READ(32) */
2511                 lba = get_unaligned_be64(cmd + 12);
2512                 ei_lba = get_unaligned_be32(cmd + 20);
2513                 num = get_unaligned_be32(cmd + 28);
2514                 check_prot = false;
2515                 break;
2516         }
2517         if (check_prot) {
2518                 if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
2519                     (cmd[1] & 0xe0)) {
2520                         mk_sense_invalid_opcode(scp);
2521                         return check_condition_result;
2522                 }
2523                 if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
2524                      sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
2525                     (cmd[1] & 0xe0) == 0)
2526                         sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2527                                     "to DIF device\n");
2528         }
2529         if (sdebug_any_injecting_opt) {
2530                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2531
2532                 if (ep->inj_short)
2533                         num /= 2;
2534         }
2535
2536         /* inline check_device_access_params() */
2537         if (lba + num > sdebug_capacity) {
2538                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2539                 return check_condition_result;
2540         }
2541         /* transfer length excessive (tie in to block limits VPD page) */
2542         if (num > sdebug_store_sectors) {
2543                 /* needs work to find which cdb byte 'num' comes from */
2544                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2545                 return check_condition_result;
2546         }
2547
2548         if ((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
2549             (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2550             ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2551                 /* claim unrecoverable read error */
2552                 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2553                 /* set info field and valid bit for fixed descriptor */
2554                 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2555                         scp->sense_buffer[0] |= 0x80;   /* Valid bit */
2556                         ret = (lba < OPT_MEDIUM_ERR_ADDR)
2557                               ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2558                         put_unaligned_be32(ret, scp->sense_buffer + 3);
2559                 }
2560                 scsi_set_resid(scp, scsi_bufflen(scp));
2561                 return check_condition_result;
2562         }
2563
2564         read_lock_irqsave(&atomic_rw, iflags);
2565
2566         /* DIX + T10 DIF */
2567         if (sdebug_dix && scsi_prot_sg_count(scp)) {
2568                 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2569
2570                 if (prot_ret) {
2571                         read_unlock_irqrestore(&atomic_rw, iflags);
2572                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2573                         return illegal_condition_result;
2574                 }
2575         }
2576
2577         ret = do_device_access(scp, lba, num, false);
2578         read_unlock_irqrestore(&atomic_rw, iflags);
2579         if (ret == -1)
2580                 return DID_ERROR << 16;
2581
2582         scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2583
2584         if (sdebug_any_injecting_opt) {
2585                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2586
2587                 if (ep->inj_recovered) {
2588                         mk_sense_buffer(scp, RECOVERED_ERROR,
2589                                         THRESHOLD_EXCEEDED, 0);
2590                         return check_condition_result;
2591                 } else if (ep->inj_transport) {
2592                         mk_sense_buffer(scp, ABORTED_COMMAND,
2593                                         TRANSPORT_PROBLEM, ACK_NAK_TO);
2594                         return check_condition_result;
2595                 } else if (ep->inj_dif) {
2596                         /* Logical block guard check failed */
2597                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2598                         return illegal_condition_result;
2599                 } else if (ep->inj_dix) {
2600                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2601                         return illegal_condition_result;
2602                 }
2603         }
2604         return 0;
2605 }
2606
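/* Logs a hex/ASCII dump of a sector-sized buffer; called when a
 * protection information check fails during a write.
 */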
2607 static void dump_sector(unsigned char *buf, int len)
2608 {
2609         int i, j, n;
2610
2611         pr_err(">>> Sector Dump <<<\n");
2612         for (i = 0 ; i < len ; i += 16) {
2613                 char b[128];
2614
2615                 for (j = 0, n = 0; j < 16; j++) {
2616                         unsigned char c = buf[i+j];
2617
2618                         if (c >= 0x20 && c < 0x7f)
2619                                 n += scnprintf(b + n, sizeof(b) - n,
2620                                                " %c ", c);
2621                         else
2622                                 n += scnprintf(b + n, sizeof(b) - n,
2623                                                "%02x ", c);
2624                 }
2625                 pr_err("%04d: %s\n", i, b);
2626         }
2627 }
2628
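/* Walks the protection and data scatter-gather lists in step, verifying
 * each sector's DIF tuple against its data; on success the protection
 * information is copied into the dif store.
 */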
2629 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2630                              unsigned int sectors, u32 ei_lba)
2631 {
2632         int ret;
2633         struct sd_dif_tuple *sdt;
2634         void *daddr;
2635         sector_t sector = start_sec;
2636         int ppage_offset;
2637         int dpage_offset;
2638         struct sg_mapping_iter diter;
2639         struct sg_mapping_iter piter;
2640
2641         BUG_ON(scsi_sg_count(SCpnt) == 0);
2642         BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2643
2644         sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2645                         scsi_prot_sg_count(SCpnt),
2646                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2647         sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2648                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2649
2650         /* For each protection page */
2651         while (sg_miter_next(&piter)) {
2652                 dpage_offset = 0;
2653                 if (WARN_ON(!sg_miter_next(&diter))) {
2654                         ret = 0x01;
2655                         goto out;
2656                 }
2657
2658                 for (ppage_offset = 0; ppage_offset < piter.length;
2659                      ppage_offset += sizeof(struct sd_dif_tuple)) {
2660                         /* If we're at the end of the current
2661                          * data page advance to the next one
2662                          */
2663                         if (dpage_offset >= diter.length) {
2664                                 if (WARN_ON(!sg_miter_next(&diter))) {
2665                                         ret = 0x01;
2666                                         goto out;
2667                                 }
2668                                 dpage_offset = 0;
2669                         }
2670
2671                         sdt = piter.addr + ppage_offset;
2672                         daddr = diter.addr + dpage_offset;
2673
2674                         ret = dif_verify(sdt, daddr, sector, ei_lba);
2675                         if (ret) {
2676                                 dump_sector(daddr, sdebug_sector_size);
2677                                 goto out;
2678                         }
2679
2680                         sector++;
2681                         ei_lba++;
2682                         dpage_offset += sdebug_sector_size;
2683                 }
2684                 diter.consumed = dpage_offset;
2685                 sg_miter_stop(&diter);
2686         }
2687         sg_miter_stop(&piter);
2688
2689         dif_copy_prot(SCpnt, start_sec, sectors, false);
2690         dix_writes++;
2691
2692         return 0;
2693
2694 out:
2695         dif_errors++;
2696         sg_miter_stop(&diter);
2697         sg_miter_stop(&piter);
2698         return ret;
2699 }
2700
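/* The next two helpers convert between LBAs and bit positions in
 * map_storep, the provisioning (mapped/unmapped) bitmap, honouring the
 * configured unmap granularity and alignment.
 */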
2701 static unsigned long lba_to_map_index(sector_t lba)
2702 {
2703         if (sdebug_unmap_alignment)
2704                 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2705         sector_div(lba, sdebug_unmap_granularity);
2706         return lba;
2707 }
2708
2709 static sector_t map_index_to_lba(unsigned long index)
2710 {
2711         sector_t lba = index * sdebug_unmap_granularity;
2712
2713         if (sdebug_unmap_alignment)
2714                 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2715         return lba;
2716 }
2717
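/* Returns whether the block at lba is mapped; *num is set to the number
 * of following blocks sharing that state, capped at the end of the store.
 */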
2718 static unsigned int map_state(sector_t lba, unsigned int *num)
2719 {
2720         sector_t end;
2721         unsigned int mapped;
2722         unsigned long index;
2723         unsigned long next;
2724
2725         index = lba_to_map_index(lba);
2726         mapped = test_bit(index, map_storep);
2727
2728         if (mapped)
2729                 next = find_next_zero_bit(map_storep, map_size, index);
2730         else
2731                 next = find_next_bit(map_storep, map_size, index);
2732
2733         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2734         *num = end - lba;
2735         return mapped;
2736 }
2737
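/* Marks every granularity unit touched by the given extent as mapped. */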
2738 static void map_region(sector_t lba, unsigned int len)
2739 {
2740         sector_t end = lba + len;
2741
2742         while (lba < end) {
2743                 unsigned long index = lba_to_map_index(lba);
2744
2745                 if (index < map_size)
2746                         set_bit(index, map_storep);
2747
2748                 lba = map_index_to_lba(index + 1);
2749         }
2750 }
2751
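/* Clears fully covered granularity units in the provisioning bitmap;
 * depending on lbprz the data is zeroed, and any protection information
 * is reset to 0xff for those blocks.
 */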
2752 static void unmap_region(sector_t lba, unsigned int len)
2753 {
2754         sector_t end = lba + len;
2755
2756         while (lba < end) {
2757                 unsigned long index = lba_to_map_index(lba);
2758
2759                 if (lba == map_index_to_lba(index) &&
2760                     lba + sdebug_unmap_granularity <= end &&
2761                     index < map_size) {
2762                         clear_bit(index, map_storep);
2763                         if (sdebug_lbprz) {
2764                                 memset(fake_storep +
2765                                        lba * sdebug_sector_size, 0,
2766                                        sdebug_sector_size *
2767                                        sdebug_unmap_granularity);
2768                         }
2769                         if (dif_storep) {
2770                                 memset(dif_storep + lba, 0xff,
2771                                        sizeof(*dif_storep) *
2772                                        sdebug_unmap_granularity);
2773                         }
2774                 }
2775                 lba = map_index_to_lba(index + 1);
2776         }
2777 }
2778
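/* Services WRITE(6), WRITE(10), WRITE(12), WRITE(16) and WRITE(32), plus
 * the write half of XDWRITEREAD(10): decodes the cdb, range checks the
 * LBA, optionally verifies DIF/DIX protection information, writes to the
 * fake store and updates the provisioning bitmap.
 */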
2779 static int
2780 resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2781 {
2782         u8 *cmd = scp->cmnd;
2783         u64 lba;
2784         u32 num;
2785         u32 ei_lba;
2786         unsigned long iflags;
2787         int ret;
2788         bool check_prot;
2789
2790         switch (cmd[0]) {
2791         case WRITE_16:
2792                 ei_lba = 0;
2793                 lba = get_unaligned_be64(cmd + 2);
2794                 num = get_unaligned_be32(cmd + 10);
2795                 check_prot = true;
2796                 break;
2797         case WRITE_10:
2798                 ei_lba = 0;
2799                 lba = get_unaligned_be32(cmd + 2);
2800                 num = get_unaligned_be16(cmd + 7);
2801                 check_prot = true;
2802                 break;
2803         case WRITE_6:
2804                 ei_lba = 0;
2805                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2806                       (u32)(cmd[1] & 0x1f) << 16;
2807                 num = (0 == cmd[4]) ? 256 : cmd[4];
2808                 check_prot = true;
2809                 break;
2810         case WRITE_12:
2811                 ei_lba = 0;
2812                 lba = get_unaligned_be32(cmd + 2);
2813                 num = get_unaligned_be32(cmd + 6);
2814                 check_prot = true;
2815                 break;
2816         case XDWRITEREAD_10:
2817                 ei_lba = 0;
2818                 lba = get_unaligned_be32(cmd + 2);
2819                 num = get_unaligned_be16(cmd + 7);
2820                 check_prot = false;
2821                 break;
2822         default:        /* assume WRITE(32) */
2823                 lba = get_unaligned_be64(cmd + 12);
2824                 ei_lba = get_unaligned_be32(cmd + 20);
2825                 num = get_unaligned_be32(cmd + 28);
2826                 check_prot = false;
2827                 break;
2828         }
2829         if (check_prot) {
2830                 if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
2831                     (cmd[1] & 0xe0)) {
2832                         mk_sense_invalid_opcode(scp);
2833                         return check_condition_result;
2834                 }
2835                 if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
2836                      sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
2837                     (cmd[1] & 0xe0) == 0)
2838                         sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2839                                     "to DIF device\n");
2840         }
2841
2842         /* inline check_device_access_params() */
2843         if (lba + num > sdebug_capacity) {
2844                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2845                 return check_condition_result;
2846         }
2847         /* transfer length excessive (tie in to block limits VPD page) */
2848         if (num > sdebug_store_sectors) {
2849                 /* needs work to find which cdb byte 'num' comes from */
2850                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2851                 return check_condition_result;
2852         }
2853
2854         write_lock_irqsave(&atomic_rw, iflags);
2855
2856         /* DIX + T10 DIF */
2857         if (sdebug_dix && scsi_prot_sg_count(scp)) {
2858                 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2859
2860                 if (prot_ret) {
2861                         write_unlock_irqrestore(&atomic_rw, iflags);
2862                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2863                         return illegal_condition_result;
2864                 }
2865         }
2866
2867         ret = do_device_access(scp, lba, num, true);
2868         if (scsi_debug_lbp())
2869                 map_region(lba, num);
2870         write_unlock_irqrestore(&atomic_rw, iflags);
2871         if (-1 == ret)
2872                 return DID_ERROR << 16;
2873         else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
2874                 sdev_printk(KERN_INFO, scp->device,
2875                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2876                             my_name, num * sdebug_sector_size, ret);
2877
2878         if (sdebug_any_injecting_opt) {
2879                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2880
2881                 if (ep->inj_recovered) {
2882                         mk_sense_buffer(scp, RECOVERED_ERROR,
2883                                         THRESHOLD_EXCEEDED, 0);
2884                         return check_condition_result;
2885                 } else if (ep->inj_dif) {
2886                         /* Logical block guard check failed */
2887                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2888                         return illegal_condition_result;
2889                 } else if (ep->inj_dix) {
2890                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2891                         return illegal_condition_result;
2892                 }
2893         }
2894         return 0;
2895 }
2896
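/* Common handler for WRITE SAME(10) and WRITE SAME(16): either unmaps
 * the extent or replicates one logical block (zeroes when NDOB is set)
 * across it.
 */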
2897 static int
2898 resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
2899                 bool unmap, bool ndob)
2900 {
2901         unsigned long iflags;
2902         unsigned long long i;
2903         int ret;
2904         u64 lba_off;
2905
2906         ret = check_device_access_params(scp, lba, num);
2907         if (ret)
2908                 return ret;
2909
2910         write_lock_irqsave(&atomic_rw, iflags);
2911
2912         if (unmap && scsi_debug_lbp()) {
2913                 unmap_region(lba, num);
2914                 goto out;
2915         }
2916
2917         lba_off = lba * sdebug_sector_size;
2918         /* if ndob then zero 1 logical block, else fetch 1 logical block */
2919         if (ndob) {
2920                 memset(fake_storep + lba_off, 0, sdebug_sector_size);
2921                 ret = 0;
2922         } else
2923                 ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
2924                                           sdebug_sector_size);
2925
2926         if (-1 == ret) {
2927                 write_unlock_irqrestore(&atomic_rw, iflags);
2928                 return DID_ERROR << 16;
2929         } else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
2930                 sdev_printk(KERN_INFO, scp->device,
2931                             "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2932                             my_name, "write same",
2933                             num * sdebug_sector_size, ret);
2934
2935         /* Copy first sector to remaining blocks */
2936         for (i = 1 ; i < num ; i++)
2937                 memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
2938                        fake_storep + lba_off,
2939                        sdebug_sector_size);
2940
2941         if (scsi_debug_lbp())
2942                 map_region(lba, num);
2943 out:
2944         write_unlock_irqrestore(&atomic_rw, iflags);
2945
2946         return 0;
2947 }
2948
2949 static int
2950 resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2951 {
2952         u8 *cmd = scp->cmnd;
2953         u32 lba;
2954         u16 num;
2955         u32 ei_lba = 0;
2956         bool unmap = false;
2957
2958         if (cmd[1] & 0x8) {
2959                 if (sdebug_lbpws10 == 0) {
2960                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2961                         return check_condition_result;
2962                 } else
2963                         unmap = true;
2964         }
2965         lba = get_unaligned_be32(cmd + 2);
2966         num = get_unaligned_be16(cmd + 7);
2967         if (num > sdebug_write_same_length) {
2968                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
2969                 return check_condition_result;
2970         }
2971         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
2972 }
2973
2974 static int
2975 resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2976 {
2977         u8 *cmd = scp->cmnd;
2978         u64 lba;
2979         u32 num;
2980         u32 ei_lba = 0;
2981         bool unmap = false;
2982         bool ndob = false;
2983
2984         if (cmd[1] & 0x8) {     /* UNMAP */
2985                 if (sdebug_lbpws == 0) {
2986                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2987                         return check_condition_result;
2988                 } else
2989                         unmap = true;
2990         }
2991         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
2992                 ndob = true;
2993         lba = get_unaligned_be64(cmd + 2);
2994         num = get_unaligned_be32(cmd + 10);
2995         if (num > sdebug_write_same_length) {
2996                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
2997                 return check_condition_result;
2998         }
2999         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3000 }
3001
3002 /* Note the mode field is in the same position as the (lower) service action
3003  * field. For the Report supported operation codes command, SPC-4 suggests
3004  * each mode of this command should be reported separately; deferred for now. */
3005 static int
3006 resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3007 {
3008         u8 *cmd = scp->cmnd;
3009         struct scsi_device *sdp = scp->device;
3010         struct sdebug_dev_info *dp;
3011         u8 mode;
3012
3013         mode = cmd[1] & 0x1f;
3014         switch (mode) {
3015         case 0x4:       /* download microcode (MC) and activate (ACT) */
3016                 /* set UAs on this device only */
3017                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3018                 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3019                 break;
3020         case 0x5:       /* download MC, save and ACT */
3021                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3022                 break;
3023         case 0x6:       /* download MC with offsets and ACT */
3024                 /* set UAs on most devices (LUs) in this target */
3025                 list_for_each_entry(dp,
3026                                     &devip->sdbg_host->dev_info_list,
3027                                     dev_list)
3028                         if (dp->target == sdp->id) {
3029                                 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3030                                 if (devip != dp)
3031                                         set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3032                                                 dp->uas_bm);
3033                         }
3034                 break;
3035         case 0x7:       /* download MC with offsets, save, and ACT */
3036                 /* set UA on all devices (LUs) in this target */
3037                 list_for_each_entry(dp,
3038                                     &devip->sdbg_host->dev_info_list,
3039                                     dev_list)
3040                         if (dp->target == sdp->id)
3041                                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3042                                         dp->uas_bm);
3043                 break;
3044         default:
3045                 /* do nothing for this command for other mode values */
3046                 break;
3047         }
3048         return 0;
3049 }
3050
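/* COMPARE AND WRITE: fetches both the compare and write buffers from the
 * data-out into a temporary array and hands them to comp_write_worker();
 * a mismatch yields a MISCOMPARE sense.
 */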
3051 static int
3052 resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3053 {
3054         u8 *cmd = scp->cmnd;
3055         u8 *arr;
3056         u8 *fake_storep_hold;
3057         u64 lba;
3058         u32 dnum;
3059         u32 lb_size = sdebug_sector_size;
3060         u8 num;
3061         unsigned long iflags;
3062         int ret;
3063         int retval = 0;
3064
3065         lba = get_unaligned_be64(cmd + 2);
3066         num = cmd[13];          /* 1 to a maximum of 255 logical blocks */
3067         if (0 == num)
3068                 return 0;       /* degenerate case, not an error */
3069         if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
3070             (cmd[1] & 0xe0)) {
3071                 mk_sense_invalid_opcode(scp);
3072                 return check_condition_result;
3073         }
3074         if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
3075              sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
3076             (cmd[1] & 0xe0) == 0)
3077                 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3078                             "to DIF device\n");
3079
3080         /* inline check_device_access_params() */
3081         if (lba + num > sdebug_capacity) {
3082                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3083                 return check_condition_result;
3084         }
3085         /* transfer length excessive (tie in to block limits VPD page) */
3086         if (num > sdebug_store_sectors) {
3087                 /* needs work to find which cdb byte 'num' comes from */
3088                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3089                 return check_condition_result;
3090         }
3091         dnum = 2 * num;
3092         arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3093         if (NULL == arr) {
3094                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3095                                 INSUFF_RES_ASCQ);
3096                 return check_condition_result;
3097         }
3098
3099         write_lock_irqsave(&atomic_rw, iflags);
3100
3101         /* trick do_device_access() to fetch both compare and write buffers
3102          * from data-in into arr. Safe (atomic) since write_lock held. */
3103         fake_storep_hold = fake_storep;
3104         fake_storep = arr;
3105         ret = do_device_access(scp, 0, dnum, true);
3106         fake_storep = fake_storep_hold;
3107         if (ret == -1) {
3108                 retval = DID_ERROR << 16;
3109                 goto cleanup;
3110         } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3111                 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3112                             "indicated=%u, IO sent=%d bytes\n", my_name,
3113                             dnum * lb_size, ret);
3114         if (!comp_write_worker(lba, num, arr)) {
3115                 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3116                 retval = check_condition_result;
3117                 goto cleanup;
3118         }
3119         if (scsi_debug_lbp())
3120                 map_region(lba, num);
3121 cleanup:
3122         write_unlock_irqrestore(&atomic_rw, iflags);
3123         kfree(arr);
3124         return retval;
3125 }
3126
3127 struct unmap_block_desc {
3128         __be64  lba;
3129         __be32  blocks;
3130         __be32  __reserved;
3131 };
3132
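/* UNMAP: validates the parameter list header, then unmaps each block
 * descriptor's extent under the atomic_rw write lock.
 */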
3133 static int
3134 resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3135 {
3136         unsigned char *buf;
3137         struct unmap_block_desc *desc;
3138         unsigned int i, payload_len, descriptors;
3139         int ret;
3140         unsigned long iflags;
3141
3142
3143         if (!scsi_debug_lbp())
3144                 return 0;       /* fib and say it's done */
3145         payload_len = get_unaligned_be16(scp->cmnd + 7);
3146         BUG_ON(scsi_bufflen(scp) != payload_len);
3147
3148         descriptors = (payload_len - 8) / 16;
3149         if (descriptors > sdebug_unmap_max_desc) {
3150                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3151                 return check_condition_result;
3152         }
3153
3154         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3155         if (!buf) {
3156                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3157                                 INSUFF_RES_ASCQ);
3158                 return check_condition_result;
3159         }
3160
3161         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3162
3163         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3164         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3165
3166         desc = (void *)&buf[8];
3167
3168         write_lock_irqsave(&atomic_rw, iflags);
3169
3170         for (i = 0 ; i < descriptors ; i++) {
3171                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3172                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3173
3174                 ret = check_device_access_params(scp, lba, num);
3175                 if (ret)
3176                         goto out;
3177
3178                 unmap_region(lba, num);
3179         }
3180
3181         ret = 0;
3182
3183 out:
3184         write_unlock_irqrestore(&atomic_rw, iflags);
3185         kfree(buf);
3186
3187         return ret;
3188 }
3189
3190 #define SDEBUG_GET_LBA_STATUS_LEN 32
3191
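/* GET LBA STATUS: reports whether the requested LBA is mapped and the
 * length of the run of blocks sharing that state.
 */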
3192 static int
3193 resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3194 {
3195         u8 *cmd = scp->cmnd;
3196         u64 lba;
3197         u32 alloc_len, mapped, num;
3198         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3199         int ret;
3200
3201         lba = get_unaligned_be64(cmd + 2);
3202         alloc_len = get_unaligned_be32(cmd + 10);
3203
3204         if (alloc_len < 24)
3205                 return 0;
3206
3207         ret = check_device_access_params(scp, lba, 1);
3208         if (ret)
3209                 return ret;
3210
3211         if (scsi_debug_lbp())
3212                 mapped = map_state(lba, &num);
3213         else {
3214                 mapped = 1;
3215                 /* following just in case virtual_gb changed */
3216                 sdebug_capacity = get_sdebug_capacity();
3217                 if (sdebug_capacity - lba <= 0xffffffff)
3218                         num = sdebug_capacity - lba;
3219                 else
3220                         num = 0xffffffff;
3221         }
3222
3223         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3224         put_unaligned_be32(20, arr);            /* Parameter Data Length */
3225         put_unaligned_be64(lba, arr + 8);       /* LBA */
3226         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
3227         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
3228
3229         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3230 }
3231
3232 #define SDEBUG_RLUN_ARR_SZ 256
3233
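/* REPORT LUNS: builds the LUN list for this target, optionally appending
 * the REPORT LUNS well known LUN, truncated to the response buffer.
 */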
3234 static int resp_report_luns(struct scsi_cmnd *scp,
3235                             struct sdebug_dev_info *devip)
3236 {
3237         unsigned int alloc_len;
3238         int lun_cnt, i, upper, num, n, want_wlun, shortish;
3239         u64 lun;
3240         unsigned char *cmd = scp->cmnd;
3241         int select_report = (int)cmd[2];
3242         struct scsi_lun *one_lun;
3243         unsigned char arr[SDEBUG_RLUN_ARR_SZ];
3244         unsigned char *max_addr;
3245
3246         clear_luns_changed_on_target(devip);
3247         alloc_len = get_unaligned_be32(cmd + 6);
3248         shortish = (alloc_len < 4);
3249         if (shortish || (select_report > 2)) {
3250                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
3251                 return check_condition_result;
3252         }
3253         /* can produce response with up to 16k luns (lun 0 to lun 16383) */
3254         memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
3255         lun_cnt = sdebug_max_luns;
3256         if (1 == select_report)
3257                 lun_cnt = 0;
3258         else if (sdebug_no_lun_0 && (lun_cnt > 0))
3259                 --lun_cnt;
3260         want_wlun = (select_report > 0) ? 1 : 0;
3261         num = lun_cnt + want_wlun;
3262         arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
3263         arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
3264         n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
3265                             sizeof(struct scsi_lun)), num);
3266         if (n < num) {
3267                 want_wlun = 0;
3268                 lun_cnt = n;
3269         }
3270         one_lun = (struct scsi_lun *) &arr[8];
3271         max_addr = arr + SDEBUG_RLUN_ARR_SZ;
3272         for (i = 0, lun = (sdebug_no_lun_0 ? 1 : 0);
3273              ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
3274              i++, lun++) {
3275                 upper = (lun >> 8) & 0x3f;
3276                 if (upper)
3277                         one_lun[i].scsi_lun[0] =
3278                             (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
3279                 one_lun[i].scsi_lun[1] = lun & 0xff;
3280         }
3281         if (want_wlun) {
3282                 one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff;
3283                 one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff;
3284                 i++;
3285         }
3286         alloc_len = (unsigned char *)(one_lun + i) - arr;
3287         return fill_from_dev_buffer(scp, arr,
3288                                     min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
3289 }
3290
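/* XORs the fetched data-out buffer into the data-in buffer of a bidi
 * command; used by resp_xdwriteread_10() after its read and write phases.
 */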
3291 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3292                             unsigned int num, struct sdebug_dev_info *devip)
3293 {
3294         int j;
3295         unsigned char *kaddr, *buf;
3296         unsigned int offset;
3297         struct scsi_data_buffer *sdb = scsi_in(scp);
3298         struct sg_mapping_iter miter;
3299
3300         /* better to avoid a temporary buffer; it holds the data-out payload */
3301         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3302         if (!buf) {
3303                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3304                                 INSUFF_RES_ASCQ);
3305                 return check_condition_result;
3306         }
3307
3308         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3309
3310         offset = 0;
3311         sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3312                         SG_MITER_ATOMIC | SG_MITER_TO_SG);
3313
3314         while (sg_miter_next(&miter)) {
3315                 kaddr = miter.addr;
3316                 for (j = 0; j < miter.length; j++)
3317                         *(kaddr + j) ^= *(buf + offset + j);
3318
3319                 offset += miter.length;
3320         }
3321         sg_miter_stop(&miter);
3322         kfree(buf);
3323
3324         return 0;
3325 }
3326
3327 static int
3328 resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3329 {
3330         u8 *cmd = scp->cmnd;
3331         u64 lba;
3332         u32 num;
3333         int errsts;
3334
3335         if (!scsi_bidi_cmnd(scp)) {
3336                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3337                                 INSUFF_RES_ASCQ);
3338                 return check_condition_result;
3339         }
3340         errsts = resp_read_dt0(scp, devip);
3341         if (errsts)
3342                 return errsts;
3343         if (!(cmd[1] & 0x4)) {          /* DISABLE_WRITE is not set */
3344                 errsts = resp_write_dt0(scp, devip);
3345                 if (errsts)
3346                         return errsts;
3347         }
3348         lba = get_unaligned_be32(cmd + 2);
3349         num = get_unaligned_be16(cmd + 7);
3350         return resp_xdwriteread(scp, lba, num, devip);
3351 }
3352
3353 /* Called when the timer or tasklet of a queued command fires. */
3354 static void sdebug_q_cmd_complete(unsigned long indx)
3355 {
3356         int qa_indx;
3357         int retiring = 0;
3358         unsigned long iflags;
3359         struct sdebug_queued_cmd *sqcp;
3360         struct scsi_cmnd *scp;
3361         struct sdebug_dev_info *devip;
3362
3363         atomic_inc(&sdebug_completions);
3364         qa_indx = indx;
3365         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3366                 pr_err("wild qa_indx=%d\n", qa_indx);
3367                 return;
3368         }
3369         spin_lock_irqsave(&queued_arr_lock, iflags);
3370         sqcp = &queued_arr[qa_indx];
3371         scp = sqcp->a_cmnd;
3372         if (NULL == scp) {
3373                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3374                 pr_err("scp is NULL\n");
3375                 return;
3376         }
3377         devip = (struct sdebug_dev_info *)scp->device->hostdata;
3378         if (devip)
3379                 atomic_dec(&devip->num_in_q);
3380         else
3381                 pr_err("devip=NULL\n");
3382         if (atomic_read(&retired_max_queue) > 0)
3383                 retiring = 1;
3384
3385         sqcp->a_cmnd = NULL;
3386         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3387                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3388                 pr_err("Unexpected completion\n");
3389                 return;
3390         }
3391
3392         if (unlikely(retiring)) {       /* user has reduced max_queue */
3393                 int k, retval;
3394
3395                 retval = atomic_read(&retired_max_queue);
3396                 if (qa_indx >= retval) {
3397                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3398                         pr_err("index %d too large\n", qa_indx);
3399                         return;
3400                 }
3401                 k = find_last_bit(queued_in_use_bm, retval);
3402                 if ((k < sdebug_max_queue) || (k == retval))
3403                         atomic_set(&retired_max_queue, 0);
3404                 else
3405                         atomic_set(&retired_max_queue, k + 1);
3406         }
3407         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3408         scp->scsi_done(scp); /* callback to mid level */
3409 }
3410
3411 /* Called when the high resolution timer of a queued command fires. */
3412 static enum hrtimer_restart
3413 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3414 {
3415         int qa_indx;
3416         int retiring = 0;
3417         unsigned long iflags;
3418         struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
3419         struct sdebug_queued_cmd *sqcp;
3420         struct scsi_cmnd *scp;
3421         struct sdebug_dev_info *devip;
3422
3423         atomic_inc(&sdebug_completions);
3424         qa_indx = sd_hrtp->qa_indx;
3425         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3426                 pr_err("wild qa_indx=%d\n", qa_indx);
3427                 goto the_end;
3428         }
3429         spin_lock_irqsave(&queued_arr_lock, iflags);
3430         sqcp = &queued_arr[qa_indx];
3431         scp = sqcp->a_cmnd;
3432         if (NULL == scp) {
3433                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3434                 pr_err("scp is NULL\n");
3435                 goto the_end;
3436         }
3437         devip = (struct sdebug_dev_info *)scp->device->hostdata;
3438         if (devip)
3439                 atomic_dec(&devip->num_in_q);
3440         else
3441                 pr_err("devip=NULL\n");
3442         if (atomic_read(&retired_max_queue) > 0)
3443                 retiring = 1;
3444
3445         sqcp->a_cmnd = NULL;
3446         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3447                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3448                 pr_err("Unexpected completion\n");
3449                 goto the_end;
3450         }
3451
3452         if (unlikely(retiring)) {       /* user has reduced max_queue */
3453                 int k, retval;
3454
3455                 retval = atomic_read(&retired_max_queue);
3456                 if (qa_indx >= retval) {
3457                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3458                         pr_err("index %d too large\n", qa_indx);
3459                         goto the_end;
3460                 }
3461                 k = find_last_bit(queued_in_use_bm, retval);
3462                 if ((k < sdebug_max_queue) || (k == retval))
3463                         atomic_set(&retired_max_queue, 0);
3464                 else
3465                         atomic_set(&retired_max_queue, k + 1);
3466         }
3467         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3468         scp->scsi_done(scp); /* callback to mid level */
3469 the_end:
3470         return HRTIMER_NORESTART;
3471 }
3472
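/* Allocates a new device info node and links it onto the owning host's
 * dev_info_list.
 */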
3473 static struct sdebug_dev_info *
3474 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
3475 {
3476         struct sdebug_dev_info *devip;
3477
3478         devip = kzalloc(sizeof(*devip), flags);
3479         if (devip) {
3480                 devip->sdbg_host = sdbg_host;
3481                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3482         }
3483         return devip;
3484 }
3485
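/* Finds the sdebug_dev_info for sdev, reusing a free slot or allocating a
 * new one if needed; a newly claimed slot gets a power-on/reset unit
 * attention.
 */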
3486 static struct sdebug_dev_info *devInfoReg(struct scsi_device *sdev)
3487 {
3488         struct sdebug_host_info *sdbg_host;
3489         struct sdebug_dev_info *open_devip = NULL;
3490         struct sdebug_dev_info *devip =
3491                         (struct sdebug_dev_info *)sdev->hostdata;
3492
3493         if (devip)
3494                 return devip;
3495         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3496         if (!sdbg_host) {
3497                 pr_err("Host info NULL\n");
3498                 return NULL;
3499         }
3500         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3501                 if ((devip->used) && (devip->channel == sdev->channel) &&
3502                     (devip->target == sdev->id) &&
3503                     (devip->lun == sdev->lun))
3504                         return devip;
3505                 else {
3506                         if ((!devip->used) && (!open_devip))
3507                                 open_devip = devip;
3508                 }
3509         }
3510         if (!open_devip) { /* try to make a new one */
3511                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3512                 if (!open_devip) {
3513                         pr_err("out of memory at line %d\n", __LINE__);
3514                         return NULL;
3515                 }
3516         }
3517
3518         open_devip->channel = sdev->channel;
3519         open_devip->target = sdev->id;
3520         open_devip->lun = sdev->lun;
3521         open_devip->sdbg_host = sdbg_host;
3522         atomic_set(&open_devip->num_in_q, 0);
3523         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3524         open_devip->used = true;
3525         return open_devip;
3526 }
3527
3528 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3529 {
3530         if (sdebug_verbose)
3531                 pr_info("slave_alloc <%u %u %u %llu>\n",
3532                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3533         queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3534         return 0;
3535 }
3536
3537 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3538 {
3539         struct sdebug_dev_info *devip;
3540
3541         if (sdebug_verbose)
3542                 pr_info("slave_configure <%u %u %u %llu>\n",
3543                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3544         if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3545                 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
3546         devip = devInfoReg(sdp);
3547         if (NULL == devip)
3548                 return 1;       /* no resources, will be marked offline */
3549         sdp->hostdata = devip;
3550         blk_queue_max_segment_size(sdp->request_queue, -1U);
3551         if (sdebug_no_uld)
3552                 sdp->no_uld_attach = 1;
3553         return 0;
3554 }
3555
3556 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3557 {
3558         struct sdebug_dev_info *devip =
3559                 (struct sdebug_dev_info *)sdp->hostdata;
3560
3561         if (sdebug_verbose)
3562                 pr_info("slave_destroy <%u %u %u %llu>\n",
3563                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3564         if (devip) {
3565                 /* make this slot available for re-use */
3566                 devip->used = false;
3567                 sdp->hostdata = NULL;
3568         }
3569 }
3570
3571 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
3572 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
3573 {
3574         unsigned long iflags;
3575         int k, qmax, r_qmax;
3576         struct sdebug_queued_cmd *sqcp;
3577         struct sdebug_dev_info *devip;
3578
3579         spin_lock_irqsave(&queued_arr_lock, iflags);
3580         qmax = sdebug_max_queue;
3581         r_qmax = atomic_read(&retired_max_queue);
3582         if (r_qmax > qmax)
3583                 qmax = r_qmax;
3584         for (k = 0; k < qmax; ++k) {
3585                 if (test_bit(k, queued_in_use_bm)) {
3586                         sqcp = &queued_arr[k];
3587                         if (cmnd == sqcp->a_cmnd) {
3588                                 devip = (struct sdebug_dev_info *)
3589                                         cmnd->device->hostdata;
3590                                 if (devip)
3591                                         atomic_dec(&devip->num_in_q);
3592                                 sqcp->a_cmnd = NULL;
3593                                 spin_unlock_irqrestore(&queued_arr_lock,
3594                                                        iflags);
3595                                 if (sdebug_ndelay > 0) {
3596                                         if (sqcp->sd_hrtp)
3597                                                 hrtimer_cancel(
3598                                                         &sqcp->sd_hrtp->hrt);
3599                                 } else if (sdebug_delay > 0) {
3600                                         if (sqcp->cmnd_timerp)
3601                                                 del_timer_sync(
3602                                                         sqcp->cmnd_timerp);
3603                                 } else if (sdebug_delay < 0) {
3604                                         if (sqcp->tletp)
3605                                                 tasklet_kill(sqcp->tletp);
3606                                 }
3607                                 clear_bit(k, queued_in_use_bm);
3608                                 return 1;
3609                         }
3610                 }
3611         }
3612         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3613         return 0;
3614 }
3615
3616 /* Deletes (stops) timers or tasklets of all queued commands */
3617 static void stop_all_queued(void)
3618 {
3619         unsigned long iflags;
3620         int k;
3621         struct sdebug_queued_cmd *sqcp;
3622         struct sdebug_dev_info *devip;
3623
3624         spin_lock_irqsave(&queued_arr_lock, iflags);
3625         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3626                 if (test_bit(k, queued_in_use_bm)) {
3627                         sqcp = &queued_arr[k];
3628                         if (sqcp->a_cmnd) {
3629                                 devip = (struct sdebug_dev_info *)
3630                                         sqcp->a_cmnd->device->hostdata;
3631                                 if (devip)
3632                                         atomic_dec(&devip->num_in_q);
3633                                 sqcp->a_cmnd = NULL;
3634                                 spin_unlock_irqrestore(&queued_arr_lock,
3635                                                        iflags);
3636                                 if (sdebug_ndelay > 0) {
3637                                         if (sqcp->sd_hrtp)
3638                                                 hrtimer_cancel(
3639                                                         &sqcp->sd_hrtp->hrt);
3640                                 } else if (sdebug_delay > 0) {
3641                                         if (sqcp->cmnd_timerp)
3642                                                 del_timer_sync(
3643                                                         sqcp->cmnd_timerp);
3644                                 } else if (sdebug_delay < 0) {
3645                                         if (sqcp->tletp)
3646                                                 tasklet_kill(sqcp->tletp);
3647                                 }
3648                                 clear_bit(k, queued_in_use_bm);
3649                                 spin_lock_irqsave(&queued_arr_lock, iflags);
3650                         }
3651                 }
3652         }
3653         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3654 }
3655
3656 /* Free queued command memory on heap */
3657 static void free_all_queued(void)
3658 {
3659         unsigned long iflags;
3660         int k;
3661         struct sdebug_queued_cmd *sqcp;
3662
3663         spin_lock_irqsave(&queued_arr_lock, iflags);
3664         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3665                 sqcp = &queued_arr[k];
3666                 kfree(sqcp->cmnd_timerp);
3667                 sqcp->cmnd_timerp = NULL;
3668                 kfree(sqcp->tletp);
3669                 sqcp->tletp = NULL;
3670                 kfree(sqcp->sd_hrtp);
3671                 sqcp->sd_hrtp = NULL;
3672         }
3673         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3674 }
3675
3676 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3677 {
3678         ++num_aborts;
3679         if (SCpnt) {
3680                 if (SCpnt->device &&
3681                     (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3682                         sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
3683                                     __func__);
3684                 stop_queued_cmnd(SCpnt);
3685         }
3686         return SUCCESS;
3687 }
3688
3689 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
3690 {
3691         struct sdebug_dev_info *devip;
3692
3693         ++num_dev_resets;
3694         if (SCpnt && SCpnt->device) {
3695                 struct scsi_device *sdp = SCpnt->device;
3696
3697                 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3698                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3699                 devip = devInfoReg(sdp);
3700                 if (devip)
3701                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
3702         }
3703         return SUCCESS;
3704 }
3705
3706 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3707 {
3708         struct sdebug_host_info *sdbg_host;
3709         struct sdebug_dev_info *devip;
3710         struct scsi_device *sdp;
3711         struct Scsi_Host *hp;
3712         int k = 0;
3713
3714         ++num_target_resets;
3715         if (!SCpnt)
3716                 goto lie;
3717         sdp = SCpnt->device;
3718         if (!sdp)
3719                 goto lie;
3720         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3721                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3722         hp = sdp->host;
3723         if (!hp)
3724                 goto lie;
3725         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3726         if (sdbg_host) {
3727                 list_for_each_entry(devip,
3728                                     &sdbg_host->dev_info_list,
3729                                     dev_list)
3730                         if (devip->target == sdp->id) {
3731                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3732                                 ++k;
3733                         }
3734         }
3735         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3736                 sdev_printk(KERN_INFO, sdp,
3737                             "%s: %d device(s) found in target\n", __func__, k);
3738 lie:
3739         return SUCCESS;
3740 }
3741
3742 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
3743 {
3744         struct sdebug_host_info *sdbg_host;
3745         struct sdebug_dev_info *devip;
3746         struct scsi_device *sdp;
3747         struct Scsi_Host *hp;
3748         int k = 0;
3749
3750         ++num_bus_resets;
3751         if (!(SCpnt && SCpnt->device))
3752                 goto lie;
3753         sdp = SCpnt->device;
3754         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3755                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3756         hp = sdp->host;
3757         if (hp) {
3758                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3759                 if (sdbg_host) {
3760                         list_for_each_entry(devip,
3761                                             &sdbg_host->dev_info_list,
3762                                             dev_list) {
3763                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3764                                 ++k;
3765                         }
3766                 }
3767         }
3768         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3769                 sdev_printk(KERN_INFO, sdp,
3770                             "%s: %d device(s) found in host\n", __func__, k);
3771 lie:
3772         return SUCCESS;
3773 }
3774
3775 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
3776 {
3777         struct sdebug_host_info *sdbg_host;
3778         struct sdebug_dev_info *devip;
3779         int k = 0;
3780
3781         ++num_host_resets;
3782         if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3783                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3784         spin_lock(&sdebug_host_list_lock);
3785         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3786                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
3787                                     dev_list) {
3788                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3789                         ++k;
3790                 }
3791         }
3792         spin_unlock(&sdebug_host_list_lock);
3793         stop_all_queued();
3794         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3795                 sdev_printk(KERN_INFO, SCpnt->device,
3796                             "%s: %d device(s) found\n", __func__, k);
3797         return SUCCESS;
3798 }
3799
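/* Writes a simple MBR partition table into the ram store, dividing the
 * space after the first track into sdebug_num_parts Linux partitions.
 */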
3800 static void __init sdebug_build_parts(unsigned char *ramp,
3801                                       unsigned long store_size)
3802 {
3803         struct partition *pp;
3804         int starts[SDEBUG_MAX_PARTS + 2];
3805         int sectors_per_part, num_sectors, k;
3806         int heads_by_sects, start_sec, end_sec;
3807
3808         /* assume partition table already zeroed */
3809         if ((sdebug_num_parts < 1) || (store_size < 1048576))
3810                 return;
3811         if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
3812                 sdebug_num_parts = SDEBUG_MAX_PARTS;
3813                 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
3814         }
3815         num_sectors = (int)sdebug_store_sectors;
3816         sectors_per_part = (num_sectors - sdebug_sectors_per)
3817                            / sdebug_num_parts;
3818         heads_by_sects = sdebug_heads * sdebug_sectors_per;
3819         starts[0] = sdebug_sectors_per;
3820         for (k = 1; k < sdebug_num_parts; ++k)
3821                 starts[k] = ((k * sectors_per_part) / heads_by_sects)
3822                             * heads_by_sects;
3823         starts[sdebug_num_parts] = num_sectors;
3824         starts[sdebug_num_parts + 1] = 0;
3825
3826         ramp[510] = 0x55;       /* magic partition markings */
3827         ramp[511] = 0xAA;
3828         pp = (struct partition *)(ramp + 0x1be);
3829         for (k = 0; starts[k + 1]; ++k, ++pp) {
3830                 start_sec = starts[k];
3831                 end_sec = starts[k + 1] - 1;
3832                 pp->boot_ind = 0;
3833
3834                 pp->cyl = start_sec / heads_by_sects;
3835                 pp->head = (start_sec - (pp->cyl * heads_by_sects))
3836                            / sdebug_sectors_per;
3837                 pp->sector = (start_sec % sdebug_sectors_per) + 1;
3838
3839                 pp->end_cyl = end_sec / heads_by_sects;
3840                 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3841                                / sdebug_sectors_per;
3842                 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3843
3844                 pp->start_sect = cpu_to_le32(start_sec);
3845                 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3846                 pp->sys_ind = 0x83;     /* plain Linux partition */
3847         }
3848 }
3849
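/* Queues the command's completion via a timer, hrtimer or tasklet
 * according to delta_jiff and ndelay; delta_jiff == 0 completes in the
 * caller's thread, and TASK SET FULL is returned when the device queue
 * is full (or occasionally when SDEBUG_OPT_RARE_TSF is set).
 */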
3850 static int
3851 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3852               int scsi_result, int delta_jiff)
3853 {
3854         unsigned long iflags;
3855         int k, num_in_q, qdepth, inject;
3856         struct sdebug_queued_cmd *sqcp = NULL;
3857         struct scsi_device *sdp;
3858
3859         /* this should never happen */
3860         if (WARN_ON(!cmnd))
3861                 return SCSI_MLQUEUE_HOST_BUSY;
3862
3863         if (NULL == devip) {
3864                 pr_warn("called with devip == NULL\n");
3865                 /* no particularly good error to report back */
3866                 return SCSI_MLQUEUE_HOST_BUSY;
3867         }
3868
3869         sdp = cmnd->device;
3870
3871         if (sdebug_verbose && scsi_result)
3872                 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3873                             __func__, scsi_result);
3874         if (delta_jiff == 0)
3875                 goto respond_in_thread;
3876
3877         /* schedule the response at a later time if resources permit */
3878         spin_lock_irqsave(&queued_arr_lock, iflags);
3879         num_in_q = atomic_read(&devip->num_in_q);
3880         qdepth = cmnd->device->queue_depth;
3881         inject = 0;
3882         if ((qdepth > 0) && (num_in_q >= qdepth)) {
3883                 if (scsi_result) {
3884                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3885                         goto respond_in_thread;
3886                 } else
3887                         scsi_result = device_qfull_result;
3888         } else if ((sdebug_every_nth != 0) &&
3889                    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
3890                    (scsi_result == 0)) {
3891                 if ((num_in_q == (qdepth - 1)) &&
3892                     (atomic_inc_return(&sdebug_a_tsf) >=
3893                      abs(sdebug_every_nth))) {
3894                         atomic_set(&sdebug_a_tsf, 0);
3895                         inject = 1;
3896                         scsi_result = device_qfull_result;
3897                 }
3898         }
3899
3900         k = find_first_zero_bit(queued_in_use_bm, sdebug_max_queue);
3901         if (k >= sdebug_max_queue) {
3902                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3903                 if (scsi_result)
3904                         goto respond_in_thread;
3905                 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
3906                         scsi_result = device_qfull_result;
3907                 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
3908                         sdev_printk(KERN_INFO, sdp,
3909                                     "%s: max_queue=%d exceeded, %s\n",
3910                                     __func__, sdebug_max_queue,
3911                                     (scsi_result ?  "status: TASK SET FULL" :
3912                                                     "report: host busy"));
3913                 if (scsi_result)
3914                         goto respond_in_thread;
3915                 else
3916                         return SCSI_MLQUEUE_HOST_BUSY;
3917         }
3918         __set_bit(k, queued_in_use_bm);
3919         atomic_inc(&devip->num_in_q);
3920         sqcp = &queued_arr[k];
3921         sqcp->a_cmnd = cmnd;
3922         cmnd->result = scsi_result;
3923         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3924         if (delta_jiff > 0) {
3925                 if (NULL == sqcp->cmnd_timerp) {
3926                         sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
3927                                                     GFP_ATOMIC);
3928                         if (NULL == sqcp->cmnd_timerp)
3929                                 return SCSI_MLQUEUE_HOST_BUSY;
3930                         init_timer(sqcp->cmnd_timerp);
3931                 }
3932                 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
3933                 sqcp->cmnd_timerp->data = k;
3934                 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
3935                 add_timer(sqcp->cmnd_timerp);
3936         } else if (sdebug_ndelay > 0) {
3937                 ktime_t kt = ktime_set(0, sdebug_ndelay);
3938                 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
3939
3940                 if (NULL == sd_hp) {
3941                         sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
3942                         if (NULL == sd_hp)
3943                                 return SCSI_MLQUEUE_HOST_BUSY;
3944                         sqcp->sd_hrtp = sd_hp;
3945                         hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
3946                                      HRTIMER_MODE_REL);
3947                         sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
3948                         sd_hp->qa_indx = k;
3949                 }
3950                 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
3951         } else {        /* delay < 0 */
3952                 if (NULL == sqcp->tletp) {
3953                         sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
3954                                               GFP_ATOMIC);
3955                         if (NULL == sqcp->tletp)
3956                                 return SCSI_MLQUEUE_HOST_BUSY;
3957                         tasklet_init(sqcp->tletp,
3958                                      sdebug_q_cmd_complete, k);
3959                 }
3960                 if (-1 == delta_jiff)
3961                         tasklet_hi_schedule(sqcp->tletp);
3962                 else
3963                         tasklet_schedule(sqcp->tletp);
3964         }
3965         if ((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
3966             (scsi_result == device_qfull_result))
3967                 sdev_printk(KERN_INFO, sdp,
3968                             "%s: num_in_q=%d +1, %s%s\n", __func__,
3969                             num_in_q, (inject ? "<inject> " : ""),
3970                             "status: TASK SET FULL");
3971         return 0;
3972
3973 respond_in_thread:      /* call back to mid-layer using invocation thread */
3974         cmnd->result = scsi_result;
3975         cmnd->scsi_done(cmnd);
3976         return 0;
3977 }
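
/* Summary of how schedule_resp() completes commands:
 *   delta_jiff == 0            : respond directly in the caller's thread
 *   delta_jiff > 0             : a per-slot timer fires after that many jiffies
 *   delta_jiff < 0, ndelay > 0 : an hrtimer gives a nanosecond-scale delay
 *   delta_jiff < 0, otherwise  : a tasklet runs the completion (-1 -> hi prio)
 * Illustrative usage only (parameter names are from the list below), e.g.:
 *   modprobe scsi_debug delay=2        # respond after ~2 jiffies
 *   modprobe scsi_debug ndelay=50000   # respond after ~50 microseconds
 */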
3978
3979 /* Note: The following macros create attribute files in the
3980    /sys/module/scsi_debug/parameters directory. Unfortunately this
3981    driver is unaware of a change to one of these parameters and so
3982    cannot trigger auxiliary actions, as it can when the corresponding
3983    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is
3984    changed.  */
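
/* For example (illustrative only), a parameter declared with S_IWUSR below
 * can be changed at runtime through the file this macro creates:
 *   echo 4 > /sys/module/scsi_debug/parameters/every_nth
 * but, per the note above, no auxiliary driver action is triggered; prefer
 * the corresponding /sys/bus/pseudo/drivers/scsi_debug/every_nth file when
 * such actions matter (it also resets the internal command count).
 */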
3985 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
3986 module_param_named(ato, sdebug_ato, int, S_IRUGO);
3987 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
3988 module_param_named(delay, sdebug_delay, int, S_IRUGO | S_IWUSR);
3989 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
3990 module_param_named(dif, sdebug_dif, int, S_IRUGO);
3991 module_param_named(dix, sdebug_dix, int, S_IRUGO);
3992 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
3993 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
3994 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
3995 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
3996 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
3997 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
3998 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
3999 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
4000 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
4001 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
4002 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
4003 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
4004 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
4005 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
4006 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
4007 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
4008 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
4009 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
4010 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
4011 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
4012 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
4013 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
4014 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
4015 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
4016 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
4017 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
4018 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
4019 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
4020 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
4021 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
4022 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
4023                    S_IRUGO | S_IWUSR);
4024 module_param_named(write_same_length, sdebug_write_same_length, int,
4025                    S_IRUGO | S_IWUSR);
4026
4027 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4028 MODULE_DESCRIPTION("SCSI debug adapter driver");
4029 MODULE_LICENSE("GPL");
4030 MODULE_VERSION(SCSI_DEBUG_VERSION);
4031
4032 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4033 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4034 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4035 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4036 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4037 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4038 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4039 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4040 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4041 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4042 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4043 MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
4044 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4045 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4046 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4047 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
4048 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4049 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
4050 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4051 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4052 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4053 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4054 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
4055 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4056 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
4057 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4058 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4059 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4060 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4061 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
4062 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4063 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4064 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4065 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4066 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4067 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4068 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4069 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4070 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4071
4072 static char sdebug_info[256];
4073
4074 static const char * scsi_debug_info(struct Scsi_Host * shp)
4075 {
4076         sprintf(sdebug_info, "scsi_debug, version %s [%s], "
4077                 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
4078                 sdebug_version_date, sdebug_dev_size_mb, sdebug_opts);
4079         return sdebug_info;
4080 }
4081
4082 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4083 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
4084 {
4085         char arr[16];
4086         int opts;
4087         int minLen = length > 15 ? 15 : length;
4088
4089         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4090                 return -EACCES;
4091         memcpy(arr, buffer, minLen);
4092         arr[minLen] = '\0';
4093         if (1 != sscanf(arr, "%d", &opts))
4094                 return -EINVAL;
4095         sdebug_opts = opts;
4096         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4097         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4098         if (sdebug_every_nth != 0)
4099                 atomic_set(&sdebug_cmnd_count, 0);
4100         return length;
4101 }
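
/* e.g. (a sketch): turn on verbose command logging via the proc interface for
 * host id 0; the value written becomes the new opts (1 -> noise, per the opts
 * parameter description above):
 *   echo 1 > /proc/scsi/scsi_debug/0
 */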
4102
4103 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4104  * same for each scsi_debug host (if there is more than one). Some of the
4105  * counters that are output are not atomic, so they may be inaccurate on a busy system. */
4106 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4107 {
4108         int f, l;
4109         char b[32];
4110
4111         if (sdebug_every_nth > 0)
4112                 snprintf(b, sizeof(b), " (curr:%d)",
4113                          ((SDEBUG_OPT_RARE_TSF & sdebug_opts) ?
4114                                 atomic_read(&sdebug_a_tsf) :
4115                                 atomic_read(&sdebug_cmnd_count)));
4116         else
4117                 b[0] = '\0';
4118
4119         seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
4120                 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
4121                 "every_nth=%d%s\n"
4122                 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
4123                 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
4124                 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
4125                 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
4126                 "usec_in_jiffy=%lu\n",
4127                 SCSI_DEBUG_VERSION, sdebug_version_date,
4128                 sdebug_num_tgts, sdebug_dev_size_mb, sdebug_opts,
4129                 sdebug_every_nth, b, sdebug_delay, sdebug_ndelay,
4130                 sdebug_max_luns, atomic_read(&sdebug_completions),
4131                 sdebug_sector_size, sdebug_cylinders_per, sdebug_heads,
4132                 sdebug_sectors_per, num_aborts, num_dev_resets,
4133                 num_target_resets, num_bus_resets, num_host_resets,
4134                 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
4135
4136         f = find_first_bit(queued_in_use_bm, sdebug_max_queue);
4137         if (f != sdebug_max_queue) {
4138                 l = find_last_bit(queued_in_use_bm, sdebug_max_queue);
4139                 seq_printf(m, "   %s BUSY: first,last bits set: %d,%d\n",
4140                            "queued_in_use_bm", f, l);
4141         }
4142         return 0;
4143 }
4144
4145 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4146 {
4147         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_delay);
4148 }
4149 /* Returns -EBUSY if delay is being changed and commands are queued */
4150 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4151                            size_t count)
4152 {
4153         int delay, res;
4154
4155         if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
4156                 res = count;
4157                 if (sdebug_delay != delay) {
4158                         unsigned long iflags;
4159                         int k;
4160
4161                         spin_lock_irqsave(&queued_arr_lock, iflags);
4162                         k = find_first_bit(queued_in_use_bm, sdebug_max_queue);
4163                         if (k != sdebug_max_queue)
4164                                 res = -EBUSY;   /* have queued commands */
4165                         else {
4166                                 sdebug_delay = delay;
4167                                 sdebug_ndelay = 0;
4168                         }
4169                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4170                 }
4171                 return res;
4172         }
4173         return -EINVAL;
4174 }
4175 static DRIVER_ATTR_RW(delay);
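
/* e.g. (a sketch): switch to immediate, in-thread completions at runtime:
 *   echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * delay_store() above refuses the change with -EBUSY while any command is
 * still queued; a successful change also clears ndelay.
 */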
4176
4177 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4178 {
4179         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4180 }
4181 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4182 /* If > 0 and accepted then sdebug_delay is set to DELAY_OVERRIDDEN */
4183 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4184                            size_t count)
4185 {
4186         unsigned long iflags;
4187         int ndelay, res, k;
4188
4189         if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4190             (ndelay >= 0) && (ndelay < 1000000000)) {
4191                 res = count;
4192                 if (sdebug_ndelay != ndelay) {
4193                         spin_lock_irqsave(&queued_arr_lock, iflags);
4194                         k = find_first_bit(queued_in_use_bm, sdebug_max_queue);
4195                         if (k != sdebug_max_queue)
4196                                 res = -EBUSY;   /* have queued commands */
4197                         else {
4198                                 sdebug_ndelay = ndelay;
4199                                 sdebug_delay = ndelay ? DELAY_OVERRIDDEN
4200                                                           : DEF_DELAY;
4201                         }
4202                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4203                 }
4204                 return res;
4205         }
4206         return -EINVAL;
4207 }
4208 static DRIVER_ATTR_RW(ndelay);
4209
4210 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4211 {
4212         return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4213 }
4214
4215 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4216                           size_t count)
4217 {
4218         int opts;
4219         char work[20];
4220
4221         if (1 == sscanf(buf, "%10s", work)) {
4222                 if (0 == strncasecmp(work, "0x", 2)) {
4223                         if (1 == sscanf(&work[2], "%x", &opts))
4224                                 goto opts_done;
4225                 } else {
4226                         if (1 == sscanf(work, "%d", &opts))
4227                                 goto opts_done;
4228                 }
4229         }
4230         return -EINVAL;
4231 opts_done:
4232         sdebug_opts = opts;
4233         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4234         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4235         atomic_set(&sdebug_cmnd_count, 0);
4236         atomic_set(&sdebug_a_tsf, 0);
4237         return count;
4238 }
4239 static DRIVER_ATTR_RW(opts);
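
/* e.g. (a sketch): opts_store() accepts decimal or "0x"-prefixed hex, so the
 * following are equivalent ways to request noise plus timeout injection
 * (1 + 4, per the opts parameter description above):
 *   echo 5   > /sys/bus/pseudo/drivers/scsi_debug/opts
 *   echo 0x5 > /sys/bus/pseudo/drivers/scsi_debug/opts
 */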
4240
4241 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4242 {
4243         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4244 }
4245 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4246                            size_t count)
4247 {
4248         int n;
4249
4250         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4251                 sdebug_ptype = n;
4252                 return count;
4253         }
4254         return -EINVAL;
4255 }
4256 static DRIVER_ATTR_RW(ptype);
4257
4258 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4259 {
4260         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4261 }
4262 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4263                             size_t count)
4264 {
4265         int n;
4266
4267         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4268                 sdebug_dsense = n;
4269                 return count;
4270         }
4271         return -EINVAL;
4272 }
4273 static DRIVER_ATTR_RW(dsense);
4274
4275 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4276 {
4277         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4278 }
4279 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4280                              size_t count)
4281 {
4282         int n;
4283
4284         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4285                 n = (n > 0);
4286                 sdebug_fake_rw = (sdebug_fake_rw > 0);
4287                 if (sdebug_fake_rw != n) {
4288                         if ((0 == n) && (NULL == fake_storep)) {
4289                                 unsigned long sz =
4290                                         (unsigned long)sdebug_dev_size_mb *
4291                                         1048576;
4292
4293                                 fake_storep = vmalloc(sz);
4294                                 if (NULL == fake_storep) {
4295                                         pr_err("out of memory, 9\n");
4296                                         return -ENOMEM;
4297                                 }
4298                                 memset(fake_storep, 0, sz);
4299                         }
4300                         sdebug_fake_rw = n;
4301                 }
4302                 return count;
4303         }
4304         return -EINVAL;
4305 }
4306 static DRIVER_ATTR_RW(fake_rw);
4307
4308 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4309 {
4310         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4311 }
4312 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4313                               size_t count)
4314 {
4315         int n;
4316
4317         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4318                 sdebug_no_lun_0 = n;
4319                 return count;
4320         }
4321         return -EINVAL;
4322 }
4323 static DRIVER_ATTR_RW(no_lun_0);
4324
4325 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4326 {
4327         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4328 }
4329 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4330                               size_t count)
4331 {
4332         int n;
4333
4334         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4335                 sdebug_num_tgts = n;
4336                 sdebug_max_tgts_luns();
4337                 return count;
4338         }
4339         return -EINVAL;
4340 }
4341 static DRIVER_ATTR_RW(num_tgts);
4342
4343 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4344 {
4345         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4346 }
4347 static DRIVER_ATTR_RO(dev_size_mb);
4348
4349 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4350 {
4351         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4352 }
4353 static DRIVER_ATTR_RO(num_parts);
4354
4355 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4356 {
4357         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4358 }
4359 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4360                                size_t count)
4361 {
4362         int nth;
4363
4364         if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4365                 sdebug_every_nth = nth;
4366                 atomic_set(&sdebug_cmnd_count, 0);
4367                 return count;
4368         }
4369         return -EINVAL;
4370 }
4371 static DRIVER_ATTR_RW(every_nth);
4372
4373 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4374 {
4375         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4376 }
4377 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4378                               size_t count)
4379 {
4380         int n;
4381         bool changed;
4382
4383         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4384                 changed = (sdebug_max_luns != n);
4385                 sdebug_max_luns = n;
4386                 sdebug_max_tgts_luns();
4387                 if (changed && (sdebug_scsi_level >= 5)) {      /* >= SPC-3 */
4388                         struct sdebug_host_info *sdhp;
4389                         struct sdebug_dev_info *dp;
4390
4391                         spin_lock(&sdebug_host_list_lock);
4392                         list_for_each_entry(sdhp, &sdebug_host_list,
4393                                             host_list) {
4394                                 list_for_each_entry(dp, &sdhp->dev_info_list,
4395                                                     dev_list) {
4396                                         set_bit(SDEBUG_UA_LUNS_CHANGED,
4397                                                 dp->uas_bm);
4398                                 }
4399                         }
4400                         spin_unlock(&sdebug_host_list_lock);
4401                 }
4402                 return count;
4403         }
4404         return -EINVAL;
4405 }
4406 static DRIVER_ATTR_RW(max_luns);
4407
4408 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4409 {
4410         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4411 }
4412 /* N.B. max_queue can be changed while there are queued commands. In-flight
4413  * commands beyond the new max_queue will still be completed. */
4414 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4415                                size_t count)
4416 {
4417         unsigned long iflags;
4418         int n, k;
4419
4420         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4421             (n <= SCSI_DEBUG_CANQUEUE)) {
4422                 spin_lock_irqsave(&queued_arr_lock, iflags);
4423                 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
4424                 sdebug_max_queue = n;
4425                 if (SCSI_DEBUG_CANQUEUE == k)
4426                         atomic_set(&retired_max_queue, 0);
4427                 else if (k >= n)
4428                         atomic_set(&retired_max_queue, k + 1);
4429                 else
4430                         atomic_set(&retired_max_queue, 0);
4431                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4432                 return count;
4433         }
4434         return -EINVAL;
4435 }
4436 static DRIVER_ATTR_RW(max_queue);
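
/* Note: when max_queue is lowered below the highest in-use slot,
 * retired_max_queue (set above to that slot + 1) lets the completion paths
 * keep servicing commands already queued beyond the new limit (see the
 * N.B. before max_queue_store() above). */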
4437
4438 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4439 {
4440         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4441 }
4442 static DRIVER_ATTR_RO(no_uld);
4443
4444 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4445 {
4446         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4447 }
4448 static DRIVER_ATTR_RO(scsi_level);
4449
4450 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4451 {
4452         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4453 }
4454 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4455                                 size_t count)
4456 {
4457         int n;
4458         bool changed;
4459
4460         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4461                 changed = (sdebug_virtual_gb != n);
4462                 sdebug_virtual_gb = n;
4463                 sdebug_capacity = get_sdebug_capacity();
4464                 if (changed) {
4465                         struct sdebug_host_info *sdhp;
4466                         struct sdebug_dev_info *dp;
4467
4468                         spin_lock(&sdebug_host_list_lock);
4469                         list_for_each_entry(sdhp, &sdebug_host_list,
4470                                             host_list) {
4471                                 list_for_each_entry(dp, &sdhp->dev_info_list,
4472                                                     dev_list) {
4473                                         set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4474                                                 dp->uas_bm);
4475                                 }
4476                         }
4477                         spin_unlock(&sdebug_host_list_lock);
4478                 }
4479                 return count;
4480         }
4481         return -EINVAL;
4482 }
4483 static DRIVER_ATTR_RW(virtual_gb);
4484
4485 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4486 {
4487         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4488 }
4489
4490 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4491                               size_t count)
4492 {
4493         int delta_hosts;
4494
4495         if (sscanf(buf, "%d", &delta_hosts) != 1)
4496                 return -EINVAL;
4497         if (delta_hosts > 0) {
4498                 do {
4499                         sdebug_add_adapter();
4500                 } while (--delta_hosts);
4501         } else if (delta_hosts < 0) {
4502                 do {
4503                         sdebug_remove_adapter();
4504                 } while (++delta_hosts);
4505         }
4506         return count;
4507 }
4508 static DRIVER_ATTR_RW(add_host);
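
/* e.g. (a sketch): grow and shrink the set of simulated hosts at runtime:
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host     # add two hosts
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host    # remove one
 */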
4509
4510 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4511 {
4512         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
4513 }
4514 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4515                                     size_t count)
4516 {
4517         int n;
4518
4519         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4520                 sdebug_vpd_use_hostno = n;
4521                 return count;
4522         }
4523         return -EINVAL;
4524 }
4525 static DRIVER_ATTR_RW(vpd_use_hostno);
4526
4527 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4528 {
4529         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
4530 }
4531 static DRIVER_ATTR_RO(sector_size);
4532
4533 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4534 {
4535         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
4536 }
4537 static DRIVER_ATTR_RO(dix);
4538
4539 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4540 {
4541         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
4542 }
4543 static DRIVER_ATTR_RO(dif);
4544
4545 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4546 {
4547         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
4548 }
4549 static DRIVER_ATTR_RO(guard);
4550
4551 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4552 {
4553         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
4554 }
4555 static DRIVER_ATTR_RO(ato);
4556
4557 static ssize_t map_show(struct device_driver *ddp, char *buf)
4558 {
4559         ssize_t count;
4560
4561         if (!scsi_debug_lbp())
4562                 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4563                                  sdebug_store_sectors);
4564
4565         count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
4566                           (int)map_size, map_storep);
4567         buf[count++] = '\n';
4568         buf[count] = '\0';
4569
4570         return count;
4571 }
4572 static DRIVER_ATTR_RO(map);
4573
4574 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4575 {
4576         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
4577 }
4578 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4579                                size_t count)
4580 {
4581         int n;
4582
4583         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4584                 sdebug_removable = (n > 0);
4585                 return count;
4586         }
4587         return -EINVAL;
4588 }
4589 static DRIVER_ATTR_RW(removable);
4590
4591 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4592 {
4593         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
4594 }
4595 /* Returns -EBUSY if host_lock is being changed and commands are queued */
4596 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4597                                size_t count)
4598 {
4599         int n, res;
4600
4601         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4602                 bool new_host_lock = (n > 0);
4603
4604                 res = count;
4605                 if (new_host_lock != sdebug_host_lock) {
4606                         unsigned long iflags;
4607                         int k;
4608
4609                         spin_lock_irqsave(&queued_arr_lock, iflags);
4610                         k = find_first_bit(queued_in_use_bm,
4611                                            sdebug_max_queue);
4612                         if (k != sdebug_max_queue)
4613                                 res = -EBUSY;   /* have queued commands */
4614                         else
4615                                 sdebug_host_lock = new_host_lock;
4616                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4617                 }
4618                 return res;
4619         }
4620         return -EINVAL;
4621 }
4622 static DRIVER_ATTR_RW(host_lock);
4623
4624 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4625 {
4626         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
4627 }
4628 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4629                             size_t count)
4630 {
4631         int n;
4632
4633         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4634                 sdebug_strict = (n > 0);
4635                 return count;
4636         }
4637         return -EINVAL;
4638 }
4639 static DRIVER_ATTR_RW(strict);
4640
4641
4642 /* Note: The following array creates attribute files in the
4643    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4644    files (over those found in the /sys/module/scsi_debug/parameters
4645    directory) is that auxiliary actions can be triggered when an attribute
4646    is changed. For example see add_host_store() above.
4647  */
4648
4649 static struct attribute *sdebug_drv_attrs[] = {
4650         &driver_attr_delay.attr,
4651         &driver_attr_opts.attr,
4652         &driver_attr_ptype.attr,
4653         &driver_attr_dsense.attr,
4654         &driver_attr_fake_rw.attr,
4655         &driver_attr_no_lun_0.attr,
4656         &driver_attr_num_tgts.attr,
4657         &driver_attr_dev_size_mb.attr,
4658         &driver_attr_num_parts.attr,
4659         &driver_attr_every_nth.attr,
4660         &driver_attr_max_luns.attr,
4661         &driver_attr_max_queue.attr,
4662         &driver_attr_no_uld.attr,
4663         &driver_attr_scsi_level.attr,
4664         &driver_attr_virtual_gb.attr,
4665         &driver_attr_add_host.attr,
4666         &driver_attr_vpd_use_hostno.attr,
4667         &driver_attr_sector_size.attr,
4668         &driver_attr_dix.attr,
4669         &driver_attr_dif.attr,
4670         &driver_attr_guard.attr,
4671         &driver_attr_ato.attr,
4672         &driver_attr_map.attr,
4673         &driver_attr_removable.attr,
4674         &driver_attr_host_lock.attr,
4675         &driver_attr_ndelay.attr,
4676         &driver_attr_strict.attr,
4677         NULL,
4678 };
4679 ATTRIBUTE_GROUPS(sdebug_drv);
4680
4681 static struct device *pseudo_primary;
4682
4683 static int __init scsi_debug_init(void)
4684 {
4685         unsigned long sz;
4686         int host_to_add;
4687         int k;
4688         int ret;
4689
4690         atomic_set(&sdebug_cmnd_count, 0);
4691         atomic_set(&sdebug_completions, 0);
4692         atomic_set(&retired_max_queue, 0);
4693
4694         if (sdebug_ndelay >= 1000 * 1000 * 1000) {
4695                 pr_warn("ndelay must be less than 1 second, ignored\n");
4696                 sdebug_ndelay = 0;
4697         } else if (sdebug_ndelay > 0)
4698                 sdebug_delay = DELAY_OVERRIDDEN;
4699
4700         switch (sdebug_sector_size) {
4701         case  512:
4702         case 1024:
4703         case 2048:
4704         case 4096:
4705                 break;
4706         default:
4707                 pr_err("invalid sector_size %d\n", sdebug_sector_size);
4708                 return -EINVAL;
4709         }
4710
4711         switch (sdebug_dif) {
4712
4713         case SD_DIF_TYPE0_PROTECTION:
4714         case SD_DIF_TYPE1_PROTECTION:
4715         case SD_DIF_TYPE2_PROTECTION:
4716         case SD_DIF_TYPE3_PROTECTION:
4717                 break;
4718
4719         default:
4720                 pr_err("dif must be 0, 1, 2 or 3\n");
4721                 return -EINVAL;
4722         }
4723
4724         if (sdebug_guard > 1) {
4725                 pr_err("guard must be 0 or 1\n");
4726                 return -EINVAL;
4727         }
4728
4729         if (sdebug_ato > 1) {
4730                 pr_err("ato must be 0 or 1\n");
4731                 return -EINVAL;
4732         }
4733
4734         if (sdebug_physblk_exp > 15) {
4735                 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
4736                 return -EINVAL;
4737         }
4738
4739         if (sdebug_lowest_aligned > 0x3fff) {
4740                 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
4741                 return -EINVAL;
4742         }
4743
4744         if (sdebug_dev_size_mb < 1)
4745                 sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
4746         sz = (unsigned long)sdebug_dev_size_mb * 1048576;
4747         sdebug_store_sectors = sz / sdebug_sector_size;
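        /* capacity in logical blocks: derived from virtual_gb when that is
         * non-zero, otherwise from the dev_size_mb ram image sized above
         * (see the virtual_gb parameter description above) */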
4748         sdebug_capacity = get_sdebug_capacity();
4749
4750         /* play around with geometry, don't waste too much on track 0 */
4751         sdebug_heads = 8;
4752         sdebug_sectors_per = 32;
4753         if (sdebug_dev_size_mb >= 256)
4754                 sdebug_heads = 64;
4755         else if (sdebug_dev_size_mb >= 16)
4756                 sdebug_heads = 32;
4757         sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4758                                (sdebug_sectors_per * sdebug_heads);
4759         if (sdebug_cylinders_per >= 1024) {
4760                 /* other LLDs do this; implies >= 1GB ram disk ... */
4761                 sdebug_heads = 255;
4762                 sdebug_sectors_per = 63;
4763                 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4764                                (sdebug_sectors_per * sdebug_heads);
4765         }
4766
4767         if (0 == sdebug_fake_rw) {
4768                 fake_storep = vmalloc(sz);
4769                 if (NULL == fake_storep) {
4770                         pr_err("out of memory, 1\n");
4771                         return -ENOMEM;
4772                 }
4773                 memset(fake_storep, 0, sz);
4774                 if (sdebug_num_parts > 0)
4775                         sdebug_build_parts(fake_storep, sz);
4776         }
4777
4778         if (sdebug_dix) {
4779                 int dif_size;
4780
4781                 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4782                 dif_storep = vmalloc(dif_size);
4783
4784                 pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
4785
4786                 if (dif_storep == NULL) {
4787                         pr_err("out of mem. (DIX)\n");
4788                         ret = -ENOMEM;
4789                         goto free_vm;
4790                 }
4791
4792                 memset(dif_storep, 0xff, dif_size);
4793         }
4794
4795         /* Logical Block Provisioning */
4796         if (scsi_debug_lbp()) {
4797                 sdebug_unmap_max_blocks =
4798                         clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
4799
4800                 sdebug_unmap_max_desc =
4801                         clamp(sdebug_unmap_max_desc, 0U, 256U);
4802
4803                 sdebug_unmap_granularity =
4804                         clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
4805
4806                 if (sdebug_unmap_alignment &&
4807                     sdebug_unmap_granularity <=
4808                     sdebug_unmap_alignment) {
4809                         pr_err("ERR: unmap_granularity <= unmap_alignment\n");
4810                         return -EINVAL;
4811                 }
4812
4813                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4814                 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4815
4816                 pr_info("%lu provisioning blocks\n", map_size);
4817
4818                 if (map_storep == NULL) {
4819                         pr_err("out of mem. (MAP)\n");
4820                         ret = -ENOMEM;
4821                         goto free_vm;
4822                 }
4823
4824                 bitmap_zero(map_storep, map_size);
4825
4826                 /* Map first 1KB for partition table */
4827                 if (sdebug_num_parts)
4828                         map_region(0, 2);
4829         }
4830
4831         pseudo_primary = root_device_register("pseudo_0");
4832         if (IS_ERR(pseudo_primary)) {
4833                 pr_warn("root_device_register() error\n");
4834                 ret = PTR_ERR(pseudo_primary);
4835                 goto free_vm;
4836         }
4837         ret = bus_register(&pseudo_lld_bus);
4838         if (ret < 0) {
4839                 pr_warn("bus_register error: %d\n", ret);
4840                 goto dev_unreg;
4841         }
4842         ret = driver_register(&sdebug_driverfs_driver);
4843         if (ret < 0) {
4844                 pr_warn("driver_register error: %d\n", ret);
4845                 goto bus_unreg;
4846         }
4847
4848         host_to_add = sdebug_add_host;
4849         sdebug_add_host = 0;
4850
4851         for (k = 0; k < host_to_add; k++) {
4852                 if (sdebug_add_adapter()) {
4853                         pr_err("sdebug_add_adapter failed k=%d\n", k);
4854                         break;
4855                 }
4856         }
4857
4858         if (sdebug_verbose)
4859                 pr_info("built %d host(s)\n", sdebug_add_host);
4860
4861         return 0;
4862
4863 bus_unreg:
4864         bus_unregister(&pseudo_lld_bus);
4865 dev_unreg:
4866         root_device_unregister(pseudo_primary);
4867 free_vm:
4868         vfree(map_storep);
4869         vfree(dif_storep);
4870         vfree(fake_storep);
4871
4872         return ret;
4873 }
4874
4875 static void __exit scsi_debug_exit(void)
4876 {
4877         int k = sdebug_add_host;
4878
4879         stop_all_queued();
4880         free_all_queued();
4881         for (; k; k--)
4882                 sdebug_remove_adapter();
4883         driver_unregister(&sdebug_driverfs_driver);
4884         bus_unregister(&pseudo_lld_bus);
4885         root_device_unregister(pseudo_primary);
4886
              vfree(map_storep);      /* also free the LBP map, if allocated */
4887         vfree(dif_storep);
4888         vfree(fake_storep);
4889 }
4890
4891 device_initcall(scsi_debug_init);
4892 module_exit(scsi_debug_exit);
4893
4894 static void sdebug_release_adapter(struct device * dev)
4895 {
4896         struct sdebug_host_info *sdbg_host;
4897
4898         sdbg_host = to_sdebug_host(dev);
4899         kfree(sdbg_host);
4900 }
4901
4902 static int sdebug_add_adapter(void)
4903 {
4904         int k, devs_per_host;
4905         int error = 0;
4906         struct sdebug_host_info *sdbg_host;
4907         struct sdebug_dev_info *sdbg_devinfo, *tmp;
4908
4909         sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
4910         if (NULL == sdbg_host) {
4911                 pr_err("out of memory at line %d\n", __LINE__);
4912                 return -ENOMEM;
4913         }
4914
4915         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
4916
4917         devs_per_host = sdebug_num_tgts * sdebug_max_luns;
4918         for (k = 0; k < devs_per_host; k++) {
4919                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
4920                 if (!sdbg_devinfo) {
4921                         pr_err("out of memory at line %d\n", __LINE__);
4922                         error = -ENOMEM;
4923                         goto clean;
4924                 }
4925         }
4926
4927         spin_lock(&sdebug_host_list_lock);
4928         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
4929         spin_unlock(&sdebug_host_list_lock);
4930
4931         sdbg_host->dev.bus = &pseudo_lld_bus;
4932         sdbg_host->dev.parent = pseudo_primary;
4933         sdbg_host->dev.release = &sdebug_release_adapter;
4934         dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
4935
4936         error = device_register(&sdbg_host->dev);
4937
4938         if (error)
4939                 goto clean;
4940
4941         ++sdebug_add_host;
4942         return error;
4943
4944 clean:
4945         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4946                                  dev_list) {
4947                 list_del(&sdbg_devinfo->dev_list);
4948                 kfree(sdbg_devinfo);
4949         }
4950
4951         kfree(sdbg_host);
4952         return error;
4953 }
4954
4955 static void sdebug_remove_adapter(void)
4956 {
4957         struct sdebug_host_info * sdbg_host = NULL;
4958
4959         spin_lock(&sdebug_host_list_lock);
4960         if (!list_empty(&sdebug_host_list)) {
4961                 sdbg_host = list_entry(sdebug_host_list.prev,
4962                                        struct sdebug_host_info, host_list);
4963                 list_del(&sdbg_host->host_list);
4964         }
4965         spin_unlock(&sdebug_host_list_lock);
4966
4967         if (!sdbg_host)
4968                 return;
4969
4970         device_unregister(&sdbg_host->dev);
4971         --sdebug_add_host;
4972 }
4973
4974 static int
4975 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4976 {
4977         int num_in_q = 0;
4978         unsigned long iflags;
4979         struct sdebug_dev_info *devip;
4980
4981         spin_lock_irqsave(&queued_arr_lock, iflags);
4982         devip = (struct sdebug_dev_info *)sdev->hostdata;
4983         if (NULL == devip) {
4984                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4985                 return  -ENODEV;
4986         }
4987         num_in_q = atomic_read(&devip->num_in_q);
4988         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4989
4990         if (qdepth < 1)
4991                 qdepth = 1;
4992         /* for testing, allow qdepth to exceed the host's queued_arr size */
4993         if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4994                 qdepth = SCSI_DEBUG_CANQUEUE + 10;
4995         scsi_change_queue_depth(sdev, qdepth);
4996
4997         if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
4998                 sdev_printk(KERN_INFO, sdev,
4999                             "%s: qdepth=%d, num_in_q=%d\n",
5000                             __func__, qdepth, num_in_q);
5001         }
5002         return sdev->queue_depth;
5003 }
5004
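/* Error/condition injection driven by the every_nth parameter: roughly once
 * every |every_nth| commands the counter wraps and, depending on the bits set
 * in opts, the command is either silently dropped (to provoke a timeout) or
 * the per-command inj_* flags are latched for the resp_* handlers to act on. */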
5005 static int
5006 check_inject(struct scsi_cmnd *scp)
5007 {
5008         struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
5009
5010         memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
5011
5012         if (atomic_inc_return(&sdebug_cmnd_count) >= abs(sdebug_every_nth)) {
5013                 atomic_set(&sdebug_cmnd_count, 0);
5014                 if (sdebug_every_nth < -1)
5015                         sdebug_every_nth = -1;
5016                 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5017                         return 1; /* ignore command causing timeout */
5018                 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5019                          scsi_medium_access_command(scp))
5020                         return 1; /* time out reads and writes */
5021                 if (sdebug_any_injecting_opt) {
5022                         if (SDEBUG_OPT_RECOVERED_ERR & sdebug_opts)
5023                                 ep->inj_recovered = true;
5024                         if (SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts)
5025                                 ep->inj_transport = true;
5026                         if (SDEBUG_OPT_DIF_ERR & sdebug_opts)
5027                                 ep->inj_dif = true;
5028                         if (SDEBUG_OPT_DIX_ERR & sdebug_opts)
5029                                 ep->inj_dix = true;
5030                         if (SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts)
5031                                 ep->inj_short = true;
5032                 }
5033         }
5034         return 0;
5035 }
5036
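/* Main command dispatcher: map the CDB opcode (and, where applicable, its
 * service action) to an entry in opcode_info_arr, optionally enforce the
 * per-opcode CDB mask when "strict" is set, report pending unit attentions,
 * honour fake_rw and every_nth injection, then invoke the resp_* handler and
 * hand the result to schedule_resp() for (possibly delayed) completion. */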
5037 static int
5038 scsi_debug_queuecommand(struct scsi_cmnd *scp)
5039 {
5040         u8 sdeb_i;
5041         struct scsi_device *sdp = scp->device;
5042         const struct opcode_info_t *oip;
5043         const struct opcode_info_t *r_oip;
5044         struct sdebug_dev_info *devip;
5045         u8 *cmd = scp->cmnd;
5046         int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5047         int k, na;
5048         int errsts = 0;
5049         u32 flags;
5050         u16 sa;
5051         u8 opcode = cmd[0];
5052         bool has_wlun_rl;
5053
5054         scsi_set_resid(scp, 0);
5055         if (sdebug_verbose && !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts)) {
5056                 char b[120];
5057                 int n, len, sb;
5058
5059                 len = scp->cmd_len;
5060                 sb = (int)sizeof(b);
5061                 if (len > 32)
5062                         strcpy(b, "too long, over 32 bytes");
5063                 else {
5064                         for (k = 0, n = 0; k < len && n < sb; ++k)
5065                                 n += scnprintf(b + n, sb - n, "%02x ",
5066                                                (u32)cmd[k]);
5067                 }
5068                 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
5069         }
5070         has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
5071         if ((sdp->lun >= sdebug_max_luns) && !has_wlun_rl)
5072                 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
5073
5074         sdeb_i = opcode_ind_arr[opcode];        /* fully mapped */
5075         oip = &opcode_info_arr[sdeb_i];         /* safe if table consistent */
5076         devip = (struct sdebug_dev_info *)sdp->hostdata;
5077         if (!devip) {
5078                 devip = devInfoReg(sdp);
5079                 if (NULL == devip)
5080                         return schedule_resp(scp, NULL, DID_NO_CONNECT << 16,
5081                                              0);
5082         }
5083         na = oip->num_attached;
5084         r_pfp = oip->pfp;
5085         if (na) {       /* multiple commands with this opcode */
5086                 r_oip = oip;
5087                 if (FF_SA & r_oip->flags) {
5088                         if (F_SA_LOW & oip->flags)
5089                                 sa = 0x1f & cmd[1];
5090                         else
5091                                 sa = get_unaligned_be16(cmd + 8);
5092                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5093                                 if (opcode == oip->opcode && sa == oip->sa)
5094                                         break;
5095                         }
5096                 } else {   /* since no service action only check opcode */
5097                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5098                                 if (opcode == oip->opcode)
5099                                         break;
5100                         }
5101                 }
5102                 if (k > na) {
5103                         if (F_SA_LOW & r_oip->flags)
5104                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5105                         else if (F_SA_HIGH & r_oip->flags)
5106                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5107                         else
5108                                 mk_sense_invalid_opcode(scp);
5109                         goto check_cond;
5110                 }
5111         }       /* else (when na==0) we assume the oip is a match */
5112         flags = oip->flags;
5113         if (F_INV_OP & flags) {
5114                 mk_sense_invalid_opcode(scp);
5115                 goto check_cond;
5116         }
5117         if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
5118                 if (sdebug_verbose)
5119                         sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
5120                                     my_name, opcode, " supported for wlun");
5121                 mk_sense_invalid_opcode(scp);
5122                 goto check_cond;
5123         }
5124         if (sdebug_strict) {    /* check cdb against mask */
5125                 u8 rem;
5126                 int j;
5127
5128                 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5129                         rem = ~oip->len_mask[k] & cmd[k];
5130                         if (rem) {
5131                                 for (j = 7; j >= 0; --j, rem <<= 1) {
5132                                         if (0x80 & rem)
5133                                                 break;
5134                                 }
5135                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5136                                 goto check_cond;
5137                         }
5138                 }
5139         }
5140         if (!(F_SKIP_UA & flags) &&
5141             SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
5142                 errsts = check_readiness(scp, UAS_ONLY, devip);
5143                 if (errsts)
5144                         goto check_cond;
5145         }
5146         if ((F_M_ACCESS & flags) && devip->stopped) {
5147                 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5148                 if (sdebug_verbose)
5149                         sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5150                                     "%s\n", my_name, "initializing command "
5151                                     "required");
5152                 errsts = check_condition_result;
5153                 goto fini;
5154         }
5155         if (sdebug_fake_rw && (F_FAKE_RW & flags))
5156                 goto fini;
5157         if (sdebug_every_nth) {
5158                 if (check_inject(scp))
5159                         return 0;       /* ignore command: make trouble */
5160         }
5161         if (oip->pfp)   /* if this command has a resp_* function, call it */
5162                 errsts = oip->pfp(scp, devip);
5163         else if (r_pfp) /* if leaf function ptr NULL, try the root's */
5164                 errsts = r_pfp(scp, devip);
5165
5166 fini:
5167         return schedule_resp(scp, devip, errsts,
5168                              ((F_DELAY_OVERR & flags) ? 0 : sdebug_delay));
5169 check_cond:
5170         return schedule_resp(scp, devip, check_condition_result, 0);
5171 }
5172
5173 static int
5174 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
5175 {
5176         if (sdebug_host_lock) {
5177                 unsigned long iflags;
5178                 int rc;
5179
5180                 spin_lock_irqsave(shost->host_lock, iflags);
5181                 rc = scsi_debug_queuecommand(cmd);
5182                 spin_unlock_irqrestore(shost->host_lock, iflags);
5183                 return rc;
5184         } else
5185                 return scsi_debug_queuecommand(cmd);
5186 }
5187
5188 static struct scsi_host_template sdebug_driver_template = {
5189         .show_info =            scsi_debug_show_info,
5190         .write_info =           scsi_debug_write_info,
5191         .proc_name =            sdebug_proc_name,
5192         .name =                 "SCSI DEBUG",
5193         .info =                 scsi_debug_info,
5194         .slave_alloc =          scsi_debug_slave_alloc,
5195         .slave_configure =      scsi_debug_slave_configure,
5196         .slave_destroy =        scsi_debug_slave_destroy,
5197         .ioctl =                scsi_debug_ioctl,
5198         .queuecommand =         sdebug_queuecommand_lock_or_not,
5199         .change_queue_depth =   sdebug_change_qdepth,
5200         .eh_abort_handler =     scsi_debug_abort,
5201         .eh_device_reset_handler = scsi_debug_device_reset,
5202         .eh_target_reset_handler = scsi_debug_target_reset,
5203         .eh_bus_reset_handler = scsi_debug_bus_reset,
5204         .eh_host_reset_handler = scsi_debug_host_reset,
5205         .can_queue =            SCSI_DEBUG_CANQUEUE,
5206         .this_id =              7,
5207         .sg_tablesize =         SG_MAX_SEGMENTS,
5208         .cmd_per_lun =          DEF_CMD_PER_LUN,
5209         .max_sectors =          -1U,
5210         .use_clustering =       DISABLE_CLUSTERING,
5211         .module =               THIS_MODULE,
5212         .track_queue_depth =    1,
5213         .cmd_size =             sizeof(struct sdebug_scmd_extra_t),
5214 };
5215
5216 static int sdebug_driver_probe(struct device * dev)
5217 {
5218         int error = 0;
5219         struct sdebug_host_info *sdbg_host;
5220         struct Scsi_Host *hpnt;
5221         int host_prot;
5222
5223         sdbg_host = to_sdebug_host(dev);
5224
5225         sdebug_driver_template.can_queue = sdebug_max_queue;
5226         if (sdebug_clustering)
5227                 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5228         hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5229         if (NULL == hpnt) {
5230                 pr_err("scsi_host_alloc failed\n");
5231                 error = -ENODEV;
5232                 return error;
5233         }
5234
5235         sdbg_host->shost = hpnt;
5236         *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5237         if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5238                 hpnt->max_id = sdebug_num_tgts + 1;
5239         else
5240                 hpnt->max_id = sdebug_num_tgts;
5241         /* was sdebug_max_luns; raised so the REPORT LUNS well-known LUN is addressable */
5242         hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5243
5244         host_prot = 0;
5245
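        /* map the dif and dix module parameters onto the host's DIF/DIX
         * protection capability mask
         */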
5246         switch (sdebug_dif) {
5247
5248         case SD_DIF_TYPE1_PROTECTION:
5249                 host_prot = SHOST_DIF_TYPE1_PROTECTION;
5250                 if (sdebug_dix)
5251                         host_prot |= SHOST_DIX_TYPE1_PROTECTION;
5252                 break;
5253
5254         case SD_DIF_TYPE2_PROTECTION:
5255                 host_prot = SHOST_DIF_TYPE2_PROTECTION;
5256                 if (sdebug_dix)
5257                         host_prot |= SHOST_DIX_TYPE2_PROTECTION;
5258                 break;
5259
5260         case SD_DIF_TYPE3_PROTECTION:
5261                 host_prot = SHOST_DIF_TYPE3_PROTECTION;
5262                 if (sdebug_dix)
5263                         host_prot |= SHOST_DIX_TYPE3_PROTECTION;
5264                 break;
5265
5266         default:
5267                 if (sdebug_dix)
5268                         host_prot |= SHOST_DIX_TYPE0_PROTECTION;
5269                 break;
5270         }
5271
5272         scsi_host_set_prot(hpnt, host_prot);
5273
5274         pr_info("host protection%s%s%s%s%s%s%s\n",
5275                (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5276                (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5277                (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5278                (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5279                (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5280                (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5281                (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5282
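        /* guard=1 requests the IP checksum guard tag; anything else uses T10 CRC */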
5283         if (sdebug_guard == 1)
5284                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5285         else
5286                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5287
5288         sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5289         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5290         error = scsi_add_host(hpnt, &sdbg_host->dev);
5291         if (error) {
5292                 pr_err("scsi_add_host failed\n");
5293                 error = -ENODEV;
5294                 scsi_host_put(hpnt);
5295         } else {
5296                 scsi_scan_host(hpnt);
5297         }
5298         return error;
5299 }
5300
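/*
 * Bus remove: tear down the simulated host and free the per-LU state that
 * was created on its behalf.
 */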
5301 static int sdebug_driver_remove(struct device *dev)
5302 {
5303         struct sdebug_host_info *sdbg_host;
5304         struct sdebug_dev_info *sdbg_devinfo, *tmp;
5305
5306         sdbg_host = to_sdebug_host(dev);
5307
5308         if (!sdbg_host) {
5309                 pr_err("Unable to locate host info\n");
5310                 return -ENODEV;
5311         }
5312
5313         scsi_remove_host(sdbg_host->shost);
5314
5315         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5316                                  dev_list) {
5317                 list_del(&sdbg_devinfo->dev_list);
5318                 kfree(sdbg_devinfo);
5319         }
5320
5321         scsi_host_put(sdbg_host->shost);
5322         return 0;
5323 }
5324
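/* every device on the pseudo bus matches; there is only the one sdebug driver */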
5325 static int pseudo_lld_bus_match(struct device *dev,
5326                                 struct device_driver *dev_driver)
5327 {
5328         return 1;
5329 }
5330
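/* minimal bus type tying pseudo devices to the probe/remove routines above */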
5331 static struct bus_type pseudo_lld_bus = {
5332         .name = "pseudo",
5333         .match = pseudo_lld_bus_match,
5334         .probe = sdebug_driver_probe,
5335         .remove = sdebug_driver_remove,
5336         .drv_groups = sdebug_drv_groups,
5337 };
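/*
 * For reference, a minimal sketch (not the code this driver uses; its actual
 * init/exit paths live elsewhere in this file, and the names below are only
 * illustrative) of how a bus like pseudo_lld_bus is typically brought up with
 * the driver core API:
 *
 *	static struct device_driver example_driver = {
 *		.name	= "scsi_debug",
 *		.bus	= &pseudo_lld_bus,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		int ret = bus_register(&pseudo_lld_bus);
 *
 *		if (ret)
 *			return ret;
 *		ret = driver_register(&example_driver);
 *		if (ret)
 *			bus_unregister(&pseudo_lld_bus);
 *		return ret;
 *	}
 *
 * Devices later added to the bus with device_register() match via
 * pseudo_lld_bus_match() and are then probed by sdebug_driver_probe().
 */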