2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disks
10 * (or disk-like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/atomic.h>
48 #include <linux/hrtimer.h>
50 #include <net/checksum.h>
52 #include <asm/unaligned.h>
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsicam.h>
59 #include <scsi/scsi_eh.h>
60 #include <scsi/scsi_tcq.h>
61 #include <scsi/scsi_dbg.h>
64 #include "scsi_logging.h"
66 #define SCSI_DEBUG_VERSION "1.85"
67 static const char *scsi_debug_version_date = "20141022";
69 #define MY_NAME "scsi_debug"
71 /* Additional Sense Code (ASC) */
72 #define NO_ADDITIONAL_SENSE 0x0
73 #define LOGICAL_UNIT_NOT_READY 0x4
74 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
75 #define UNRECOVERED_READ_ERR 0x11
76 #define PARAMETER_LIST_LENGTH_ERR 0x1a
77 #define INVALID_OPCODE 0x20
78 #define LBA_OUT_OF_RANGE 0x21
79 #define INVALID_FIELD_IN_CDB 0x24
80 #define INVALID_FIELD_IN_PARAM_LIST 0x26
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
95 /* Additional Sense Code Qualifier (ASCQ) */
96 #define ACK_NAK_TO 0x3
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST 1
101 #define DEF_NUM_TGTS 1
102 #define DEF_MAX_LUNS 1
103 /* With these defaults, this driver will make 1 host with 1 target
104 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
107 #define DEF_DELAY 1 /* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB 8
111 #define DEF_D_SENSE 0
112 #define DEF_EVERY_NTH 0
113 #define DEF_FAKE_RW 0
115 #define DEF_HOST_LOCK 0
118 #define DEF_LBPWS10 0
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0 0
123 #define DEF_NUM_PARTS 0
125 #define DEF_OPT_BLKS 64
126 #define DEF_PHYSBLK_EXP 0
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
132 #define DEF_UNMAP_ALIGNMENT 0
133 #define DEF_UNMAP_GRANULARITY 1
134 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
135 #define DEF_UNMAP_MAX_DESC 256
136 #define DEF_VIRTUAL_GB 0
137 #define DEF_VPD_USE_HOSTNO 1
138 #define DEF_WRITESAME_LENGTH 0xFFFF
140 #define DELAY_OVERRIDDEN -9999
142 /* bit mask values for scsi_debug_opts */
143 #define SCSI_DEBUG_OPT_NOISE 1
144 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
145 #define SCSI_DEBUG_OPT_TIMEOUT 4
146 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
147 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
148 #define SCSI_DEBUG_OPT_DIF_ERR 32
149 #define SCSI_DEBUG_OPT_DIX_ERR 64
150 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
151 #define SCSI_DEBUG_OPT_SHORT_TRANSFER 0x100
152 #define SCSI_DEBUG_OPT_Q_NOISE 0x200
153 #define SCSI_DEBUG_OPT_ALL_TSF 0x400
154 #define SCSI_DEBUG_OPT_RARE_TSF 0x800
155 #define SCSI_DEBUG_OPT_N_WCE 0x1000
156 #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
157 #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
158 #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
159 /* When "every_nth" > 0 then modulo "every_nth" commands:
160 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
161 * - a RECOVERED_ERROR is simulated on successful read and write
162 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
163 * - a TRANSPORT_ERROR is simulated on successful read and write
164 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
166 * When "every_nth" < 0 then after "- every_nth" commands:
167 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
168 * - a RECOVERED_ERROR is simulated on successful read and write
169 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
170 * - a TRANSPORT_ERROR is simulated on successful read and write
171 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
172 * This will continue until some other action occurs (e.g. the user
173 * writing a new value (other than -1 or 1) to every_nth via sysfs).
176 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
177 * priority order. In the subset implemented here lower numbers have higher
178 * priority. The UA numbers should be a sequence starting from 0 with
179 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
180 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
181 #define SDEBUG_UA_BUS_RESET 1
182 #define SDEBUG_UA_MODE_CHANGED 2
183 #define SDEBUG_UA_CAPACITY_CHANGED 3
184 #define SDEBUG_NUM_UAS 4
186 /* for check_readiness() */
187 #define UAS_ONLY 1 /* check for UAs only */
188 #define UAS_TUR 0 /* if no UAs then check if media access possible */
190 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
191 * sector on read commands: */
192 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
193 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
195 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
196 * or "peripheral device" addressing (value 0) */
197 #define SAM2_LUN_ADDRESS_METHOD 0
198 #define SAM2_WLUN_REPORT_LUNS 0xc101
200 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
201 * (for response) at one time. Can be reduced by max_queue option. Command
202 * responses are not queued when delay=0 and ndelay=0. The per-device
203 * DEF_CMD_PER_LUN can be changed via sysfs:
204 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
205 * SCSI_DEBUG_CANQUEUE. */
206 #define SCSI_DEBUG_CANQUEUE_WORDS 9 /* a WORD is bits in a long */
207 #define SCSI_DEBUG_CANQUEUE (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
208 #define DEF_CMD_PER_LUN 255
210 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
211 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
214 /* SCSI opcodes (first byte of cdb) mapped onto these indexes */
215 enum sdeb_opcode_index {
/* Each value is an index into opcode_info_arr[]; opcode_ind_arr[] maps
 * raw SCSI opcodes (cdb byte 0) onto these indexes.
 * NOTE(review): this listing appears to have lost lines during extraction:
 * SDEB_I_INQUIRY (=1) and SDEB_I_UNMAP (=24) are referenced elsewhere in
 * this file (e.g. opcode_ind_arr) but are not visible here, and the
 * closing "};" is missing — confirm against the full source. */
216 SDEB_I_INVALID_OPCODE = 0,
218 SDEB_I_REPORT_LUNS = 2,
219 SDEB_I_REQUEST_SENSE = 3,
220 SDEB_I_TEST_UNIT_READY = 4,
221 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
222 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
223 SDEB_I_LOG_SENSE = 7,
224 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
225 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
226 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
227 SDEB_I_START_STOP = 11,
228 SDEB_I_SERV_ACT_IN = 12, /* 12, 16 */
229 SDEB_I_SERV_ACT_OUT = 13, /* 12, 16 */
230 SDEB_I_MAINT_IN = 14,
231 SDEB_I_MAINT_OUT = 15,
232 SDEB_I_VERIFY = 16, /* 10 only */
233 SDEB_I_VARIABLE_LEN = 17,
234 SDEB_I_RESERVE = 18, /* 6, 10 */
235 SDEB_I_RELEASE = 19, /* 6, 10 */
236 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
237 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
238 SDEB_I_ATA_PT = 22, /* 12, 16 */
239 SDEB_I_SEND_DIAG = 23,
241 SDEB_I_XDWRITEREAD = 25, /* 10 only */
242 SDEB_I_WRITE_BUFFER = 26,
243 SDEB_I_WRITE_SAME = 27, /* 10, 16 */
244 SDEB_I_SYNC_CACHE = 28, /* 10 only */
245 SDEB_I_COMP_WRITE = 29,
246 SDEB_I_LAST_ELEMENT = 30, /* keep this last */
249 static const unsigned char opcode_ind_arr[256] = {
250 /* 0x0; 0x0->0x1f: 6 byte cdbs */
251 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
253 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
254 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
256 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
257 SDEB_I_ALLOW_REMOVAL, 0,
258 /* 0x20; 0x20->0x3f: 10 byte cdbs */
259 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
260 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
261 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
262 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
263 /* 0x40; 0x40->0x5f: 10 byte cdbs */
264 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
265 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
266 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
268 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
269 /* 0x60; 0x60->0x7d are reserved */
270 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
271 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
272 0, SDEB_I_VARIABLE_LEN,
273 /* 0x80; 0x80->0x9f: 16 byte cdbs */
274 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
275 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
276 0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
277 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
278 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
279 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
280 SDEB_I_MAINT_OUT, 0, 0, 0,
281 SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
283 0, 0, 0, 0, 0, 0, 0, 0,
284 0, 0, 0, 0, 0, 0, 0, 0,
285 /* 0xc0; 0xc0->0xff: vendor specific */
286 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
287 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
288 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
289 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
294 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
296 #define F_RL_WLUN_OK 0x10
297 #define F_SKIP_UA 0x20
298 #define F_DELAY_OVERR 0x40
299 #define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
300 #define F_SA_HIGH 0x100 /* as used by variable length cdbs */
301 #define F_INV_OP 0x200
302 #define F_FAKE_RW 0x400
303 #define F_M_ACCESS 0x800 /* media access */
305 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
306 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
307 #define FF_SA (F_SA_HIGH | F_SA_LOW)
309 struct sdebug_dev_info;
310 static int scsi_debug_queuecommand(struct scsi_cmnd *scp);
311 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
312 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
313 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
314 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
315 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
316 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
317 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
318 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
319 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
320 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
321 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
322 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
323 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
324 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
325 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
326 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
327 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
329 struct opcode_info_t {
330 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff
331 * for terminating element */
332 u8 opcode; /* if num_attached > 0, preferred */
333 u16 sa; /* service action */
334 u32 flags; /* OR-ed set of SDEB_F_* */
335 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
336 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
337 u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
338 /* ignore cdb bytes after position 15 */
341 static const struct opcode_info_t msense_iarr[1] = {
342 {0, 0x1a, 0, F_D_IN, NULL, NULL,
343 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
346 static const struct opcode_info_t mselect_iarr[1] = {
347 {0, 0x15, 0, F_D_OUT, NULL, NULL,
348 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
351 static const struct opcode_info_t read_iarr[3] = {
352 {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
353 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
355 {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
356 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
357 {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
358 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
362 static const struct opcode_info_t write_iarr[3] = {
363 {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */
364 {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
366 {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */
367 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
368 {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */
369 {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
373 static const struct opcode_info_t sa_in_iarr[1] = {
374 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
375 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
376 0xff, 0xff, 0xff, 0, 0xc7} },
379 static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */
380 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
381 NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
382 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
385 static const struct opcode_info_t maint_in_iarr[2] = {
386 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, NULL, NULL,
387 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
389 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, NULL, NULL,
390 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
394 static const struct opcode_info_t write_same_iarr[1] = {
395 {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
396 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
397 0xff, 0xff, 0xff, 0x1f, 0xc7} },
400 static const struct opcode_info_t reserve_iarr[1] = {
401 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
402 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
405 static const struct opcode_info_t release_iarr[1] = {
406 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
407 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
411 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
412 * plus the terminating elements for logic that scans this table such as
413 * REPORT SUPPORTED OPERATION CODES. */
414 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
416 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
417 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
418 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
419 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
420 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
421 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
423 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
424 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
425 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
426 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
427 {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
428 {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
430 {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
431 {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
432 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
433 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
435 {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
436 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
438 {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
439 {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
440 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
442 {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
443 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
444 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
445 {0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */
446 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
447 {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
448 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
449 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
450 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
451 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
452 {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
453 {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
455 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
456 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
457 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
458 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
459 {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
460 vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
461 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
462 {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
463 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
465 {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
466 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
469 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
470 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
471 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
472 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
473 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
474 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
475 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
476 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
477 {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
478 {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
479 {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
480 NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
482 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* WRITE_BUFFER */
483 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
484 {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
485 write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
486 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
487 {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
488 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
490 {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, NULL, NULL,
491 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
492 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */
495 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
496 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
499 struct sdebug_scmd_extra_t {
507 static int scsi_debug_add_host = DEF_NUM_HOST;
508 static int scsi_debug_ato = DEF_ATO;
509 static int scsi_debug_delay = DEF_DELAY;
510 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
511 static int scsi_debug_dif = DEF_DIF;
512 static int scsi_debug_dix = DEF_DIX;
513 static int scsi_debug_dsense = DEF_D_SENSE;
514 static int scsi_debug_every_nth = DEF_EVERY_NTH;
515 static int scsi_debug_fake_rw = DEF_FAKE_RW;
516 static unsigned int scsi_debug_guard = DEF_GUARD;
517 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
518 static int scsi_debug_max_luns = DEF_MAX_LUNS;
519 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
520 static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
521 static int scsi_debug_ndelay = DEF_NDELAY;
522 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
523 static int scsi_debug_no_uld = 0;
524 static int scsi_debug_num_parts = DEF_NUM_PARTS;
525 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
526 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
527 static int scsi_debug_opts = DEF_OPTS;
528 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
529 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
530 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
531 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
532 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
533 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
534 static unsigned int scsi_debug_lbpu = DEF_LBPU;
535 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
536 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
537 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
538 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
539 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
540 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
541 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
542 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
543 static bool scsi_debug_removable = DEF_REMOVABLE;
544 static bool scsi_debug_clustering;
545 static bool scsi_debug_host_lock = DEF_HOST_LOCK;
546 static bool scsi_debug_strict = DEF_STRICT;
547 static bool sdebug_any_injecting_opt;
549 static atomic_t sdebug_cmnd_count;
550 static atomic_t sdebug_completions;
551 static atomic_t sdebug_a_tsf; /* counter of 'almost' TSFs */
553 #define DEV_READONLY(TGT) (0)
555 static unsigned int sdebug_store_sectors;
556 static sector_t sdebug_capacity; /* in sectors */
558 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
559 may still need them */
560 static int sdebug_heads; /* heads per disk */
561 static int sdebug_cylinders_per; /* cylinders per surface */
562 static int sdebug_sectors_per; /* sectors per cylinder */
564 #define SDEBUG_MAX_PARTS 4
566 #define SCSI_DEBUG_MAX_CMD_LEN 32
568 static unsigned int scsi_debug_lbp(void)
570 return ((0 == scsi_debug_fake_rw) &&
571 (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
574 struct sdebug_dev_info {
575 struct list_head dev_list;
576 unsigned int channel;
579 struct sdebug_host_info *sdbg_host;
580 unsigned long uas_bm[1];
582 char stopped; /* TODO: should be atomic */
586 struct sdebug_host_info {
587 struct list_head host_list;
588 struct Scsi_Host *shost;
590 struct list_head dev_info_list;
593 #define to_sdebug_host(d) \
594 container_of(d, struct sdebug_host_info, dev)
596 static LIST_HEAD(sdebug_host_list);
597 static DEFINE_SPINLOCK(sdebug_host_list_lock);
600 struct sdebug_hrtimer { /* ... is derived from hrtimer */
601 struct hrtimer hrt; /* must be first element */
605 struct sdebug_queued_cmd {
606 /* in_use flagged by a bit in queued_in_use_bm[] */
607 struct timer_list *cmnd_timerp;
608 struct tasklet_struct *tletp;
609 struct sdebug_hrtimer *sd_hrtp;
610 struct scsi_cmnd * a_cmnd;
612 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
613 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
616 static unsigned char * fake_storep; /* ramdisk storage */
617 static struct sd_dif_tuple *dif_storep; /* protection info */
618 static void *map_storep; /* provisioning map */
620 static unsigned long map_size;
621 static int num_aborts;
622 static int num_dev_resets;
623 static int num_target_resets;
624 static int num_bus_resets;
625 static int num_host_resets;
626 static int dix_writes;
627 static int dix_reads;
628 static int dif_errors;
630 static DEFINE_SPINLOCK(queued_arr_lock);
631 static DEFINE_RWLOCK(atomic_rw);
633 static char sdebug_proc_name[] = MY_NAME;
634 static const char *my_name = MY_NAME;
636 static struct bus_type pseudo_lld_bus;
638 static struct device_driver sdebug_driverfs_driver = {
639 .name = sdebug_proc_name,
640 .bus = &pseudo_lld_bus,
643 static const int check_condition_result =
644 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
646 static const int illegal_condition_result =
647 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
649 static const int device_qfull_result =
650 (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
652 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
653 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
655 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
657 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
660 static void *fake_store(unsigned long long lba)
662 lba = do_div(lba, sdebug_store_sectors);
664 return fake_storep + lba * scsi_debug_sector_size;
667 static struct sd_dif_tuple *dif_store(sector_t sector)
669 sector = do_div(sector, sdebug_store_sectors);
671 return dif_storep + sector;
674 static int sdebug_add_adapter(void);
675 static void sdebug_remove_adapter(void);
677 static void sdebug_max_tgts_luns(void)
679 struct sdebug_host_info *sdbg_host;
680 struct Scsi_Host *hpnt;
682 spin_lock(&sdebug_host_list_lock);
683 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
684 hpnt = sdbg_host->shost;
685 if ((hpnt->this_id >= 0) &&
686 (scsi_debug_num_tgts > hpnt->this_id))
687 hpnt->max_id = scsi_debug_num_tgts + 1;
689 hpnt->max_id = scsi_debug_num_tgts;
690 /* scsi_debug_max_luns; */
691 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
693 spin_unlock(&sdebug_host_list_lock);
696 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
698 /* Set in_bit to -1 to indicate no bit position of invalid field */
700 mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
701 int in_byte, int in_bit)
703 unsigned char *sbuff;
707 sbuff = scp->sense_buffer;
709 sdev_printk(KERN_ERR, scp->device,
710 "%s: sense_buffer is NULL\n", __func__);
713 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
714 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
715 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
717 memset(sks, 0, sizeof(sks));
723 sks[0] |= 0x7 & in_bit;
725 put_unaligned_be16(in_byte, sks + 1);
726 if (scsi_debug_dsense) {
731 memcpy(sbuff + sl + 4, sks, 3);
733 memcpy(sbuff + 15, sks, 3);
734 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
735 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
736 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
737 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
740 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
742 unsigned char *sbuff;
744 sbuff = scp->sense_buffer;
746 sdev_printk(KERN_ERR, scp->device,
747 "%s: sense_buffer is NULL\n", __func__);
750 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
752 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
754 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
755 sdev_printk(KERN_INFO, scp->device,
756 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
757 my_name, key, asc, asq);
761 mk_sense_invalid_opcode(struct scsi_cmnd *scp)
763 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
766 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
768 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
770 sdev_printk(KERN_INFO, dev,
771 "%s: BLKFLSBUF [0x1261]\n", __func__);
772 else if (0x5331 == cmd)
773 sdev_printk(KERN_INFO, dev,
774 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
777 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
781 /* return -ENOTTY; // correct return but upsets fdisk */
784 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
785 struct sdebug_dev_info * devip)
788 bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
790 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
791 if (k != SDEBUG_NUM_UAS) {
792 const char *cp = NULL;
796 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
797 UA_RESET_ASC, POWER_ON_RESET_ASCQ);
799 cp = "power on reset";
801 case SDEBUG_UA_BUS_RESET:
802 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
803 UA_RESET_ASC, BUS_RESET_ASCQ);
807 case SDEBUG_UA_MODE_CHANGED:
808 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
809 UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
811 cp = "mode parameters changed";
813 case SDEBUG_UA_CAPACITY_CHANGED:
814 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
815 UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
817 cp = "capacity data changed";
819 pr_warn("%s: unexpected unit attention code=%d\n",
825 clear_bit(k, devip->uas_bm);
827 sdev_printk(KERN_INFO, SCpnt->device,
828 "%s reports: Unit attention: %s\n",
830 return check_condition_result;
832 if ((UAS_TUR == uas_only) && devip->stopped) {
833 mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
836 sdev_printk(KERN_INFO, SCpnt->device,
837 "%s reports: Not ready: %s\n", my_name,
838 "initializing command required");
839 return check_condition_result;
844 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
845 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
849 struct scsi_data_buffer *sdb = scsi_in(scp);
853 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
854 return (DID_ERROR << 16);
856 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
858 sdb->resid = scsi_bufflen(scp) - act_len;
863 /* Returns number of bytes fetched into 'arr' or -1 if error. */
864 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
867 if (!scsi_bufflen(scp))
869 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
872 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
876 static const char * inq_vendor_id = "Linux ";
877 static const char * inq_product_id = "scsi_debug ";
878 static const char *inq_product_rev = "0184"; /* version less '.' */
880 /* Device identification VPD page. Returns number of bytes placed in arr */
/* Builds the Device Identification VPD page (0x83) into arr: a T10
 * vendor-id designator, then (when dev_id_num >= 0) faked NAA-5 and
 * SCSI-name-string designators for the lu, target port, target port
 * group and target device. Returns the number of bytes placed in arr.
 * NOTE(review): this body appears to have lost lines during extraction
 * (e.g. the declarations of num/port_a/b[], several arr[num++] constant
 * bytes, closing braces and the final return) — comments below describe
 * only what is visible; confirm against the full source. */
881 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
882 int target_dev_id, int dev_id_num,
883 const char * dev_id_str,
889 port_a = target_dev_id + 1;
890 /* T10 vendor identifier field format (faked) */
891 arr[0] = 0x2; /* ASCII */
894 memcpy(&arr[4], inq_vendor_id, 8);
895 memcpy(&arr[12], inq_product_id, 16);
896 memcpy(&arr[28], dev_id_str, dev_id_str_len);
897 num = 8 + 16 + dev_id_str_len;
900 if (dev_id_num >= 0) {
901 /* NAA-5, Logical unit identifier (binary) */
902 arr[num++] = 0x1; /* binary (not necessarily sas) */
903 arr[num++] = 0x3; /* PIV=0, lu, naa */
906 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
910 arr[num++] = (dev_id_num >> 24);
911 arr[num++] = (dev_id_num >> 16) & 0xff;
912 arr[num++] = (dev_id_num >> 8) & 0xff;
913 arr[num++] = dev_id_num & 0xff;
914 /* Target relative port number */
915 arr[num++] = 0x61; /* proto=sas, binary */
916 arr[num++] = 0x94; /* PIV=1, target port, rel port */
917 arr[num++] = 0x0; /* reserved */
918 arr[num++] = 0x4; /* length */
919 arr[num++] = 0x0; /* reserved */
920 arr[num++] = 0x0; /* reserved */
922 arr[num++] = 0x1; /* relative port A */
924 /* NAA-5, Target port identifier */
925 arr[num++] = 0x61; /* proto=sas, binary */
926 arr[num++] = 0x93; /* piv=1, target port, naa */
929 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
933 arr[num++] = (port_a >> 24);
934 arr[num++] = (port_a >> 16) & 0xff;
935 arr[num++] = (port_a >> 8) & 0xff;
936 arr[num++] = port_a & 0xff;
937 /* NAA-5, Target port group identifier */
938 arr[num++] = 0x61; /* proto=sas, binary */
939 arr[num++] = 0x95; /* piv=1, target port group id */
944 arr[num++] = (port_group_id >> 8) & 0xff;
945 arr[num++] = port_group_id & 0xff;
946 /* NAA-5, Target device identifier */
947 arr[num++] = 0x61; /* proto=sas, binary */
948 arr[num++] = 0xa3; /* piv=1, target device, naa */
951 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
955 arr[num++] = (target_dev_id >> 24);
956 arr[num++] = (target_dev_id >> 16) & 0xff;
957 arr[num++] = (target_dev_id >> 8) & 0xff;
958 arr[num++] = target_dev_id & 0xff;
959 /* SCSI name string: Target device identifier */
960 arr[num++] = 0x63; /* proto=sas, UTF-8 */
961 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
964 memcpy(arr + num, "naa.52222220", 12);
966 snprintf(b, sizeof(b), "%08X", target_dev_id);
967 memcpy(arr + num, b, 8);
969 memset(arr + num, 0, 4); /* pad SCSI name string to 4-byte multiple */
/* Canned payload for the Software Interface Identification VPD page
 * (0x84): three 6-byte identifiers with fake company id 0x222222. */
975 static unsigned char vpd84_data[] = {
976 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
977 0x22,0x22,0x22,0x0,0xbb,0x1,
978 0x22,0x22,0x22,0x0,0xbb,0x2,
981 /* Software interface identification VPD page */
/* Copies the canned vpd84_data table; returns its length in bytes. */
982 static int inquiry_evpd_84(unsigned char * arr)
984 memcpy(arr, vpd84_data, sizeof(vpd84_data));
985 return sizeof(vpd84_data);
988 /* Management network addresses VPD page */
/* Emits two network-address descriptors (storage-config and logging
 * URLs), each null terminated and padded to a multiple of 4 bytes. */
989 static int inquiry_evpd_85(unsigned char * arr)
992 const char * na1 = "https://www.kernel.org/config";
993 const char * na2 = "http://www.kernel.org/log";
996 arr[num++] = 0x1; /* lu, storage config */
997 arr[num++] = 0x0; /* reserved */
1002 plen = ((plen / 4) + 1) * 4;
1003 arr[num++] = plen; /* length, null terminated, padded */
1004 memcpy(arr + num, na1, olen);
1005 memset(arr + num + olen, 0, plen - olen);
1008 arr[num++] = 0x4; /* lu, logging */
1009 arr[num++] = 0x0; /* reserved */
1014 plen = ((plen / 4) + 1) * 4;
1015 arr[num++] = plen; /* length, null terminated, padded */
1016 memcpy(arr + num, na2, olen);
1017 memset(arr + num + olen, 0, plen - olen);
1023 /* SCSI ports VPD page */
/* Advertises two relative ports (primary and secondary), each with an
 * NAA-5 target port identifier derived from target_dev_id. */
1024 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
1029 port_a = target_dev_id + 1;
1030 port_b = port_a + 1;
1031 arr[num++] = 0x0; /* reserved */
1032 arr[num++] = 0x0; /* reserved */
1034 arr[num++] = 0x1; /* relative port 1 (primary) */
1035 memset(arr + num, 0, 6);
1038 arr[num++] = 12; /* length tp descriptor */
1039 /* naa-5 target port identifier (A) */
1040 arr[num++] = 0x61; /* proto=sas, binary */
1041 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1042 arr[num++] = 0x0; /* reserved */
1043 arr[num++] = 0x8; /* length */
1044 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
1048 arr[num++] = (port_a >> 24);
1049 arr[num++] = (port_a >> 16) & 0xff;
1050 arr[num++] = (port_a >> 8) & 0xff;
1051 arr[num++] = port_a & 0xff;
1053 arr[num++] = 0x0; /* reserved */
1054 arr[num++] = 0x0; /* reserved */
1056 arr[num++] = 0x2; /* relative port 2 (secondary) */
1057 memset(arr + num, 0, 6);
1060 arr[num++] = 12; /* length tp descriptor */
1061 /* naa-5 target port identifier (B) */
1062 arr[num++] = 0x61; /* proto=sas, binary */
1063 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1064 arr[num++] = 0x0; /* reserved */
1065 arr[num++] = 0x8; /* length */
1066 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
1070 arr[num++] = (port_b >> 24);
1071 arr[num++] = (port_b >> 16) & 0xff;
1072 arr[num++] = (port_b >> 8) & 0xff;
1073 arr[num++] = port_b & 0xff;
/* Canned payload for the ATA Information VPD page (0x89), including a
 * fake SAT product string and a raw ATA IDENTIFY DEVICE data image. */
1079 static unsigned char vpd89_data[] = {
1080 /* from 4th byte */ 0,0,0,0,
1081 'l','i','n','u','x',' ',' ',' ',
1082 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1084 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1086 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1087 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1088 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1089 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1091 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1093 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1095 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1096 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1097 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1098 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1099 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1100 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1101 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1102 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1103 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1104 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1105 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1106 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1107 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1108 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1109 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1110 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1111 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1112 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1113 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1114 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1115 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1116 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1117 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1118 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1119 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1120 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1123 /* ATA Information VPD page */
/* Copies the canned vpd89_data table; returns its length in bytes. */
1124 static int inquiry_evpd_89(unsigned char * arr)
1126 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1127 return sizeof(vpd89_data);
/* Template for the Block Limits VPD page (0xb0); several fields are
 * overwritten at runtime by inquiry_evpd_b0(). */
1131 static unsigned char vpdb0_data[] = {
1132 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1133 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1134 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1135 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1138 /* Block limits VPD page (SBC-3) */
/* Fills in transfer-length granularity, maximum/optimal transfer
 * lengths, and — when logical block provisioning (LBPU) is enabled —
 * the unmap limits, granularity/alignment and max WRITE SAME length.
 * Returns the page length (0x3c with provisioning, else the template
 * size). */
1139 static int inquiry_evpd_b0(unsigned char * arr)
1143 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1145 /* Optimal transfer length granularity */
1146 gran = 1 << scsi_debug_physblk_exp;
1147 arr[2] = (gran >> 8) & 0xff;
1148 arr[3] = gran & 0xff;
1150 /* Maximum Transfer Length */
1151 if (sdebug_store_sectors > 0x400) {
1152 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
1153 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
1154 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
1155 arr[7] = sdebug_store_sectors & 0xff;
1158 /* Optimal Transfer Length */
1159 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
1161 if (scsi_debug_lbpu) {
1162 /* Maximum Unmap LBA Count */
1163 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
1165 /* Maximum Unmap Block Descriptor Count */
1166 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
1169 /* Unmap Granularity Alignment */
1170 if (scsi_debug_unmap_alignment) {
1171 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
1172 arr[28] |= 0x80; /* UGAVALID */
1175 /* Optimal Unmap Granularity */
1176 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
1178 /* Maximum WRITE SAME Length */
1179 put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
1181 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1183 return sizeof(vpdb0_data);
1186 /* Block device characteristics VPD page (SBC-3) */
/* Reports a non-rotating (SSD-like) medium in a sub-1.8" form factor. */
1187 static int inquiry_evpd_b1(unsigned char *arr)
1189 memset(arr, 0, 0x3c);
1191 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1193 arr[3] = 5; /* less than 1.8" */
1198 /* Logical block provisioning VPD page (SBC-3) */
/* Reports threshold exponent 0 and sets provisioning capability bits
 * according to the lbpu/lbpws/lbpws10/lbprz module parameters. */
1199 static int inquiry_evpd_b2(unsigned char *arr)
1201 memset(arr, 0, 0x4);
1202 arr[0] = 0; /* threshold exponent */
1204 if (scsi_debug_lbpu)
1207 if (scsi_debug_lbpws)
1210 if (scsi_debug_lbpws10)
1213 if (scsi_debug_lbprz)
1219 #define SDEBUG_LONG_INQ_SZ 96
1220 #define SDEBUG_MAX_INQ_ARR_SZ 584
/* INQUIRY handler.  Builds either a VPD page (EVPD bit set; dispatches
 * on cmd[2] to the inquiry_evpd_* helpers) or a standard 96-byte
 * inquiry response.  CMDDT is rejected with ILLEGAL REQUEST.  The
 * response buffer is heap allocated (GFP_ATOMIC) and the reply is
 * clamped to both the CDB allocation length and the buffer size. */
1222 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1224 unsigned char pq_pdt;
1225 unsigned char * arr;
1226 unsigned char *cmd = scp->cmnd;
1227 int alloc_len, n, ret;
1230 alloc_len = (cmd[3] << 8) + cmd[4];
1231 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1233 return DID_REQUEUE << 16;
1234 have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS);
1236 pq_pdt = 0x1e; /* present, wlun */
1237 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
1238 pq_pdt = 0x7f; /* not present, no device type */
1240 pq_pdt = (scsi_debug_ptype & 0x1f);
1242 if (0x2 & cmd[1]) { /* CMDDT bit set */
1243 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1245 return check_condition_result;
1246 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1247 int lu_id_num, port_group_id, target_dev_id, len;
1249 int host_no = devip->sdbg_host->shost->host_no;
/* Synthetic ids derived from host/channel/target/lun so that every
 * simulated device gets distinct, stable identifiers. */
1251 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1252 (devip->channel & 0x7f);
1253 if (0 == scsi_debug_vpd_use_hostno)
1255 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1256 (devip->target * 1000) + devip->lun);
1257 target_dev_id = ((host_no + 1) * 2000) +
1258 (devip->target * 1000) - 3;
1259 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1260 if (0 == cmd[2]) { /* supported vital product data pages */
1261 arr[1] = cmd[2]; /*sanity */
1263 arr[n++] = 0x0; /* this page */
1264 arr[n++] = 0x80; /* unit serial number */
1265 arr[n++] = 0x83; /* device identification */
1266 arr[n++] = 0x84; /* software interface ident. */
1267 arr[n++] = 0x85; /* management network addresses */
1268 arr[n++] = 0x86; /* extended inquiry */
1269 arr[n++] = 0x87; /* mode page policy */
1270 arr[n++] = 0x88; /* SCSI ports */
1271 arr[n++] = 0x89; /* ATA information */
1272 arr[n++] = 0xb0; /* Block limits (SBC) */
1273 arr[n++] = 0xb1; /* Block characteristics (SBC) */
1274 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1276 arr[3] = n - 4; /* number of supported VPD pages */
1277 } else if (0x80 == cmd[2]) { /* unit serial number */
1278 arr[1] = cmd[2]; /*sanity */
1280 memcpy(&arr[4], lu_id_str, len);
1281 } else if (0x83 == cmd[2]) { /* device identification */
1282 arr[1] = cmd[2]; /*sanity */
1283 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
1284 target_dev_id, lu_id_num,
1286 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1287 arr[1] = cmd[2]; /*sanity */
1288 arr[3] = inquiry_evpd_84(&arr[4]);
1289 } else if (0x85 == cmd[2]) { /* Management network addresses */
1290 arr[1] = cmd[2]; /*sanity */
1291 arr[3] = inquiry_evpd_85(&arr[4]);
1292 } else if (0x86 == cmd[2]) { /* extended inquiry */
1293 arr[1] = cmd[2]; /*sanity */
1294 arr[3] = 0x3c; /* number of following entries */
1295 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
1296 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1297 else if (scsi_debug_dif)
1298 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1300 arr[4] = 0x0; /* no protection stuff */
1301 arr[5] = 0x7; /* head of q, ordered + simple q's */
1302 } else if (0x87 == cmd[2]) { /* mode page policy */
1303 arr[1] = cmd[2]; /*sanity */
1304 arr[3] = 0x8; /* number of following entries */
1305 arr[4] = 0x2; /* disconnect-reconnect mp */
1306 arr[6] = 0x80; /* mlus, shared */
1307 arr[8] = 0x18; /* protocol specific lu */
1308 arr[10] = 0x82; /* mlus, per initiator port */
1309 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1310 arr[1] = cmd[2]; /*sanity */
1311 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1312 } else if (0x89 == cmd[2]) { /* ATA information */
1313 arr[1] = cmd[2]; /*sanity */
1314 n = inquiry_evpd_89(&arr[4]);
1316 arr[3] = (n & 0xff);
1317 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1318 arr[1] = cmd[2]; /*sanity */
1319 arr[3] = inquiry_evpd_b0(&arr[4]);
1320 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1321 arr[1] = cmd[2]; /*sanity */
1322 arr[3] = inquiry_evpd_b1(&arr[4]);
1323 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1324 arr[1] = cmd[2]; /*sanity */
1325 arr[3] = inquiry_evpd_b2(&arr[4]);
1327 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1329 return check_condition_result;
1331 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1332 ret = fill_from_dev_buffer(scp, arr,
1333 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1337 /* drops through here for a standard inquiry */
1338 arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
1339 arr[2] = scsi_debug_scsi_level;
1340 arr[3] = 2; /* response_data_format==2 */
1341 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1342 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
1343 if (0 == scsi_debug_vpd_use_hostno)
1344 arr[5] = 0x10; /* claim: implicit TGPS */
1345 arr[6] = 0x10; /* claim: MultiP */
1346 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1347 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1348 memcpy(&arr[8], inq_vendor_id, 8);
1349 memcpy(&arr[16], inq_product_id, 16);
1350 memcpy(&arr[32], inq_product_rev, 4);
1351 /* version descriptors (2 bytes each) follow */
1352 arr[58] = 0x0; arr[59] = 0xa2; /* SAM-5 rev 4 */
1353 arr[60] = 0x4; arr[61] = 0x68; /* SPC-4 rev 37 */
1355 if (scsi_debug_ptype == 0) {
1356 arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1357 } else if (scsi_debug_ptype == 1) {
1358 arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1360 arr[n++] = 0x20; arr[n++] = 0xe6; /* SPL-3 rev 7 */
1361 ret = fill_from_dev_buffer(scp, arr,
1362 min(alloc_len, SDEBUG_LONG_INQ_SZ));
/* REQUEST SENSE handler.  When the Informational Exceptions mode page
 * has TEST set with MRIE==6, reports THRESHOLD EXCEEDED in the format
 * selected by the CDB's DESC bit; otherwise returns (and converts
 * between fixed and descriptor format as needed) the current sense
 * data, then resets it to NO ADDITIONAL SENSE. */
1367 static int resp_requests(struct scsi_cmnd * scp,
1368 struct sdebug_dev_info * devip)
1370 unsigned char * sbuff;
1371 unsigned char *cmd = scp->cmnd;
1372 unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1373 bool dsense, want_dsense;
1376 memset(arr, 0, sizeof(arr));
1377 dsense = !!(cmd[1] & 1);
1378 want_dsense = dsense || scsi_debug_dsense;
1379 sbuff = scp->sense_buffer;
1380 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1383 arr[1] = 0x0; /* NO_SENSE in sense_key */
1384 arr[2] = THRESHOLD_EXCEEDED;
1385 arr[3] = 0xff; /* TEST set and MRIE==6 */
1389 arr[2] = 0x0; /* NO_SENSE in sense_key */
1390 arr[7] = 0xa; /* 18 byte sense buffer */
1391 arr[12] = THRESHOLD_EXCEEDED;
1392 arr[13] = 0xff; /* TEST set and MRIE==6 */
1395 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1396 if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
1397 ; /* have sense and formats match */
1398 else if (arr[0] <= 0x70) {
/* convert fixed-format sense to descriptor format */
1408 } else if (dsense) {
1411 arr[1] = sbuff[2]; /* sense key */
1412 arr[2] = sbuff[12]; /* asc */
1413 arr[3] = sbuff[13]; /* ascq */
1425 mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1426 return fill_from_dev_buffer(scp, arr, len);
/* START STOP UNIT handler.  Rejects a non-zero power condition field;
 * otherwise toggles the device's stopped state to match the START bit. */
1429 static int resp_start_stop(struct scsi_cmnd * scp,
1430 struct sdebug_dev_info * devip)
1432 unsigned char *cmd = scp->cmnd;
1433 int power_cond, start;
1435 power_cond = (cmd[4] & 0xf0) >> 4;
1437 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1438 return check_condition_result;
1441 if (start == devip->stopped)
1442 devip->stopped = !start;
/* Returns the advertised capacity in sectors: derived from the
 * virtual_gb module parameter when set (1 GiB = 2^30 bytes divided by
 * the configured sector size), otherwise the backing-store size. */
1446 static sector_t get_sdebug_capacity(void)
1448 if (scsi_debug_virtual_gb > 0)
1449 return (sector_t)scsi_debug_virtual_gb *
1450 (1073741824 / scsi_debug_sector_size);
1452 return sdebug_store_sectors;
1455 #define SDEBUG_READCAP_ARR_SZ 8
/* READ CAPACITY (10) handler.  Refreshes the capacity (virtual_gb may
 * have changed), then returns last-LBA (big-endian, capped per SBC when
 * the capacity exceeds 32 bits) and the block length. */
1456 static int resp_readcap(struct scsi_cmnd * scp,
1457 struct sdebug_dev_info * devip)
1459 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1462 /* following just in case virtual_gb changed */
1463 sdebug_capacity = get_sdebug_capacity();
1464 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1465 if (sdebug_capacity < 0xffffffff) {
1466 capac = (unsigned int)sdebug_capacity - 1;
1467 arr[0] = (capac >> 24);
1468 arr[1] = (capac >> 16) & 0xff;
1469 arr[2] = (capac >> 8) & 0xff;
1470 arr[3] = capac & 0xff;
1477 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1478 arr[7] = scsi_debug_sector_size & 0xff;
1479 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1482 #define SDEBUG_READCAP16_ARR_SZ 32
/* READ CAPACITY (16) handler.  Returns 64-bit last-LBA, block length,
 * physical-block exponent, lowest aligned LBA, provisioning bits
 * (LBPME/LBPRZ) and protection type/enable when DIF is configured. */
1483 static int resp_readcap16(struct scsi_cmnd * scp,
1484 struct sdebug_dev_info * devip)
1486 unsigned char *cmd = scp->cmnd;
1487 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1488 unsigned long long capac;
1491 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1493 /* following just in case virtual_gb changed */
1494 sdebug_capacity = get_sdebug_capacity();
1495 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1496 capac = sdebug_capacity - 1;
1497 for (k = 0; k < 8; ++k, capac >>= 8)
1498 arr[7 - k] = capac & 0xff;
1499 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1500 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1501 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1502 arr[11] = scsi_debug_sector_size & 0xff;
1503 arr[13] = scsi_debug_physblk_exp & 0xf;
1504 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1506 if (scsi_debug_lbp()) {
1507 arr[14] |= 0x80; /* LBPME */
1508 if (scsi_debug_lbprz)
1509 arr[14] |= 0x40; /* LBPRZ */
1512 arr[15] = scsi_debug_lowest_aligned & 0xff;
1514 if (scsi_debug_dif) {
1515 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1516 arr[12] |= 1; /* PROT_EN */
1519 return fill_from_dev_buffer(scp, arr,
1520 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1523 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
/* REPORT TARGET PORT GROUPS handler.  Builds two port groups (one port
 * each) matching the two ports advertised by VPD page 0x88; group B is
 * always reported unavailable.  The reply is clamped to the CDB
 * allocation length, the constructed length and the array size. */
1525 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1526 struct sdebug_dev_info * devip)
1528 unsigned char *cmd = scp->cmnd;
1529 unsigned char * arr;
1530 int host_no = devip->sdbg_host->shost->host_no;
1531 int n, ret, alen, rlen;
1532 int port_group_a, port_group_b, port_a, port_b;
1534 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1537 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1539 return DID_REQUEUE << 16;
1541 * EVPD page 0x88 states we have two ports, one
1542 * real and a fake port with no device connected.
1543 * So we create two port groups with one port each
1544 * and set the group with port B to unavailable.
1546 port_a = 0x1; /* relative port A */
1547 port_b = 0x2; /* relative port B */
1548 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1549 (devip->channel & 0x7f);
1550 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1551 (devip->channel & 0x7f) + 0x80;
1554 * The asymmetric access state is cycled according to the host_id.
1557 if (0 == scsi_debug_vpd_use_hostno) {
1558 arr[n++] = host_no % 3; /* Asymm access state */
1559 arr[n++] = 0x0F; /* claim: all states are supported */
1561 arr[n++] = 0x0; /* Active/Optimized path */
1562 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1564 arr[n++] = (port_group_a >> 8) & 0xff;
1565 arr[n++] = port_group_a & 0xff;
1566 arr[n++] = 0; /* Reserved */
1567 arr[n++] = 0; /* Status code */
1568 arr[n++] = 0; /* Vendor unique */
1569 arr[n++] = 0x1; /* One port per group */
1570 arr[n++] = 0; /* Reserved */
1571 arr[n++] = 0; /* Reserved */
1572 arr[n++] = (port_a >> 8) & 0xff;
1573 arr[n++] = port_a & 0xff;
1574 arr[n++] = 3; /* Port unavailable */
1575 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1576 arr[n++] = (port_group_b >> 8) & 0xff;
1577 arr[n++] = port_group_b & 0xff;
1578 arr[n++] = 0; /* Reserved */
1579 arr[n++] = 0; /* Status code */
1580 arr[n++] = 0; /* Vendor unique */
1581 arr[n++] = 0x1; /* One port per group */
1582 arr[n++] = 0; /* Reserved */
1583 arr[n++] = 0; /* Reserved */
1584 arr[n++] = (port_b >> 8) & 0xff;
1585 arr[n++] = port_b & 0xff;
1588 arr[0] = (rlen >> 24) & 0xff;
1589 arr[1] = (rlen >> 16) & 0xff;
1590 arr[2] = (rlen >> 8) & 0xff;
1591 arr[3] = rlen & 0xff;
1594 * Return the smallest value of either
1595 * - The allocated length
1596 * - The constructed command length
1597 * - The maximum array size
1600 ret = fill_from_dev_buffer(scp, arr,
1601 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1606 /* <<Following mode page info copied from ST318451LW>> */
/* Mode page 0x01.  pcontrol==1 (changeable values) zeroes everything
 * past the header to indicate nothing is changeable. */
1608 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1609 { /* Read-Write Error Recovery page for mode_sense */
1610 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1613 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1615 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1616 return sizeof(err_recov_pg);
/* Mode page 0x02.  Same changeable-values convention as the other
 * mode-page helpers: zero the body when those are requested. */
1619 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1620 { /* Disconnect-Reconnect page for mode_sense */
1621 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1622 0, 0, 0, 0, 0, 0, 0, 0};
1624 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1626 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1627 return sizeof(disconnect_pg);
/* Mode page 0x03.  Fills in sectors-per-track and sector size from the
 * module configuration; sets the removable-medium bit to stay
 * consistent with the standard INQUIRY response. */
1630 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1631 { /* Format device page for mode_sense */
1632 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1633 0, 0, 0, 0, 0, 0, 0, 0,
1634 0, 0, 0, 0, 0x40, 0, 0, 0};
1636 memcpy(p, format_pg, sizeof(format_pg));
1637 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1638 p[11] = sdebug_sectors_per & 0xff;
1639 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1640 p[13] = scsi_debug_sector_size & 0xff;
1641 if (scsi_debug_removable)
1642 p[20] |= 0x20; /* should agree with INQUIRY */
1644 memset(p + 2, 0, sizeof(format_pg) - 2);
1645 return sizeof(format_pg);
/* Mode page 0x08.  WCE defaults to on but is cleared when the N_WCE
 * option is set.  pcontrol==1 returns the changeable mask, pcontrol==2
 * the default values. */
1648 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1649 { /* Caching page for mode_sense */
1650 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1651 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1652 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1653 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1655 if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
1656 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
1657 memcpy(p, caching_pg, sizeof(caching_pg));
1659 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1660 else if (2 == pcontrol)
1661 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1662 return sizeof(caching_pg);
/* Mode page 0x0a.  Keeps the D_SENSE bit in sync with the dsense module
 * state and sets ATO when DIF type guards application tags. */
1665 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1666 { /* Control mode page for mode_sense */
1667 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1669 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1672 if (scsi_debug_dsense)
1673 ctrl_m_pg[2] |= 0x4;
1675 ctrl_m_pg[2] &= ~0x4;
1678 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1680 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1682 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1683 else if (2 == pcontrol)
1684 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1685 return sizeof(ctrl_m_pg);
/* Mode page 0x1c.  Current values come from the live iec_m_pg state
 * (which MODE SELECT can modify); changeable/default masks are local. */
1689 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1690 { /* Informational Exceptions control mode page for mode_sense */
1691 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1693 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1696 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1698 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1699 else if (2 == pcontrol)
1700 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1701 return sizeof(iec_m_pg);
/* Mode page 0x19, short format (protocol-specific port page for SAS). */
1704 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1705 { /* SAS SSP mode page - short format for mode_sense */
1706 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1707 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1709 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1711 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1712 return sizeof(sas_sf_m_pg);
/* Mode page 0x19 subpage 0x1: phy control and discover.  Two phy
 * descriptors are patched with SAS addresses derived from
 * target_dev_id (ports A and B at fixed offsets 20 and 48+20). */
1716 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1718 { /* SAS phy control and discover mode page for mode_sense */
1719 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1720 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1721 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1722 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1723 0x2, 0, 0, 0, 0, 0, 0, 0,
1724 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1725 0, 0, 0, 0, 0, 0, 0, 0,
1726 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1727 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1728 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1729 0x3, 0, 0, 0, 0, 0, 0, 0,
1730 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1731 0, 0, 0, 0, 0, 0, 0, 0,
1735 port_a = target_dev_id + 1;
1736 port_b = port_a + 1;
1737 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1738 p[20] = (port_a >> 24);
1739 p[21] = (port_a >> 16) & 0xff;
1740 p[22] = (port_a >> 8) & 0xff;
1741 p[23] = port_a & 0xff;
1742 p[48 + 20] = (port_b >> 24);
1743 p[48 + 21] = (port_b >> 16) & 0xff;
1744 p[48 + 22] = (port_b >> 8) & 0xff;
1745 p[48 + 23] = port_b & 0xff;
1747 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1748 return sizeof(sas_pcd_m_pg);
/* Mode page 0x19 subpage 0x2: SSP shared port subpage (canned). */
1751 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1752 { /* SAS SSP shared protocol specific port mode subpage */
1753 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1754 0, 0, 0, 0, 0, 0, 0, 0,
1757 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1759 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1760 return sizeof(sas_sha_m_pg);
1763 #define SDEBUG_MAX_MSENSE_SZ 256
/* MODE SENSE (6/10) handler.  Validates pcontrol (saved pages are
 * unsupported) and subpage combinations, optionally emits a short or
 * long-LBA block descriptor, then dispatches on the page code to the
 * resp_*_pg helpers (0x3f returns all pages).  Finally writes the
 * mode-data length into the 6- or 10-byte header and copies the result
 * to the initiator, clamped to the allocation length. */
1766 resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1768 unsigned char dbd, llbaa;
1769 int pcontrol, pcode, subpcode, bd_len;
1770 unsigned char dev_spec;
1771 int k, alloc_len, msense_6, offset, len, target_dev_id;
1772 int target = scp->device->id;
1774 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1775 unsigned char *cmd = scp->cmnd;
1777 dbd = !!(cmd[1] & 0x8);
1778 pcontrol = (cmd[2] & 0xc0) >> 6;
1779 pcode = cmd[2] & 0x3f;
1781 msense_6 = (MODE_SENSE == cmd[0]);
1782 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1783 if ((0 == scsi_debug_ptype) && (0 == dbd))
1784 bd_len = llbaa ? 16 : 8;
1787 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1788 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1789 if (0x3 == pcontrol) { /* Saving values not supported */
1790 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1791 return check_condition_result;
1793 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1794 (devip->target * 1000) - 3;
1795 /* set DPOFUA bit for disks */
1796 if (0 == scsi_debug_ptype)
1797 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1807 arr[4] = 0x1; /* set LONGLBA bit */
1808 arr[7] = bd_len; /* assume 255 or less */
1812 if ((bd_len > 0) && (!sdebug_capacity))
1813 sdebug_capacity = get_sdebug_capacity();
1816 if (sdebug_capacity > 0xfffffffe) {
/* capacity too big for a short descriptor: report 0xffffffff */
1822 ap[0] = (sdebug_capacity >> 24) & 0xff;
1823 ap[1] = (sdebug_capacity >> 16) & 0xff;
1824 ap[2] = (sdebug_capacity >> 8) & 0xff;
1825 ap[3] = sdebug_capacity & 0xff;
1827 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1828 ap[7] = scsi_debug_sector_size & 0xff;
1831 } else if (16 == bd_len) {
1832 unsigned long long capac = sdebug_capacity;
1834 for (k = 0; k < 8; ++k, capac >>= 8)
1835 ap[7 - k] = capac & 0xff;
1836 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1837 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1838 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1839 ap[15] = scsi_debug_sector_size & 0xff;
1844 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1845 /* TODO: Control Extension page */
1846 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1847 return check_condition_result;
1850 case 0x1: /* Read-Write error recovery page, direct access */
1851 len = resp_err_recov_pg(ap, pcontrol, target);
1854 case 0x2: /* Disconnect-Reconnect page, all devices */
1855 len = resp_disconnect_pg(ap, pcontrol, target);
1858 case 0x3: /* Format device page, direct access */
1859 len = resp_format_pg(ap, pcontrol, target);
1862 case 0x8: /* Caching page, direct access */
1863 len = resp_caching_pg(ap, pcontrol, target);
1866 case 0xa: /* Control Mode page, all devices */
1867 len = resp_ctrl_m_pg(ap, pcontrol, target);
1870 case 0x19: /* if spc==1 then sas phy, control+discover */
1871 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1872 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1873 return check_condition_result;
1876 if ((0x0 == subpcode) || (0xff == subpcode))
1877 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1878 if ((0x1 == subpcode) || (0xff == subpcode))
1879 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1881 if ((0x2 == subpcode) || (0xff == subpcode))
1882 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1885 case 0x1c: /* Informational Exceptions Mode page, all devices */
1886 len = resp_iec_m_pg(ap, pcontrol, target);
1889 case 0x3f: /* Read all Mode pages */
1890 if ((0 == subpcode) || (0xff == subpcode)) {
1891 len = resp_err_recov_pg(ap, pcontrol, target);
1892 len += resp_disconnect_pg(ap + len, pcontrol, target);
1893 len += resp_format_pg(ap + len, pcontrol, target);
1894 len += resp_caching_pg(ap + len, pcontrol, target);
1895 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1896 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1897 if (0xff == subpcode) {
1898 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1899 target, target_dev_id);
1900 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1902 len += resp_iec_m_pg(ap + len, pcontrol, target);
1904 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1905 return check_condition_result;
1910 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
1911 return check_condition_result;
1914 arr[0] = offset - 1;
1916 arr[0] = ((offset - 2) >> 8) & 0xff;
1917 arr[1] = (offset - 2) & 0xff;
1919 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1922 #define SDEBUG_MAX_MSELECT_SZ 512
/* MODE SELECT (6/10) handler.  Validates PF/SP bits and the parameter
 * list length, fetches the data-out payload, checks the mode-data and
 * block-descriptor lengths, then applies changes to the writable pages
 * (caching 0x08, control 0x0a — which also updates the live dsense
 * setting — and informational exceptions 0x1c).  Any accepted change
 * raises a MODE PARAMETERS CHANGED unit attention for the device. */
1925 resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1927 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1928 int param_len, res, mpage;
1929 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1930 unsigned char *cmd = scp->cmnd;
1931 int mselect6 = (MODE_SELECT == cmd[0]);
1933 memset(arr, 0, sizeof(arr));
1936 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1937 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1938 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
1939 return check_condition_result;
1941 res = fetch_to_dev_buffer(scp, arr, param_len);
1943 return (DID_ERROR << 16);
1944 else if ((res < param_len) &&
1945 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1946 sdev_printk(KERN_INFO, scp->device,
1947 "%s: cdb indicated=%d, IO sent=%d bytes\n",
1948 __func__, param_len, res);
1949 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1950 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1952 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
1953 return check_condition_result;
1955 off = bd_len + (mselect6 ? 4 : 8);
1956 mpage = arr[off] & 0x3f;
1957 ps = !!(arr[off] & 0x80);
1959 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
1960 return check_condition_result;
1962 spf = !!(arr[off] & 0x40);
1963 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1965 if ((pg_len + off) > param_len) {
1966 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1967 PARAMETER_LIST_LENGTH_ERR, 0);
1968 return check_condition_result;
1971 case 0x8: /* Caching Mode page */
1972 if (caching_pg[1] == arr[off + 1]) {
1973 memcpy(caching_pg + 2, arr + off + 2,
1974 sizeof(caching_pg) - 2);
1975 goto set_mode_changed_ua;
1978 case 0xa: /* Control Mode page */
1979 if (ctrl_m_pg[1] == arr[off + 1]) {
1980 memcpy(ctrl_m_pg + 2, arr + off + 2,
1981 sizeof(ctrl_m_pg) - 2);
1982 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1983 goto set_mode_changed_ua;
1986 case 0x1c: /* Informational Exceptions Mode page */
1987 if (iec_m_pg[1] == arr[off + 1]) {
1988 memcpy(iec_m_pg + 2, arr + off + 2,
1989 sizeof(iec_m_pg) - 2);
1990 goto set_mode_changed_ua;
1996 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
1997 return check_condition_result;
1998 set_mode_changed_ua:
1999 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
/* Temperature log page (0x0d): two canned parameters — current
 * temperature (38 C) and reference temperature (65 C). */
2003 static int resp_temp_l_pg(unsigned char * arr)
2005 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2006 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2009 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2010 return sizeof(temp_l_pg);
/* Informational exceptions log page (0x2f).  When the IEC mode page's
 * TEST bit is set, reports THRESHOLD EXCEEDED as the IE ASC. */
2013 static int resp_ie_l_pg(unsigned char * arr)
2015 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2018 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2019 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2020 arr[4] = THRESHOLD_EXCEEDED;
2023 return sizeof(ie_l_pg);
2026 #define SDEBUG_MAX_LSENSE_SZ 512
/*
 * LOG SENSE (0x4d) response: builds the requested log page (or the
 * supported-pages directory) in a local buffer and copies it back.
 * NOTE(review): this excerpt is an elided listing -- several original
 * source lines (braces, switch headers, break statements) are missing
 * between the numbered lines below.
 */
2028 static int resp_log_sense(struct scsi_cmnd * scp,
2029 struct sdebug_dev_info * devip)
2031 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2032 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2033 unsigned char *cmd = scp->cmnd;
2035 memset(arr, 0, sizeof(arr));
/* PPC or SP set in CDB byte 1 is rejected (neither is supported) */
2039 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2040 return check_condition_result;
2042 pcontrol = (cmd[2] & 0xc0) >> 6;
2043 pcode = cmd[2] & 0x3f;
2044 subpcode = cmd[3] & 0xff;
2045 alloc_len = (cmd[7] << 8) + cmd[8];
2047 if (0 == subpcode) {
2049 case 0x0: /* Supported log pages log page */
2051 arr[n++] = 0x0; /* this page */
2052 arr[n++] = 0xd; /* Temperature */
2053 arr[n++] = 0x2f; /* Informational exceptions */
2056 case 0xd: /* Temperature log page */
2057 arr[3] = resp_temp_l_pg(arr + 4);
2059 case 0x2f: /* Informational exceptions log page */
2060 arr[3] = resp_ie_l_pg(arr + 4);
/* unsupported page code */
2063 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2064 return check_condition_result;
2066 } else if (0xff == subpcode) {
2070 case 0x0: /* Supported log pages and subpages log page */
2073 arr[n++] = 0x0; /* 0,0 page */
2075 arr[n++] = 0xff; /* this page */
2077 arr[n++] = 0x0; /* Temperature */
2079 arr[n++] = 0x0; /* Informational exceptions */
2082 case 0xd: /* Temperature subpages */
2085 arr[n++] = 0x0; /* Temperature */
2088 case 0x2f: /* Informational exceptions subpages */
2091 arr[n++] = 0x0; /* Informational exceptions */
2095 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2096 return check_condition_result;
/* any other subpage code is invalid */
2099 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2100 return check_condition_result;
/*
 * NOTE(review): the cap below uses SDEBUG_MAX_INQ_ARR_SZ rather than
 * SDEBUG_MAX_LSENSE_SZ (the actual size of 'arr'); looks like a
 * copy/paste inconsistency -- confirm against upstream before changing.
 */
2102 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
2103 return fill_from_dev_buffer(scp, arr,
2104 min(len, SDEBUG_MAX_INQ_ARR_SZ));
2107 static int check_device_access_params(struct scsi_cmnd *scp,
2108 unsigned long long lba, unsigned int num)
2110 if (lba + num > sdebug_capacity) {
2111 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2112 return check_condition_result;
2114 /* transfer length excessive (tie in to block limits VPD page) */
2115 if (num > sdebug_store_sectors) {
2116 /* needs work to find which cdb byte 'num' comes from */
2117 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2118 return check_condition_result;
2123 /* Returns number of bytes copied or -1 if error. */
/*
 * Copy 'num' sectors at virtual LBA 'lba' between the command's
 * scatter-gather list and the shared fake_storep RAM image, wrapping
 * at sdebug_store_sectors.  NOTE(review): elided listing -- if/else
 * headers and early-return lines are missing between the lines below.
 */
2125 do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
2128 u64 block, rest = 0;
2129 struct scsi_data_buffer *sdb;
2130 enum dma_data_direction dir;
2131 size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
/* write path: drain the data-out buffer into the fake store */
2135 sdb = scsi_out(scmd);
2136 dir = DMA_TO_DEVICE;
2137 func = sg_pcopy_to_buffer;
/* read path: fill the data-in buffer from the fake store */
2139 sdb = scsi_in(scmd);
2140 dir = DMA_FROM_DEVICE;
2141 func = sg_pcopy_from_buffer;
/* direction must match unless this is a bidirectional command */
2146 if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
/* 'block' becomes lba modulo store size (do_div also divides lba) */
2149 block = do_div(lba, sdebug_store_sectors);
2150 if (block + num > sdebug_store_sectors)
2151 rest = block + num - sdebug_store_sectors;
2153 ret = func(sdb->table.sgl, sdb->table.nents,
2154 fake_storep + (block * scsi_debug_sector_size),
2155 (num - rest) * scsi_debug_sector_size, 0);
2156 if (ret != (num - rest) * scsi_debug_sector_size)
/* wrapped portion continues from offset 0 of the store */
2160 ret += func(sdb->table.sgl, sdb->table.nents,
2161 fake_storep, rest * scsi_debug_sector_size,
2162 (num - rest) * scsi_debug_sector_size);
2168 static __be16 dif_compute_csum(const void *buf, int len)
2172 if (scsi_debug_guard)
2173 csum = (__force __be16)ip_compute_csum(buf, len);
2175 csum = cpu_to_be16(crc_t10dif(buf, len));
2180 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2181 sector_t sector, u32 ei_lba)
2183 __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
2185 if (sdt->guard_tag != csum) {
2186 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2188 (unsigned long)sector,
2189 be16_to_cpu(sdt->guard_tag),
2193 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
2194 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2195 pr_err("%s: REF check failed on sector %lu\n",
2196 __func__, (unsigned long)sector);
2199 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2200 be32_to_cpu(sdt->ref_tag) != ei_lba) {
2201 pr_err("%s: REF check failed on sector %lu\n",
2202 __func__, (unsigned long)sector);
/*
 * Copy protection information (DIF tuples) between the command's
 * protection scatter-gather list and dif_storep, wrapping at the end of
 * the store.  'read' selects direction: true copies store -> sgl.
 * NOTE(review): elided listing -- some lines (declarations, if/else
 * headers) are missing between the numbered lines below.
 */
2208 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2209 unsigned int sectors, bool read)
2213 const void *dif_store_end = dif_storep + sdebug_store_sectors;
2214 struct sg_mapping_iter miter;
2216 /* Bytes of protection data to copy into sgl */
2217 resid = sectors * sizeof(*dif_storep);
2219 sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2220 scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2221 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2223 while (sg_miter_next(&miter) && resid > 0) {
2224 size_t len = min(miter.length, resid);
2225 void *start = dif_store(sector);
/* 'rest' is how much of this chunk wraps past the store end */
2228 if (dif_store_end < start + len)
2229 rest = start + len - dif_store_end;
2234 memcpy(paddr, start, len - rest);
2236 memcpy(start, paddr, len - rest);
/* copy the wrapped tail from/to the start of dif_storep */
2240 memcpy(paddr + len - rest, dif_storep, rest);
2242 memcpy(dif_storep, paddr + len - rest, rest);
2245 sector += len / sizeof(*dif_storep);
2248 sg_miter_stop(&miter);
2251 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2252 unsigned int sectors, u32 ei_lba)
2255 struct sd_dif_tuple *sdt;
2258 for (i = 0; i < sectors; i++, ei_lba++) {
2261 sector = start_sec + i;
2262 sdt = dif_store(sector);
2264 if (sdt->app_tag == cpu_to_be16(0xffff))
2267 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2274 dif_copy_prot(SCpnt, start_sec, sectors, true);
/*
 * Common READ handler (READ 6/10/12/16/32 and the read half of
 * XDWRITEREAD).  Decodes lba/num per opcode, validates the range,
 * optionally verifies protection info, then copies data from the fake
 * store.  NOTE(review): elided listing -- switch/case headers, braces
 * and several declarations are missing between the lines below.
 */
2281 resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2283 u8 *cmd = scp->cmnd;
2287 unsigned long iflags;
/* READ(16) */
2294 lba = get_unaligned_be64(cmd + 2);
2295 num = get_unaligned_be32(cmd + 10);
/* READ(10) */
2300 lba = get_unaligned_be32(cmd + 2);
2301 num = get_unaligned_be16(cmd + 7);
/* READ(6): 21-bit lba, count 0 means 256 blocks */
2306 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2307 (u32)(cmd[1] & 0x1f) << 16;
2308 num = (0 == cmd[4]) ? 256 : cmd[4];
/* READ(12) */
2313 lba = get_unaligned_be32(cmd + 2);
2314 num = get_unaligned_be32(cmd + 6);
2317 case XDWRITEREAD_10:
2319 lba = get_unaligned_be32(cmd + 2);
2320 num = get_unaligned_be16(cmd + 7);
2323 default: /* assume READ(32) */
2324 lba = get_unaligned_be64(cmd + 12);
2325 ei_lba = get_unaligned_be32(cmd + 20);
2326 num = get_unaligned_be32(cmd + 28);
/* DIF type 2 restricts which read opcodes are legal */
2331 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2333 mk_sense_invalid_opcode(scp);
2334 return check_condition_result;
/* rdprotect == 0 on a formatted-with-PI device: warn only */
2336 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2337 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2338 (cmd[1] & 0xe0) == 0)
2339 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2342 if (sdebug_any_injecting_opt) {
2343 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2349 /* inline check_device_access_params() */
2350 if (lba + num > sdebug_capacity) {
2351 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2352 return check_condition_result;
2354 /* transfer length excessive (tie in to block limits VPD page) */
2355 if (num > sdebug_store_sectors) {
2356 /* needs work to find which cdb byte 'num' comes from */
2357 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2358 return check_condition_result;
/* fake a medium error when the range overlaps the magic address */
2361 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2362 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2363 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2364 /* claim unrecoverable read error */
2365 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2366 /* set info field and valid bit for fixed descriptor */
2367 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2368 scp->sense_buffer[0] |= 0x80; /* Valid bit */
2369 ret = (lba < OPT_MEDIUM_ERR_ADDR)
2370 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2371 put_unaligned_be32(ret, scp->sense_buffer + 3);
2373 scsi_set_resid(scp, scsi_bufflen(scp));
2374 return check_condition_result;
2377 read_lock_irqsave(&atomic_rw, iflags);
/* DIX + T10 DIF: verify protection info before copying data out */
2380 if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2381 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2384 read_unlock_irqrestore(&atomic_rw, iflags);
2385 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2386 return illegal_condition_result;
2390 ret = do_device_access(scp, lba, num, false);
2391 read_unlock_irqrestore(&atomic_rw, iflags);
2393 return DID_ERROR << 16;
2395 scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
/* optional injected errors controlled by module options */
2397 if (sdebug_any_injecting_opt) {
2398 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2400 if (ep->inj_recovered) {
2401 mk_sense_buffer(scp, RECOVERED_ERROR,
2402 THRESHOLD_EXCEEDED, 0);
2403 return check_condition_result;
2404 } else if (ep->inj_transport) {
2405 mk_sense_buffer(scp, ABORTED_COMMAND,
2406 TRANSPORT_PROBLEM, ACK_NAK_TO);
2407 return check_condition_result;
2408 } else if (ep->inj_dif) {
2409 /* Logical block guard check failed */
2410 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2411 return illegal_condition_result;
2412 } else if (ep->inj_dix) {
2413 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2414 return illegal_condition_result;
/*
 * Debug helper: hex-dump 'len' bytes of a sector to the kernel log,
 * 16 bytes per line, with printable ASCII shown alongside.
 * NOTE(review): elided listing -- the local line buffer declaration and
 * some format-string continuation lines are missing below.
 */
2420 void dump_sector(unsigned char *buf, int len)
2424 pr_err(">>> Sector Dump <<<\n");
2425 for (i = 0 ; i < len ; i += 16) {
2428 for (j = 0, n = 0; j < 16; j++) {
2429 unsigned char c = buf[i+j];
/* printable bytes also get their character representation */
2431 if (c >= 0x20 && c < 0x7e)
2432 n += scnprintf(b + n, sizeof(b) - n,
2435 n += scnprintf(b + n, sizeof(b) - n,
2438 pr_err("%04d: %s\n", i, b);
/*
 * Verify incoming protection information for a WRITE: walk the data and
 * protection scatter-gather lists in lock step, checking each sector's
 * tuple with dif_verify(); on success store the PI into dif_storep.
 * NOTE(review): elided listing -- braces, some declarations and the
 * error-path labels are missing between the lines below; the trailing
 * sg_miter_stop() pair (2509/2510) is the error exit path.
 */
2442 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2443 unsigned int sectors, u32 ei_lba)
2446 struct sd_dif_tuple *sdt;
2448 sector_t sector = start_sec;
2451 struct sg_mapping_iter diter;
2452 struct sg_mapping_iter piter;
2454 BUG_ON(scsi_sg_count(SCpnt) == 0);
2455 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2457 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2458 scsi_prot_sg_count(SCpnt),
2459 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2460 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2461 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2463 /* For each protection page */
2464 while (sg_miter_next(&piter)) {
2466 if (WARN_ON(!sg_miter_next(&diter))) {
2471 for (ppage_offset = 0; ppage_offset < piter.length;
2472 ppage_offset += sizeof(struct sd_dif_tuple)) {
2473 /* If we're at the end of the current
2474 * data page advance to the next one
2476 if (dpage_offset >= diter.length) {
2477 if (WARN_ON(!sg_miter_next(&diter))) {
2484 sdt = piter.addr + ppage_offset;
2485 daddr = diter.addr + dpage_offset;
2487 ret = dif_verify(sdt, daddr, sector, ei_lba);
/* on mismatch, dump the offending sector for diagnosis */
2489 dump_sector(daddr, scsi_debug_sector_size);
2495 dpage_offset += scsi_debug_sector_size;
2497 diter.consumed = dpage_offset;
2498 sg_miter_stop(&diter);
2500 sg_miter_stop(&piter);
/* all tuples verified: persist the PI alongside the data */
2502 dif_copy_prot(SCpnt, start_sec, sectors, false);
2509 sg_miter_stop(&diter);
2510 sg_miter_stop(&piter);
2514 static unsigned long lba_to_map_index(sector_t lba)
2516 if (scsi_debug_unmap_alignment) {
2517 lba += scsi_debug_unmap_granularity -
2518 scsi_debug_unmap_alignment;
2520 do_div(lba, scsi_debug_unmap_granularity);
2525 static sector_t map_index_to_lba(unsigned long index)
2527 sector_t lba = index * scsi_debug_unmap_granularity;
2529 if (scsi_debug_unmap_alignment) {
2530 lba -= scsi_debug_unmap_granularity -
2531 scsi_debug_unmap_alignment;
2537 static unsigned int map_state(sector_t lba, unsigned int *num)
2540 unsigned int mapped;
2541 unsigned long index;
2544 index = lba_to_map_index(lba);
2545 mapped = test_bit(index, map_storep);
2548 next = find_next_zero_bit(map_storep, map_size, index);
2550 next = find_next_bit(map_storep, map_size, index);
2552 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
2558 static void map_region(sector_t lba, unsigned int len)
2560 sector_t end = lba + len;
2563 unsigned long index = lba_to_map_index(lba);
2565 if (index < map_size)
2566 set_bit(index, map_storep);
2568 lba = map_index_to_lba(index + 1);
2572 static void unmap_region(sector_t lba, unsigned int len)
2574 sector_t end = lba + len;
2577 unsigned long index = lba_to_map_index(lba);
2579 if (lba == map_index_to_lba(index) &&
2580 lba + scsi_debug_unmap_granularity <= end &&
2582 clear_bit(index, map_storep);
2583 if (scsi_debug_lbprz) {
2584 memset(fake_storep +
2585 lba * scsi_debug_sector_size, 0,
2586 scsi_debug_sector_size *
2587 scsi_debug_unmap_granularity);
2590 memset(dif_storep + lba, 0xff,
2591 sizeof(*dif_storep) *
2592 scsi_debug_unmap_granularity);
2595 lba = map_index_to_lba(index + 1);
/*
 * Common WRITE handler (WRITE 6/10/12/16/32 and the write half of
 * XDWRITEREAD).  Mirrors resp_read_dt0(): decode lba/num, validate,
 * optionally verify incoming protection info, copy into the fake store
 * and update the provisioning map.  NOTE(review): elided listing --
 * switch/case headers, braces and declarations are missing below.
 */
2600 resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2602 u8 *cmd = scp->cmnd;
2606 unsigned long iflags;
/* WRITE(16) */
2613 lba = get_unaligned_be64(cmd + 2);
2614 num = get_unaligned_be32(cmd + 10);
/* WRITE(10) */
2619 lba = get_unaligned_be32(cmd + 2);
2620 num = get_unaligned_be16(cmd + 7);
/* WRITE(6): 21-bit lba, count 0 means 256 blocks */
2625 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2626 (u32)(cmd[1] & 0x1f) << 16;
2627 num = (0 == cmd[4]) ? 256 : cmd[4];
/* WRITE(12) */
2632 lba = get_unaligned_be32(cmd + 2);
2633 num = get_unaligned_be32(cmd + 6);
2636 case 0x53: /* XDWRITEREAD(10) */
2638 lba = get_unaligned_be32(cmd + 2);
2639 num = get_unaligned_be16(cmd + 7);
2642 default: /* assume WRITE(32) */
2643 lba = get_unaligned_be64(cmd + 12);
2644 ei_lba = get_unaligned_be32(cmd + 20);
2645 num = get_unaligned_be32(cmd + 28);
/* DIF type 2 restricts which write opcodes are legal */
2650 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2652 mk_sense_invalid_opcode(scp);
2653 return check_condition_result;
/* wrprotect == 0 on a formatted-with-PI device: warn only */
2655 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2656 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2657 (cmd[1] & 0xe0) == 0)
2658 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2662 /* inline check_device_access_params() */
2663 if (lba + num > sdebug_capacity) {
2664 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2665 return check_condition_result;
2667 /* transfer length excessive (tie in to block limits VPD page) */
2668 if (num > sdebug_store_sectors) {
2669 /* needs work to find which cdb byte 'num' comes from */
2670 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2671 return check_condition_result;
2674 write_lock_irqsave(&atomic_rw, iflags);
/* DIX + T10 DIF: verify incoming protection info first */
2677 if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2678 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2681 write_unlock_irqrestore(&atomic_rw, iflags);
2682 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2683 return illegal_condition_result;
2687 ret = do_device_access(scp, lba, num, true);
2688 if (scsi_debug_lbp())
2689 map_region(lba, num);
2690 write_unlock_irqrestore(&atomic_rw, iflags);
2692 return (DID_ERROR << 16);
/* short transfer is reported only when NOISE is enabled */
2693 else if ((ret < (num * scsi_debug_sector_size)) &&
2694 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2695 sdev_printk(KERN_INFO, scp->device,
2696 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2697 my_name, num * scsi_debug_sector_size, ret);
/* optional injected errors controlled by module options */
2699 if (sdebug_any_injecting_opt) {
2700 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2702 if (ep->inj_recovered) {
2703 mk_sense_buffer(scp, RECOVERED_ERROR,
2704 THRESHOLD_EXCEEDED, 0);
2705 return check_condition_result;
2706 } else if (ep->inj_dif) {
2707 /* Logical block guard check failed */
2708 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2709 return illegal_condition_result;
2710 } else if (ep->inj_dix) {
2711 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2712 return illegal_condition_result;
/*
 * WRITE SAME worker shared by the 10- and 16-byte variants.  With
 * 'unmap' (and LBP enabled) the range is deallocated; otherwise one
 * logical block (zeroes if 'ndob', else fetched from the data-out
 * buffer) is replicated across the range and the region re-mapped.
 * NOTE(review): elided listing -- braces and a few declarations are
 * missing between the lines below.
 */
2719 resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
2720 bool unmap, bool ndob)
2722 unsigned long iflags;
2723 unsigned long long i;
2726 ret = check_device_access_params(scp, lba, num);
2730 write_lock_irqsave(&atomic_rw, iflags);
2732 if (unmap && scsi_debug_lbp()) {
2733 unmap_region(lba, num);
2737 /* if ndob then zero 1 logical block, else fetch 1 logical block */
2739 memset(fake_storep + (lba * scsi_debug_sector_size), 0,
2740 scsi_debug_sector_size);
2743 ret = fetch_to_dev_buffer(scp, fake_storep +
2744 (lba * scsi_debug_sector_size),
2745 scsi_debug_sector_size);
2748 write_unlock_irqrestore(&atomic_rw, iflags);
2749 return (DID_ERROR << 16);
2750 } else if ((ret < (num * scsi_debug_sector_size)) &&
2751 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2752 sdev_printk(KERN_INFO, scp->device,
2753 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2754 my_name, "write same",
2755 num * scsi_debug_sector_size, ret);
2757 /* Copy first sector to remaining blocks */
2758 for (i = 1 ; i < num ; i++)
2759 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2760 fake_storep + (lba * scsi_debug_sector_size),
2761 scsi_debug_sector_size);
2763 if (scsi_debug_lbp())
2764 map_region(lba, num);
2766 write_unlock_irqrestore(&atomic_rw, iflags);
2772 resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2774 u8 *cmd = scp->cmnd;
2781 if (scsi_debug_lbpws10 == 0) {
2782 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2783 return check_condition_result;
2787 lba = get_unaligned_be32(cmd + 2);
2788 num = get_unaligned_be16(cmd + 7);
2789 if (num > scsi_debug_write_same_length) {
2790 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
2791 return check_condition_result;
2793 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
2797 resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2799 u8 *cmd = scp->cmnd;
2806 if (cmd[1] & 0x8) { /* UNMAP */
2807 if (scsi_debug_lbpws == 0) {
2808 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2809 return check_condition_result;
2813 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
2815 lba = get_unaligned_be64(cmd + 2);
2816 num = get_unaligned_be32(cmd + 10);
2817 if (num > scsi_debug_write_same_length) {
2818 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
2819 return check_condition_result;
2821 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
2824 struct unmap_block_desc {
/*
 * UNMAP (0x42) handler: copy the parameter list into a temporary
 * buffer, validate each block descriptor, and deallocate the described
 * ranges under the store write lock.  NOTE(review): elided listing --
 * braces, a few declarations and the kfree() on the exit path are
 * missing between the lines below.
 */
2831 resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2834 struct unmap_block_desc *desc;
2835 unsigned int i, payload_len, descriptors;
2837 unsigned long iflags;
2840 if (!scsi_debug_lbp())
2841 return 0; /* fib and say its done */
2842 payload_len = get_unaligned_be16(scp->cmnd + 7);
2843 BUG_ON(scsi_bufflen(scp) != payload_len);
2845 descriptors = (payload_len - 8) / 16;
2846 if (descriptors > scsi_debug_unmap_max_desc) {
2847 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
2848 return check_condition_result;
/* GFP_ATOMIC: may run in interrupt-ish context */
2851 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2853 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2855 return check_condition_result;
2858 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
/* header lengths must be self-consistent with the CDB length */
2860 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2861 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2863 desc = (void *)&buf[8];
2865 write_lock_irqsave(&atomic_rw, iflags);
2867 for (i = 0 ; i < descriptors ; i++) {
2868 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2869 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2871 ret = check_device_access_params(scp, lba, num);
2875 unmap_region(lba, num);
2881 write_unlock_irqrestore(&atomic_rw, iflags);
2887 #define SDEBUG_GET_LBA_STATUS_LEN 32
2890 resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2892 u8 *cmd = scp->cmnd;
2894 u32 alloc_len, mapped, num;
2895 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
2898 lba = get_unaligned_be64(cmd + 2);
2899 alloc_len = get_unaligned_be32(cmd + 10);
2904 ret = check_device_access_params(scp, lba, 1);
2908 if (scsi_debug_lbp())
2909 mapped = map_state(lba, &num);
2912 /* following just in case virtual_gb changed */
2913 sdebug_capacity = get_sdebug_capacity();
2914 if (sdebug_capacity - lba <= 0xffffffff)
2915 num = sdebug_capacity - lba;
2920 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2921 put_unaligned_be32(20, arr); /* Parameter Data Length */
2922 put_unaligned_be64(lba, arr + 8); /* LBA */
2923 put_unaligned_be32(num, arr + 16); /* Number of blocks */
2924 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
2926 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
2929 #define SDEBUG_RLUN_ARR_SZ 256
/*
 * REPORT LUNS (0xa0): build a LUN list of up to scsi_debug_max_luns
 * entries (plus the well-known REPORT LUNS LUN when requested) and copy
 * back as much as fits in the allocation length.  NOTE(review): elided
 * listing -- braces, `lun_cnt` adjustments and loop increments are
 * missing between the lines below.
 */
2931 static int resp_report_luns(struct scsi_cmnd * scp,
2932 struct sdebug_dev_info * devip)
2934 unsigned int alloc_len;
2935 int lun_cnt, i, upper, num, n, want_wlun, shortish;
2937 unsigned char *cmd = scp->cmnd;
2938 int select_report = (int)cmd[2];
2939 struct scsi_lun *one_lun;
2940 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2941 unsigned char * max_addr;
2943 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2944 shortish = (alloc_len < 4);
2945 if (shortish || (select_report > 2)) {
2946 mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
2947 return check_condition_result;
2949 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2950 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2951 lun_cnt = scsi_debug_max_luns;
2952 if (1 == select_report)
2954 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2956 want_wlun = (select_report > 0) ? 1 : 0;
2957 num = lun_cnt + want_wlun;
/* LUN LIST LENGTH header field (big endian) */
2958 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2959 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2960 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2961 sizeof(struct scsi_lun)), num);
2966 one_lun = (struct scsi_lun *) &arr[8];
2967 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2968 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2969 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2971 upper = (lun >> 8) & 0x3f;
2973 one_lun[i].scsi_lun[0] =
2974 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2975 one_lun[i].scsi_lun[1] = lun & 0xff;
/* append the well-known REPORT LUNS LUN when requested */
2978 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2979 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2982 alloc_len = (unsigned char *)(one_lun + i) - arr;
2983 return fill_from_dev_buffer(scp, arr,
2984 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
/*
 * XOR the command's data-out buffer into its data-in buffer, sector by
 * sector, via a temporary linear copy -- the data-verify step of
 * XDWRITEREAD.  NOTE(review): elided listing -- braces, the kaddr
 * assignment and the kfree() of 'buf' are missing between the lines
 * below.
 */
2987 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2988 unsigned int num, struct sdebug_dev_info *devip)
2991 unsigned char *kaddr, *buf;
2992 unsigned int offset;
2993 struct scsi_data_buffer *sdb = scsi_in(scp);
2994 struct sg_mapping_iter miter;
2996 /* better not to use temporary buffer. */
2997 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2999 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3001 return check_condition_result;
3004 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3007 sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3008 SG_MITER_ATOMIC | SG_MITER_TO_SG);
3010 while (sg_miter_next(&miter)) {
/* XOR each mapped chunk of the data-in sgl with the copy */
3012 for (j = 0; j < miter.length; j++)
3013 *(kaddr + j) ^= *(buf + offset + j);
3015 offset += miter.length;
3017 sg_miter_stop(&miter);
3024 resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3026 u8 *cmd = scp->cmnd;
3031 if (!scsi_bidi_cmnd(scp)) {
3032 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3034 return check_condition_result;
3036 errsts = resp_read_dt0(scp, devip);
3039 if (!(cmd[1] & 0x4)) { /* DISABLE_WRITE is not set */
3040 errsts = resp_write_dt0(scp, devip);
3044 lba = get_unaligned_be32(cmd + 2);
3045 num = get_unaligned_be16(cmd + 7);
3046 return resp_xdwriteread(scp, lba, num, devip);
3049 /* When timer or tasklet goes off this function is called. */
/*
 * Delayed-response completion: locate the queued command by array
 * index, release its queue slot under queued_arr_lock, handle a
 * shrinking max_queue (retiring slots), then invoke the mid-level done
 * callback.  NOTE(review): elided listing -- braces, early returns and
 * some declarations are missing between the lines below.
 */
3050 static void sdebug_q_cmd_complete(unsigned long indx)
3054 unsigned long iflags;
3055 struct sdebug_queued_cmd *sqcp;
3056 struct scsi_cmnd *scp;
3057 struct sdebug_dev_info *devip;
3059 atomic_inc(&sdebug_completions);
/* sanity check the queue index before touching the array */
3061 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3062 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3065 spin_lock_irqsave(&queued_arr_lock, iflags);
3066 sqcp = &queued_arr[qa_indx];
3069 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3070 pr_err("%s: scp is NULL\n", __func__);
3073 devip = (struct sdebug_dev_info *)scp->device->hostdata;
3075 atomic_dec(&devip->num_in_q);
3077 pr_err("%s: devip=NULL\n", __func__);
3078 if (atomic_read(&retired_max_queue) > 0)
3081 sqcp->a_cmnd = NULL;
3082 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3083 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3084 pr_err("%s: Unexpected completion\n", __func__);
3088 if (unlikely(retiring)) { /* user has reduced max_queue */
3091 retval = atomic_read(&retired_max_queue);
3092 if (qa_indx >= retval) {
3093 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3094 pr_err("%s: index %d too large\n", __func__, retval);
/* shrink retired_max_queue down to the highest slot still in use */
3097 k = find_last_bit(queued_in_use_bm, retval);
3098 if ((k < scsi_debug_max_queue) || (k == retval))
3099 atomic_set(&retired_max_queue, 0);
3101 atomic_set(&retired_max_queue, k + 1);
3103 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3104 scp->scsi_done(scp); /* callback to mid level */
3107 /* When high resolution timer goes off this function is called. */
/*
 * hrtimer variant of sdebug_q_cmd_complete(): identical slot-release
 * and retiring logic, but driven from a struct hrtimer embedded in the
 * queued command, and returning HRTIMER_NORESTART.  NOTE(review):
 * elided listing -- braces, early returns and some declarations are
 * missing between the lines below.
 */
3108 static enum hrtimer_restart
3109 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3113 unsigned long iflags;
3114 struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
3115 struct sdebug_queued_cmd *sqcp;
3116 struct scsi_cmnd *scp;
3117 struct sdebug_dev_info *devip;
3119 atomic_inc(&sdebug_completions);
3120 qa_indx = sd_hrtp->qa_indx;
3121 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3122 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3125 spin_lock_irqsave(&queued_arr_lock, iflags);
3126 sqcp = &queued_arr[qa_indx];
3129 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3130 pr_err("%s: scp is NULL\n", __func__);
3133 devip = (struct sdebug_dev_info *)scp->device->hostdata;
3135 atomic_dec(&devip->num_in_q);
3137 pr_err("%s: devip=NULL\n", __func__);
3138 if (atomic_read(&retired_max_queue) > 0)
3141 sqcp->a_cmnd = NULL;
3142 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3143 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3144 pr_err("%s: Unexpected completion\n", __func__);
3148 if (unlikely(retiring)) { /* user has reduced max_queue */
3151 retval = atomic_read(&retired_max_queue);
3152 if (qa_indx >= retval) {
3153 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3154 pr_err("%s: index %d too large\n", __func__, retval);
/* shrink retired_max_queue down to the highest slot still in use */
3157 k = find_last_bit(queued_in_use_bm, retval);
3158 if ((k < scsi_debug_max_queue) || (k == retval))
3159 atomic_set(&retired_max_queue, 0);
3161 atomic_set(&retired_max_queue, k + 1);
3163 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3164 scp->scsi_done(scp); /* callback to mid level */
3166 return HRTIMER_NORESTART;
3169 static struct sdebug_dev_info *
3170 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
3172 struct sdebug_dev_info *devip;
3174 devip = kzalloc(sizeof(*devip), flags);
3176 devip->sdbg_host = sdbg_host;
3177 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
/*
 * Find (or lazily create) the sdebug_dev_info for a scsi_device: reuse
 * a matching in-use entry, else recycle an unused one, else allocate a
 * fresh one via sdebug_device_create().  NOTE(review): elided listing
 * -- braces, early returns and loop bodies are missing between the
 * lines below.
 */
3182 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
3184 struct sdebug_host_info * sdbg_host;
3185 struct sdebug_dev_info * open_devip = NULL;
3186 struct sdebug_dev_info * devip =
3187 (struct sdebug_dev_info *)sdev->hostdata;
3191 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3193 pr_err("%s: Host info NULL\n", __func__);
/* look for an exact channel/target/lun match; remember a free slot */
3196 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3197 if ((devip->used) && (devip->channel == sdev->channel) &&
3198 (devip->target == sdev->id) &&
3199 (devip->lun == sdev->lun))
3202 if ((!devip->used) && (!open_devip))
3206 if (!open_devip) { /* try and make a new one */
3207 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3209 printk(KERN_ERR "%s: out of memory at line %d\n",
3210 __func__, __LINE__);
/* (re)initialise the chosen slot for this scsi_device */
3215 open_devip->channel = sdev->channel;
3216 open_devip->target = sdev->id;
3217 open_devip->lun = sdev->lun;
3218 open_devip->sdbg_host = sdbg_host;
3219 atomic_set(&open_devip->num_in_q, 0);
/* fresh device starts with a power-on unit attention pending */
3220 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3221 open_devip->used = true;
3225 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3227 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3228 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
3229 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3230 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3234 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3236 struct sdebug_dev_info *devip;
3238 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3239 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
3240 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3241 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3242 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
3243 devip = devInfoReg(sdp);
3245 return 1; /* no resources, will be marked offline */
3246 sdp->hostdata = devip;
3247 blk_queue_max_segment_size(sdp->request_queue, -1U);
3248 if (scsi_debug_no_uld)
3249 sdp->no_uld_attach = 1;
3253 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3255 struct sdebug_dev_info *devip =
3256 (struct sdebug_dev_info *)sdp->hostdata;
3258 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3259 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
3260 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3262 /* make this slot available for re-use */
3263 devip->used = false;
3264 sdp->hostdata = NULL;
3268 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
/*
 * Scan the queued-command array for 'cmnd'; on a match, release its
 * slot and cancel whichever deferral mechanism is active (hrtimer when
 * ndelay > 0, timer when delay > 0, tasklet when delay < 0).
 * NOTE(review): elided listing -- braces, del_timer calls and the
 * return statements are missing between the lines below.
 */
3269 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
3271 unsigned long iflags;
3272 int k, qmax, r_qmax;
3273 struct sdebug_queued_cmd *sqcp;
3274 struct sdebug_dev_info *devip;
3276 spin_lock_irqsave(&queued_arr_lock, iflags);
3277 qmax = scsi_debug_max_queue;
3278 r_qmax = atomic_read(&retired_max_queue);
3281 for (k = 0; k < qmax; ++k) {
3282 if (test_bit(k, queued_in_use_bm)) {
3283 sqcp = &queued_arr[k];
3284 if (cmnd == sqcp->a_cmnd) {
3285 devip = (struct sdebug_dev_info *)
3286 cmnd->device->hostdata;
3288 atomic_dec(&devip->num_in_q);
3289 sqcp->a_cmnd = NULL;
/* lock dropped before cancelling: cancel paths may sleep/spin */
3290 spin_unlock_irqrestore(&queued_arr_lock,
3292 if (scsi_debug_ndelay > 0) {
3295 &sqcp->sd_hrtp->hrt);
3296 } else if (scsi_debug_delay > 0) {
3297 if (sqcp->cmnd_timerp)
3300 } else if (scsi_debug_delay < 0) {
3302 tasklet_kill(sqcp->tletp);
3304 clear_bit(k, queued_in_use_bm);
3309 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3313 /* Deletes (stops) timers or tasklets of all queued commands */
/*
 * Like stop_queued_cmnd() but for every in-use slot: drop the lock
 * around each cancel, then re-take it to continue the scan.
 * NOTE(review): elided listing -- braces and cancel calls are missing
 * between the lines below.
 */
3314 static void stop_all_queued(void)
3316 unsigned long iflags;
3318 struct sdebug_queued_cmd *sqcp;
3319 struct sdebug_dev_info *devip;
3321 spin_lock_irqsave(&queued_arr_lock, iflags);
3322 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3323 if (test_bit(k, queued_in_use_bm)) {
3324 sqcp = &queued_arr[k];
3326 devip = (struct sdebug_dev_info *)
3327 sqcp->a_cmnd->device->hostdata;
3329 atomic_dec(&devip->num_in_q);
3330 sqcp->a_cmnd = NULL;
/* lock dropped while cancelling this slot's deferral */
3331 spin_unlock_irqrestore(&queued_arr_lock,
3333 if (scsi_debug_ndelay > 0) {
3336 &sqcp->sd_hrtp->hrt);
3337 } else if (scsi_debug_delay > 0) {
3338 if (sqcp->cmnd_timerp)
3341 } else if (scsi_debug_delay < 0) {
3343 tasklet_kill(sqcp->tletp);
3345 clear_bit(k, queued_in_use_bm);
/* re-acquire before continuing the scan */
3346 spin_lock_irqsave(&queued_arr_lock, iflags);
3350 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3353 /* Free queued command memory on heap */
3354 static void free_all_queued(void)
3356 unsigned long iflags;
3358 struct sdebug_queued_cmd *sqcp;
3360 spin_lock_irqsave(&queued_arr_lock, iflags);
3361 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3362 sqcp = &queued_arr[k];
3363 kfree(sqcp->cmnd_timerp);
3364 sqcp->cmnd_timerp = NULL;
3367 kfree(sqcp->sd_hrtp);
3368 sqcp->sd_hrtp = NULL;
3370 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3373 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3377 if (SCpnt->device &&
3378 (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3379 sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
3381 stop_queued_cmnd(SCpnt);
3386 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3388 struct sdebug_dev_info * devip;
3391 if (SCpnt && SCpnt->device) {
3392 struct scsi_device *sdp = SCpnt->device;
3394 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3395 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3396 devip = devInfoReg(sdp);
3398 set_bit(SDEBUG_UA_POR, devip->uas_bm);
/*
 * eh_target_reset_handler hook: raise a bus-reset unit attention on
 * every simulated device sharing the command's target id, counting how
 * many were hit.  NOTE(review): elided listing -- NULL guards, braces
 * and the return statement are missing between the lines below.
 */
3403 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3405 struct sdebug_host_info *sdbg_host;
3406 struct sdebug_dev_info *devip;
3407 struct scsi_device *sdp;
3408 struct Scsi_Host *hp;
3411 ++num_target_resets;
3414 sdp = SCpnt->device;
3417 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3418 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3422 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3424 list_for_each_entry(devip,
3425 &sdbg_host->dev_info_list,
3427 if (devip->target == sdp->id) {
3428 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3432 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3433 sdev_printk(KERN_INFO, sdp,
3434 "%s: %d device(s) found in target\n", __func__, k)
3439 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3441 struct sdebug_host_info *sdbg_host;
3442 struct sdebug_dev_info *devip;
3443 struct scsi_device * sdp;
3444 struct Scsi_Host * hp;
3448 if (!(SCpnt && SCpnt->device))
3450 sdp = SCpnt->device;
3451 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3452 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3455 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3457 list_for_each_entry(devip,
3458 &sdbg_host->dev_info_list,
3460 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3465 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3466 sdev_printk(KERN_INFO, sdp,
3467 "%s: %d device(s) found in host\n", __func__, k);
3472 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3474 struct sdebug_host_info * sdbg_host;
3475 struct sdebug_dev_info *devip;
3479 if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3480 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3481 spin_lock(&sdebug_host_list_lock);
3482 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3483 list_for_each_entry(devip, &sdbg_host->dev_info_list,
3485 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3489 spin_unlock(&sdebug_host_list_lock);
3491 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3492 sdev_printk(KERN_INFO, SCpnt->device,
3493 "%s: %d device(s) found\n", __func__, k);
3497 static void __init sdebug_build_parts(unsigned char *ramp,
3498 unsigned long store_size)
3500 struct partition * pp;
3501 int starts[SDEBUG_MAX_PARTS + 2];
3502 int sectors_per_part, num_sectors, k;
3503 int heads_by_sects, start_sec, end_sec;
3505 /* assume partition table already zeroed */
3506 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
3508 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
3509 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
3510 pr_warn("%s: reducing partitions to %d\n", __func__,
3513 num_sectors = (int)sdebug_store_sectors;
3514 sectors_per_part = (num_sectors - sdebug_sectors_per)
3515 / scsi_debug_num_parts;
3516 heads_by_sects = sdebug_heads * sdebug_sectors_per;
3517 starts[0] = sdebug_sectors_per;
3518 for (k = 1; k < scsi_debug_num_parts; ++k)
3519 starts[k] = ((k * sectors_per_part) / heads_by_sects)
3521 starts[scsi_debug_num_parts] = num_sectors;
3522 starts[scsi_debug_num_parts + 1] = 0;
3524 ramp[510] = 0x55; /* magic partition markings */
3526 pp = (struct partition *)(ramp + 0x1be);
3527 for (k = 0; starts[k + 1]; ++k, ++pp) {
3528 start_sec = starts[k];
3529 end_sec = starts[k + 1] - 1;
3532 pp->cyl = start_sec / heads_by_sects;
3533 pp->head = (start_sec - (pp->cyl * heads_by_sects))
3534 / sdebug_sectors_per;
3535 pp->sector = (start_sec % sdebug_sectors_per) + 1;
3537 pp->end_cyl = end_sec / heads_by_sects;
3538 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3539 / sdebug_sectors_per;
3540 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3542 pp->start_sect = cpu_to_le32(start_sec);
3543 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3544 pp->sys_ind = 0x83; /* plain Linux partition */
3549 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3550 int scsi_result, int delta_jiff)
3552 unsigned long iflags;
3553 int k, num_in_q, qdepth, inject;
3554 struct sdebug_queued_cmd *sqcp = NULL;
3555 struct scsi_device *sdp = cmnd->device;
3557 if (NULL == cmnd || NULL == devip) {
3558 pr_warn("%s: called with NULL cmnd or devip pointer\n",
3560 /* no particularly good error to report back */
3561 return SCSI_MLQUEUE_HOST_BUSY;
3563 if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3564 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3565 __func__, scsi_result);
3566 if (delta_jiff == 0)
3567 goto respond_in_thread;
3569 /* schedule the response at a later time if resources permit */
3570 spin_lock_irqsave(&queued_arr_lock, iflags);
3571 num_in_q = atomic_read(&devip->num_in_q);
3572 qdepth = cmnd->device->queue_depth;
3574 if ((qdepth > 0) && (num_in_q >= qdepth)) {
3576 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3577 goto respond_in_thread;
3579 scsi_result = device_qfull_result;
3580 } else if ((scsi_debug_every_nth != 0) &&
3581 (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
3582 (scsi_result == 0)) {
3583 if ((num_in_q == (qdepth - 1)) &&
3584 (atomic_inc_return(&sdebug_a_tsf) >=
3585 abs(scsi_debug_every_nth))) {
3586 atomic_set(&sdebug_a_tsf, 0);
3588 scsi_result = device_qfull_result;
3592 k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3593 if (k >= scsi_debug_max_queue) {
3594 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3596 goto respond_in_thread;
3597 else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3598 scsi_result = device_qfull_result;
3599 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3600 sdev_printk(KERN_INFO, sdp,
3601 "%s: max_queue=%d exceeded, %s\n",
3602 __func__, scsi_debug_max_queue,
3603 (scsi_result ? "status: TASK SET FULL" :
3604 "report: host busy"));
3606 goto respond_in_thread;
3608 return SCSI_MLQUEUE_HOST_BUSY;
3610 __set_bit(k, queued_in_use_bm);
3611 atomic_inc(&devip->num_in_q);
3612 sqcp = &queued_arr[k];
3613 sqcp->a_cmnd = cmnd;
3614 cmnd->result = scsi_result;
3615 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3616 if (delta_jiff > 0) {
3617 if (NULL == sqcp->cmnd_timerp) {
3618 sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
3620 if (NULL == sqcp->cmnd_timerp)
3621 return SCSI_MLQUEUE_HOST_BUSY;
3622 init_timer(sqcp->cmnd_timerp);
3624 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
3625 sqcp->cmnd_timerp->data = k;
3626 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
3627 add_timer(sqcp->cmnd_timerp);
3628 } else if (scsi_debug_ndelay > 0) {
3629 ktime_t kt = ktime_set(0, scsi_debug_ndelay);
3630 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
3632 if (NULL == sd_hp) {
3633 sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
3635 return SCSI_MLQUEUE_HOST_BUSY;
3636 sqcp->sd_hrtp = sd_hp;
3637 hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
3639 sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
3642 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
3643 } else { /* delay < 0 */
3644 if (NULL == sqcp->tletp) {
3645 sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
3647 if (NULL == sqcp->tletp)
3648 return SCSI_MLQUEUE_HOST_BUSY;
3649 tasklet_init(sqcp->tletp,
3650 sdebug_q_cmd_complete, k);
3652 if (-1 == delta_jiff)
3653 tasklet_hi_schedule(sqcp->tletp);
3655 tasklet_schedule(sqcp->tletp);
3657 if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
3658 (scsi_result == device_qfull_result))
3659 sdev_printk(KERN_INFO, sdp,
3660 "%s: num_in_q=%d +1, %s%s\n", __func__,
3661 num_in_q, (inject ? "<inject> " : ""),
3662 "status: TASK SET FULL");
3665 respond_in_thread: /* call back to mid-layer using invocation thread */
3666 cmnd->result = scsi_result;
3667 cmnd->scsi_done(cmnd);
3671 /* Note: The following macros create attribute files in the
3672 /sys/module/scsi_debug/parameters directory. Unfortunately this
3673 driver is unaware of a change and cannot trigger auxiliary actions
3674 as it can when the corresponding attribute in the
3675 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
3677 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
3678 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
3679 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
3680 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
3681 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
3682 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
3683 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
3684 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
3685 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
3686 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
3687 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
3688 module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
3689 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
3690 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
3691 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
3692 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
3693 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
3694 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
3695 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
3696 module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
3697 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
3698 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
3699 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
3700 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
3701 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
3702 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
3703 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
3704 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
3705 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
3706 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
3707 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
3708 module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
3709 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
3710 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
3711 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
3712 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
3713 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
3714 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
3716 module_param_named(write_same_length, scsi_debug_write_same_length, int,
3719 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
3720 MODULE_DESCRIPTION("SCSI debug adapter driver");
3721 MODULE_LICENSE("GPL");
3722 MODULE_VERSION(SCSI_DEBUG_VERSION);
3724 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
3725 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
3726 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
3727 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
3728 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
3729 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
3730 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
3731 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
3732 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
3733 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
3734 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
3735 MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
3736 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
3737 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
3738 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
3739 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
3740 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
3741 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
3742 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
3743 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
3744 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
3745 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
3746 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
3747 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
3748 MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
3749 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
3750 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
3751 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
3752 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
3753 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
3754 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
3755 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
3756 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
3757 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
3758 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
3759 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
3760 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
3761 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
3762 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
3764 static char sdebug_info[256];
3766 static const char * scsi_debug_info(struct Scsi_Host * shp)
3768 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
3769 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
3770 scsi_debug_version_date, scsi_debug_dev_size_mb,
3775 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
3776 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
3780 int minLen = length > 15 ? 15 : length;
3782 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
3784 memcpy(arr, buffer, minLen);
3786 if (1 != sscanf(arr, "%d", &opts))
3788 scsi_debug_opts = opts;
3789 if (scsi_debug_every_nth != 0)
3790 atomic_set(&sdebug_cmnd_count, 0);
3794 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
3795 * same for each scsi_debug host (if more than one). Some of the counters
3796 * output are not atomics so might be inaccurate in a busy system. */
3797 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
3802 if (scsi_debug_every_nth > 0)
3803 snprintf(b, sizeof(b), " (curr:%d)",
3804 ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
3805 atomic_read(&sdebug_a_tsf) :
3806 atomic_read(&sdebug_cmnd_count)));
3810 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
3811 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
3813 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
3814 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
3815 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
3816 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
3817 "usec_in_jiffy=%lu\n",
3818 SCSI_DEBUG_VERSION, scsi_debug_version_date,
3819 scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
3820 scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
3821 scsi_debug_max_luns, atomic_read(&sdebug_completions),
3822 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
3823 sdebug_sectors_per, num_aborts, num_dev_resets,
3824 num_target_resets, num_bus_resets, num_host_resets,
3825 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
3827 f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
3828 if (f != scsi_debug_max_queue) {
3829 l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
3830 seq_printf(m, " %s BUSY: first,last bits set: %d,%d\n",
3831 "queued_in_use_bm", f, l);
3836 static ssize_t delay_show(struct device_driver *ddp, char *buf)
3838 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
3840 /* Returns -EBUSY if delay is being changed and commands are queued */
3841 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
3846 if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
3848 if (scsi_debug_delay != delay) {
3849 unsigned long iflags;
3852 spin_lock_irqsave(&queued_arr_lock, iflags);
3853 k = find_first_bit(queued_in_use_bm,
3854 scsi_debug_max_queue);
3855 if (k != scsi_debug_max_queue)
3856 res = -EBUSY; /* have queued commands */
3858 scsi_debug_delay = delay;
3859 scsi_debug_ndelay = 0;
3861 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3867 static DRIVER_ATTR_RW(delay);
3869 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
3871 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
3873 /* Returns -EBUSY if ndelay is being changed and commands are queued */
3874 /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
3875 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
3878 unsigned long iflags;
3881 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
3882 (ndelay >= 0) && (ndelay < 1000000000)) {
3884 if (scsi_debug_ndelay != ndelay) {
3885 spin_lock_irqsave(&queued_arr_lock, iflags);
3886 k = find_first_bit(queued_in_use_bm,
3887 scsi_debug_max_queue);
3888 if (k != scsi_debug_max_queue)
3889 res = -EBUSY; /* have queued commands */
3891 scsi_debug_ndelay = ndelay;
3892 scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
3895 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3901 static DRIVER_ATTR_RW(ndelay);
3903 static ssize_t opts_show(struct device_driver *ddp, char *buf)
3905 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
3908 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
3914 if (1 == sscanf(buf, "%10s", work)) {
3915 if (0 == strncasecmp(work,"0x", 2)) {
3916 if (1 == sscanf(&work[2], "%x", &opts))
3919 if (1 == sscanf(work, "%d", &opts))
3925 scsi_debug_opts = opts;
3926 if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
3927 sdebug_any_injecting_opt = true;
3928 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
3929 sdebug_any_injecting_opt = true;
3930 else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
3931 sdebug_any_injecting_opt = true;
3932 else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
3933 sdebug_any_injecting_opt = true;
3934 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
3935 sdebug_any_injecting_opt = true;
3936 atomic_set(&sdebug_cmnd_count, 0);
3937 atomic_set(&sdebug_a_tsf, 0);
3940 static DRIVER_ATTR_RW(opts);
3942 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
3944 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
3946 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
3951 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3952 scsi_debug_ptype = n;
3957 static DRIVER_ATTR_RW(ptype);
3959 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
3961 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
3963 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
3968 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3969 scsi_debug_dsense = n;
3974 static DRIVER_ATTR_RW(dsense);
3976 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
3978 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
3980 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
3985 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3987 scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
3988 if (scsi_debug_fake_rw != n) {
3989 if ((0 == n) && (NULL == fake_storep)) {
3991 (unsigned long)scsi_debug_dev_size_mb *
3994 fake_storep = vmalloc(sz);
3995 if (NULL == fake_storep) {
3996 pr_err("%s: out of memory, 9\n",
4000 memset(fake_storep, 0, sz);
4002 scsi_debug_fake_rw = n;
4008 static DRIVER_ATTR_RW(fake_rw);
4010 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4012 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
4014 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4019 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4020 scsi_debug_no_lun_0 = n;
4025 static DRIVER_ATTR_RW(no_lun_0);
4027 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4029 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
4031 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4036 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4037 scsi_debug_num_tgts = n;
4038 sdebug_max_tgts_luns();
4043 static DRIVER_ATTR_RW(num_tgts);
4045 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4047 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
4049 static DRIVER_ATTR_RO(dev_size_mb);
4051 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4053 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
4055 static DRIVER_ATTR_RO(num_parts);
4057 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4059 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
4061 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4066 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4067 scsi_debug_every_nth = nth;
4068 atomic_set(&sdebug_cmnd_count, 0);
4073 static DRIVER_ATTR_RW(every_nth);
4075 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4077 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
4079 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4084 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4085 scsi_debug_max_luns = n;
4086 sdebug_max_tgts_luns();
4091 static DRIVER_ATTR_RW(max_luns);
4093 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4095 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
4097 /* N.B. max_queue can be changed while there are queued commands. In flight
4098 * commands beyond the new max_queue will be completed. */
4099 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4102 unsigned long iflags;
4105 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4106 (n <= SCSI_DEBUG_CANQUEUE)) {
4107 spin_lock_irqsave(&queued_arr_lock, iflags);
4108 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
4109 scsi_debug_max_queue = n;
4110 if (SCSI_DEBUG_CANQUEUE == k)
4111 atomic_set(&retired_max_queue, 0);
4113 atomic_set(&retired_max_queue, k + 1);
4115 atomic_set(&retired_max_queue, 0);
4116 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4121 static DRIVER_ATTR_RW(max_queue);
4123 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4125 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
4127 static DRIVER_ATTR_RO(no_uld);
4129 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4131 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
4133 static DRIVER_ATTR_RO(scsi_level);
4135 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4137 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
4139 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4145 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4146 changed = (scsi_debug_virtual_gb != n);
4147 scsi_debug_virtual_gb = n;
4148 sdebug_capacity = get_sdebug_capacity();
4150 struct sdebug_host_info *sdhp;
4151 struct sdebug_dev_info *dp;
4153 list_for_each_entry(sdhp, &sdebug_host_list,
4155 list_for_each_entry(dp, &sdhp->dev_info_list,
4157 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4166 static DRIVER_ATTR_RW(virtual_gb);
4168 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4170 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
4173 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4178 if (sscanf(buf, "%d", &delta_hosts) != 1)
4180 if (delta_hosts > 0) {
4182 sdebug_add_adapter();
4183 } while (--delta_hosts);
4184 } else if (delta_hosts < 0) {
4186 sdebug_remove_adapter();
4187 } while (++delta_hosts);
4191 static DRIVER_ATTR_RW(add_host);
4193 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4195 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
4197 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4202 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4203 scsi_debug_vpd_use_hostno = n;
4208 static DRIVER_ATTR_RW(vpd_use_hostno);
4210 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4212 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
4214 static DRIVER_ATTR_RO(sector_size);
4216 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4218 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
4220 static DRIVER_ATTR_RO(dix);
4222 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4224 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
4226 static DRIVER_ATTR_RO(dif);
4228 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4230 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
4232 static DRIVER_ATTR_RO(guard);
4234 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4236 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
4238 static DRIVER_ATTR_RO(ato);
4240 static ssize_t map_show(struct device_driver *ddp, char *buf)
4244 if (!scsi_debug_lbp())
4245 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4246 sdebug_store_sectors);
4248 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
4250 buf[count++] = '\n';
4255 static DRIVER_ATTR_RO(map);
4257 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4259 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
4261 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4266 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4267 scsi_debug_removable = (n > 0);
4272 static DRIVER_ATTR_RW(removable);
4274 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4276 return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
4278 /* Returns -EBUSY if host_lock is being changed and commands are queued */
4279 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4284 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4285 bool new_host_lock = (n > 0);
4288 if (new_host_lock != scsi_debug_host_lock) {
4289 unsigned long iflags;
4292 spin_lock_irqsave(&queued_arr_lock, iflags);
4293 k = find_first_bit(queued_in_use_bm,
4294 scsi_debug_max_queue);
4295 if (k != scsi_debug_max_queue)
4296 res = -EBUSY; /* have queued commands */
4298 scsi_debug_host_lock = new_host_lock;
4299 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4305 static DRIVER_ATTR_RW(host_lock);
4307 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4309 return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
4311 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4316 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4317 scsi_debug_strict = (n > 0);
4322 static DRIVER_ATTR_RW(strict);
4325 /* Note: The following array creates attribute files in the
4326 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4327 files (over those found in the /sys/module/scsi_debug/parameters
4328 directory) is that auxiliary actions can be triggered when an attribute
4329 is changed. For example see: sdebug_add_host_store() above.
4332 static struct attribute *sdebug_drv_attrs[] = {
4333 &driver_attr_delay.attr,
4334 &driver_attr_opts.attr,
4335 &driver_attr_ptype.attr,
4336 &driver_attr_dsense.attr,
4337 &driver_attr_fake_rw.attr,
4338 &driver_attr_no_lun_0.attr,
4339 &driver_attr_num_tgts.attr,
4340 &driver_attr_dev_size_mb.attr,
4341 &driver_attr_num_parts.attr,
4342 &driver_attr_every_nth.attr,
4343 &driver_attr_max_luns.attr,
4344 &driver_attr_max_queue.attr,
4345 &driver_attr_no_uld.attr,
4346 &driver_attr_scsi_level.attr,
4347 &driver_attr_virtual_gb.attr,
4348 &driver_attr_add_host.attr,
4349 &driver_attr_vpd_use_hostno.attr,
4350 &driver_attr_sector_size.attr,
4351 &driver_attr_dix.attr,
4352 &driver_attr_dif.attr,
4353 &driver_attr_guard.attr,
4354 &driver_attr_ato.attr,
4355 &driver_attr_map.attr,
4356 &driver_attr_removable.attr,
4357 &driver_attr_host_lock.attr,
4358 &driver_attr_ndelay.attr,
4359 &driver_attr_strict.attr,
4362 ATTRIBUTE_GROUPS(sdebug_drv);
4364 static struct device *pseudo_primary;
4366 static int __init scsi_debug_init(void)
4373 atomic_set(&sdebug_cmnd_count, 0);
4374 atomic_set(&sdebug_completions, 0);
4375 atomic_set(&retired_max_queue, 0);
4377 if (scsi_debug_ndelay >= 1000000000) {
4378 pr_warn("%s: ndelay must be less than 1 second, ignored\n",
4380 scsi_debug_ndelay = 0;
4381 } else if (scsi_debug_ndelay > 0)
4382 scsi_debug_delay = DELAY_OVERRIDDEN;
4384 switch (scsi_debug_sector_size) {
4391 pr_err("%s: invalid sector_size %d\n", __func__,
4392 scsi_debug_sector_size);
4396 switch (scsi_debug_dif) {
4398 case SD_DIF_TYPE0_PROTECTION:
4399 case SD_DIF_TYPE1_PROTECTION:
4400 case SD_DIF_TYPE2_PROTECTION:
4401 case SD_DIF_TYPE3_PROTECTION:
4405 pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
4409 if (scsi_debug_guard > 1) {
4410 pr_err("%s: guard must be 0 or 1\n", __func__);
4414 if (scsi_debug_ato > 1) {
4415 pr_err("%s: ato must be 0 or 1\n", __func__);
/*
 * NOTE(review): visible tail of the module init function (scsi_debug_init());
 * its header and many interior lines (else-branches, returns, goto labels and
 * closing braces of the original cleanup chain) are absent from this excerpt.
 * Code lines below are untouched; only comments were added.
 */
/* Parameter validation: physical-block exponent and lowest-aligned LBA. */
4419 if (scsi_debug_physblk_exp > 15) {
4420 pr_err("%s: invalid physblk_exp %u\n", __func__,
4421 scsi_debug_physblk_exp);
4425 if (scsi_debug_lowest_aligned > 0x3fff) {
4426 pr_err("%s: lowest_aligned too big: %u\n", __func__,
4427 scsi_debug_lowest_aligned);
/* Size the simulated ram disk (minimum 1 MB) and derive its capacity. */
4431 if (scsi_debug_dev_size_mb < 1)
4432 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
4433 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
4434 sdebug_store_sectors = sz / scsi_debug_sector_size;
4435 sdebug_capacity = get_sdebug_capacity();
4437 /* play around with geometry, don't waste too much on track 0 */
4439 sdebug_sectors_per = 32;
4440 if (scsi_debug_dev_size_mb >= 16)
4442 else if (scsi_debug_dev_size_mb >= 256)
/* NOTE(review): assignments to sdebug_heads presumably sit on the
 * dropped lines above -- confirm against the full source. */
4444 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4445 (sdebug_sectors_per * sdebug_heads);
4446 if (sdebug_cylinders_per >= 1024) {
4447 /* other LLDs do this; implies >= 1GB ram disk ... */
4449 sdebug_sectors_per = 63;
4450 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4451 (sdebug_sectors_per * sdebug_heads);
/* Back the ram disk with vmalloc storage unless fake_rw is enabled. */
4454 if (0 == scsi_debug_fake_rw) {
4455 fake_storep = vmalloc(sz);
4456 if (NULL == fake_storep) {
4457 pr_err("%s: out of memory, 1\n", __func__);
4460 memset(fake_storep, 0, sz);
4461 if (scsi_debug_num_parts > 0)
4462 sdebug_build_parts(fake_storep, sz);
/* Optional DIX: allocate one protection tuple per simulated sector. */
4465 if (scsi_debug_dix) {
4468 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4469 dif_storep = vmalloc(dif_size);
4471 pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
4474 if (dif_storep == NULL) {
4475 pr_err("%s: out of mem. (DIX)\n", __func__);
/* 0xff pattern marks protection info as "not yet written". */
4480 memset(dif_storep, 0xff, dif_size);
4483 /* Logical Block Provisioning */
4484 if (scsi_debug_lbp()) {
4485 scsi_debug_unmap_max_blocks =
4486 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
4488 scsi_debug_unmap_max_desc =
4489 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
4491 scsi_debug_unmap_granularity =
4492 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
/* Granularity must exceed alignment or the UNMAP geometry is invalid. */
4494 if (scsi_debug_unmap_alignment &&
4495 scsi_debug_unmap_granularity <=
4496 scsi_debug_unmap_alignment) {
4497 pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
/* Provisioning bitmap: one bit per mapping unit of the store. */
4502 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4503 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4505 pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
4507 if (map_storep == NULL) {
4508 pr_err("%s: out of mem. (MAP)\n", __func__);
4513 bitmap_zero(map_storep, map_size);
4515 /* Map first 1KB for partition table */
4516 if (scsi_debug_num_parts)
/* Register the pseudo root device, bus and driver that the simulated
 * adapters hang off; each step unwinds via the labels further down. */
4520 pseudo_primary = root_device_register("pseudo_0");
4521 if (IS_ERR(pseudo_primary)) {
4522 pr_warn("%s: root_device_register() error\n", __func__);
4523 ret = PTR_ERR(pseudo_primary);
4526 ret = bus_register(&pseudo_lld_bus);
4528 pr_warn("%s: bus_register error: %d\n", __func__, ret);
4531 ret = driver_register(&sdebug_driverfs_driver);
4533 pr_warn("%s: driver_register error: %d\n", __func__, ret);
/* Create the requested number of simulated adapters; add_host is reset
 * to 0 first and re-incremented inside sdebug_add_adapter(). */
4537 host_to_add = scsi_debug_add_host;
4538 scsi_debug_add_host = 0;
4540 for (k = 0; k < host_to_add; k++) {
4541 if (sdebug_add_adapter()) {
4542 pr_err("%s: sdebug_add_adapter failed k=%d\n",
4548 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
4549 pr_info("%s: built %d host(s)\n", __func__,
4550 scsi_debug_add_host);
/* Error unwind (labels on dropped lines): undo registrations above. */
4555 bus_unregister(&pseudo_lld_bus);
4557 root_device_unregister(pseudo_primary);
/*
 * Module exit: tear down in reverse order of scsi_debug_init().
 * NOTE(review): the loop that removes the k remaining adapters and the
 * frees of the ram-disk/DIF/map stores are among the lines missing from
 * this excerpt -- only one sdebug_remove_adapter() call is visible here.
 */
4568 static void __exit scsi_debug_exit(void)
4570 int k = scsi_debug_add_host;
4575 sdebug_remove_adapter();
4576 driver_unregister(&sdebug_driverfs_driver);
4577 bus_unregister(&pseudo_lld_bus);
4578 root_device_unregister(pseudo_primary);
/* Run init at device_initcall time (not module_init); exit pairs with it. */
4586 device_initcall(scsi_debug_init);
4587 module_exit(scsi_debug_exit);
/*
 * Device-model release callback for a simulated adapter.
 * NOTE(review): the line that frees sdbg_host is missing from this excerpt;
 * presumably kfree(sdbg_host) follows -- confirm against the full source.
 */
4589 static void sdebug_release_adapter(struct device * dev)
4591 struct sdebug_host_info *sdbg_host;
4593 sdbg_host = to_sdebug_host(dev);
/*
 * Create one simulated adapter: allocate host info, create num_tgts *
 * max_luns device-info entries, put the host on the global list and
 * register its struct device on the pseudo bus (which triggers
 * sdebug_driver_probe()).  Returns 0 on success.
 * NOTE(review): the error paths (goto clean, return statements) sit on
 * lines dropped from this excerpt.
 */
4597 static int sdebug_add_adapter(void)
4599 int k, devs_per_host;
4601 struct sdebug_host_info *sdbg_host;
4602 struct sdebug_dev_info *sdbg_devinfo, *tmp;
4604 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
4605 if (NULL == sdbg_host) {
4606 printk(KERN_ERR "%s: out of memory at line %d\n",
4607 __func__, __LINE__);
4611 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
/* Pre-create per-device state for every target/LUN combination. */
4613 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
4614 for (k = 0; k < devs_per_host; k++) {
4615 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
4616 if (!sdbg_devinfo) {
4617 printk(KERN_ERR "%s: out of memory at line %d\n",
4618 __func__, __LINE__);
/* Publish the host on the global list before registering the device. */
4624 spin_lock(&sdebug_host_list_lock);
4625 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
4626 spin_unlock(&sdebug_host_list_lock);
4628 sdbg_host->dev.bus = &pseudo_lld_bus;
4629 sdbg_host->dev.parent = pseudo_primary;
4630 sdbg_host->dev.release = &sdebug_release_adapter;
4631 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
4633 error = device_register(&sdbg_host->dev);
4638 ++scsi_debug_add_host;
/* Error cleanup: free any dev-info entries created so far. */
4642 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4644 list_del(&sdbg_devinfo->dev_list);
4645 kfree(sdbg_devinfo);
/*
 * Remove the most recently added simulated adapter: pop the tail of
 * sdebug_host_list under the lock, then unregister its device (the
 * release callback frees it).  NOTE(review): the NULL-check guarding
 * device_unregister() is on a line dropped from this excerpt.
 */
4652 static void sdebug_remove_adapter(void)
4654 struct sdebug_host_info * sdbg_host = NULL;
4656 spin_lock(&sdebug_host_list_lock);
4657 if (!list_empty(&sdebug_host_list)) {
4658 sdbg_host = list_entry(sdebug_host_list.prev,
4659 struct sdebug_host_info, host_list);
4660 list_del(&sdbg_host->host_list);
4662 spin_unlock(&sdebug_host_list_lock);
4667 device_unregister(&sdbg_host->dev);
4668 --scsi_debug_add_host;
/*
 * .queuecommand entry point: optionally serialize on the host lock
 * (scsi_debug_host_lock module parameter) before dispatching to the
 * real scsi_debug_queuecommand().  Return-type line is outside this
 * excerpt.
 */
4672 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
4674 if (scsi_debug_host_lock) {
4675 unsigned long iflags;
/* Locked path: mimic legacy LLDs that queue under host_lock. */
4678 spin_lock_irqsave(shost->host_lock, iflags);
4679 rc = scsi_debug_queuecommand(cmd);
4680 spin_unlock_irqrestore(shost->host_lock, iflags);
4683 return scsi_debug_queuecommand(cmd);
/*
 * .change_queue_depth handler: snapshot num_in_q under queued_arr_lock,
 * cap the requested depth slightly above CANQUEUE (deliberately allowed
 * to exceed it for testing), apply it, and return the resulting depth.
 * NOTE(review): the early-return value for the NULL-devip path is on a
 * dropped line.
 */
4687 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4690 unsigned long iflags;
4691 struct sdebug_dev_info *devip;
4693 spin_lock_irqsave(&queued_arr_lock, iflags);
4694 devip = (struct sdebug_dev_info *)sdev->hostdata;
4695 if (NULL == devip) {
4696 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4699 num_in_q = atomic_read(&devip->num_in_q);
4700 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4704 /* allow to exceed max host queued_arr elements for testing */
4705 if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4706 qdepth = SCSI_DEBUG_CANQUEUE + 10;
4707 scsi_change_queue_depth(sdev, qdepth);
4709 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4710 sdev_printk(KERN_INFO, sdev,
4711 "%s: qdepth=%d, num_in_q=%d\n",
4712 __func__, qdepth, num_in_q);
4714 return sdev->queue_depth;
/*
 * .change_queue_type handler: delegate to scsi_change_queue_type() and,
 * when Q_NOISE is set, log the resulting tag type.  The switch arms for
 * other tag types (and the default) are on lines dropped from this excerpt.
 */
4718 sdebug_change_qtype(struct scsi_device *sdev, int qtype)
4720 qtype = scsi_change_queue_type(sdev, qtype);
4721 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4728 case MSG_SIMPLE_TAG:
4731 case MSG_ORDERED_TAG:
4732 cp = "ordered tags";
4738 sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp);
/*
 * Every-nth error injection.  Clears the per-command extra struct, then
 * on every |every_nth|-th command either requests a timeout (return 1 =
 * drop the command) or arms one injection flag in the command's private
 * data for the response path to act on.  Return-type line and final
 * return are outside this excerpt.
 */
4744 check_inject(struct scsi_cmnd *scp)
4746 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
4748 memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
4750 if (atomic_inc_return(&sdebug_cmnd_count) >=
4751 abs(scsi_debug_every_nth)) {
4752 atomic_set(&sdebug_cmnd_count, 0);
/* Negative every_nth values < -1 collapse to -1 (one-shot semantics). */
4753 if (scsi_debug_every_nth < -1)
4754 scsi_debug_every_nth = -1;
4755 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
4756 return 1; /* ignore command causing timeout */
4757 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
4758 scsi_medium_access_command(scp))
4759 return 1; /* time out reads and writes */
/* Otherwise record which error to inject for this command. */
4760 if (sdebug_any_injecting_opt) {
4761 int opts = scsi_debug_opts;
4763 if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
4764 ep->inj_recovered = true;
4765 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
4766 ep->inj_transport = true;
4767 else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
4769 else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
4771 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
4772 ep->inj_short = true;
/*
 * Core command dispatcher.  Looks the CDB opcode up in opcode_info_arr
 * (resolving service actions where needed), applies flag-driven checks
 * (invalid op, wlun restrictions, strict CDB-mask validation, pending
 * unit attentions, stopped device, fake_rw, every-nth injection) and
 * finally calls the opcode's resp_* function, scheduling the response
 * via schedule_resp().  NOTE(review): several declarations, gotos and
 * the "check_cond:" label sit on lines dropped from this excerpt.
 */
4779 scsi_debug_queuecommand(struct scsi_cmnd *scp)
4782 struct scsi_device *sdp = scp->device;
4783 const struct opcode_info_t *oip;
4784 const struct opcode_info_t *r_oip;
4785 struct sdebug_dev_info *devip;
4786 u8 *cmd = scp->cmnd;
4787 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
4790 int errsts_no_connect = DID_NO_CONNECT << 16;
4795 bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
4797 scsi_set_resid(scp, 0);
/* Optional hex dump of the incoming CDB (suppressed by NO_CDB_NOISE). */
4798 if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
4803 sb = (int)sizeof(b);
4805 strcpy(b, "too long, over 32 bytes");
4807 for (k = 0, n = 0; k < len && n < sb; ++k)
4808 n += scnprintf(b + n, sb - n, "%02x ",
4811 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
/* LUNs beyond max_luns answer DID_NO_CONNECT, except the report-luns wlun. */
4813 has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS);
4814 if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
4815 return schedule_resp(scp, NULL, errsts_no_connect, 0);
4817 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
4818 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
4819 devip = (struct sdebug_dev_info *)sdp->hostdata;
4821 devip = devInfoReg(sdp);
4823 return schedule_resp(scp, NULL, errsts_no_connect, 0);
4825 na = oip->num_attached;
/* Opcode may have several entries differing by service action. */
4827 if (na) { /* multiple commands with this opcode */
4829 if (FF_SA & r_oip->flags) {
4830 if (F_SA_LOW & oip->flags)
4833 sa = get_unaligned_be16(cmd + 8);
4834 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
4835 if (opcode == oip->opcode && sa == oip->sa)
4838 } else { /* since no service action only check opcode */
4839 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
4840 if (opcode == oip->opcode)
/* No match: point sense data at the service-action field or opcode. */
4845 if (F_SA_LOW & r_oip->flags)
4846 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
4847 else if (F_SA_HIGH & r_oip->flags)
4848 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
4850 mk_sense_invalid_opcode(scp);
4853 } /* else (when na==0) we assume the oip is a match */
4855 if (F_INV_OP & flags) {
4856 mk_sense_invalid_opcode(scp);
4859 if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
4861 sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: "
4862 "0x%x not supported for wlun\n", opcode);
4863 mk_sense_invalid_opcode(scp);
/* Strict mode: reject CDBs with bits set outside the per-byte mask,
 * reporting the exact offending byte (k) and bit (j) in the sense data. */
4866 if (scsi_debug_strict) { /* check cdb against mask */
4870 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
4871 rem = ~oip->len_mask[k] & cmd[k];
4873 for (j = 7; j >= 0; --j, rem <<= 1) {
4877 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
/* Pending unit attentions must be reported before most commands. */
4882 if (!(F_SKIP_UA & flags) &&
4883 SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
4884 errsts = check_readiness(scp, UAS_ONLY, devip);
4888 if ((F_M_ACCESS & flags) && devip->stopped) {
4889 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
4891 sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
4892 "%s\n", my_name, "initializing command "
4894 errsts = check_condition_result;
/* fake_rw skips the media-access work entirely for eligible commands. */
4897 if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
4899 if (scsi_debug_every_nth) {
4900 if (check_inject(scp))
4901 return 0; /* ignore command: make trouble */
/* Dispatch to the leaf handler, falling back to the root entry's. */
4903 if (oip->pfp) /* if this command has a resp_* function, call it */
4904 errsts = oip->pfp(scp, devip);
4905 else if (r_pfp) /* if leaf function ptr NULL, try the root's */
4906 errsts = r_pfp(scp, devip);
4909 return schedule_resp(scp, devip, errsts,
4910 ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
4912 return schedule_resp(scp, devip, check_condition_result, 0);
/*
 * SCSI host template for the simulated adapter; can_queue and
 * use_clustering may be overridden per-host in sdebug_driver_probe().
 * NOTE(review): some member lines (e.g. this_id, max_sectors) appear to
 * be dropped from this excerpt.
 */
4915 static struct scsi_host_template sdebug_driver_template = {
4916 .show_info = scsi_debug_show_info,
4917 .write_info = scsi_debug_write_info,
4918 .proc_name = sdebug_proc_name,
4919 .name = "SCSI DEBUG",
4920 .info = scsi_debug_info,
4921 .slave_alloc = scsi_debug_slave_alloc,
4922 .slave_configure = scsi_debug_slave_configure,
4923 .slave_destroy = scsi_debug_slave_destroy,
4924 .ioctl = scsi_debug_ioctl,
4925 .queuecommand = sdebug_queuecommand_lock_or_not,
4926 .change_queue_depth = sdebug_change_qdepth,
4927 .change_queue_type = sdebug_change_qtype,
4928 .eh_abort_handler = scsi_debug_abort,
4929 .eh_device_reset_handler = scsi_debug_device_reset,
4930 .eh_target_reset_handler = scsi_debug_target_reset,
4931 .eh_bus_reset_handler = scsi_debug_bus_reset,
4932 .eh_host_reset_handler = scsi_debug_host_reset,
4933 .can_queue = SCSI_DEBUG_CANQUEUE,
4935 .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
4936 .cmd_per_lun = DEF_CMD_PER_LUN,
4938 .use_clustering = DISABLE_CLUSTERING,
4939 .module = THIS_MODULE,
4940 .track_queue_depth = 1,
/* Per-command private area used by check_inject() via scsi_cmd_priv(). */
4941 .cmd_size = sizeof(struct sdebug_scmd_extra_t),
/*
 * Bus probe callback: allocate and configure a Scsi_Host for one
 * simulated adapter (queue depth, clustering, id/lun limits, DIF/DIX
 * protection capabilities, guard type, injection flags), then add and
 * scan it.  NOTE(review): error-path returns and some break/closing
 * lines are dropped from this excerpt.
 */
4944 static int sdebug_driver_probe(struct device * dev)
4948 struct sdebug_host_info *sdbg_host;
4949 struct Scsi_Host *hpnt;
4952 sdbg_host = to_sdebug_host(dev);
/* Template tweaks must precede scsi_host_alloc() to take effect. */
4954 sdebug_driver_template.can_queue = scsi_debug_max_queue;
4955 if (scsi_debug_clustering)
4956 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
4957 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
4959 pr_err("%s: scsi_host_alloc failed\n", __func__);
/* Cross-link host info and Scsi_Host private data. */
4964 sdbg_host->shost = hpnt;
4965 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
4966 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
4967 hpnt->max_id = scsi_debug_num_tgts + 1;
4969 hpnt->max_id = scsi_debug_num_tgts;
4970 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
/* Advertise DIF/DIX protection per the scsi_debug_dif/dix parameters. */
4974 switch (scsi_debug_dif) {
4976 case SD_DIF_TYPE1_PROTECTION:
4977 host_prot = SHOST_DIF_TYPE1_PROTECTION;
4979 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
4982 case SD_DIF_TYPE2_PROTECTION:
4983 host_prot = SHOST_DIF_TYPE2_PROTECTION;
4985 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
4988 case SD_DIF_TYPE3_PROTECTION:
4989 host_prot = SHOST_DIF_TYPE3_PROTECTION;
4991 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
4996 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
5000 scsi_host_set_prot(hpnt, host_prot);
5002 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
5003 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5004 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5005 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5006 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5007 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5008 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5009 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
/* guard==1 selects IP checksum guard; otherwise T10 CRC. */
5011 if (scsi_debug_guard == 1)
5012 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5014 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
/* Latch whether any injection option is active for check_inject(). */
5016 opts = scsi_debug_opts;
5017 if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5018 sdebug_any_injecting_opt = true;
5019 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5020 sdebug_any_injecting_opt = true;
5021 else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5022 sdebug_any_injecting_opt = true;
5023 else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5024 sdebug_any_injecting_opt = true;
5025 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5026 sdebug_any_injecting_opt = true;
5028 error = scsi_add_host(hpnt, &sdbg_host->dev);
5030 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
5032 scsi_host_put(hpnt);
5034 scsi_scan_host(hpnt);
/*
 * Bus remove callback: unregister the Scsi_Host, free the per-device
 * info entries, and drop the host reference.  NOTE(review): the NULL
 * sdbg_host early-return and the final return are on dropped lines.
 */
5039 static int sdebug_driver_remove(struct device * dev)
5041 struct sdebug_host_info *sdbg_host;
5042 struct sdebug_dev_info *sdbg_devinfo, *tmp;
5044 sdbg_host = to_sdebug_host(dev);
5047 printk(KERN_ERR "%s: Unable to locate host info\n",
5052 scsi_remove_host(sdbg_host->shost);
5054 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5056 list_del(&sdbg_devinfo->dev_list);
5057 kfree(sdbg_devinfo);
5060 scsi_host_put(sdbg_host->shost);
/*
 * Bus match callback.  NOTE(review): the body is on dropped lines;
 * presumably it matches unconditionally (return 1) so every device on
 * the pseudo bus binds to the debug driver -- confirm in full source.
 */
5064 static int pseudo_lld_bus_match(struct device *dev,
5065 struct device_driver *dev_driver)
/*
 * Pseudo bus the simulated adapters sit on; probe/remove drive the
 * Scsi_Host lifecycle.  NOTE(review): the .name initializer appears to
 * be on a dropped line.
 */
5070 static struct bus_type pseudo_lld_bus = {
5072 .match = pseudo_lld_bus_match,
5073 .probe = sdebug_driver_probe,
5074 .remove = sdebug_driver_remove,
5075 .drv_groups = sdebug_drv_groups,