2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 * This version is more generic, simulating a variable number of disks
10 * (or disk like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/atomic.h>
48 #include <linux/hrtimer.h>
50 #include <net/checksum.h>
52 #include <asm/unaligned.h>
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsicam.h>
59 #include <scsi/scsi_eh.h>
60 #include <scsi/scsi_tcq.h>
61 #include <scsi/scsi_dbg.h>
64 #include "scsi_logging.h"
66 #define SCSI_DEBUG_VERSION "1.85"
67 static const char *scsi_debug_version_date = "20141022";
69 #define MY_NAME "scsi_debug"
71 /* Additional Sense Code (ASC) */
72 #define NO_ADDITIONAL_SENSE 0x0
73 #define LOGICAL_UNIT_NOT_READY 0x4
74 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
75 #define UNRECOVERED_READ_ERR 0x11
76 #define PARAMETER_LIST_LENGTH_ERR 0x1a
77 #define INVALID_OPCODE 0x20
78 #define LBA_OUT_OF_RANGE 0x21
79 #define INVALID_FIELD_IN_CDB 0x24
80 #define INVALID_FIELD_IN_PARAM_LIST 0x26
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
95 /* Additional Sense Code Qualifier (ASCQ) */
96 #define ACK_NAK_TO 0x3
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST 1
101 #define DEF_NUM_TGTS 1
102 #define DEF_MAX_LUNS 1
103 /* With these defaults, this driver will make 1 host with 1 target
104 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
107 #define DEF_DELAY 1 /* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB 8
111 #define DEF_D_SENSE 0
112 #define DEF_EVERY_NTH 0
113 #define DEF_FAKE_RW 0
115 #define DEF_HOST_LOCK 0
118 #define DEF_LBPWS10 0
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0 0
123 #define DEF_NUM_PARTS 0
125 #define DEF_OPT_BLKS 64
126 #define DEF_PHYSBLK_EXP 0
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_UNMAP_ALIGNMENT 0
132 #define DEF_UNMAP_GRANULARITY 1
133 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
134 #define DEF_UNMAP_MAX_DESC 256
135 #define DEF_VIRTUAL_GB 0
136 #define DEF_VPD_USE_HOSTNO 1
137 #define DEF_WRITESAME_LENGTH 0xFFFF
139 #define DELAY_OVERRIDDEN -9999
141 /* bit mask values for scsi_debug_opts */
142 #define SCSI_DEBUG_OPT_NOISE 1
143 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
144 #define SCSI_DEBUG_OPT_TIMEOUT 4
145 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
146 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
147 #define SCSI_DEBUG_OPT_DIF_ERR 32
148 #define SCSI_DEBUG_OPT_DIX_ERR 64
149 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
150 #define SCSI_DEBUG_OPT_SHORT_TRANSFER 0x100
151 #define SCSI_DEBUG_OPT_Q_NOISE 0x200
152 #define SCSI_DEBUG_OPT_ALL_TSF 0x400
153 #define SCSI_DEBUG_OPT_RARE_TSF 0x800
154 #define SCSI_DEBUG_OPT_N_WCE 0x1000
155 #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
156 #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
157 #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
158 /* When "every_nth" > 0 then modulo "every_nth" commands:
159 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
160 * - a RECOVERED_ERROR is simulated on successful read and write
161 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
162 * - a TRANSPORT_ERROR is simulated on successful read and write
163 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
165 * When "every_nth" < 0 then after "- every_nth" commands:
166 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
167 * - a RECOVERED_ERROR is simulated on successful read and write
168 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
169 * - a TRANSPORT_ERROR is simulated on successful read and write
170 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
171 * This will continue until some other action occurs (e.g. the user
172 * writing a new value (other than -1 or 1) to every_nth via sysfs).
175 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs)are returned in
176 * priority order. In the subset implemented here lower numbers have higher
177 * priority. The UA numbers should be a sequence starting from 0 with
178 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
179 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
180 #define SDEBUG_UA_BUS_RESET 1
181 #define SDEBUG_UA_MODE_CHANGED 2
182 #define SDEBUG_UA_CAPACITY_CHANGED 3
183 #define SDEBUG_NUM_UAS 4
185 /* for check_readiness() */
186 #define UAS_ONLY 1 /* check for UAs only */
187 #define UAS_TUR 0 /* if no UAs then check if media access possible */
189 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
190 * sector on read commands: */
191 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
192 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
194 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
195 * or "peripheral device" addressing (value 0) */
196 #define SAM2_LUN_ADDRESS_METHOD 0
197 #define SAM2_WLUN_REPORT_LUNS 0xc101
199 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
200 * (for response) at one time. Can be reduced by max_queue option. Command
201 * responses are not queued when delay=0 and ndelay=0. The per-device
202 * DEF_CMD_PER_LUN can be changed via sysfs:
203 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
204 * SCSI_DEBUG_CANQUEUE. */
205 #define SCSI_DEBUG_CANQUEUE_WORDS 9 /* a WORD is bits in a long */
206 #define SCSI_DEBUG_CANQUEUE (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
207 #define DEF_CMD_PER_LUN 255
/* Sanity check: per-LUN queue depth default must fit within the global
 * command queue; the missing #endif is restored to terminate the guard. */
#if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
#warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
#endif
/* SCSI opcodes (first byte of cdb) mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN = 12,	/* 12, 16 */
	SDEB_I_SERV_ACT_OUT = 13,	/* 12, 16 */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* 10 only */
	SDEB_I_VARIABLE_LEN = 17,
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
	SDEB_I_WRITE_BUFFER = 26,
	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 28,		/* 10 only */
	SDEB_I_COMP_WRITE = 29,
	SDEB_I_LAST_ELEMENT = 30,	/* keep this last */
};
248 static const unsigned char opcode_ind_arr[256] = {
249 /* 0x0; 0x0->0x1f: 6 byte cdbs */
250 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
252 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
253 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
255 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
256 SDEB_I_ALLOW_REMOVAL, 0,
257 /* 0x20; 0x20->0x3f: 10 byte cdbs */
258 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
259 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
260 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
261 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
262 /* 0x40; 0x40->0x5f: 10 byte cdbs */
263 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
264 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
265 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
267 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
268 /* 0x60; 0x60->0x7d are reserved */
269 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
270 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
271 0, SDEB_I_VARIABLE_LEN,
272 /* 0x80; 0x80->0x9f: 16 byte cdbs */
273 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
274 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
275 0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
276 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
277 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
278 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
279 SDEB_I_MAINT_OUT, 0, 0, 0,
280 SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
282 0, 0, 0, 0, 0, 0, 0, 0,
283 0, 0, 0, 0, 0, 0, 0, 0,
284 /* 0xc0; 0xc0->0xff: vendor specific */
285 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
286 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
287 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
288 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
293 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
295 #define F_RL_WLUN_OK 0x10
296 #define F_SKIP_UA 0x20
297 #define F_DELAY_OVERR 0x40
298 #define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
299 #define F_SA_HIGH 0x100 /* as used by variable length cdbs */
300 #define F_INV_OP 0x200
301 #define F_FAKE_RW 0x400
302 #define F_M_ACCESS 0x800 /* media access */
304 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
305 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
306 #define FF_SA (F_SA_HIGH | F_SA_LOW)
308 struct sdebug_dev_info;
309 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
310 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
311 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
312 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
313 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
314 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
315 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
316 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
317 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
318 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
319 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
320 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
321 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
322 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
323 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
324 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
325 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
326 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
327 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
328 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
330 struct opcode_info_t {
331 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff
332 * for terminating element */
333 u8 opcode; /* if num_attached > 0, preferred */
334 u16 sa; /* service action */
335 u32 flags; /* OR-ed set of SDEB_F_* */
336 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
337 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
338 u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
339 /* ignore cdb bytes after position 15 */
342 static const struct opcode_info_t msense_iarr[1] = {
343 {0, 0x1a, 0, F_D_IN, NULL, NULL,
344 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
347 static const struct opcode_info_t mselect_iarr[1] = {
348 {0, 0x15, 0, F_D_OUT, NULL, NULL,
349 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
352 static const struct opcode_info_t read_iarr[3] = {
353 {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
354 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
356 {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
357 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
358 {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
359 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
363 static const struct opcode_info_t write_iarr[3] = {
364 {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */
365 {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
367 {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */
368 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
369 {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */
370 {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
374 static const struct opcode_info_t sa_in_iarr[1] = {
375 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
376 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
377 0xff, 0xff, 0xff, 0, 0xc7} },
380 static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */
381 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
382 NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
383 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
386 static const struct opcode_info_t maint_in_iarr[2] = {
387 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
388 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
390 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
391 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
395 static const struct opcode_info_t write_same_iarr[1] = {
396 {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
397 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
398 0xff, 0xff, 0xff, 0x1f, 0xc7} },
401 static const struct opcode_info_t reserve_iarr[1] = {
402 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
403 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
406 static const struct opcode_info_t release_iarr[1] = {
407 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
408 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
/* NOTE(review): this extract is missing several interior lines of the
 * table (len_mask continuations and the terminator region); entries are
 * positional: num_attached, opcode, sa, flags, pfp, arrp, len_mask. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
	/* SDEB_I_INVALID_OPCODE = 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	/* INQUIRY */
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	/* REPORT LUNS */
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	/* REQUEST SENSE */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	/* MODE SENSE(10); 6-byte variant in msense_iarr */
	{1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
	    {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	/* MODE SELECT(10); 6-byte variant in mselect_iarr */
	{1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
	    {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	/* LOG SENSE */
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	/* READ CAPACITY(10) */
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	{3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
	    {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* READ(16) */
	{3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
	    {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* WRITE(16) */
	{0, 0x1b, 0, 0, resp_start_stop, NULL,		/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
	    {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x1, 0xc7} },	/* READ CAPACITY(16) */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* SA OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
	    {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* VERIFY */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
	    vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
		      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
	{1, 0x56, 0, F_D_OUT, NULL, reserve_iarr,	/* RESERVE(10) */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	{1, 0x57, 0, F_D_OUT, NULL, release_iarr,	/* RELEASE(10) */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* ALLOW REMOVAL */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL,		/* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	/* NOTE(review): next entry places F_D_OUT in the u16 'sa' slot and 0
	 * in 'flags' -- looks like sa/flags are swapped; confirm upstream. */
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL,	/* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
	    NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* WRITE_BUFFER */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
	    write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
			      0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
	{0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x1f, 0xc7} },		/* COMPARE AND WRITE */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
500 struct sdebug_scmd_extra_t {
508 static int scsi_debug_add_host = DEF_NUM_HOST;
509 static int scsi_debug_ato = DEF_ATO;
510 static int scsi_debug_delay = DEF_DELAY;
511 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
512 static int scsi_debug_dif = DEF_DIF;
513 static int scsi_debug_dix = DEF_DIX;
514 static int scsi_debug_dsense = DEF_D_SENSE;
515 static int scsi_debug_every_nth = DEF_EVERY_NTH;
516 static int scsi_debug_fake_rw = DEF_FAKE_RW;
517 static unsigned int scsi_debug_guard = DEF_GUARD;
518 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
519 static int scsi_debug_max_luns = DEF_MAX_LUNS;
520 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
521 static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
522 static int scsi_debug_ndelay = DEF_NDELAY;
523 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
524 static int scsi_debug_no_uld = 0;
525 static int scsi_debug_num_parts = DEF_NUM_PARTS;
526 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
527 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
528 static int scsi_debug_opts = DEF_OPTS;
529 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
530 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
531 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
532 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
533 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
534 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
535 static unsigned int scsi_debug_lbpu = DEF_LBPU;
536 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
537 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
538 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
539 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
540 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
541 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
542 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
543 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
544 static bool scsi_debug_removable = DEF_REMOVABLE;
545 static bool scsi_debug_clustering;
546 static bool scsi_debug_host_lock = DEF_HOST_LOCK;
547 static bool scsi_debug_strict = DEF_STRICT;
548 static bool sdebug_any_injecting_opt;
550 static atomic_t sdebug_cmnd_count;
551 static atomic_t sdebug_completions;
552 static atomic_t sdebug_a_tsf; /* counter of 'almost' TSFs */
554 #define DEV_READONLY(TGT) (0)
556 static unsigned int sdebug_store_sectors;
557 static sector_t sdebug_capacity; /* in sectors */
559 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
560 may still need them */
561 static int sdebug_heads; /* heads per disk */
562 static int sdebug_cylinders_per; /* cylinders per surface */
563 static int sdebug_sectors_per; /* sectors per cylinder */
565 #define SDEBUG_MAX_PARTS 4
567 #define SCSI_DEBUG_MAX_CMD_LEN 32
569 static unsigned int scsi_debug_lbp(void)
571 return ((0 == scsi_debug_fake_rw) &&
572 (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
575 struct sdebug_dev_info {
576 struct list_head dev_list;
577 unsigned int channel;
580 struct sdebug_host_info *sdbg_host;
581 unsigned long uas_bm[1];
583 char stopped; /* TODO: should be atomic */
587 struct sdebug_host_info {
588 struct list_head host_list;
589 struct Scsi_Host *shost;
591 struct list_head dev_info_list;
594 #define to_sdebug_host(d) \
595 container_of(d, struct sdebug_host_info, dev)
597 static LIST_HEAD(sdebug_host_list);
598 static DEFINE_SPINLOCK(sdebug_host_list_lock);
601 struct sdebug_hrtimer { /* ... is derived from hrtimer */
602 struct hrtimer hrt; /* must be first element */
/* One slot of the deferred-response queue (queued_arr); which completion
 * mechanism (timer, tasklet or hrtimer) is in use depends on the
 * delay/ndelay module options. */
struct sdebug_queued_cmd {
	/* in_use flagged by a bit in queued_in_use_bm[] */
	struct timer_list *cmnd_timerp;
	struct tasklet_struct *tletp;
	struct sdebug_hrtimer *sd_hrtp;
	struct scsi_cmnd * a_cmnd;
};
613 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
614 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
617 static unsigned char * fake_storep; /* ramdisk storage */
618 static struct sd_dif_tuple *dif_storep; /* protection info */
619 static void *map_storep; /* provisioning map */
621 static unsigned long map_size;
622 static int num_aborts;
623 static int num_dev_resets;
624 static int num_target_resets;
625 static int num_bus_resets;
626 static int num_host_resets;
627 static int dix_writes;
628 static int dix_reads;
629 static int dif_errors;
631 static DEFINE_SPINLOCK(queued_arr_lock);
632 static DEFINE_RWLOCK(atomic_rw);
634 static char sdebug_proc_name[] = MY_NAME;
635 static const char *my_name = MY_NAME;
637 static struct bus_type pseudo_lld_bus;
639 static struct device_driver sdebug_driverfs_driver = {
640 .name = sdebug_proc_name,
641 .bus = &pseudo_lld_bus,
644 static const int check_condition_result =
645 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
647 static const int illegal_condition_result =
648 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
650 static const int device_qfull_result =
651 (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
/* Default mode page payloads returned by MODE SENSE; continuation lines
 * and terminators restored. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};

static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};

static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
661 static void *fake_store(unsigned long long lba)
663 lba = do_div(lba, sdebug_store_sectors);
665 return fake_storep + lba * scsi_debug_sector_size;
668 static struct sd_dif_tuple *dif_store(sector_t sector)
670 sector = do_div(sector, sdebug_store_sectors);
672 return dif_storep + sector;
675 static int sdebug_add_adapter(void);
676 static void sdebug_remove_adapter(void);
678 static void sdebug_max_tgts_luns(void)
680 struct sdebug_host_info *sdbg_host;
681 struct Scsi_Host *hpnt;
683 spin_lock(&sdebug_host_list_lock);
684 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
685 hpnt = sdbg_host->shost;
686 if ((hpnt->this_id >= 0) &&
687 (scsi_debug_num_tgts > hpnt->this_id))
688 hpnt->max_id = scsi_debug_num_tgts + 1;
690 hpnt->max_id = scsi_debug_num_tgts;
691 /* scsi_debug_max_luns; */
692 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
694 spin_unlock(&sdebug_host_list_lock);
697 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
699 /* Set in_bit to -1 to indicate no bit position of invalid field */
701 mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
702 int in_byte, int in_bit)
704 unsigned char *sbuff;
708 sbuff = scp->sense_buffer;
710 sdev_printk(KERN_ERR, scp->device,
711 "%s: sense_buffer is NULL\n", __func__);
714 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
715 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
716 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
718 memset(sks, 0, sizeof(sks));
724 sks[0] |= 0x7 & in_bit;
726 put_unaligned_be16(in_byte, sks + 1);
727 if (scsi_debug_dsense) {
732 memcpy(sbuff + sl + 4, sks, 3);
734 memcpy(sbuff + 15, sks, 3);
735 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
736 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
737 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
738 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
741 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
743 unsigned char *sbuff;
745 sbuff = scp->sense_buffer;
747 sdev_printk(KERN_ERR, scp->device,
748 "%s: sense_buffer is NULL\n", __func__);
751 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
753 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
755 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
756 sdev_printk(KERN_INFO, scp->device,
757 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
758 my_name, key, asc, asq);
762 mk_sense_invalid_opcode(struct scsi_cmnd *scp)
764 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
767 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
769 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
771 sdev_printk(KERN_INFO, dev,
772 "%s: BLKFLSBUF [0x1261]\n", __func__);
773 else if (0x5331 == cmd)
774 sdev_printk(KERN_INFO, dev,
775 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
778 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
782 /* return -ENOTTY; // correct return but upsets fdisk */
785 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
786 struct sdebug_dev_info * devip)
789 bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
791 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
792 if (k != SDEBUG_NUM_UAS) {
793 const char *cp = NULL;
797 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
798 UA_RESET_ASC, POWER_ON_RESET_ASCQ);
800 cp = "power on reset";
802 case SDEBUG_UA_BUS_RESET:
803 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
804 UA_RESET_ASC, BUS_RESET_ASCQ);
808 case SDEBUG_UA_MODE_CHANGED:
809 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
810 UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
812 cp = "mode parameters changed";
814 case SDEBUG_UA_CAPACITY_CHANGED:
815 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
816 UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
818 cp = "capacity data changed";
820 pr_warn("%s: unexpected unit attention code=%d\n",
826 clear_bit(k, devip->uas_bm);
828 sdev_printk(KERN_INFO, SCpnt->device,
829 "%s reports: Unit attention: %s\n",
831 return check_condition_result;
833 if ((UAS_TUR == uas_only) && devip->stopped) {
834 mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
837 sdev_printk(KERN_INFO, SCpnt->device,
838 "%s reports: Not ready: %s\n", my_name,
839 "initializing command required");
840 return check_condition_result;
845 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
846 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
850 struct scsi_data_buffer *sdb = scsi_in(scp);
854 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
855 return (DID_ERROR << 16);
857 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
859 sdb->resid = scsi_bufflen(scp) - act_len;
864 /* Returns number of bytes fetched into 'arr' or -1 if error. */
865 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
868 if (!scsi_bufflen(scp))
870 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
873 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
877 static const char * inq_vendor_id = "Linux ";
878 static const char * inq_product_id = "scsi_debug ";
879 static const char *inq_product_rev = "0184"; /* version less '.' */
/* Device identification VPD page. Returns number of bytes placed in arr */
/* NOTE(review): this extract is missing several interior lines of this
 * function (the dev_id_str_len parameter tail, opening brace, local
 * declarations and some designator bytes); the visible code builds a
 * sequence of SPC identification descriptors: T10 vendor id, NAA-5 LU id,
 * relative target port, target port NAA-5, target port group, target
 * device NAA-5 and a SCSI name string. */
static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
			   int target_dev_id, int dev_id_num,
			   const char * dev_id_str,
	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	memcpy(&arr[4], inq_vendor_id, 8);
	memcpy(&arr[12], inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	if (dev_id_num >= 0) {
		/* NAA-5, Logical unit identifier (binary) */
		arr[num++] = 0x1;	/* binary (not necessarily sas) */
		arr[num++] = 0x3;	/* PIV=0, lu, naa */
		arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
		arr[num++] = (dev_id_num >> 24);
		arr[num++] = (dev_id_num >> 16) & 0xff;
		arr[num++] = (dev_id_num >> 8) & 0xff;
		arr[num++] = dev_id_num & 0xff;
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x1;	/* relative port A */
	/* NAA-5, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
	arr[num++] = (port_a >> 24);
	arr[num++] = (port_a >> 16) & 0xff;
	arr[num++] = (port_a >> 8) & 0xff;
	arr[num++] = port_a & 0xff;
	/* NAA-5, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = (port_group_id >> 8) & 0xff;
	arr[num++] = port_group_id & 0xff;
	/* NAA-5, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
	arr[num++] = (target_dev_id >> 24);
	arr[num++] = (target_dev_id >> 16) & 0xff;
	arr[num++] = (target_dev_id >> 8) & 0xff;
	arr[num++] = target_dev_id & 0xff;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	memcpy(arr + num, "naa.52222220", 12);
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	memset(arr + num, 0, 4);
/* Canned payload for VPD page 0x84: three 6-byte identifiers that differ
 * only in the trailing byte (0, 1, 2). */
976 static unsigned char vpd84_data[] = {
977 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
978     0x22,0x22,0x22,0x0,0xbb,0x1,
979     0x22,0x22,0x22,0x0,0xbb,0x2,
982 /* Software interface identification VPD page */
/* Copy the canned table into 'arr'; returns the payload length. */
983 static int inquiry_evpd_84(unsigned char * arr)
985 memcpy(arr, vpd84_data, sizeof(vpd84_data));
986 return sizeof(vpd84_data);
989 /* Management network addresses VPD page */
/* Build VPD page 0x85 in 'arr': two network-address descriptors, each a
 * null-terminated URL padded to a multiple of 4 bytes. */
990 static int inquiry_evpd_85(unsigned char * arr)
993 const char * na1 = "https://www.kernel.org/config";
994 const char * na2 = "http://www.kernel.org/log";
997 arr[num++] = 0x1; /* lu, storage config */
998 arr[num++] = 0x0; /* reserved */
/* Round the (length + NUL) up to the next multiple of 4. */
1003 plen = ((plen / 4) + 1) * 4;
1004 arr[num++] = plen; /* length, null terminated, padded */
1005 memcpy(arr + num, na1, olen);
1006 memset(arr + num + olen, 0, plen - olen);
1009 arr[num++] = 0x4; /* lu, logging */
1010 arr[num++] = 0x0; /* reserved */
1015 plen = ((plen / 4) + 1) * 4;
1016 arr[num++] = plen; /* length, null terminated, padded */
1017 memcpy(arr + num, na2, olen);
1018 memset(arr + num + olen, 0, plen - olen);
1024 /* SCSI ports VPD page */
/* Build VPD page 0x88 in 'arr': advertises two relative target ports,
 * each carrying one NAA-5 target port identifier derived from
 * target_dev_id (port A = id+1, port B = id+2). */
1025 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
1030 port_a = target_dev_id + 1;
1031 port_b = port_a + 1;
1032 arr[num++] = 0x0; /* reserved */
1033 arr[num++] = 0x0; /* reserved */
1035 arr[num++] = 0x1; /* relative port 1 (primary) */
1036 memset(arr + num, 0, 6);
1039 arr[num++] = 12; /* length tp descriptor */
1040 /* naa-5 target port identifier (A) */
1041 arr[num++] = 0x61; /* proto=sas, binary */
1042 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1043 arr[num++] = 0x0; /* reserved */
1044 arr[num++] = 0x8; /* length */
1045 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
/* Big-endian 32-bit port A value. */
1049 arr[num++] = (port_a >> 24);
1050 arr[num++] = (port_a >> 16) & 0xff;
1051 arr[num++] = (port_a >> 8) & 0xff;
1052 arr[num++] = port_a & 0xff;
1054 arr[num++] = 0x0; /* reserved */
1055 arr[num++] = 0x0; /* reserved */
1057 arr[num++] = 0x2; /* relative port 2 (secondary) */
1058 memset(arr + num, 0, 6);
1061 arr[num++] = 12; /* length tp descriptor */
1062 /* naa-5 target port identifier (B) */
1063 arr[num++] = 0x61; /* proto=sas, binary */
1064 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1065 arr[num++] = 0x0; /* reserved */
1066 arr[num++] = 0x8; /* length */
1067 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
/* Big-endian 32-bit port B value. */
1071 arr[num++] = (port_b >> 24);
1072 arr[num++] = (port_b >> 16) & 0xff;
1073 arr[num++] = (port_b >> 8) & 0xff;
1074 arr[num++] = port_b & 0xff;
/* Canned ATA IDENTIFY-style payload for VPD page 0x89 (ATA Information).
 * The leading text mimics a SAT layer ("linux   SAT scsi_debug"); the
 * remainder is a fixed 512-byte identify-device image ending in the
 * 0xa5 signature byte.  NOTE(review): some rows are elided in this
 * listing. */
1080 static unsigned char vpd89_data[] = {
1081 /* from 4th byte */ 0,0,0,0,
1082 'l','i','n','u','x',' ',' ',' ',
1083 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1085 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1087 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1088 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1089 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1090 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1092 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1094 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1096 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1097 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1098 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1099 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1100 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1101 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1102 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1103 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1104 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1105 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1106 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1107 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1108 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1109 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1110 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1111 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1112 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1113 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1114 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1115 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1116 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1117 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1118 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1119 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1120 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1121 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1124 /* ATA Information VPD page */
/* Copy the canned ATA information payload; returns its length. */
1125 static int inquiry_evpd_89(unsigned char * arr)
1127 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1128 return sizeof(vpd89_data);
/* Template for VPD page 0xb0 (Block Limits); live fields are patched in
 * by inquiry_evpd_b0() below. */
1132 static unsigned char vpdb0_data[] = {
1133 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1134     0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1135     0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1136     0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1139 /* Block limits VPD page (SBC-3) */
/* Build VPD page 0xb0 in 'arr' from the module's geometry and thin
 * provisioning (lbp) parameters; returns the payload length. */
1140 static int inquiry_evpd_b0(unsigned char * arr)
1144 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1146 /* Optimal transfer length granularity */
/* Granularity = physical block size in logical blocks (2^physblk_exp). */
1147 gran = 1 << scsi_debug_physblk_exp;
1148 arr[2] = (gran >> 8) & 0xff;
1149 arr[3] = gran & 0xff;
1151 /* Maximum Transfer Length */
/* Only advertise a max transfer length for stores larger than 0x400
 * sectors; otherwise the template's default stands. */
1152 if (sdebug_store_sectors > 0x400) {
1153 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
1154 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
1155 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
1156 arr[7] = sdebug_store_sectors & 0xff;
1159 /* Optimal Transfer Length */
1160 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
1162 if (scsi_debug_lbpu) {
1163 /* Maximum Unmap LBA Count */
1164 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
1166 /* Maximum Unmap Block Descriptor Count */
1167 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
1170 /* Unmap Granularity Alignment */
1171 if (scsi_debug_unmap_alignment) {
1172 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
1173 arr[28] |= 0x80; /* UGAVALID */
1176 /* Optimal Unmap Granularity */
1177 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
1179 /* Maximum WRITE SAME Length */
1180 put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
1182 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1184 return sizeof(vpdb0_data);
1187 /* Block device characteristics VPD page (SBC-3) */
/* Build VPD page 0xb1 in 'arr': reports a non-rotating (SSD-like)
 * medium in a sub-1.8" form factor.  Returns the payload length
 * (elided in this listing). */
1188 static int inquiry_evpd_b1(unsigned char *arr)
1190 memset(arr, 0, 0x3c);
1192 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1194 arr[3] = 5; /* less than 1.8" */
1199 /* Logical block provisioning VPD page (SBC-3) */
/* Build VPD page 0xb2 in 'arr'.  Flag bytes are set from the module's
 * lbpu/lbpws/lbpws10/lbprz options (the bit-set lines are elided in
 * this listing). */
1200 static int inquiry_evpd_b2(unsigned char *arr)
1202 memset(arr, 0, 0x4);
1203 arr[0] = 0; /* threshold exponent */
1205 if (scsi_debug_lbpu)
1208 if (scsi_debug_lbpws)
1211 if (scsi_debug_lbpws10)
1214 if (scsi_debug_lbprz)
1220 #define SDEBUG_LONG_INQ_SZ 96
1221 #define SDEBUG_MAX_INQ_ARR_SZ 584
/* Respond to the INQUIRY command: builds either one of the EVPD pages
 * (when the EVPD bit is set in cmd[1]) or the standard inquiry data,
 * then copies the result into the command's data-in buffer.
 * NOTE(review): multiple lines are elided in this listing. */
1223 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1225 unsigned char pq_pdt;
1226 unsigned char * arr;
1227 unsigned char *cmd = scp->cmnd;
1228 int alloc_len, n, ret;
/* Allocation length from CDB bytes 3-4 (big endian). */
1231 alloc_len = (cmd[3] << 8) + cmd[4];
1232 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1234 return DID_REQUEUE << 16;
1235 have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS);
1237 pq_pdt = 0x1e; /* present, wlun */
1238 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
1239 pq_pdt = 0x7f; /* not present, no device type */
1241 pq_pdt = (scsi_debug_ptype & 0x1f);
/* CMDDT is obsolete in SPC-3+: reject it as an invalid CDB field. */
1243 if (0x2 & cmd[1]) { /* CMDDT bit set */
1244 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1246 return check_condition_result;
1247 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1248 int lu_id_num, port_group_id, target_dev_id, len;
1250 int host_no = devip->sdbg_host->shost->host_no;
/* Synthesize ids from host/channel/target/lun so each simulated
 * device is distinguishable. */
1252 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1253     (devip->channel & 0x7f);
1254 if (0 == scsi_debug_vpd_use_hostno)
/* Well-known LUNs get lu_id_num == -1 (no LU designators). */
1256 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1257 (devip->target * 1000) + devip->lun);
1258 target_dev_id = ((host_no + 1) * 2000) +
1259 (devip->target * 1000) - 3;
1260 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1261 if (0 == cmd[2]) { /* supported vital product data pages */
1262 arr[1] = cmd[2]; /*sanity */
1264 arr[n++] = 0x0; /* this page */
1265 arr[n++] = 0x80; /* unit serial number */
1266 arr[n++] = 0x83; /* device identification */
1267 arr[n++] = 0x84; /* software interface ident. */
1268 arr[n++] = 0x85; /* management network addresses */
1269 arr[n++] = 0x86; /* extended inquiry */
1270 arr[n++] = 0x87; /* mode page policy */
1271 arr[n++] = 0x88; /* SCSI ports */
1272 arr[n++] = 0x89; /* ATA information */
1273 arr[n++] = 0xb0; /* Block limits (SBC) */
1274 arr[n++] = 0xb1; /* Block characteristics (SBC) */
1275 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1277 arr[3] = n - 4; /* number of supported VPD pages */
1278 } else if (0x80 == cmd[2]) { /* unit serial number */
1279 arr[1] = cmd[2]; /*sanity */
1281 memcpy(&arr[4], lu_id_str, len);
1282 } else if (0x83 == cmd[2]) { /* device identification */
1283 arr[1] = cmd[2]; /*sanity */
1284 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
1285 target_dev_id, lu_id_num,
1287 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1288 arr[1] = cmd[2]; /*sanity */
1289 arr[3] = inquiry_evpd_84(&arr[4]);
1290 } else if (0x85 == cmd[2]) { /* Management network addresses */
1291 arr[1] = cmd[2]; /*sanity */
1292 arr[3] = inquiry_evpd_85(&arr[4]);
1293 } else if (0x86 == cmd[2]) { /* extended inquiry */
1294 arr[1] = cmd[2]; /*sanity */
1295 arr[3] = 0x3c; /* number of following entries */
/* Advertise protection-check bits consistent with the dif option:
 * type 3 cannot check reference tags, so only GRD_CHK. */
1296 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
1297 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1298 else if (scsi_debug_dif)
1299 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1301 arr[4] = 0x0; /* no protection stuff */
1302 arr[5] = 0x7; /* head of q, ordered + simple q's */
1303 } else if (0x87 == cmd[2]) { /* mode page policy */
1304 arr[1] = cmd[2]; /*sanity */
1305 arr[3] = 0x8; /* number of following entries */
1306 arr[4] = 0x2; /* disconnect-reconnect mp */
1307 arr[6] = 0x80; /* mlus, shared */
1308 arr[8] = 0x18; /* protocol specific lu */
1309 arr[10] = 0x82; /* mlus, per initiator port */
1310 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1311 arr[1] = cmd[2]; /*sanity */
1312 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1313 } else if (0x89 == cmd[2]) { /* ATA information */
1314 arr[1] = cmd[2]; /*sanity */
1315 n = inquiry_evpd_89(&arr[4]);
1317 arr[3] = (n & 0xff);
1318 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1319 arr[1] = cmd[2]; /*sanity */
1320 arr[3] = inquiry_evpd_b0(&arr[4]);
1321 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1322 arr[1] = cmd[2]; /*sanity */
1323 arr[3] = inquiry_evpd_b1(&arr[4]);
1324 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1325 arr[1] = cmd[2]; /*sanity */
1326 arr[3] = inquiry_evpd_b2(&arr[4]);
/* Unknown page code: invalid field in CDB byte 2. */
1328 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1330 return check_condition_result;
/* Transfer min(page length + header, allocation length, arr size). */
1332 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1333 ret = fill_from_dev_buffer(scp, arr,
1334     min(len, SDEBUG_MAX_INQ_ARR_SZ));
1338 /* drops through here for a standard inquiry */
1339 arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
1340 arr[2] = scsi_debug_scsi_level;
1341 arr[3] = 2; /* response_data_format==2 */
1342 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1343 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
1344 if (0 == scsi_debug_vpd_use_hostno)
1345 arr[5] = 0x10; /* claim: implicit TGPS */
1346 arr[6] = 0x10; /* claim: MultiP */
1347 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1348 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1349 memcpy(&arr[8], inq_vendor_id, 8);
1350 memcpy(&arr[16], inq_product_id, 16);
1351 memcpy(&arr[32], inq_product_rev, 4);
1352 /* version descriptors (2 bytes each) follow */
1353 arr[58] = 0x0; arr[59] = 0xa2; /* SAM-5 rev 4 */
1354 arr[60] = 0x4; arr[61] = 0x68; /* SPC-4 rev 37 */
/* Device-type-specific version descriptor. */
1356 if (scsi_debug_ptype == 0) {
1357 arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1358 } else if (scsi_debug_ptype == 1) {
1359 arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1361 arr[n++] = 0x20; arr[n++] = 0xe6; /* SPL-3 rev 7 */
1362 ret = fill_from_dev_buffer(scp, arr,
1363     min(alloc_len, SDEBUG_LONG_INQ_SZ));
/* Respond to REQUEST SENSE: returns either a synthetic informational-
 * exception report (when the IEC mode page has TEST set with MRIE==6)
 * or the command's current sense buffer, converted between fixed and
 * descriptor sense formats as needed to honor the DESC bit.
 * NOTE(review): several branch bodies are elided in this listing. */
1368 static int resp_requests(struct scsi_cmnd * scp,
1369     struct sdebug_dev_info * devip)
1371 unsigned char * sbuff;
1372 unsigned char *cmd = scp->cmnd;
1373 unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1374 bool dsense, want_dsense;
1377 memset(arr, 0, sizeof(arr));
/* DESC bit: caller asks for descriptor-format sense. */
1378 dsense = !!(cmd[1] & 1);
1379 want_dsense = dsense || scsi_debug_dsense;
1380 sbuff = scp->sense_buffer;
/* IEC mode page: TEST bit set and MRIE==6 -> fabricate a
 * THRESHOLD EXCEEDED report instead of real sense data. */
1381 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1384 arr[1] = 0x0; /* NO_SENSE in sense_key */
1385 arr[2] = THRESHOLD_EXCEEDED;
1386 arr[3] = 0xff; /* TEST set and MRIE==6 */
1390 arr[2] = 0x0; /* NO_SENSE in sense_key */
1391 arr[7] = 0xa; /* 18 byte sense buffer */
1392 arr[12] = THRESHOLD_EXCEEDED;
1393 arr[13] = 0xff; /* TEST set and MRIE==6 */
1396 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
/* response code >= 0x70 means valid sense is present; keep it
 * as-is when its format already matches what was requested. */
1397 if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
1398 ; /* have sense and formats match */
1399 else if (arr[0] <= 0x70) {
/* Convert fixed-format sense to descriptor format. */
1409 } else if (dsense) {
1412 arr[1] = sbuff[2]; /* sense key */
1413 arr[2] = sbuff[12]; /* asc */
1414 arr[3] = sbuff[13]; /* ascq */
/* REQUEST SENSE itself clears pending sense. */
1426 mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1427 return fill_from_dev_buffer(scp, arr, len);
/* Respond to START STOP UNIT: tracks the simulated stopped/started
 * state in devip->stopped.  A non-zero POWER CONDITION field is
 * rejected as an invalid CDB field. */
1430 static int resp_start_stop(struct scsi_cmnd * scp,
1431     struct sdebug_dev_info * devip)
1433 unsigned char *cmd = scp->cmnd;
1434 int power_cond, start;
1436 power_cond = (cmd[4] & 0xf0) >> 4;
1438 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1439 return check_condition_result;
/* Toggle only when the requested state differs from the current one. */
1442 if (start == devip->stopped)
1443 devip->stopped = !start;
/* Capacity (in logical blocks) of the simulated device: derived from
 * the virtual_gb module option when set, otherwise the size of the
 * backing RAM store. */
1447 static sector_t get_sdebug_capacity(void)
1449 if (scsi_debug_virtual_gb > 0)
/* 1073741824 == 1 GiB in bytes; divide by sector size for blocks. */
1450 return (sector_t)scsi_debug_virtual_gb *
1451     (1073741824 / scsi_debug_sector_size);
1453 return sdebug_store_sectors;
1456 #define SDEBUG_READCAP_ARR_SZ 8
/* Respond to READ CAPACITY(10): 8-byte response with the last LBA and
 * the block size, both big endian.  Capacities of 2^32 blocks or more
 * are reported as 0xffffffff (elided branch), telling the initiator to
 * use READ CAPACITY(16). */
1457 static int resp_readcap(struct scsi_cmnd * scp,
1458     struct sdebug_dev_info * devip)
1460 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1463 /* following just in case virtual_gb changed */
1464 sdebug_capacity = get_sdebug_capacity();
1465 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1466 if (sdebug_capacity < 0xffffffff) {
/* Response carries the address of the LAST block, hence - 1. */
1467 capac = (unsigned int)sdebug_capacity - 1;
1468 arr[0] = (capac >> 24);
1469 arr[1] = (capac >> 16) & 0xff;
1470 arr[2] = (capac >> 8) & 0xff;
1471 arr[3] = capac & 0xff;
1478 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1479 arr[7] = scsi_debug_sector_size & 0xff;
1480 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1483 #define SDEBUG_READCAP16_ARR_SZ 32
/* Respond to READ CAPACITY(16): 64-bit last LBA, block size, physical
 * block exponent, lowest aligned LBA, thin-provisioning (LBPME/LBPRZ)
 * and protection (P_TYPE/PROT_EN) fields. */
1484 static int resp_readcap16(struct scsi_cmnd * scp,
1485     struct sdebug_dev_info * devip)
1487 unsigned char *cmd = scp->cmnd;
1488 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1489 unsigned long long capac;
/* Allocation length from CDB bytes 10-13 (big endian). */
1492 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1494 /* following just in case virtual_gb changed */
1495 sdebug_capacity = get_sdebug_capacity();
1496 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
/* Last LBA = capacity - 1, stored big endian in bytes 0-7. */
1497 capac = sdebug_capacity - 1;
1498 for (k = 0; k < 8; ++k, capac >>= 8)
1499 arr[7 - k] = capac & 0xff;
1500 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1501 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1502 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1503 arr[11] = scsi_debug_sector_size & 0xff;
1504 arr[13] = scsi_debug_physblk_exp & 0xf;
1505 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1507 if (scsi_debug_lbp()) {
1508 arr[14] |= 0x80; /* LBPME */
1509 if (scsi_debug_lbprz)
1510 arr[14] |= 0x40; /* LBPRZ */
1513 arr[15] = scsi_debug_lowest_aligned & 0xff;
1515 if (scsi_debug_dif) {
/* dif option 1..3 maps to protection type 0..2 in P_TYPE. */
1516 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1517 arr[12] |= 1; /* PROT_EN */
1520 return fill_from_dev_buffer(scp, arr,
1521     min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1524 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
/* Respond to REPORT TARGET PORT GROUPS: two single-port groups matching
 * the two ports advertised by VPD page 0x88; group B is reported
 * unavailable. */
1526 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1527     struct sdebug_dev_info * devip)
1529 unsigned char *cmd = scp->cmnd;
1530 unsigned char * arr;
1531 int host_no = devip->sdbg_host->shost->host_no;
1532 int n, ret, alen, rlen;
1533 int port_group_a, port_group_b, port_a, port_b;
/* Allocation length from CDB bytes 6-9 (big endian). */
1535 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1538 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1540 return DID_REQUEUE << 16;
1542 * EVPD page 0x88 states we have two ports, one
1543 * real and a fake port with no device connected.
1544 * So we create two port groups with one port each
1545 * and set the group with port B to unavailable.
1547 port_a = 0x1; /* relative port A */
1548 port_b = 0x2; /* relative port B */
/* Group ids derived from host number + channel; group B offset 0x80. */
1549 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1550     (devip->channel & 0x7f);
1551 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1552     (devip->channel & 0x7f) + 0x80;
1555 * The asymmetric access state is cycled according to the host_id.
1558 if (0 == scsi_debug_vpd_use_hostno) {
1559 arr[n++] = host_no % 3; /* Asymm access state */
1560 arr[n++] = 0x0F; /* claim: all states are supported */
1562 arr[n++] = 0x0; /* Active/Optimized path */
1563 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1565 arr[n++] = (port_group_a >> 8) & 0xff;
1566 arr[n++] = port_group_a & 0xff;
1567 arr[n++] = 0; /* Reserved */
1568 arr[n++] = 0; /* Status code */
1569 arr[n++] = 0; /* Vendor unique */
1570 arr[n++] = 0x1; /* One port per group */
1571 arr[n++] = 0; /* Reserved */
1572 arr[n++] = 0; /* Reserved */
1573 arr[n++] = (port_a >> 8) & 0xff;
1574 arr[n++] = port_a & 0xff;
1575 arr[n++] = 3; /* Port unavailable */
1576 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1577 arr[n++] = (port_group_b >> 8) & 0xff;
1578 arr[n++] = port_group_b & 0xff;
1579 arr[n++] = 0; /* Reserved */
1580 arr[n++] = 0; /* Status code */
1581 arr[n++] = 0; /* Vendor unique */
1582 arr[n++] = 0x1; /* One port per group */
1583 arr[n++] = 0; /* Reserved */
1584 arr[n++] = 0; /* Reserved */
1585 arr[n++] = (port_b >> 8) & 0xff;
1586 arr[n++] = port_b & 0xff;
/* Return-data-length header (big endian) precedes the descriptors. */
1589 arr[0] = (rlen >> 24) & 0xff;
1590 arr[1] = (rlen >> 16) & 0xff;
1591 arr[2] = (rlen >> 8) & 0xff;
1592 arr[3] = rlen & 0xff;
1595 * Return the smallest value of either
1596 * - The allocated length
1597 * - The constructed command length
1598 * - The maximum array size
1601 ret = fill_from_dev_buffer(scp, arr,
1602     min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
/* Respond to REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN).
 * reporting_opts 0 lists every supported opcode (and attached
 * service-action variants) from opcode_info_arr; 1-3 describe a single
 * opcode (optionally qualified by service action) with its CDB usage
 * mask.  RCTD adds 12-byte timeout descriptors.
 * NOTE(review): several lines are elided in this listing. */
1608 resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1611 u8 reporting_opts, req_opcode, sdeb_i, supp;
1613 u32 alloc_len, a_len;
1614 int k, offset, len, errsts, count, bump, na;
1615 const struct opcode_info_t *oip;
1616 const struct opcode_info_t *r_oip;
1618 u8 *cmd = scp->cmnd;
1620 rctd = !!(cmd[2] & 0x80);
1621 reporting_opts = cmd[2] & 0x7;
1622 req_opcode = cmd[3];
1623 req_sa = get_unaligned_be16(cmd + 4);
1624 alloc_len = get_unaligned_be32(cmd + 6);
/* NOTE(review): '&&' here can never be true (a value cannot be both
 * < 4 and > 0xffff); looks like '||' was intended — confirm against
 * upstream before changing. */
1625 if (alloc_len < 4 && alloc_len > 0xffff) {
1626 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1627 return check_condition_result;
1629 if (alloc_len > 8192)
/* Small responses still get a 320-byte scratch buffer; larger ones
 * get headroom of 64 bytes past the clamped allocation length. */
1633 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL);
1635 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1637 return check_condition_result;
1639 switch (reporting_opts) {
1640 case 0: /* all commands */
1641 /* count number of commands */
1642 for (count = 0, oip = opcode_info_arr;
1643      oip->num_attached != 0xff; ++oip) {
1644 if (F_INV_OP & oip->flags)
1646 count += (oip->num_attached + 1);
/* Each descriptor is 8 bytes, or 20 with RCTD timeouts. */
1648 bump = rctd ? 20 : 8;
1649 put_unaligned_be32(count * bump, arr);
1650 for (offset = 4, oip = opcode_info_arr;
1651      oip->num_attached != 0xff && offset < a_len; ++oip) {
1652 if (F_INV_OP & oip->flags)
1654 na = oip->num_attached;
1655 arr[offset] = oip->opcode;
1656 put_unaligned_be16(oip->sa, arr + offset + 2);
1658 arr[offset + 5] |= 0x2;
1659 if (FF_SA & oip->flags)
1660 arr[offset + 5] |= 0x1;
1661 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1663 put_unaligned_be16(0xa, arr + offset + 8);
/* Emit the service-action variants attached to this opcode. */
1665 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1666 if (F_INV_OP & oip->flags)
1669 arr[offset] = oip->opcode;
1670 put_unaligned_be16(oip->sa, arr + offset + 2);
1672 arr[offset + 5] |= 0x2;
1673 if (FF_SA & oip->flags)
1674 arr[offset + 5] |= 0x1;
1675 put_unaligned_be16(oip->len_mask[0],
1678 put_unaligned_be16(0xa,
1685 case 1: /* one command: opcode only */
1686 case 2: /* one command: opcode plus service action */
1687 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1688 sdeb_i = opcode_ind_arr[req_opcode];
1689 oip = &opcode_info_arr[sdeb_i];
1690 if (F_INV_OP & oip->flags) {
1694 if (1 == reporting_opts) {
/* Mode 1 must not be used on opcodes that require a
 * service action. */
1695 if (FF_SA & oip->flags) {
1696 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1699 return check_condition_result;
1702 } else if (2 == reporting_opts &&
1703            0 == (FF_SA & oip->flags)) {
1704 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1705 kfree(arr); /* point at requested sa */
1706 return check_condition_result;
1708 if (0 == (FF_SA & oip->flags) &&
1709     req_opcode == oip->opcode)
1711 else if (0 == (FF_SA & oip->flags)) {
/* Search attached entries for the requested opcode. */
1712 na = oip->num_attached;
1713 for (k = 0, oip = oip->arrp; k < na;
1715 if (req_opcode == oip->opcode)
/* supp: 1 == not supported, 3 == supported. */
1718 supp = (k >= na) ? 1 : 3;
1719 } else if (req_sa != oip->sa) {
1720 na = oip->num_attached;
1721 for (k = 0, oip = oip->arrp; k < na;
1723 if (req_sa == oip->sa)
1726 supp = (k >= na) ? 1 : 3;
1730 u = oip->len_mask[0];
1731 put_unaligned_be16(u, arr + 2);
1732 arr[4] = oip->opcode;
/* CDB usage bitmap; bytes past the stored mask are all-ones. */
1733 for (k = 1; k < u; ++k)
1734 arr[4 + k] = (k < 16) ?
1735     oip->len_mask[k] : 0xff;
1740 arr[1] = (rctd ? 0x80 : 0) | supp;
1742 put_unaligned_be16(0xa, arr + offset);
1747 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1749 return check_condition_result;
/* Clamp to scratch size, then to the CDB allocation length. */
1751 offset = (offset < a_len) ? offset : a_len;
1752 len = (offset < alloc_len) ? offset : alloc_len;
1753 errsts = fill_from_dev_buffer(scp, arr, len);
/* Respond to REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS (MAINTENANCE
 * IN): claims support for abort task, abort task set, LU reset and
 * "I_T nexus reset".  REPD selects the extended response format. */
1759 resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1764 u8 *cmd = scp->cmnd;
1766 memset(arr, 0, sizeof(arr));
1767 repd = !!(cmd[2] & 0x80);
1768 alloc_len = get_unaligned_be32(cmd + 6);
1769 if (alloc_len < 4) {
1770 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1771 return check_condition_result;
1773 arr[0] = 0xc8; /* ATS | ATSS | LURS */
1774 arr[1] = 0x1; /* ITNRS */
/* Transfer no more than the caller's allocation length. */
1781 len = (len < alloc_len) ? len : alloc_len;
1782 return fill_from_dev_buffer(scp, arr, len);
1785 /* <<Following mode page info copied from ST318451LW>> */
1787 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1788 { /* Read-Write Error Recovery page for mode_sense */
1789 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
/* pcontrol 1 (changeable values): report all parameters as
 * non-changeable by zeroing everything after the page header. */
1792 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1794 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1795 return sizeof(err_recov_pg);
1798 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1799 { /* Disconnect-Reconnect page for mode_sense */
1800 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1801     0, 0, 0, 0, 0, 0, 0, 0};
/* Changeable-values request: zero everything after the header. */
1803 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1805 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1806 return sizeof(disconnect_pg);
1809 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1810 { /* Format device page for mode_sense */
1811 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1812     0, 0, 0, 0, 0, 0, 0, 0,
1813     0, 0, 0, 0, 0x40, 0, 0, 0};
1815 memcpy(p, format_pg, sizeof(format_pg));
/* Patch in live geometry: sectors per track and bytes per sector. */
1816 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1817 p[11] = sdebug_sectors_per & 0xff;
1818 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1819 p[13] = scsi_debug_sector_size & 0xff;
1820 if (scsi_debug_removable)
1821 p[20] |= 0x20; /* should agree with INQUIRY */
/* Changeable-values request: zero everything after the header. */
1823 memset(p + 2, 0, sizeof(format_pg) - 2);
1824 return sizeof(format_pg);
1827 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1828 { /* Caching page for mode_sense */
/* ch_* holds the changeable-bits mask (page header omitted); d_* holds
 * the default values. */
1829 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1830     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1831 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1832     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
/* N_WCE option simulates a drive with write cache disabled. */
1834 if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
1835 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
1836 memcpy(p, caching_pg, sizeof(caching_pg));
1838 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1839 else if (2 == pcontrol)
1840 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1841 return sizeof(caching_pg);
1844 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1845 { /* Control mode page for mode_sense */
/* ch_* = changeable-bits mask (header omitted); d_* = defaults. */
1846 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1848 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
/* Reflect the module's descriptor-sense option in the D_SENSE bit. */
1851 if (scsi_debug_dsense)
1852 ctrl_m_pg[2] |= 0x4;
1854 ctrl_m_pg[2] &= ~0x4;
/* ATO bit: app-tag owned by the initiator (DIF related). */
1857 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1859 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1861 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1862 else if (2 == pcontrol)
1863 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1864 return sizeof(ctrl_m_pg);
1868 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1869 { /* Informational Exceptions control mode page for mode_sense */
/* ch_* = changeable-bits mask (header omitted); d_* = defaults. */
1870 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1872 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1875 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1877 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1878 else if (2 == pcontrol)
1879 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1880 return sizeof(iec_m_pg);
1883 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1884 { /* SAS SSP mode page - short format for mode_sense */
1885 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1886     0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
/* Changeable-values request: zero everything after the header. */
1888 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1890 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1891 return sizeof(sas_sf_m_pg);
/* SAS phy control and discover mode subpage (0x19/0x01): a canned
 * two-phy descriptor list with the attached SAS addresses patched from
 * target_dev_id (port A = id+1, port B = id+2). */
1895 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1897 { /* SAS phy control and discover mode page for mode_sense */
1898 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1899     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1900     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1901     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1902     0x2, 0, 0, 0, 0, 0, 0, 0,
1903     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1904     0, 0, 0, 0, 0, 0, 0, 0,
1905     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1906     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1907     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1908     0x3, 0, 0, 0, 0, 0, 0, 0,
1909     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1910     0, 0, 0, 0, 0, 0, 0, 0,
1914 port_a = target_dev_id + 1;
1915 port_b = port_a + 1;
1916 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
/* Low 32 bits of phy 0's attached SAS address (big endian). */
1917 p[20] = (port_a >> 24);
1918 p[21] = (port_a >> 16) & 0xff;
1919 p[22] = (port_a >> 8) & 0xff;
1920 p[23] = port_a & 0xff;
/* Same field in phy 1's descriptor, 48 bytes further on. */
1921 p[48 + 20] = (port_b >> 24);
1922 p[48 + 21] = (port_b >> 16) & 0xff;
1923 p[48 + 22] = (port_b >> 8) & 0xff;
1924 p[48 + 23] = port_b & 0xff;
/* Changeable-values request: zero everything after the 4-byte
 * subpage header. */
1926 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1927 return sizeof(sas_pcd_m_pg);
1930 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1931 { /* SAS SSP shared protocol specific port mode subpage */
1932 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1933     0, 0, 0, 0, 0, 0, 0, 0,
/* Changeable-values request: zero everything after the 4-byte
 * subpage header. */
1936 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1938 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1939 return sizeof(sas_sha_m_pg);
1942 #define SDEBUG_MAX_MSENSE_SZ 256
/* Respond to MODE SENSE(6)/(10): builds the mode parameter header,
 * optional block descriptor(s), then the requested mode page(s) via
 * the resp_*_pg helpers above.  pcontrol selects current (0),
 * changeable (1) or default (2) values; saved (3) is rejected.
 * NOTE(review): several lines are elided in this listing. */
1945 resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1947 unsigned char dbd, llbaa;
1948 int pcontrol, pcode, subpcode, bd_len;
1949 unsigned char dev_spec;
1950 int k, alloc_len, msense_6, offset, len, target_dev_id;
1951 int target = scp->device->id;
1953 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1954 unsigned char *cmd = scp->cmnd;
/* DBD: caller asked to suppress block descriptors. */
1956 dbd = !!(cmd[1] & 0x8);
1957 pcontrol = (cmd[2] & 0xc0) >> 6;
1958 pcode = cmd[2] & 0x3f;
1960 msense_6 = (MODE_SENSE == cmd[0]);
/* LLBAA (16-byte block descriptor) exists only in MODE SENSE(10). */
1961 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
/* Only disk-type devices (ptype 0) get a block descriptor. */
1962 if ((0 == scsi_debug_ptype) && (0 == dbd))
1963 bd_len = llbaa ? 16 : 8;
1966 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1967 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1968 if (0x3 == pcontrol) { /* Saving values not supported */
1969 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1970 return check_condition_result;
1972 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1973     (devip->target * 1000) - 3;
1974 /* set DPOFUA bit for disks */
1975 if (0 == scsi_debug_ptype)
1976 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1986 arr[4] = 0x1; /* set LONGLBA bit */
1987 arr[7] = bd_len; /* assume 255 or less */
/* Refresh capacity in case virtual_gb changed since last use. */
1991 if ((bd_len > 0) && (!sdebug_capacity))
1992 sdebug_capacity = get_sdebug_capacity();
1995 if (sdebug_capacity > 0xfffffffe) {
/* 8-byte (short) block descriptor: 32-bit block count + length. */
2001 ap[0] = (sdebug_capacity >> 24) & 0xff;
2002 ap[1] = (sdebug_capacity >> 16) & 0xff;
2003 ap[2] = (sdebug_capacity >> 8) & 0xff;
2004 ap[3] = sdebug_capacity & 0xff;
2006 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
2007 ap[7] = scsi_debug_sector_size & 0xff;
2010 } else if (16 == bd_len) {
/* 16-byte (longlba) descriptor: 64-bit block count, 32-bit size. */
2011 unsigned long long capac = sdebug_capacity;
2013 for (k = 0; k < 8; ++k, capac >>= 8)
2014 ap[7 - k] = capac & 0xff;
2015 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
2016 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
2017 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
2018 ap[15] = scsi_debug_sector_size & 0xff;
/* Subpages are only implemented for page 0x19 (SAS). */
2023 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2024 /* TODO: Control Extension page */
2025 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2026 return check_condition_result;
2029 case 0x1: /* Read-Write error recovery page, direct access */
2030 len = resp_err_recov_pg(ap, pcontrol, target);
2033 case 0x2: /* Disconnect-Reconnect page, all devices */
2034 len = resp_disconnect_pg(ap, pcontrol, target);
2037 case 0x3: /* Format device page, direct access */
2038 len = resp_format_pg(ap, pcontrol, target);
2041 case 0x8: /* Caching page, direct access */
2042 len = resp_caching_pg(ap, pcontrol, target);
2045 case 0xa: /* Control Mode page, all devices */
2046 len = resp_ctrl_m_pg(ap, pcontrol, target);
2049 case 0x19: /* if spc==1 then sas phy, control+discover */
2050 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2051 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2052 return check_condition_result;
/* subpcode 0xff means "all subpages of this page". */
2055 if ((0x0 == subpcode) || (0xff == subpcode))
2056 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2057 if ((0x1 == subpcode) || (0xff == subpcode))
2058 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2060 if ((0x2 == subpcode) || (0xff == subpcode))
2061 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2064 case 0x1c: /* Informational Exceptions Mode page, all devices */
2065 len = resp_iec_m_pg(ap, pcontrol, target);
2068 case 0x3f: /* Read all Mode pages */
2069 if ((0 == subpcode) || (0xff == subpcode)) {
2070 len = resp_err_recov_pg(ap, pcontrol, target);
2071 len += resp_disconnect_pg(ap + len, pcontrol, target);
2072 len += resp_format_pg(ap + len, pcontrol, target);
2073 len += resp_caching_pg(ap + len, pcontrol, target);
2074 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2075 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2076 if (0xff == subpcode) {
2077 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2078     target, target_dev_id);
2079 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2081 len += resp_iec_m_pg(ap + len, pcontrol, target);
2083 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2084 return check_condition_result;
2089 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2090 return check_condition_result;
/* Mode data length: excludes itself (1 byte for msense_6, 2 for 10). */
2093 arr[0] = offset - 1;
2095 arr[0] = ((offset - 2) >> 8) & 0xff;
2096 arr[1] = (offset - 2) & 0xff;
2098 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2101 #define SDEBUG_MAX_MSELECT_SZ 512
/*
 * MODE SELECT (6 and 10 byte) response. Fetches the parameter list from
 * the data-out buffer into a local array, validates header/block-descriptor
 * lengths, then for the Caching (0x8), Control (0xa) and Informational
 * Exceptions (0x1c) mode pages copies the changeable bytes into the
 * driver-global saved page images and raises a MODE PARAMETERS CHANGED
 * unit attention. NOTE(review): this listing is non-contiguous; some
 * statements between the visible lines are elided.
 */
2104 resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2106 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2107 int param_len, res, mpage;
2108 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2109 unsigned char *cmd = scp->cmnd;
/* MODE_SELECT opcode => 6-byte CDB variant; else the 10-byte variant */
2110 int mselect6 = (MODE_SELECT == cmd[0]);
2112 memset(arr, 0, sizeof(arr));
/* parameter list length: byte 4 (6-byte CDB) or bytes 7..8 (10-byte CDB) */
2115 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
/* PF must be set, SP must be clear, and the list must fit our buffer */
2116 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2117 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2118 return check_condition_result;
2120 res = fetch_to_dev_buffer(scp, arr, param_len);
2122 return (DID_ERROR << 16);
/* short transfer is tolerated; just log it when NOISE opt is on */
2123 else if ((res < param_len) &&
2124 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2125 sdev_printk(KERN_INFO, scp->device,
2126 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2127 __func__, param_len, res);
/* mode data length and block descriptor length from the parameter header */
2128 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
2129 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
2131 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2132 return check_condition_result;
/* offset of the first mode page past header + block descriptor(s) */
2134 off = bd_len + (mselect6 ? 4 : 8);
2135 mpage = arr[off] & 0x3f;
2136 ps = !!(arr[off] & 0x80);	/* PS bit must be clear on MODE SELECT */
2138 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2139 return check_condition_result;
/* SPF bit selects the long (sub-page) page format */
2141 spf = !!(arr[off] & 0x40);
2142 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
2144 if ((pg_len + off) > param_len) {
2145 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2146 PARAMETER_LIST_LENGTH_ERR, 0);
2147 return check_condition_result;
/* only accept a page whose length byte matches our saved image */
2150 case 0x8: /* Caching Mode page */
2151 if (caching_pg[1] == arr[off + 1]) {
2152 memcpy(caching_pg + 2, arr + off + 2,
2153 sizeof(caching_pg) - 2);
2154 goto set_mode_changed_ua;
2157 case 0xa: /* Control Mode page */
2158 if (ctrl_m_pg[1] == arr[off + 1]) {
2159 memcpy(ctrl_m_pg + 2, arr + off + 2,
2160 sizeof(ctrl_m_pg) - 2);
/* D_SENSE bit controls descriptor vs fixed sense format */
2161 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
2162 goto set_mode_changed_ua;
2165 case 0x1c: /* Informational Exceptions Mode page */
2166 if (iec_m_pg[1] == arr[off + 1]) {
2167 memcpy(iec_m_pg + 2, arr + off + 2,
2168 sizeof(iec_m_pg) - 2);
2169 goto set_mode_changed_ua;
2175 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2176 return check_condition_result;
2177 set_mode_changed_ua:
/* tell this initiator (via unit attention) that mode params changed */
2178 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
/*
 * Build the Temperature log page (0x0d) payload into arr.
 * Two fixed parameters are emitted; presumably parameter 0 is the current
 * temperature (38) and parameter 1 the reference temperature (65) -- TODO
 * confirm against the log page layout. Returns the number of bytes copied.
 */
2182 static int resp_temp_l_pg(unsigned char * arr)
2184 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2185 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2188 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2189 return sizeof(temp_l_pg);
/*
 * Build the Informational Exceptions log page (0x2f) payload into arr.
 * If the TEST bit is set in the saved IEC mode page image, report a
 * THRESHOLD_EXCEEDED additional sense code in the parameter data.
 * Returns the number of bytes copied.
 */
2192 static int resp_ie_l_pg(unsigned char * arr)
2194 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2197 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2198 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2199 arr[4] = THRESHOLD_EXCEEDED;
2202 return sizeof(ie_l_pg);
2205 #define SDEBUG_MAX_LSENSE_SZ 512
2207 static int resp_log_sense(struct scsi_cmnd * scp,
2208 struct sdebug_dev_info * devip)
2210 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2211 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2212 unsigned char *cmd = scp->cmnd;
2214 memset(arr, 0, sizeof(arr));
2218 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2219 return check_condition_result;
2221 pcontrol = (cmd[2] & 0xc0) >> 6;
2222 pcode = cmd[2] & 0x3f;
2223 subpcode = cmd[3] & 0xff;
2224 alloc_len = (cmd[7] << 8) + cmd[8];
2226 if (0 == subpcode) {
2228 case 0x0: /* Supported log pages log page */
2230 arr[n++] = 0x0; /* this page */
2231 arr[n++] = 0xd; /* Temperature */
2232 arr[n++] = 0x2f; /* Informational exceptions */
2235 case 0xd: /* Temperature log page */
2236 arr[3] = resp_temp_l_pg(arr + 4);
2238 case 0x2f: /* Informational exceptions log page */
2239 arr[3] = resp_ie_l_pg(arr + 4);
2242 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2243 return check_condition_result;
2245 } else if (0xff == subpcode) {
2249 case 0x0: /* Supported log pages and subpages log page */
2252 arr[n++] = 0x0; /* 0,0 page */
2254 arr[n++] = 0xff; /* this page */
2256 arr[n++] = 0x0; /* Temperature */
2258 arr[n++] = 0x0; /* Informational exceptions */
2261 case 0xd: /* Temperature subpages */
2264 arr[n++] = 0x0; /* Temperature */
2267 case 0x2f: /* Informational exceptions subpages */
2270 arr[n++] = 0x0; /* Informational exceptions */
2274 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2275 return check_condition_result;
2278 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2279 return check_condition_result;
2281 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
2282 return fill_from_dev_buffer(scp, arr,
2283 min(len, SDEBUG_MAX_INQ_ARR_SZ));
/*
 * Validate an LBA range against the simulated medium. Rejects ranges that
 * run past the advertised capacity (LBA OUT OF RANGE) or whose transfer
 * length exceeds the backing RAM store (INVALID FIELD IN CDB). Returns
 * check_condition_result on failure; success path elided in this listing.
 */
2286 static int check_device_access_params(struct scsi_cmnd *scp,
2287 unsigned long long lba, unsigned int num)
2289 if (lba + num > sdebug_capacity) {
2290 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2291 return check_condition_result;
2293 /* transfer length excessive (tie in to block limits VPD page) */
2294 if (num > sdebug_store_sectors) {
2295 /* needs work to find which cdb byte 'num' comes from */
2296 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2297 return check_condition_result;
/*
 * Copy 'num' sectors between the command's scatter-gather list and the
 * global RAM store (fake_storep). do_write selects direction. The store
 * is smaller than the advertised capacity, so the LBA is reduced modulo
 * sdebug_store_sectors and a transfer crossing the end of the store wraps
 * around to its beginning ('rest' sectors). NOTE(review): listing is
 * non-contiguous; some lines are elided.
 */
2302 /* Returns number of bytes copied or -1 if error. */
2304 do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
2307 u64 block, rest = 0;
2308 struct scsi_data_buffer *sdb;
2309 enum dma_data_direction dir;
/* sg copy helper chosen by direction (to-buffer for writes from host) */
2310 size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
2314 sdb = scsi_out(scmd);
2315 dir = DMA_TO_DEVICE;
2316 func = sg_pcopy_to_buffer;
2318 sdb = scsi_in(scmd);
2319 dir = DMA_FROM_DEVICE;
2320 func = sg_pcopy_from_buffer;
/* direction sanity check (bidi commands are allowed either way) */
2325 if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
/* reduce lba into the store; do_div() leaves quotient in lba */
2328 block = do_div(lba, sdebug_store_sectors)
2329 if (block + num > sdebug_store_sectors)
2330 rest = block + num - sdebug_store_sectors;
2332 ret = func(sdb->table.sgl, sdb->table.nents,
2333 fake_storep + (block * scsi_debug_sector_size),
2334 (num - rest) * scsi_debug_sector_size, 0);
2335 if (ret != (num - rest) * scsi_debug_sector_size)
/* wrapped tail: continue at the start of the store */
2339 ret += func(sdb->table.sgl, sdb->table.nents,
2340 fake_storep, rest * scsi_debug_sector_size,
2341 (num - rest) * scsi_debug_sector_size);
/*
 * COMPARE AND WRITE worker. Compares 'num' sectors of the store against
 * the first half of arr; on match, copies the second half of arr into the
 * store. Handles the same wrap-around at the end of the store as
 * do_device_access() ('rest' sectors continue at offset 0). Listing is
 * non-contiguous; the early-return on compare failure is elided.
 */
2347 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2348 * arr into fake_store(lba,num) and return true. If comparison fails then
2351 comp_write_worker(u64 lba, u32 num, const u8 *arr)
2354 u64 block, rest = 0;
2355 u32 store_blks = sdebug_store_sectors;
2356 u32 lb_size = scsi_debug_sector_size;
/* reduce lba modulo the store size; wrap tail is 'rest' blocks */
2358 block = do_div(lba, store_blks);
2359 if (block + num > store_blks)
2360 rest = block + num - store_blks;
/* compare phase: contiguous part, then (if wrapped) the tail at offset 0 */
2362 res = !memcmp(fake_storep + (block * lb_size), arr,
2363 (num - rest) * lb_size);
2367 res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
/* write phase: second half of arr holds the data to be written */
2371 arr += num * lb_size;
2372 memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2374 memcpy(fake_storep, arr + ((num - rest) * lb_size),
/*
 * Compute the per-sector guard tag over 'len' bytes of 'buf'. When the
 * scsi_debug_guard module parameter is set an IP checksum is used,
 * otherwise the T10 DIF CRC. Returns the tag in big-endian form.
 */
2379 static __be16 dif_compute_csum(const void *buf, int len)
2383 if (scsi_debug_guard)
2384 csum = (__force __be16)ip_compute_csum(buf, len);
2386 csum = cpu_to_be16(crc_t10dif(buf, len));
/*
 * Verify one protection-information tuple against a sector of data:
 * guard tag (checksum over the sector), then the reference tag -- against
 * the low 32 bits of the LBA for DIF type 1, or against the expected
 * initial LBA (ei_lba) for type 2. Error paths log and (per the elided
 * returns) report distinct failure codes.
 */
2391 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2392 sector_t sector, u32 ei_lba)
2394 __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
2396 if (sdt->guard_tag != csum) {
2397 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2399 (unsigned long)sector,
2400 be16_to_cpu(sdt->guard_tag),
2404 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
2405 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2406 pr_err("%s: REF check failed on sector %lu\n",
2407 __func__, (unsigned long)sector);
2410 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2411 be32_to_cpu(sdt->ref_tag) != ei_lba) {
2412 pr_err("%s: REF check failed on sector %lu\n",
2413 __func__, (unsigned long)sector);
/*
 * Copy protection-information tuples between the command's protection
 * scatter-gather list and the global dif_storep array. 'read' selects
 * direction (store -> sgl on reads). Like the data store, dif_storep is
 * treated as circular: a run that passes dif_store_end wraps to the start
 * ('rest' bytes).
 */
2419 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2420 unsigned int sectors, bool read)
2424 const void *dif_store_end = dif_storep + sdebug_store_sectors;
2425 struct sg_mapping_iter miter;
2427 /* Bytes of protection data to copy into sgl */
2428 resid = sectors * sizeof(*dif_storep);
2430 sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2431 scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2432 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2434 while (sg_miter_next(&miter) && resid > 0) {
2435 size_t len = min(miter.length, resid);
2436 void *start = dif_store(sector);
/* portion past the end of the store wraps back to dif_storep[0] */
2439 if (dif_store_end < start + len)
2440 rest = start + len - dif_store_end;
2445 memcpy(paddr, start, len - rest);
2447 memcpy(start, paddr, len - rest);
2451 memcpy(paddr + len - rest, dif_storep, rest);
2453 memcpy(dif_storep, paddr + len - rest, rest);
/* advance the sector cursor by the tuples consumed this iteration */
2456 sector += len / sizeof(*dif_storep);
2459 sg_miter_stop(&miter);
/*
 * READ path protection check: verify each sector's stored DIF tuple, then
 * copy the protection data into the command's protection sgl. A tuple
 * whose app_tag is 0xffff is skipped (escape value: block not protected).
 * The elided lines presumably propagate a non-zero dif_verify() result to
 * the caller -- confirm against the full source.
 */
2462 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2463 unsigned int sectors, u32 ei_lba)
2466 struct sd_dif_tuple *sdt;
2469 for (i = 0; i < sectors; i++, ei_lba++) {
2472 sector = start_sec + i;
2473 sdt = dif_store(sector);
/* 0xffff app tag disables checking for this block */
2475 if (sdt->app_tag == cpu_to_be16(0xffff))
2478 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2485 dif_copy_prot(SCpnt, start_sec, sectors, true);
/*
 * READ command response (6/10/12/16/32-byte CDBs plus XDWRITEREAD(10)).
 * Decodes lba/num per CDB variant, validates protection and range,
 * optionally injects a fake MEDIUM ERROR, verifies DIF data if enabled,
 * copies data from the RAM store to the data-in buffer and finally applies
 * any configured error injections. NOTE(review): non-contiguous listing;
 * case labels and some statements are elided.
 */
2492 resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2494 u8 *cmd = scp->cmnd;
2498 unsigned long iflags;
/* READ(16): 8-byte lba at cmd[2], 4-byte count at cmd[10] */
2505 lba = get_unaligned_be64(cmd + 2);
2506 num = get_unaligned_be32(cmd + 10);
/* READ(10): 4-byte lba, 2-byte count */
2511 lba = get_unaligned_be32(cmd + 2);
2512 num = get_unaligned_be16(cmd + 7);
/* READ(6): 21-bit lba packed in cmd[1..3]; count 0 means 256 */
2517 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2518 (u32)(cmd[1] & 0x1f) << 16;
2519 num = (0 == cmd[4]) ? 256 : cmd[4];
/* READ(12): 4-byte lba, 4-byte count */
2524 lba = get_unaligned_be32(cmd + 2);
2525 num = get_unaligned_be32(cmd + 6);
2528 case XDWRITEREAD_10:
2530 lba = get_unaligned_be32(cmd + 2);
2531 num = get_unaligned_be16(cmd + 7);
2534 default: /* assume READ(32) */
2535 lba = get_unaligned_be64(cmd + 12);
2536 ei_lba = get_unaligned_be32(cmd + 20);
2537 num = get_unaligned_be32(cmd + 28);
/* DIF type 2 requires the 32-byte CDB variant */
2542 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2544 mk_sense_invalid_opcode(scp);
2545 return check_condition_result;
/* warn when RDPROTECT field is zero although DIF 1/3 is configured */
2547 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2548 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2549 (cmd[1] & 0xe0) == 0)
2550 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2553 if (sdebug_any_injecting_opt) {
2554 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2560 /* inline check_device_access_params() */
2561 if (lba + num > sdebug_capacity) {
2562 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2563 return check_condition_result;
2565 /* transfer length excessive (tie in to block limits VPD page) */
2566 if (num > sdebug_store_sectors) {
2567 /* needs work to find which cdb byte 'num' comes from */
2568 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2569 return check_condition_result;
/* MEDIUM_ERR opt: fake an unrecoverable read error inside a fixed window */
2572 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2573 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2574 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2575 /* claim unrecoverable read error */
2576 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2577 /* set info field and valid bit for fixed descriptor */
2578 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2579 scp->sense_buffer[0] |= 0x80; /* Valid bit */
2580 ret = (lba < OPT_MEDIUM_ERR_ADDR)
2581 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2582 put_unaligned_be32(ret, scp->sense_buffer + 3);
2584 scsi_set_resid(scp, scsi_bufflen(scp));
2585 return check_condition_result;
/* reader side of the global store lock */
2588 read_lock_irqsave(&atomic_rw, iflags);
2591 if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2592 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2595 read_unlock_irqrestore(&atomic_rw, iflags);
2596 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2597 return illegal_condition_result;
2601 ret = do_device_access(scp, lba, num, false);
2602 read_unlock_irqrestore(&atomic_rw, iflags);
2604 return DID_ERROR << 16;
/* report any shortfall between requested and transferred bytes */
2606 scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
/* post-transfer error injection hooks (module options) */
2608 if (sdebug_any_injecting_opt) {
2609 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2611 if (ep->inj_recovered) {
2612 mk_sense_buffer(scp, RECOVERED_ERROR,
2613 THRESHOLD_EXCEEDED, 0);
2614 return check_condition_result;
2615 } else if (ep->inj_transport) {
2616 mk_sense_buffer(scp, ABORTED_COMMAND,
2617 TRANSPORT_PROBLEM, ACK_NAK_TO);
2618 return check_condition_result;
2619 } else if (ep->inj_dif) {
2620 /* Logical block guard check failed */
2621 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2622 return illegal_condition_result;
2623 } else if (ep->inj_dix) {
2624 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2625 return illegal_condition_result;
/*
 * Debug helper: hex/ASCII dump of a sector to the kernel log, 16 bytes
 * per output line. Printable characters (0x20..0x7d) are shown as-is;
 * the non-printable branch is elided in this listing.
 */
2631 void dump_sector(unsigned char *buf, int len)
2635 pr_err(">>> Sector Dump <<<\n");
2636 for (i = 0 ; i < len ; i += 16) {
2639 for (j = 0, n = 0; j < 16; j++) {
2640 unsigned char c = buf[i+j];
2642 if (c >= 0x20 && c < 0x7e)
2643 n += scnprintf(b + n, sizeof(b) - n,
2646 n += scnprintf(b + n, sizeof(b) - n,
2649 pr_err("%04d: %s\n", i, b);
/*
 * WRITE path protection check: walk the protection sgl and the data sgl
 * in lockstep, verifying each sd_dif_tuple against its data sector; on
 * success copy the protection data into dif_storep. Two sg_mapping_iters
 * are advanced together because protection pages and data pages have
 * different granularity. Failure paths dump the offending sector and
 * stop both iterators.
 */
2653 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2654 unsigned int sectors, u32 ei_lba)
2657 struct sd_dif_tuple *sdt;
2659 sector_t sector = start_sec;
2662 struct sg_mapping_iter diter;
2663 struct sg_mapping_iter piter;
2665 BUG_ON(scsi_sg_count(SCpnt) == 0);
2666 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2668 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2669 scsi_prot_sg_count(SCpnt),
2670 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2671 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2672 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2674 /* For each protection page */
2675 while (sg_miter_next(&piter)) {
/* the data iterator must keep pace with the protection iterator */
2677 if (WARN_ON(!sg_miter_next(&diter))) {
2682 for (ppage_offset = 0; ppage_offset < piter.length;
2683 ppage_offset += sizeof(struct sd_dif_tuple)) {
2684 /* If we're at the end of the current
2685 * data page advance to the next one
2687 if (dpage_offset >= diter.length) {
2688 if (WARN_ON(!sg_miter_next(&diter))) {
2695 sdt = piter.addr + ppage_offset;
2696 daddr = diter.addr + dpage_offset;
2698 ret = dif_verify(sdt, daddr, sector, ei_lba);
2700 dump_sector(daddr, scsi_debug_sector_size);
2706 dpage_offset += scsi_debug_sector_size;
2708 diter.consumed = dpage_offset;
2709 sg_miter_stop(&diter);
2711 sg_miter_stop(&piter);
/* all tuples verified: persist protection data into the store */
2713 dif_copy_prot(SCpnt, start_sec, sectors, false);
/* error exit: stop both iterators before returning (labels elided) */
2720 sg_miter_stop(&diter);
2721 sg_miter_stop(&piter);
/*
 * Map an LBA to its index in the provisioning bitmap (map_storep).
 * When an unmap alignment is configured the LBA is biased so that index 0
 * covers the partial block before the first aligned boundary; then the
 * LBA is divided by the unmap granularity (do_div leaves the quotient in
 * lba; the return is elided in this listing).
 */
2725 static unsigned long lba_to_map_index(sector_t lba)
2727 if (scsi_debug_unmap_alignment) {
2728 lba += scsi_debug_unmap_granularity -
2729 scsi_debug_unmap_alignment;
2731 do_div(lba, scsi_debug_unmap_granularity);
/*
 * Inverse of lba_to_map_index(): first LBA covered by a bitmap index.
 * Applies the mirror-image alignment bias subtracted here because
 * lba_to_map_index() added it.
 */
2736 static sector_t map_index_to_lba(unsigned long index)
2738 sector_t lba = index * scsi_debug_unmap_granularity;
2740 if (scsi_debug_unmap_alignment) {
2741 lba -= scsi_debug_unmap_granularity -
2742 scsi_debug_unmap_alignment;
/*
 * Query the provisioning state of 'lba'. Returns whether the containing
 * unmap block is mapped and stores in *num the length of the run of
 * consecutive blocks sharing that state (found by scanning the bitmap to
 * the next opposite bit, clamped to the end of the store).
 */
2748 static unsigned int map_state(sector_t lba, unsigned int *num)
2751 unsigned int mapped;
2752 unsigned long index;
2755 index = lba_to_map_index(lba);
2756 mapped = test_bit(index, map_storep);
/* find where the current mapped/unmapped run ends */
2759 next = find_next_zero_bit(map_storep, map_size, index);
2761 next = find_next_bit(map_storep, map_size, index);
2763 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
/*
 * Mark every unmap block touched by [lba, lba+len) as mapped in the
 * provisioning bitmap. Iterates one bitmap index at a time, advancing
 * lba to the first LBA of the next block.
 */
2769 static void map_region(sector_t lba, unsigned int len)
2771 sector_t end = lba + len;
2774 unsigned long index = lba_to_map_index(lba);
2776 if (index < map_size)
2777 set_bit(index, map_storep);
2779 lba = map_index_to_lba(index + 1);
/*
 * Clear the provisioning bit for every unmap block FULLY contained in
 * [lba, lba+len) (partial blocks at the edges stay mapped). When the
 * LBPRZ behaviour is configured, zero-fill the backing data for the
 * block, and when protection info is present, poison the corresponding
 * dif_storep tuples with 0xff (the app-tag escape).
 */
2783 static void unmap_region(sector_t lba, unsigned int len)
2785 sector_t end = lba + len;
2788 unsigned long index = lba_to_map_index(lba);
/* only whole, aligned granularity blocks are actually unmapped */
2790 if (lba == map_index_to_lba(index) &&
2791 lba + scsi_debug_unmap_granularity <= end &&
2793 clear_bit(index, map_storep);
2794 if (scsi_debug_lbprz) {
2795 memset(fake_storep +
2796 lba * scsi_debug_sector_size, 0,
2797 scsi_debug_sector_size *
2798 scsi_debug_unmap_granularity);
/* 0xff app tag disables DIF checking for the unmapped blocks */
2801 memset(dif_storep + lba, 0xff,
2802 sizeof(*dif_storep) *
2803 scsi_debug_unmap_granularity);
2806 lba = map_index_to_lba(index + 1);
/*
 * WRITE command response (6/10/12/16/32-byte CDBs plus XDWRITEREAD(10)).
 * Mirrors resp_read_dt0(): decode lba/num per CDB variant, validate
 * protection settings and the LBA range, verify incoming DIF data if
 * enabled, copy data into the RAM store (marking the region mapped when
 * logical block provisioning is active) and apply error injections.
 * NOTE(review): non-contiguous listing; case labels and some lines elided.
 */
2811 resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2813 u8 *cmd = scp->cmnd;
2817 unsigned long iflags;
/* WRITE(16) */
2824 lba = get_unaligned_be64(cmd + 2);
2825 num = get_unaligned_be32(cmd + 10);
/* WRITE(10) */
2830 lba = get_unaligned_be32(cmd + 2);
2831 num = get_unaligned_be16(cmd + 7);
/* WRITE(6): 21-bit lba; count 0 means 256 */
2836 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2837 (u32)(cmd[1] & 0x1f) << 16;
2838 num = (0 == cmd[4]) ? 256 : cmd[4];
/* WRITE(12) */
2843 lba = get_unaligned_be32(cmd + 2);
2844 num = get_unaligned_be32(cmd + 6);
2847 case 0x53: /* XDWRITEREAD(10) */
2849 lba = get_unaligned_be32(cmd + 2);
2850 num = get_unaligned_be16(cmd + 7);
2853 default: /* assume WRITE(32) */
2854 lba = get_unaligned_be64(cmd + 12);
2855 ei_lba = get_unaligned_be32(cmd + 20);
2856 num = get_unaligned_be32(cmd + 28);
/* DIF type 2 requires the 32-byte CDB variant */
2861 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2863 mk_sense_invalid_opcode(scp);
2864 return check_condition_result;
/* warn when WRPROTECT field is zero although DIF 1/3 is configured */
2866 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2867 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2868 (cmd[1] & 0xe0) == 0)
2869 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2873 /* inline check_device_access_params() */
2874 if (lba + num > sdebug_capacity) {
2875 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2876 return check_condition_result;
2878 /* transfer length excessive (tie in to block limits VPD page) */
2879 if (num > sdebug_store_sectors) {
2880 /* needs work to find which cdb byte 'num' comes from */
2881 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2882 return check_condition_result;
/* writer side of the global store lock */
2885 write_lock_irqsave(&atomic_rw, iflags);
2888 if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2889 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2892 write_unlock_irqrestore(&atomic_rw, iflags);
2893 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2894 return illegal_condition_result;
2898 ret = do_device_access(scp, lba, num, true);
2899 if (scsi_debug_lbp())
2900 map_region(lba, num);
2901 write_unlock_irqrestore(&atomic_rw, iflags);
2903 return (DID_ERROR << 16);
/* short transfer is tolerated; just log it when NOISE opt is on */
2904 else if ((ret < (num * scsi_debug_sector_size)) &&
2905 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2906 sdev_printk(KERN_INFO, scp->device,
2907 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2908 my_name, num * scsi_debug_sector_size, ret);
/* post-transfer error injection hooks (module options) */
2910 if (sdebug_any_injecting_opt) {
2911 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2913 if (ep->inj_recovered) {
2914 mk_sense_buffer(scp, RECOVERED_ERROR,
2915 THRESHOLD_EXCEEDED, 0);
2916 return check_condition_result;
2917 } else if (ep->inj_dif) {
2918 /* Logical block guard check failed */
2919 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2920 return illegal_condition_result;
2921 } else if (ep->inj_dix) {
2922 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2923 return illegal_condition_result;
/*
 * Common worker for WRITE SAME(10/16). Under the store write lock:
 * if the UNMAP bit was set and provisioning is active, unmap the region;
 * otherwise obtain the single source block (all zeroes when NDOB is set,
 * else fetched from the data-out buffer), replicate it across the
 * remaining blocks and mark the region mapped.
 */
2930 resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
2931 bool unmap, bool ndob)
2933 unsigned long iflags;
2934 unsigned long long i;
2937 ret = check_device_access_params(scp, lba, num);
2941 write_lock_irqsave(&atomic_rw, iflags);
2943 if (unmap && scsi_debug_lbp()) {
2944 unmap_region(lba, num);
2948 /* if ndob then zero 1 logical block, else fetch 1 logical block */
2950 memset(fake_storep + (lba * scsi_debug_sector_size), 0,
2951 scsi_debug_sector_size);
2954 ret = fetch_to_dev_buffer(scp, fake_storep +
2955 (lba * scsi_debug_sector_size),
2956 scsi_debug_sector_size);
2959 write_unlock_irqrestore(&atomic_rw, iflags);
2960 return (DID_ERROR << 16);
2961 } else if ((ret < (num * scsi_debug_sector_size)) &&
2962 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2963 sdev_printk(KERN_INFO, scp->device,
2964 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2965 my_name, "write same",
2966 num * scsi_debug_sector_size, ret);
2968 /* Copy first sector to remaining blocks */
2969 for (i = 1 ; i < num ; i++)
2970 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2971 fake_storep + (lba * scsi_debug_sector_size),
2972 scsi_debug_sector_size);
2974 if (scsi_debug_lbp())
2975 map_region(lba, num);
2977 write_unlock_irqrestore(&atomic_rw, iflags);
/*
 * WRITE SAME(10) CDB decode: reject the UNMAP bit when LBPWS10 support
 * is not advertised, bound the block count by the module's write_same
 * length limit, then delegate to resp_write_same() (NDOB never set for
 * the 10-byte variant).
 */
2983 resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2985 u8 *cmd = scp->cmnd;
2992 if (scsi_debug_lbpws10 == 0) {
2993 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2994 return check_condition_result;
2998 lba = get_unaligned_be32(cmd + 2);
2999 num = get_unaligned_be16(cmd + 7);
3000 if (num > scsi_debug_write_same_length) {
3001 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3002 return check_condition_result;
3004 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
/*
 * WRITE SAME(16) CDB decode: honours both the UNMAP bit (only when LBPWS
 * is advertised) and the NDOB bit (no data-out buffer, write zeroes),
 * bounds the block count and delegates to resp_write_same().
 */
3008 resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3010 u8 *cmd = scp->cmnd;
3017 if (cmd[1] & 0x8) { /* UNMAP */
3018 if (scsi_debug_lbpws == 0) {
3019 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3020 return check_condition_result;
3024 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3026 lba = get_unaligned_be64(cmd + 2);
3027 num = get_unaligned_be32(cmd + 10);
3028 if (num > scsi_debug_write_same_length) {
3029 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3030 return check_condition_result;
3032 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
/*
 * COMPARE AND WRITE(0x89) response. The data-out buffer carries both the
 * verify data and the write data (dnum = 2*num blocks, presumably -- the
 * assignment is elided in this listing). Both halves are pulled into a
 * temporary array by temporarily pointing fake_storep at it, then
 * comp_write_worker() does the compare-then-write against the real store.
 */
3036 resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3038 u8 *cmd = scp->cmnd;
3040 u8 *fake_storep_hold;
3043 u32 lb_size = scsi_debug_sector_size;
3045 unsigned long iflags;
3049 lba = get_unaligned_be64(cmd + 2);
3050 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3052 return 0; /* degenerate case, not an error */
3053 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3055 mk_sense_invalid_opcode(scp);
3056 return check_condition_result;
3058 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3059 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3060 (cmd[1] & 0xe0) == 0)
3061 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3064 /* inline check_device_access_params() */
3065 if (lba + num > sdebug_capacity) {
3066 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3067 return check_condition_result;
3069 /* transfer length excessive (tie in to block limits VPD page) */
3070 if (num > sdebug_store_sectors) {
3071 /* needs work to find which cdb byte 'num' comes from */
3072 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3073 return check_condition_result;
/* GFP_ATOMIC: may run in interrupt context */
3076 arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3078 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3080 return check_condition_result;
3083 write_lock_irqsave(&atomic_rw, iflags);
3085 /* trick do_device_access() to fetch both compare and write buffers
3086 * from data-in into arr. Safe (atomic) since write_lock held. */
3087 fake_storep_hold = fake_storep;
3089 ret = do_device_access(scp, 0, dnum, true);
3090 fake_storep = fake_storep_hold;
3092 retval = DID_ERROR << 16;
3094 } else if ((ret < (dnum * lb_size)) &&
3095 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3096 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3097 "indicated=%u, IO sent=%d bytes\n", my_name,
3098 dnum * lb_size, ret);
3099 if (!comp_write_worker(lba, num, arr)) {
/* compare failed: MISCOMPARE sense per SBC */
3100 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3101 retval = check_condition_result;
3104 if (scsi_debug_lbp())
3105 map_region(lba, num);
3107 write_unlock_irqrestore(&atomic_rw, iflags);
/* On-the-wire UNMAP block descriptor (members elided in this listing). */
3112 struct unmap_block_desc {
/*
 * UNMAP command response. Copies the parameter list into a temporary
 * buffer, validates its internal lengths against the CDB-declared payload
 * length, then under the store write lock unmaps each descriptor's LBA
 * range after range-checking it.
 */
3119 resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3122 struct unmap_block_desc *desc;
3123 unsigned int i, payload_len, descriptors;
3125 unsigned long iflags;
3128 if (!scsi_debug_lbp())
3129 return 0; /* fib and say its done */
3130 payload_len = get_unaligned_be16(scp->cmnd + 7);
3131 BUG_ON(scsi_bufflen(scp) != payload_len);
/* 8-byte parameter header, 16 bytes per descriptor */
3133 descriptors = (payload_len - 8) / 16;
3134 if (descriptors > scsi_debug_unmap_max_desc) {
3135 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3136 return check_condition_result;
/* GFP_ATOMIC: may run in interrupt context */
3139 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3141 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3143 return check_condition_result;
3146 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
/* the list's own length fields must agree with the CDB */
3148 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3149 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3151 desc = (void *)&buf[8];
3153 write_lock_irqsave(&atomic_rw, iflags);
3155 for (i = 0 ; i < descriptors ; i++) {
3156 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3157 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3159 ret = check_device_access_params(scp, lba, num);
3163 unmap_region(lba, num);
3169 write_unlock_irqrestore(&atomic_rw, iflags);
3175 #define SDEBUG_GET_LBA_STATUS_LEN 32
/*
 * GET LBA STATUS response: reports, for the starting LBA, one descriptor
 * with its mapped/deallocated state and the length of the run sharing
 * that state. When provisioning is off, the whole remainder of the
 * capacity is reported (as one fully-mapped extent, presumably -- the
 * branch structure is partly elided).
 */
3178 resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3180 u8 *cmd = scp->cmnd;
3182 u32 alloc_len, mapped, num;
3183 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3186 lba = get_unaligned_be64(cmd + 2);
3187 alloc_len = get_unaligned_be32(cmd + 10);
3192 ret = check_device_access_params(scp, lba, 1);
3196 if (scsi_debug_lbp())
3197 mapped = map_state(lba, &num);
3200 /* following just in case virtual_gb changed */
3201 sdebug_capacity = get_sdebug_capacity();
3202 if (sdebug_capacity - lba <= 0xffffffff)
3203 num = sdebug_capacity - lba;
3208 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3209 put_unaligned_be32(20, arr); /* Parameter Data Length */
3210 put_unaligned_be64(lba, arr + 8); /* LBA */
3211 put_unaligned_be32(num, arr + 16); /* Number of blocks */
3212 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
3214 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3217 #define SDEBUG_RLUN_ARR_SZ 256
/*
 * REPORT LUNS response. Builds a LUN list for 0..max_luns-1 (optionally
 * starting at 1 when no_lun_0 is set), appending the well-known REPORT
 * LUNS LUN when select_report requests well-known LUNs. The list is
 * capped by the local array size. Listing is non-contiguous.
 */
3219 static int resp_report_luns(struct scsi_cmnd * scp,
3220 struct sdebug_dev_info * devip)
3222 unsigned int alloc_len;
3223 int lun_cnt, i, upper, num, n, want_wlun, shortish;
3225 unsigned char *cmd = scp->cmnd;
3226 int select_report = (int)cmd[2];
3227 struct scsi_lun *one_lun;
3228 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
3229 unsigned char * max_addr;
3231 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
/* allocation length below 4 or select_report above 2 is invalid */
3232 shortish = (alloc_len < 4);
3233 if (shortish || (select_report > 2)) {
3234 mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
3235 return check_condition_result;
3237 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
3238 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
3239 lun_cnt = scsi_debug_max_luns;
3240 if (1 == select_report)
3242 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
3244 want_wlun = (select_report > 0) ? 1 : 0;
3245 num = lun_cnt + want_wlun;
/* LUN list length header (bytes, big-endian in arr[2..3]) */
3246 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
3247 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
3248 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
3249 sizeof(struct scsi_lun)), num);
3254 one_lun = (struct scsi_lun *) &arr[8];
3255 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
3256 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
3257 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
/* flat-space addressing: 6-bit upper in byte 0, low byte in byte 1 */
3259 upper = (lun >> 8) & 0x3f;
3261 one_lun[i].scsi_lun[0] =
3262 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
3263 one_lun[i].scsi_lun[1] = lun & 0xff;
/* optionally append the REPORT LUNS well-known LUN */
3266 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
3267 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
3270 alloc_len = (unsigned char *)(one_lun + i) - arr;
3271 return fill_from_dev_buffer(scp, arr,
3272 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
/*
 * XDWRITEREAD helper: XOR the just-read data (in the bidi data-in sgl)
 * with the data-out payload, in place, walking the data-in sgl with an
 * atomic sg_mapping_iter. Uses a temporary linear copy of the data-out
 * buffer (the author notes this is undesirable).
 */
3275 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3276 unsigned int num, struct sdebug_dev_info *devip)
3279 unsigned char *kaddr, *buf;
3280 unsigned int offset;
3281 struct scsi_data_buffer *sdb = scsi_in(scp);
3282 struct sg_mapping_iter miter;
3284 /* better not to use temporary buffer. */
3285 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3287 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3289 return check_condition_result;
3292 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3295 sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3296 SG_MITER_ATOMIC | SG_MITER_TO_SG);
3298 while (sg_miter_next(&miter)) {
/* XOR each mapped chunk against the matching slice of buf */
3300 for (j = 0; j < miter.length; j++)
3301 *(kaddr + j) ^= *(buf + offset + j);
3303 offset += miter.length;
3305 sg_miter_stop(&miter);
/*
 * XDWRITEREAD(10) response: requires a bidirectional command; performs
 * the read phase, then (unless DISABLE_WRITE is set) the write phase,
 * then XORs the results via resp_xdwriteread().
 */
3312 resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3314 u8 *cmd = scp->cmnd;
3319 if (!scsi_bidi_cmnd(scp)) {
3320 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3322 return check_condition_result;
3324 errsts = resp_read_dt0(scp, devip);
3327 if (!(cmd[1] & 0x4)) { /* DISABLE_WRITE is not set */
3328 errsts = resp_write_dt0(scp, devip);
3332 lba = get_unaligned_be32(cmd + 2);
3333 num = get_unaligned_be16(cmd + 7);
3334 return resp_xdwriteread(scp, lba, num, devip);
/*
 * Deferred-completion handler (timer/tasklet): look up the queued command
 * by index, clear its slot under queued_arr_lock, shrink the retired
 * max-queue watermark if the user lowered max_queue, then invoke the
 * mid-layer done callback. NOTE(review): this is nearly identical to
 * sdebug_q_cmd_hrt_complete() below -- candidate for a shared helper.
 */
3337 /* When timer or tasklet goes off this function is called. */
3338 static void sdebug_q_cmd_complete(unsigned long indx)
3342 unsigned long iflags;
3343 struct sdebug_queued_cmd *sqcp;
3344 struct scsi_cmnd *scp;
3345 struct sdebug_dev_info *devip;
3347 atomic_inc(&sdebug_completions);
/* defensive: index must address a valid queued_arr slot */
3349 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3350 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3353 spin_lock_irqsave(&queued_arr_lock, iflags);
3354 sqcp = &queued_arr[qa_indx];
3357 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3358 pr_err("%s: scp is NULL\n", __func__);
3361 devip = (struct sdebug_dev_info *)scp->device->hostdata;
3363 atomic_dec(&devip->num_in_q);
3365 pr_err("%s: devip=NULL\n", __func__);
3366 if (atomic_read(&retired_max_queue) > 0)
3369 sqcp->a_cmnd = NULL;
3370 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3371 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3372 pr_err("%s: Unexpected completion\n", __func__);
3376 if (unlikely(retiring)) { /* user has reduced max_queue */
3379 retval = atomic_read(&retired_max_queue);
3380 if (qa_indx >= retval) {
3381 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3382 pr_err("%s: index %d too large\n", __func__, retval);
/* drop the watermark once no in-use slot remains above max_queue */
3385 k = find_last_bit(queued_in_use_bm, retval);
3386 if ((k < scsi_debug_max_queue) || (k == retval))
3387 atomic_set(&retired_max_queue, 0);
3389 atomic_set(&retired_max_queue, k + 1);
3391 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3392 scp->scsi_done(scp); /* callback to mid level */
/*
 * High-resolution-timer variant of the deferred-completion handler.
 * Same slot-teardown and retired-max-queue logic as
 * sdebug_q_cmd_complete(), but the index comes from the containing
 * sdebug_hrtimer and the function returns HRTIMER_NORESTART.
 * NOTE(review): near-duplicate of sdebug_q_cmd_complete().
 */
3395 /* When high resolution timer goes off this function is called. */
3396 static enum hrtimer_restart
3397 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3401 unsigned long iflags;
3402 struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
3403 struct sdebug_queued_cmd *sqcp;
3404 struct scsi_cmnd *scp;
3405 struct sdebug_dev_info *devip;
3407 atomic_inc(&sdebug_completions);
3408 qa_indx = sd_hrtp->qa_indx;
/* defensive: index must address a valid queued_arr slot */
3409 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3410 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3413 spin_lock_irqsave(&queued_arr_lock, iflags);
3414 sqcp = &queued_arr[qa_indx];
3417 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3418 pr_err("%s: scp is NULL\n", __func__);
3421 devip = (struct sdebug_dev_info *)scp->device->hostdata;
3423 atomic_dec(&devip->num_in_q);
3425 pr_err("%s: devip=NULL\n", __func__);
3426 if (atomic_read(&retired_max_queue) > 0)
3429 sqcp->a_cmnd = NULL;
3430 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3431 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3432 pr_err("%s: Unexpected completion\n", __func__);
3436 if (unlikely(retiring)) { /* user has reduced max_queue */
3439 retval = atomic_read(&retired_max_queue);
3440 if (qa_indx >= retval) {
3441 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3442 pr_err("%s: index %d too large\n", __func__, retval);
/* drop the watermark once no in-use slot remains above max_queue */
3445 k = find_last_bit(queued_in_use_bm, retval);
3446 if ((k < scsi_debug_max_queue) || (k == retval))
3447 atomic_set(&retired_max_queue, 0);
3449 atomic_set(&retired_max_queue, k + 1);
3451 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3452 scp->scsi_done(scp); /* callback to mid level */
3454 return HRTIMER_NORESTART;
/*
 * Allocate a zeroed per-device info struct and append it to the host's
 * dev_info_list.  'flags' is the gfp mask (callers pass GFP_ATOMIC from
 * atomic context).  NOTE(review): the NULL-check branch and the return
 * statement are elided from this listing.
 */
3457 static struct sdebug_dev_info *
3458 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
3460 struct sdebug_dev_info *devip;
3462 devip = kzalloc(sizeof(*devip), flags);
3464 devip->sdbg_host = sdbg_host;
3465 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
/*
 * Find (or lazily create) the sdebug_dev_info for a scsi_device.  First
 * tries the cached sdev->hostdata, then scans the host's device list for
 * an entry matching <channel, target, lun>, reusing a free ("!used") slot
 * if one exists; otherwise allocates a fresh one with GFP_ATOMIC.  A new
 * or reused entry gets a power-on unit attention (SDEBUG_UA_POR) queued.
 * NOTE(review): listing elided -- early returns and break statements
 * inside the scan loop are missing from view.
 */
3470 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
3472 struct sdebug_host_info * sdbg_host;
3473 struct sdebug_dev_info * open_devip = NULL;
3474 struct sdebug_dev_info * devip =
3475 (struct sdebug_dev_info *)sdev->hostdata;
/* host private data holds a pointer to our sdebug_host_info */
3479 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3481 pr_err("%s: Host info NULL\n", __func__);
3484 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3485 if ((devip->used) && (devip->channel == sdev->channel) &&
3486 (devip->target == sdev->id) &&
3487 (devip->lun == sdev->lun))
/* remember the first unused slot for possible reuse */
3490 if ((!devip->used) && (!open_devip))
3494 if (!open_devip) { /* try and make a new one */
3495 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3497 printk(KERN_ERR "%s: out of memory at line %d\n",
3498 __func__, __LINE__);
/* (re)initialize the slot for this <channel, target, lun> */
3503 open_devip->channel = sdev->channel;
3504 open_devip->target = sdev->id;
3505 open_devip->lun = sdev->lun;
3506 open_devip->sdbg_host = sdbg_host;
3507 atomic_set(&open_devip->num_in_q, 0);
/* report a POWER ON unit attention on first use */
3508 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3509 open_devip->used = true;
/*
 * scsi_host_template->slave_alloc hook: optionally logs the new device's
 * address and marks its request queue as bidirectional-capable.
 */
3513 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3515 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3516 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
3517 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
/* allow bidirectional (BIDI) commands on this queue */
3518 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
/*
 * scsi_host_template->slave_configure hook: binds the per-device
 * sdebug_dev_info to sdp->hostdata and tunes queue limits.  Returning 1
 * (devInfoReg failure path) makes the mid-layer mark the device offline.
 */
3522 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3524 struct sdebug_dev_info *devip;
3526 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3527 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
3528 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3529 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3530 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
3531 devip = devInfoReg(sdp);
3533 return 1; /* no resources, will be marked offline */
3534 sdp->hostdata = devip;
/* -1U: effectively no per-segment size limit for this fake device */
3535 blk_queue_max_segment_size(sdp->request_queue, -1U);
3536 if (scsi_debug_no_uld)
3537 sdp->no_uld_attach = 1;
/*
 * scsi_host_template->slave_destroy hook: unbinds hostdata and marks the
 * sdebug_dev_info slot unused so devInfoReg() can recycle it.
 */
3541 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3543 struct sdebug_dev_info *devip =
3544 (struct sdebug_dev_info *)sdp->hostdata;
3546 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3547 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
3548 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3550 /* make this slot available for re-use */
3551 devip->used = false;
3552 sdp->hostdata = NULL;
3556 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
/*
 * Scan the in-use queue slots for 'cmnd'; if found, detach it from its
 * slot, decrement the device's in-flight count, and cancel whichever
 * delay mechanism was armed (hrtimer for ndelay > 0, timer for delay > 0,
 * tasklet for delay < 0).  Cancellation happens after dropping
 * queued_arr_lock.  NOTE(review): listing elided -- the cancellation
 * calls and both return statements are partially missing from view.
 */
3557 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
3559 unsigned long iflags;
3560 int k, qmax, r_qmax;
3561 struct sdebug_queued_cmd *sqcp;
3562 struct sdebug_dev_info *devip;
3564 spin_lock_irqsave(&queued_arr_lock, iflags);
3565 qmax = scsi_debug_max_queue;
/* include retired slots still in flight beyond the new max_queue */
3566 r_qmax = atomic_read(&retired_max_queue);
3569 for (k = 0; k < qmax; ++k) {
3570 if (test_bit(k, queued_in_use_bm)) {
3571 sqcp = &queued_arr[k];
3572 if (cmnd == sqcp->a_cmnd) {
3573 devip = (struct sdebug_dev_info *)
3574 cmnd->device->hostdata;
3576 atomic_dec(&devip->num_in_q);
3577 sqcp->a_cmnd = NULL;
3578 spin_unlock_irqrestore(&queued_arr_lock,
/* cancel the armed delay mechanism, chosen by module params */
3580 if (scsi_debug_ndelay > 0) {
3583 &sqcp->sd_hrtp->hrt);
3584 } else if (scsi_debug_delay > 0) {
3585 if (sqcp->cmnd_timerp)
3588 } else if (scsi_debug_delay < 0) {
3590 tasklet_kill(sqcp->tletp);
3592 clear_bit(k, queued_in_use_bm);
3597 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3601 /* Deletes (stops) timers or tasklets of all queued commands */
/*
 * Like stop_queued_cmnd() but for every in-use slot (used by bus/host
 * reset paths).  The lock is dropped around each cancellation and
 * re-taken before continuing the scan -- safe because the slot's bit is
 * cleared while unlocked only for the entry being torn down.
 * NOTE(review): listing elided -- cancellation calls are partially
 * missing from view.
 */
3602 static void stop_all_queued(void)
3604 unsigned long iflags;
3606 struct sdebug_queued_cmd *sqcp;
3607 struct sdebug_dev_info *devip;
3609 spin_lock_irqsave(&queued_arr_lock, iflags);
3610 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3611 if (test_bit(k, queued_in_use_bm)) {
3612 sqcp = &queued_arr[k];
3614 devip = (struct sdebug_dev_info *)
3615 sqcp->a_cmnd->device->hostdata;
3617 atomic_dec(&devip->num_in_q);
3618 sqcp->a_cmnd = NULL;
/* drop the lock while cancelling; timers may spin on it */
3619 spin_unlock_irqrestore(&queued_arr_lock,
3621 if (scsi_debug_ndelay > 0) {
3624 &sqcp->sd_hrtp->hrt);
3625 } else if (scsi_debug_delay > 0) {
3626 if (sqcp->cmnd_timerp)
3629 } else if (scsi_debug_delay < 0) {
3631 tasklet_kill(sqcp->tletp);
3633 clear_bit(k, queued_in_use_bm);
3634 spin_lock_irqsave(&queued_arr_lock, iflags);
3638 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3641 /* Free queued command memory on heap */
/*
 * Module-teardown helper: frees the lazily-allocated timer, tasklet and
 * hrtimer wrappers attached to each queue slot and NULLs the pointers so
 * a later reload starts clean.  NOTE(review): the tasklet kfree lines
 * are elided from this listing.
 */
3642 static void free_all_queued(void)
3644 unsigned long iflags;
3646 struct sdebug_queued_cmd *sqcp;
3648 spin_lock_irqsave(&queued_arr_lock, iflags);
3649 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3650 sqcp = &queued_arr[k];
3651 kfree(sqcp->cmnd_timerp);
3652 sqcp->cmnd_timerp = NULL;
3655 kfree(sqcp->sd_hrtp);
3656 sqcp->sd_hrtp = NULL;
3658 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/*
 * Error-handler abort hook: cancels the command's pending delayed
 * completion (if queued).  NOTE(review): the num_aborts bump and the
 * SUCCESS return are elided from this listing.
 */
3661 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3665 if (SCpnt->device &&
3666 (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3667 sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
3669 stop_queued_cmnd(SCpnt);
/*
 * Error-handler device-reset hook: simulates a LU reset by queueing a
 * POWER ON unit attention on the device.  NOTE(review): counter bump and
 * return value are elided from this listing.
 */
3674 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3676 struct sdebug_dev_info * devip;
3679 if (SCpnt && SCpnt->device) {
3680 struct scsi_device *sdp = SCpnt->device;
3682 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3683 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3684 devip = devInfoReg(sdp);
3686 set_bit(SDEBUG_UA_POR, devip->uas_bm);
/*
 * Error-handler target-reset hook: flags a BUS RESET unit attention on
 * every device of this host that shares the command's target id, then
 * optionally reports how many were hit.  NOTE(review): listing elided --
 * guard checks, the device counter increment and the return are missing
 * from view.
 */
3691 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3693 struct sdebug_host_info *sdbg_host;
3694 struct sdebug_dev_info *devip;
3695 struct scsi_device *sdp;
3696 struct Scsi_Host *hp;
3699 ++num_target_resets;
3702 sdp = SCpnt->device;
3705 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3706 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3710 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3712 list_for_each_entry(devip,
3713 &sdbg_host->dev_info_list,
3715 if (devip->target == sdp->id) {
3716 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3720 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3721 sdev_printk(KERN_INFO, sdp,
3722 "%s: %d device(s) found in target\n", __func__, k);
/*
 * Error-handler bus-reset hook: flags a BUS RESET unit attention on
 * every device attached to the command's host.  NOTE(review): listing
 * elided -- the counter increment and return value are missing from
 * view.
 */
3727 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3729 struct sdebug_host_info *sdbg_host;
3730 struct sdebug_dev_info *devip;
3731 struct scsi_device * sdp;
3732 struct Scsi_Host * hp;
3736 if (!(SCpnt && SCpnt->device))
3738 sdp = SCpnt->device;
3739 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3740 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3743 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3745 list_for_each_entry(devip,
3746 &sdbg_host->dev_info_list,
3748 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3753 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3754 sdev_printk(KERN_INFO, sdp,
3755 "%s: %d device(s) found in host\n", __func__, k);
/*
 * Error-handler host-reset hook: walks every simulated host (under
 * sdebug_host_list_lock) and flags a BUS RESET unit attention on all of
 * their devices.  NOTE(review): listing elided -- counter increment,
 * stop_all_queued() call and return are missing from view.
 */
3760 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3762 struct sdebug_host_info * sdbg_host;
3763 struct sdebug_dev_info *devip;
3767 if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3768 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3769 spin_lock(&sdebug_host_list_lock);
3770 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3771 list_for_each_entry(devip, &sdbg_host->dev_info_list,
3773 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3777 spin_unlock(&sdebug_host_list_lock);
3779 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3780 sdev_printk(KERN_INFO, SCpnt->device,
3781 "%s: %d device(s) found\n", __func__, k);
/*
 * Write a classic DOS (MBR) partition table into the first sector of the
 * simulated RAM store: scsi_debug_num_parts equal-sized type-0x83
 * partitions, cylinder-aligned via heads*sectors rounding, starting after
 * the first "track".  Requires at least 1 MiB of store; does nothing
 * otherwise.  NOTE(review): listing elided -- the early return and the
 * 0xAA signature byte at ramp[511] are missing from view.
 */
3785 static void __init sdebug_build_parts(unsigned char *ramp,
3786 unsigned long store_size)
3788 struct partition * pp;
3789 int starts[SDEBUG_MAX_PARTS + 2];
3790 int sectors_per_part, num_sectors, k;
3791 int heads_by_sects, start_sec, end_sec;
3793 /* assume partition table already zeroed */
3794 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
3796 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
3797 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
3798 pr_warn("%s: reducing partitions to %d\n", __func__,
/* carve the store (minus the first track) into equal parts */
3801 num_sectors = (int)sdebug_store_sectors;
3802 sectors_per_part = (num_sectors - sdebug_sectors_per)
3803 / scsi_debug_num_parts;
3804 heads_by_sects = sdebug_heads * sdebug_sectors_per;
3805 starts[0] = sdebug_sectors_per;
/* round each start down to a cylinder boundary */
3806 for (k = 1; k < scsi_debug_num_parts; ++k)
3807 starts[k] = ((k * sectors_per_part) / heads_by_sects)
3809 starts[scsi_debug_num_parts] = num_sectors;
3810 starts[scsi_debug_num_parts + 1] = 0; /* sentinel ends the loop below */
3812 ramp[510] = 0x55; /* magic partition markings */
/* the 4-entry partition table lives at MBR offset 0x1be */
3814 pp = (struct partition *)(ramp + 0x1be);
3815 for (k = 0; starts[k + 1]; ++k, ++pp) {
3816 start_sec = starts[k];
3817 end_sec = starts[k + 1] - 1;
/* CHS fields for the start of the partition */
3820 pp->cyl = start_sec / heads_by_sects;
3821 pp->head = (start_sec - (pp->cyl * heads_by_sects))
3822 / sdebug_sectors_per;
3823 pp->sector = (start_sec % sdebug_sectors_per) + 1;
/* CHS fields for the (inclusive) end of the partition */
3825 pp->end_cyl = end_sec / heads_by_sects;
3826 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3827 / sdebug_sectors_per;
3828 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
/* LBA start/length, little-endian on disk */
3830 pp->start_sect = cpu_to_le32(start_sec);
3831 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3832 pp->sys_ind = 0x83; /* plain Linux partition */
/*
 * Core response scheduler.  Either completes 'cmnd' immediately in the
 * caller's thread (delta_jiff == 0, or queue-full fallback) or claims a
 * slot in queued_arr[] and arms the configured delay mechanism:
 *   delta_jiff > 0        -> kernel timer (jiffies resolution)
 *   scsi_debug_ndelay > 0 -> hrtimer (nanosecond resolution)
 *   otherwise (delay < 0) -> tasklet (immediate, softirq context)
 * Returns 0 or SCSI_MLQUEUE_HOST_BUSY.
 * NOTE(review): as visible here, 'sdp = cmnd->device' dereferences cmnd
 * before the NULL == cmnd check below -- confirm against the full file
 * whether this ordering is real or an artifact of the elided listing.
 */
3837 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3838 int scsi_result, int delta_jiff)
3840 unsigned long iflags;
3841 int k, num_in_q, qdepth, inject;
3842 struct sdebug_queued_cmd *sqcp = NULL;
3843 struct scsi_device *sdp = cmnd->device;
3845 if (NULL == cmnd || NULL == devip) {
3846 pr_warn("%s: called with NULL cmnd or devip pointer\n",
3848 /* no particularly good error to report back */
3849 return SCSI_MLQUEUE_HOST_BUSY;
3851 if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3852 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3853 __func__, scsi_result);
3854 if (delta_jiff == 0)
3855 goto respond_in_thread;
3857 /* schedule the response at a later time if resources permit */
3858 spin_lock_irqsave(&queued_arr_lock, iflags);
3859 num_in_q = atomic_read(&devip->num_in_q);
3860 qdepth = cmnd->device->queue_depth;
/* device queue already full: answer TASK SET FULL (or respond
 * inline) instead of queueing */
3862 if ((qdepth > 0) && (num_in_q >= qdepth)) {
3864 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3865 goto respond_in_thread;
3867 scsi_result = device_qfull_result;
/* RARE_TSF option: inject an occasional TASK SET FULL when the
 * queue is one short of full, every |every_nth| commands */
3868 } else if ((scsi_debug_every_nth != 0) &&
3869 (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
3870 (scsi_result == 0)) {
3871 if ((num_in_q == (qdepth - 1)) &&
3872 (atomic_inc_return(&sdebug_a_tsf) >=
3873 abs(scsi_debug_every_nth))) {
3874 atomic_set(&sdebug_a_tsf, 0);
3876 scsi_result = device_qfull_result;
/* claim a free slot in the shared queue array */
3880 k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3881 if (k >= scsi_debug_max_queue) {
3882 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3884 goto respond_in_thread;
3885 else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3886 scsi_result = device_qfull_result;
3887 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3888 sdev_printk(KERN_INFO, sdp,
3889 "%s: max_queue=%d exceeded, %s\n",
3890 __func__, scsi_debug_max_queue,
3891 (scsi_result ? "status: TASK SET FULL" :
3892 "report: host busy"));
3894 goto respond_in_thread;
3896 return SCSI_MLQUEUE_HOST_BUSY;
3898 __set_bit(k, queued_in_use_bm);
3899 atomic_inc(&devip->num_in_q);
3900 sqcp = &queued_arr[k];
3901 sqcp->a_cmnd = cmnd;
3902 cmnd->result = scsi_result;
3903 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/* arm the delay mechanism; helper objects are allocated lazily
 * on first use of a slot and freed in free_all_queued() */
3904 if (delta_jiff > 0) {
3905 if (NULL == sqcp->cmnd_timerp) {
3906 sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
3908 if (NULL == sqcp->cmnd_timerp)
3909 return SCSI_MLQUEUE_HOST_BUSY;
3910 init_timer(sqcp->cmnd_timerp);
3912 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
3913 sqcp->cmnd_timerp->data = k;
3914 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
3915 add_timer(sqcp->cmnd_timerp);
3916 } else if (scsi_debug_ndelay > 0) {
3917 ktime_t kt = ktime_set(0, scsi_debug_ndelay);
3918 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
3920 if (NULL == sd_hp) {
3921 sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
3923 return SCSI_MLQUEUE_HOST_BUSY;
3924 sqcp->sd_hrtp = sd_hp;
3925 hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
3927 sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
3930 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
3931 } else { /* delay < 0 */
3932 if (NULL == sqcp->tletp) {
3933 sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
3935 if (NULL == sqcp->tletp)
3936 return SCSI_MLQUEUE_HOST_BUSY;
3937 tasklet_init(sqcp->tletp,
3938 sdebug_q_cmd_complete, k);
/* delay == -1 requests high-priority tasklet scheduling */
3940 if (-1 == delta_jiff)
3941 tasklet_hi_schedule(sqcp->tletp);
3943 tasklet_schedule(sqcp->tletp);
3945 if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
3946 (scsi_result == device_qfull_result))
3947 sdev_printk(KERN_INFO, sdp,
3948 "%s: num_in_q=%d +1, %s%s\n", __func__,
3949 num_in_q, (inject ? "<inject> " : ""),
3950 "status: TASK SET FULL");
3953 respond_in_thread: /* call back to mid-layer using invocation thread */
3954 cmnd->result = scsi_result;
3955 cmnd->scsi_done(cmnd);
3959 /* Note: The following macros create attribute files in the
3960 /sys/module/scsi_debug/parameters directory. Unfortunately this
3961 driver is unaware of a change and cannot trigger auxiliary actions
3962 as it can when the corresponding attribute in the
3963 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
/* Module parameters, alphabetical.  S_IWUSR entries are writable at
 * runtime through /sys/module/scsi_debug/parameters/ (see note above);
 * the rest are read-only after load. */
3965 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
3966 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
3967 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
3968 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
3969 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
3970 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
3971 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
3972 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
3973 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
3974 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
3975 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
3976 module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
3977 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
3978 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
3979 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
3980 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
3981 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
3982 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
3983 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
3984 module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
3985 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
3986 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
3987 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
3988 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
3989 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
3990 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
3991 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
3992 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
3993 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
3994 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
3995 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
3996 module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
3997 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
3998 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
3999 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
4000 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
4001 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
4002 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
4004 module_param_named(write_same_length, scsi_debug_write_same_length, int,
/* Module identity */
4007 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4008 MODULE_DESCRIPTION("SCSI debug adapter driver");
4009 MODULE_LICENSE("GPL");
4010 MODULE_VERSION(SCSI_DEBUG_VERSION);
/* One-line help text per parameter, shown by modinfo(8) */
4012 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4013 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4014 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4015 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4016 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4017 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4018 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4019 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4020 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4021 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4022 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4023 MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
4024 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4025 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4026 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4027 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
4028 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4029 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
4030 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4031 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4032 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4033 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
4034 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
4035 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4036 MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
4037 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4038 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4039 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4040 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4041 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
4042 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4043 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4044 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4045 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4046 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4047 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4048 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4049 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4050 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
/* Static buffer for the host "info" string; single writer assumed. */
4052 static char sdebug_info[256];
/*
 * scsi_host_template->info hook: formats a one-line driver summary
 * (version, date, store size, opts) into sdebug_info and returns it.
 * NOTE(review): the final format argument and return are elided from
 * this listing.
 */
4054 static const char * scsi_debug_info(struct Scsi_Host * shp)
4056 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
4057 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
4058 scsi_debug_version_date, scsi_debug_dev_size_mb,
4063 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
/*
 * scsi_host_template->write_info hook: parses a decimal integer from the
 * user buffer into scsi_debug_opts.  Requires CAP_SYS_ADMIN and
 * CAP_SYS_RAWIO.  At most 15 input bytes are consumed.  NOTE(review):
 * the arr[] declaration, error returns and final return are elided from
 * this listing.
 */
4064 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
4068 int minLen = length > 15 ? 15 : length;
4070 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4072 memcpy(arr, buffer, minLen);
4074 if (1 != sscanf(arr, "%d", &opts))
4076 scsi_debug_opts = opts;
/* restart the every_nth counter so injection cadence is predictable */
4077 if (scsi_debug_every_nth != 0)
4078 atomic_set(&sdebug_cmnd_count, 0);
4082 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4083 * same for each scsi_debug host (if more than one). Some of the counters
4084 * output are not atomics so might be inaccurate in a busy system. */
/*
 * scsi_host_template->show_info hook: dumps configuration, geometry and
 * reset/error counters into the seq_file, plus the currently-busy range
 * of the queued-command bitmap.  NOTE(review): local declarations (f, l,
 * b[]) and the return are elided from this listing.
 */
4085 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
/* show the live every_nth progress counter when injection is on */
4090 if (scsi_debug_every_nth > 0)
4091 snprintf(b, sizeof(b), " (curr:%d)",
4092 ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
4093 atomic_read(&sdebug_a_tsf) :
4094 atomic_read(&sdebug_cmnd_count)));
4098 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
4099 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
4101 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
4102 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
4103 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
4104 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
4105 "usec_in_jiffy=%lu\n",
4106 SCSI_DEBUG_VERSION, scsi_debug_version_date,
4107 scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
4108 scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
4109 scsi_debug_max_luns, atomic_read(&sdebug_completions),
4110 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
4111 sdebug_sectors_per, num_aborts, num_dev_resets,
4112 num_target_resets, num_bus_resets, num_host_resets,
4113 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
/* report the first/last busy bits if any commands are queued */
4115 f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
4116 if (f != scsi_debug_max_queue) {
4117 l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
4118 seq_printf(m, " %s BUSY: first,last bits set: %d,%d\n",
4119 "queued_in_use_bm", f, l);
/* sysfs 'delay' driver attribute: jiffies-based response delay. */
4124 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4126 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
4128 /* Returns -EBUSY if delay is being changed and commands are queued */
/*
 * Store: accept the new delay only when no commands are queued (their
 * armed timers depend on the old setting); also zeroes ndelay so the
 * two delay mechanisms stay mutually exclusive.
 */
4129 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4134 if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
4136 if (scsi_debug_delay != delay) {
4137 unsigned long iflags;
4140 spin_lock_irqsave(&queued_arr_lock, iflags);
4141 k = find_first_bit(queued_in_use_bm,
4142 scsi_debug_max_queue);
4143 if (k != scsi_debug_max_queue)
4144 res = -EBUSY; /* have queued commands */
4146 scsi_debug_delay = delay;
4147 scsi_debug_ndelay = 0;
4149 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4155 static DRIVER_ATTR_RW(delay);
/* sysfs 'ndelay' driver attribute: nanosecond (hrtimer) response delay. */
4157 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4159 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
4161 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4162 /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
4163 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4166 unsigned long iflags;
/* accept 0 <= ndelay < 1 second only */
4169 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4170 (ndelay >= 0) && (ndelay < 1000000000)) {
4172 if (scsi_debug_ndelay != ndelay) {
4173 spin_lock_irqsave(&queued_arr_lock, iflags);
4174 k = find_first_bit(queued_in_use_bm,
4175 scsi_debug_max_queue);
4176 if (k != scsi_debug_max_queue)
4177 res = -EBUSY; /* have queued commands */
4179 scsi_debug_ndelay = ndelay;
/* a non-zero ndelay overrides the jiffies delay */
4180 scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
4183 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4189 static DRIVER_ATTR_RW(ndelay);
/* sysfs 'opts' driver attribute: error/noise injection flag word. */
4191 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4193 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
/*
 * Store: accepts hex (leading "0x") or decimal; records whether any
 * error-injection bit is set so the fast path can skip injection checks,
 * and resets the injection cadence counters.
 */
4196 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4202 if (1 == sscanf(buf, "%10s", work)) {
4203 if (0 == strncasecmp(work,"0x", 2)) {
4204 if (1 == sscanf(&work[2], "%x", &opts))
4207 if (1 == sscanf(work, "%d", &opts))
4213 scsi_debug_opts = opts;
/* remember that at least one injection option is active */
4214 if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
4215 sdebug_any_injecting_opt = true;
4216 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
4217 sdebug_any_injecting_opt = true;
4218 else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
4219 sdebug_any_injecting_opt = true;
4220 else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
4221 sdebug_any_injecting_opt = true;
4222 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
4223 sdebug_any_injecting_opt = true;
4224 atomic_set(&sdebug_cmnd_count, 0);
4225 atomic_set(&sdebug_a_tsf, 0);
4228 static DRIVER_ATTR_RW(opts);
/* sysfs 'ptype': simulated SCSI peripheral device type. */
4230 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4232 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
4234 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4239 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4240 scsi_debug_ptype = n;
4245 static DRIVER_ATTR_RW(ptype);
/* sysfs 'dsense': descriptor (vs fixed) sense data format. */
4247 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4249 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
4251 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4256 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4257 scsi_debug_dsense = n;
4262 static DRIVER_ATTR_RW(dsense);
/* sysfs 'fake_rw': when set, READs/WRITEs skip the RAM store. */
4264 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4266 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
4268 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4273 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
/* normalize current value to 0/1 before comparing */
4275 scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
4276 if (scsi_debug_fake_rw != n) {
/* turning fake_rw off: lazily allocate the backing store if the
 * module was loaded with fake_rw=1 and never needed one */
4277 if ((0 == n) && (NULL == fake_storep)) {
4279 (unsigned long)scsi_debug_dev_size_mb *
4282 fake_storep = vmalloc(sz);
4283 if (NULL == fake_storep) {
4284 pr_err("%s: out of memory, 9\n",
4288 memset(fake_storep, 0, sz);
4290 scsi_debug_fake_rw = n;
4296 static DRIVER_ATTR_RW(fake_rw);
/* sysfs 'no_lun_0': suppress LUN 0 so scanning uses REPORT LUNS. */
4298 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4300 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
4302 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4307 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4308 scsi_debug_no_lun_0 = n;
4313 static DRIVER_ATTR_RW(no_lun_0);
/* sysfs 'num_tgts': targets simulated per host; triggers a rescan. */
4315 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4317 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
4319 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4324 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4325 scsi_debug_num_tgts = n;
4326 sdebug_max_tgts_luns();
4331 static DRIVER_ATTR_RW(num_tgts);
/* sysfs 'dev_size_mb' (RO): size of the shared RAM store. */
4333 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4335 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
4337 static DRIVER_ATTR_RO(dev_size_mb);
/* sysfs 'num_parts' (RO): partitions built at load time. */
4339 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4341 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
4343 static DRIVER_ATTR_RO(num_parts);
/* sysfs 'every_nth': injection cadence; store resets the counter. */
4345 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4347 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
4349 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4354 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4355 scsi_debug_every_nth = nth;
4356 atomic_set(&sdebug_cmnd_count, 0);
4361 static DRIVER_ATTR_RW(every_nth);
/* sysfs 'max_luns': LUNs per target; triggers a rescan. */
4363 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4365 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
4367 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4372 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4373 scsi_debug_max_luns = n;
4374 sdebug_max_tgts_luns();
4379 static DRIVER_ATTR_RW(max_luns);
/* sysfs 'max_queue': upper bound on concurrently queued commands. */
4381 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4383 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
4385 /* N.B. max_queue can be changed while there are queued commands. In flight
4386 * commands beyond the new max_queue will be completed. */
/*
 * Store: sets the new limit and, when in-flight commands occupy slots
 * above it, records retired_max_queue so the completion handlers can
 * drain those slots before the limit fully takes effect.
 */
4387 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4390 unsigned long iflags;
4393 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4394 (n <= SCSI_DEBUG_CANQUEUE)) {
4395 spin_lock_irqsave(&queued_arr_lock, iflags);
/* k = highest in-use slot (== CANQUEUE when bitmap is empty) */
4396 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
4397 scsi_debug_max_queue = n;
4398 if (SCSI_DEBUG_CANQUEUE == k)
4399 atomic_set(&retired_max_queue, 0);
4401 atomic_set(&retired_max_queue, k + 1);
4403 atomic_set(&retired_max_queue, 0);
4404 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4409 static DRIVER_ATTR_RW(max_queue);
/* sysfs 'no_uld' (RO): whether upper-level drivers may attach. */
4411 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4413 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
4415 static DRIVER_ATTR_RO(no_uld);
/* sysfs 'scsi_level' (RO): SCSI standard level being simulated. */
4417 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4419 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
4421 static DRIVER_ATTR_RO(scsi_level);
/* sysfs 'virtual_gb': reported capacity override (GiB). */
4423 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4425 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
/*
 * Store: recompute the advertised capacity and, when the value actually
 * changed, raise a CAPACITY CHANGED unit attention on every simulated
 * device so initiators re-read the capacity.
 */
4427 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4433 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4434 changed = (scsi_debug_virtual_gb != n);
4435 scsi_debug_virtual_gb = n;
4436 sdebug_capacity = get_sdebug_capacity();
4438 struct sdebug_host_info *sdhp;
4439 struct sdebug_dev_info *dp;
4441 spin_lock(&sdebug_host_list_lock);
4442 list_for_each_entry(sdhp, &sdebug_host_list,
4444 list_for_each_entry(dp, &sdhp->dev_info_list,
4446 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4450 spin_unlock(&sdebug_host_list_lock);
4456 static DRIVER_ATTR_RW(virtual_gb);
4458 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4460 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
4463 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4468 if (sscanf(buf, "%d", &delta_hosts) != 1)
4470 if (delta_hosts > 0) {
4472 sdebug_add_adapter();
4473 } while (--delta_hosts);
4474 } else if (delta_hosts < 0) {
4476 sdebug_remove_adapter();
4477 } while (++delta_hosts);
4481 static DRIVER_ATTR_RW(add_host);
/* sysfs show/store: whether VPD pages include the host number (affects
 * device identification uniqueness across simulated hosts). */
4483 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4485 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
4487 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4492 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4493 scsi_debug_vpd_use_hostno = n;
4498 static DRIVER_ATTR_RW(vpd_use_hostno);
/* Read-only sysfs attributes reporting module parameters that can only be
 * set at load time: sector_size, dix, dif, guard and ato. */
4500 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4502 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
4504 static DRIVER_ATTR_RO(sector_size);
4506 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4508 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
4510 static DRIVER_ATTR_RO(dix);
4512 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4514 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
4516 static DRIVER_ATTR_RO(dif);
4518 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4520 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
4522 static DRIVER_ATTR_RO(guard);
4524 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4526 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
4528 static DRIVER_ATTR_RO(ato);
/*
 * sysfs show: the provisioning (thin-provisioning) map as a list of mapped
 * LBA ranges. Without logical block provisioning enabled, everything is
 * reported as mapped ("0-<last sector>").
 */
4530 static ssize_t map_show(struct device_driver *ddp, char *buf)
4534 if (!scsi_debug_lbp())
4535 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4536 sdebug_store_sectors);
4538 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
4540 buf[count++] = '\n';
4545 static DRIVER_ATTR_RO(map);
/* sysfs show/store: whether the simulated devices report themselves as
 * removable media; any value > 0 is treated as true. */
4547 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4549 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
4551 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4556 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4557 scsi_debug_removable = (n > 0);
4562 static DRIVER_ATTR_RW(removable);
/* sysfs show: whether queuecommand runs under the Scsi_Host host_lock. */
4564 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4566 return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
4568 /* Returns -EBUSY if host_lock is being changed and commands are queued */
4569 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4574 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4575 bool new_host_lock = (n > 0);
4578 if (new_host_lock != scsi_debug_host_lock) {
4579 unsigned long iflags;
/* only flip the setting while no commands are in flight; an empty
 * queued_in_use_bm means find_first_bit returns max_queue */
4582 spin_lock_irqsave(&queued_arr_lock, iflags);
4583 k = find_first_bit(queued_in_use_bm,
4584 scsi_debug_max_queue);
4585 if (k != scsi_debug_max_queue)
4586 res = -EBUSY; /* have queued commands */
4588 scsi_debug_host_lock = new_host_lock;
4589 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4595 static DRIVER_ATTR_RW(host_lock);
/* sysfs show/store: strict mode — when set, CDBs are checked against the
 * opcode length masks in scsi_debug_queuecommand(). */
4597 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4599 return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
4601 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4606 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4607 scsi_debug_strict = (n > 0);
4612 static DRIVER_ATTR_RW(strict);
4615 /* Note: The following array creates attribute files in the
4616 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4617 files (over those found in the /sys/module/scsi_debug/parameters
4618 directory) is that auxiliary actions can be triggered when an attribute
4619 is changed. For example see: add_host_store() above.
4622 static struct attribute *sdebug_drv_attrs[] = {
4623 &driver_attr_delay.attr,
4624 &driver_attr_opts.attr,
4625 &driver_attr_ptype.attr,
4626 &driver_attr_dsense.attr,
4627 &driver_attr_fake_rw.attr,
4628 &driver_attr_no_lun_0.attr,
4629 &driver_attr_num_tgts.attr,
4630 &driver_attr_dev_size_mb.attr,
4631 &driver_attr_num_parts.attr,
4632 &driver_attr_every_nth.attr,
4633 &driver_attr_max_luns.attr,
4634 &driver_attr_max_queue.attr,
4635 &driver_attr_no_uld.attr,
4636 &driver_attr_scsi_level.attr,
4637 &driver_attr_virtual_gb.attr,
4638 &driver_attr_add_host.attr,
4639 &driver_attr_vpd_use_hostno.attr,
4640 &driver_attr_sector_size.attr,
4641 &driver_attr_dix.attr,
4642 &driver_attr_dif.attr,
4643 &driver_attr_guard.attr,
4644 &driver_attr_ato.attr,
4645 &driver_attr_map.attr,
4646 &driver_attr_removable.attr,
4647 &driver_attr_host_lock.attr,
4648 &driver_attr_ndelay.attr,
4649 &driver_attr_strict.attr,
/* generates sdebug_drv_groups, referenced by pseudo_lld_bus below */
4652 ATTRIBUTE_GROUPS(sdebug_drv);
/* Root device that all simulated adapters hang off in the device tree. */
4654 static struct device *pseudo_primary;
/*
 * Module init: validate module parameters, size and allocate the shared
 * RAM store (plus optional DIF and provisioning-map stores), register the
 * pseudo bus/driver and create the initial set of simulated adapters.
 */
4656 static int __init scsi_debug_init(void)
4663 atomic_set(&sdebug_cmnd_count, 0);
4664 atomic_set(&sdebug_completions, 0);
4665 atomic_set(&retired_max_queue, 0);
/* ndelay (nanosecond delay) overrides the coarser "delay" parameter */
4667 if (scsi_debug_ndelay >= 1000000000) {
4668 pr_warn("%s: ndelay must be less than 1 second, ignored\n",
4670 scsi_debug_ndelay = 0;
4671 } else if (scsi_debug_ndelay > 0)
4672 scsi_debug_delay = DELAY_OVERRIDDEN;
/* only a fixed set of sector sizes is accepted (cases elided here) */
4674 switch (scsi_debug_sector_size) {
4681 pr_err("%s: invalid sector_size %d\n", __func__,
4682 scsi_debug_sector_size);
4686 switch (scsi_debug_dif) {
4688 case SD_DIF_TYPE0_PROTECTION:
4689 case SD_DIF_TYPE1_PROTECTION:
4690 case SD_DIF_TYPE2_PROTECTION:
4691 case SD_DIF_TYPE3_PROTECTION:
4695 pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
4699 if (scsi_debug_guard > 1) {
4700 pr_err("%s: guard must be 0 or 1\n", __func__);
4704 if (scsi_debug_ato > 1) {
4705 pr_err("%s: ato must be 0 or 1\n", __func__);
4709 if (scsi_debug_physblk_exp > 15) {
4710 pr_err("%s: invalid physblk_exp %u\n", __func__,
4711 scsi_debug_physblk_exp);
/* lowest_aligned is a 14-bit field in the READ CAPACITY(16) response */
4715 if (scsi_debug_lowest_aligned > 0x3fff) {
4716 pr_err("%s: lowest_aligned too big: %u\n", __func__,
4717 scsi_debug_lowest_aligned);
4721 if (scsi_debug_dev_size_mb < 1)
4722 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
4723 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
4724 sdebug_store_sectors = sz / scsi_debug_sector_size;
4725 sdebug_capacity = get_sdebug_capacity();
4727 /* play around with geometry, don't waste too much on track 0 */
4729 sdebug_sectors_per = 32;
4730 if (scsi_debug_dev_size_mb >= 16)
4732 else if (scsi_debug_dev_size_mb >= 256)
4734 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4735 (sdebug_sectors_per * sdebug_heads);
4736 if (sdebug_cylinders_per >= 1024) {
4737 /* other LLDs do this; implies >= 1GB ram disk ... */
4739 sdebug_sectors_per = 63;
4740 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4741 (sdebug_sectors_per * sdebug_heads);
/* the backing RAM store is only needed when reads/writes are real */
4744 if (0 == scsi_debug_fake_rw) {
4745 fake_storep = vmalloc(sz);
4746 if (NULL == fake_storep) {
4747 pr_err("%s: out of memory, 1\n", __func__);
4750 memset(fake_storep, 0, sz);
4751 if (scsi_debug_num_parts > 0)
4752 sdebug_build_parts(fake_storep, sz);
/* DIX: allocate one protection-information tuple per store sector */
4755 if (scsi_debug_dix) {
4758 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4759 dif_storep = vmalloc(dif_size);
4761 pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
4764 if (dif_storep == NULL) {
4765 pr_err("%s: out of mem. (DIX)\n", __func__);
/* 0xff pattern marks PI as "not yet written" */
4770 memset(dif_storep, 0xff, dif_size);
4773 /* Logical Block Provisioning */
4774 if (scsi_debug_lbp()) {
4775 scsi_debug_unmap_max_blocks =
4776 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
4778 scsi_debug_unmap_max_desc =
4779 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
4781 scsi_debug_unmap_granularity =
4782 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
4784 if (scsi_debug_unmap_alignment &&
4785 scsi_debug_unmap_granularity <=
4786 scsi_debug_unmap_alignment) {
4787 pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
/* one bit per provisioning block, covering the whole store */
4792 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4793 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4795 pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
4797 if (map_storep == NULL) {
4798 pr_err("%s: out of mem. (MAP)\n", __func__);
4803 bitmap_zero(map_storep, map_size);
4805 /* Map first 1KB for partition table */
4806 if (scsi_debug_num_parts)
/* register the pseudo device tree: root device, bus, then driver */
4810 pseudo_primary = root_device_register("pseudo_0");
4811 if (IS_ERR(pseudo_primary)) {
4812 pr_warn("%s: root_device_register() error\n", __func__);
4813 ret = PTR_ERR(pseudo_primary);
4816 ret = bus_register(&pseudo_lld_bus);
4818 pr_warn("%s: bus_register error: %d\n", __func__, ret);
4821 ret = driver_register(&sdebug_driverfs_driver);
4823 pr_warn("%s: driver_register error: %d\n", __func__, ret);
/* instantiate the initial adapters; scsi_debug_add_host is re-counted
 * by sdebug_add_adapter() as each one succeeds */
4827 host_to_add = scsi_debug_add_host;
4828 scsi_debug_add_host = 0;
4830 for (k = 0; k < host_to_add; k++) {
4831 if (sdebug_add_adapter()) {
4832 pr_err("%s: sdebug_add_adapter failed k=%d\n",
4838 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
4839 pr_info("%s: built %d host(s)\n", __func__,
4840 scsi_debug_add_host);
/* error unwind labels (partially elided here) */
4845 bus_unregister(&pseudo_lld_bus);
4847 root_device_unregister(pseudo_primary);
/*
 * Module exit: tear down all simulated adapters, then unregister the
 * driver, bus and root device (reverse of scsi_debug_init()).
 */
4858 static void __exit scsi_debug_exit(void)
4860 int k = scsi_debug_add_host;
4865 sdebug_remove_adapter();
4866 driver_unregister(&sdebug_driverfs_driver);
4867 bus_unregister(&pseudo_lld_bus);
4868 root_device_unregister(pseudo_primary);
/* device_initcall (not module_init) so built-in init runs after the SCSI
 * mid-layer is up */
4876 device_initcall(scsi_debug_init);
4877 module_exit(scsi_debug_exit);
/* Device-model release callback: frees the sdebug_host_info embedding the
 * struct device (body elided in this view). */
4879 static void sdebug_release_adapter(struct device * dev)
4881 struct sdebug_host_info *sdbg_host;
4883 sdbg_host = to_sdebug_host(dev);
/*
 * Create one simulated adapter: allocate its host info, create
 * num_tgts * max_luns device infos, link it onto sdebug_host_list and
 * register it with the driver core (which triggers sdebug_driver_probe()).
 * On failure the partially built device-info list is freed.
 */
4887 static int sdebug_add_adapter(void)
4889 int k, devs_per_host;
4891 struct sdebug_host_info *sdbg_host;
4892 struct sdebug_dev_info *sdbg_devinfo, *tmp;
4894 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
4895 if (NULL == sdbg_host) {
4896 printk(KERN_ERR "%s: out of memory at line %d\n",
4897 __func__, __LINE__);
4901 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
/* pre-create one dev_info per target/LUN combination */
4903 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
4904 for (k = 0; k < devs_per_host; k++) {
4905 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
4906 if (!sdbg_devinfo) {
4907 printk(KERN_ERR "%s: out of memory at line %d\n",
4908 __func__, __LINE__);
4914 spin_lock(&sdebug_host_list_lock);
4915 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
4916 spin_unlock(&sdebug_host_list_lock);
4918 sdbg_host->dev.bus = &pseudo_lld_bus;
4919 sdbg_host->dev.parent = pseudo_primary;
4920 sdbg_host->dev.release = &sdebug_release_adapter;
4921 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
4923 error = device_register(&sdbg_host->dev);
/* success: bump the global adapter count */
4928 ++scsi_debug_add_host;
/* error path: free every dev_info created so far */
4932 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4934 list_del(&sdbg_devinfo->dev_list);
4935 kfree(sdbg_devinfo);
/*
 * Remove the most recently added simulated adapter (tail of
 * sdebug_host_list). No-op when the list is empty; its struct device is
 * freed via sdebug_release_adapter() when the last reference drops.
 */
4942 static void sdebug_remove_adapter(void)
4944 struct sdebug_host_info * sdbg_host = NULL;
4946 spin_lock(&sdebug_host_list_lock);
4947 if (!list_empty(&sdebug_host_list)) {
4948 sdbg_host = list_entry(sdebug_host_list.prev,
4949 struct sdebug_host_info, host_list);
4950 list_del(&sdbg_host->host_list);
4952 spin_unlock(&sdebug_host_list_lock);
4957 device_unregister(&sdbg_host->dev);
4958 --scsi_debug_add_host;
/*
 * .change_queue_depth hook: adjust the queue depth of one simulated device.
 * Returns the resulting queue depth (or, presumably, an error when no
 * hostdata is attached — the return on that path is elided here).
 */
4962 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4965 unsigned long iflags;
4966 struct sdebug_dev_info *devip;
4968 spin_lock_irqsave(&queued_arr_lock, iflags);
4969 devip = (struct sdebug_dev_info *)sdev->hostdata;
4970 if (NULL == devip) {
4971 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4974 num_in_q = atomic_read(&devip->num_in_q);
4975 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4979 /* allow to exceed max host queued_arr elements for testing */
4980 if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4981 qdepth = SCSI_DEBUG_CANQUEUE + 10;
4982 scsi_change_queue_depth(sdev, qdepth);
4984 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4985 sdev_printk(KERN_INFO, sdev,
4986 "%s: qdepth=%d, num_in_q=%d\n",
4987 __func__, qdepth, num_in_q);
4989 return sdev->queue_depth;
/*
 * Error-injection helper called from queuecommand when every_nth != 0.
 * Every |every_nth|-th command either times out (returns 1 so the caller
 * drops the command) or has an injection flag recorded in the per-command
 * private data for the response path to act on. Returns 0 normally.
 */
4993 check_inject(struct scsi_cmnd *scp)
4995 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
4997 memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
4999 if (atomic_inc_return(&sdebug_cmnd_count) >=
5000 abs(scsi_debug_every_nth)) {
5001 atomic_set(&sdebug_cmnd_count, 0);
/* negative every_nth < -1 collapses to -1 after first trigger */
5002 if (scsi_debug_every_nth < -1)
5003 scsi_debug_every_nth = -1;
5004 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
5005 return 1; /* ignore command causing timeout */
5006 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
5007 scsi_medium_access_command(scp))
5008 return 1; /* time out reads and writes */
5009 if (sdebug_any_injecting_opt) {
5010 int opts = scsi_debug_opts;
/* record which single injection type applies to this command */
5012 if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5013 ep->inj_recovered = true;
5014 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5015 ep->inj_transport = true;
5016 else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5018 else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5020 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5021 ep->inj_short = true;
/*
 * Core command dispatcher: decode the CDB opcode (and service action when
 * present) via opcode_ind_arr/opcode_info_arr, run per-command checks
 * (LUN range, strict CDB mask, unit attentions, stopped state, error
 * injection) and invoke the matching resp_* handler, then schedule the
 * response with the configured delay.
 */
5028 scsi_debug_queuecommand(struct scsi_cmnd *scp)
5031 struct scsi_device *sdp = scp->device;
5032 const struct opcode_info_t *oip;
5033 const struct opcode_info_t *r_oip;
5034 struct sdebug_dev_info *devip;
5035 u8 *cmd = scp->cmnd;
5036 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5039 int errsts_no_connect = DID_NO_CONNECT << 16;
5044 bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
5046 scsi_set_resid(scp, 0);
/* optionally hex-dump the CDB for debugging */
5047 if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
5052 sb = (int)sizeof(b);
5054 strcpy(b, "too long, over 32 bytes");
5056 for (k = 0, n = 0; k < len && n < sb; ++k)
5057 n += scnprintf(b + n, sb - n, "%02x ",
5060 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
/* only the REPORT LUNS well-known LUN is valid beyond max_luns */
5062 has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS);
5063 if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
5064 return schedule_resp(scp, NULL, errsts_no_connect, 0);
5066 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
5067 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
5068 devip = (struct sdebug_dev_info *)sdp->hostdata;
5070 devip = devInfoReg(sdp);
5072 return schedule_resp(scp, NULL, errsts_no_connect, 0);
5074 na = oip->num_attached;
/* an opcode may have several entries distinguished by service action */
5076 if (na) { /* multiple commands with this opcode */
5078 if (FF_SA & r_oip->flags) {
5079 if (F_SA_LOW & oip->flags)
5082 sa = get_unaligned_be16(cmd + 8);
5083 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5084 if (opcode == oip->opcode && sa == oip->sa)
5087 } else { /* since no service action only check opcode */
5088 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5089 if (opcode == oip->opcode)
/* no match: point sense at the service-action field or the opcode */
5094 if (F_SA_LOW & r_oip->flags)
5095 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5096 else if (F_SA_HIGH & r_oip->flags)
5097 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5099 mk_sense_invalid_opcode(scp);
5102 } /* else (when na==0) we assume the oip is a match */
5104 if (F_INV_OP & flags) {
5105 mk_sense_invalid_opcode(scp);
5108 if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
5110 sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: "
5111 "0x%x not supported for wlun\n", opcode);
5112 mk_sense_invalid_opcode(scp);
5115 if (scsi_debug_strict) { /* check cdb against mask */
/* reject any CDB bit not allowed by the opcode's len_mask template */
5119 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5120 rem = ~oip->len_mask[k] & cmd[k];
5122 for (j = 7; j >= 0; --j, rem <<= 1) {
5126 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
/* report any pending unit attention unless this opcode skips them */
5131 if (!(F_SKIP_UA & flags) &&
5132 SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
5133 errsts = check_readiness(scp, UAS_ONLY, devip);
5137 if ((F_M_ACCESS & flags) && devip->stopped) {
5138 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5140 sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5141 "%s\n", my_name, "initializing command "
5143 errsts = check_condition_result;
5146 if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
5148 if (scsi_debug_every_nth) {
5149 if (check_inject(scp))
5150 return 0; /* ignore command: make trouble */
5152 if (oip->pfp) /* if this command has a resp_* function, call it */
5153 errsts = oip->pfp(scp, devip);
5154 else if (r_pfp) /* if leaf function ptr NULL, try the root's */
5155 errsts = r_pfp(scp, devip);
5158 return schedule_resp(scp, devip, errsts,
5159 ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
5161 return schedule_resp(scp, devip, check_condition_result, 0);
/*
 * .queuecommand entry point: wraps scsi_debug_queuecommand(), taking the
 * Scsi_Host host_lock first when the host_lock module parameter is set
 * (lets testers exercise both locking models).
 */
5165 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
5167 if (scsi_debug_host_lock) {
5168 unsigned long iflags;
5171 spin_lock_irqsave(shost->host_lock, iflags);
5172 rc = scsi_debug_queuecommand(cmd);
5173 spin_unlock_irqrestore(shost->host_lock, iflags);
5176 return scsi_debug_queuecommand(cmd);
/*
 * Host template for the simulated adapters. can_queue and use_clustering
 * are overridden per-host in sdebug_driver_probe() from the max_queue and
 * clustering module parameters.
 */
5179 static struct scsi_host_template sdebug_driver_template = {
5180 .show_info = scsi_debug_show_info,
5181 .write_info = scsi_debug_write_info,
5182 .proc_name = sdebug_proc_name,
5183 .name = "SCSI DEBUG",
5184 .info = scsi_debug_info,
5185 .slave_alloc = scsi_debug_slave_alloc,
5186 .slave_configure = scsi_debug_slave_configure,
5187 .slave_destroy = scsi_debug_slave_destroy,
5188 .ioctl = scsi_debug_ioctl,
5189 .queuecommand = sdebug_queuecommand_lock_or_not,
5190 .change_queue_depth = sdebug_change_qdepth,
5191 .eh_abort_handler = scsi_debug_abort,
5192 .eh_device_reset_handler = scsi_debug_device_reset,
5193 .eh_target_reset_handler = scsi_debug_target_reset,
5194 .eh_bus_reset_handler = scsi_debug_bus_reset,
5195 .eh_host_reset_handler = scsi_debug_host_reset,
5196 .can_queue = SCSI_DEBUG_CANQUEUE,
5198 .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
5199 .cmd_per_lun = DEF_CMD_PER_LUN,
5201 .use_clustering = DISABLE_CLUSTERING,
5202 .module = THIS_MODULE,
5203 .track_queue_depth = 1,
/* per-command private area used by the error-injection machinery */
5204 .cmd_size = sizeof(struct sdebug_scmd_extra_t),
/*
 * Bus probe callback, run once per simulated adapter device: allocate a
 * Scsi_Host from the template, configure id/lun limits and DIF/DIX
 * protection capabilities from module parameters, then add and scan it.
 */
5207 static int sdebug_driver_probe(struct device * dev)
5211 struct sdebug_host_info *sdbg_host;
5212 struct Scsi_Host *hpnt;
5215 sdbg_host = to_sdebug_host(dev);
5217 sdebug_driver_template.can_queue = scsi_debug_max_queue;
5218 if (scsi_debug_clustering)
5219 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5220 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5222 pr_err("%s: scsi_host_alloc failed\n", __func__);
/* link host info and Scsi_Host both ways */
5227 sdbg_host->shost = hpnt;
5228 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
/* leave room so the initiator's own id is not counted as a target */
5229 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
5230 hpnt->max_id = scsi_debug_num_tgts + 1;
5232 hpnt->max_id = scsi_debug_num_tgts;
5233 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
/* translate dif/dix module params into SHOST protection flags */
5237 switch (scsi_debug_dif) {
5239 case SD_DIF_TYPE1_PROTECTION:
5240 host_prot = SHOST_DIF_TYPE1_PROTECTION;
5242 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
5245 case SD_DIF_TYPE2_PROTECTION:
5246 host_prot = SHOST_DIF_TYPE2_PROTECTION;
5248 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
5251 case SD_DIF_TYPE3_PROTECTION:
5252 host_prot = SHOST_DIF_TYPE3_PROTECTION;
5254 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
5259 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
5263 scsi_host_set_prot(hpnt, host_prot);
5265 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
5266 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5267 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5268 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5269 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5270 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5271 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5272 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
/* guard=1 selects IP checksum, otherwise T10 CRC */
5274 if (scsi_debug_guard == 1)
5275 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5277 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
/* latch whether any injection option is active, for check_inject() */
5279 opts = scsi_debug_opts;
5280 if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5281 sdebug_any_injecting_opt = true;
5282 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5283 sdebug_any_injecting_opt = true;
5284 else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5285 sdebug_any_injecting_opt = true;
5286 else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5287 sdebug_any_injecting_opt = true;
5288 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5289 sdebug_any_injecting_opt = true;
5291 error = scsi_add_host(hpnt, &sdbg_host->dev);
5293 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
5295 scsi_host_put(hpnt);
5297 scsi_scan_host(hpnt);
/*
 * Bus remove callback: detach the Scsi_Host from the mid-layer, free all
 * per-device infos belonging to this adapter, then drop the host reference.
 */
5302 static int sdebug_driver_remove(struct device * dev)
5304 struct sdebug_host_info *sdbg_host;
5305 struct sdebug_dev_info *sdbg_devinfo, *tmp;
5307 sdbg_host = to_sdebug_host(dev);
5310 printk(KERN_ERR "%s: Unable to locate host info\n",
5315 scsi_remove_host(sdbg_host->shost);
5317 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5319 list_del(&sdbg_devinfo->dev_list);
5320 kfree(sdbg_devinfo);
5323 scsi_host_put(sdbg_host->shost);
/* Bus match callback: all devices on the pseudo bus match this driver
 * (body elided in this view). */
5327 static int pseudo_lld_bus_match(struct device *dev,
5328 struct device_driver *dev_driver)
5333 static struct bus_type pseudo_lld_bus = {
5335 .match = pseudo_lld_bus_match,
5336 .probe = sdebug_driver_probe,
5337 .remove = sdebug_driver_remove,
5338 .drv_groups = sdebug_drv_groups,