2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disks
10 * (or disk like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/atomic.h>
48 #include <linux/hrtimer.h>
50 #include <net/checksum.h>
52 #include <asm/unaligned.h>
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsicam.h>
59 #include <scsi/scsi_eh.h>
60 #include <scsi/scsi_tcq.h>
61 #include <scsi/scsi_dbg.h>
64 #include "scsi_logging.h"
66 #define SCSI_DEBUG_VERSION "1.85"
67 static const char *scsi_debug_version_date = "20141022";
69 #define MY_NAME "scsi_debug"
71 /* Additional Sense Code (ASC) */
72 #define NO_ADDITIONAL_SENSE 0x0
73 #define LOGICAL_UNIT_NOT_READY 0x4
74 #define UNRECOVERED_READ_ERR 0x11
75 #define PARAMETER_LIST_LENGTH_ERR 0x1a
76 #define INVALID_OPCODE 0x20
77 #define INVALID_COMMAND_OPCODE 0x20
78 #define LBA_OUT_OF_RANGE 0x21
79 #define INVALID_FIELD_IN_CDB 0x24
80 #define INVALID_FIELD_IN_PARAM_LIST 0x26
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
95 /* Additional Sense Code Qualifier (ASCQ) */
96 #define ACK_NAK_TO 0x3
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST 1
101 #define DEF_NUM_TGTS 1
102 #define DEF_MAX_LUNS 1
103 /* With these defaults, this driver will make 1 host with 1 target
104 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
107 #define DEF_DELAY 1 /* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB 8
111 #define DEF_D_SENSE 0
112 #define DEF_EVERY_NTH 0
113 #define DEF_FAKE_RW 0
115 #define DEF_HOST_LOCK 0
118 #define DEF_LBPWS10 0
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0 0
123 #define DEF_NUM_PARTS 0
125 #define DEF_OPT_BLKS 64
126 #define DEF_PHYSBLK_EXP 0
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
132 #define DEF_UNMAP_ALIGNMENT 0
133 #define DEF_UNMAP_GRANULARITY 1
134 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
135 #define DEF_UNMAP_MAX_DESC 256
136 #define DEF_VIRTUAL_GB 0
137 #define DEF_VPD_USE_HOSTNO 1
138 #define DEF_WRITESAME_LENGTH 0xFFFF
139 #define DELAY_OVERRIDDEN -9999
141 /* bit mask values for scsi_debug_opts */
142 #define SCSI_DEBUG_OPT_NOISE 1
143 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
144 #define SCSI_DEBUG_OPT_TIMEOUT 4
145 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
146 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
147 #define SCSI_DEBUG_OPT_DIF_ERR 32
148 #define SCSI_DEBUG_OPT_DIX_ERR 64
149 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
150 #define SCSI_DEBUG_OPT_SHORT_TRANSFER 0x100
151 #define SCSI_DEBUG_OPT_Q_NOISE 0x200
152 #define SCSI_DEBUG_OPT_ALL_TSF 0x400
153 #define SCSI_DEBUG_OPT_RARE_TSF 0x800
154 #define SCSI_DEBUG_OPT_N_WCE 0x1000
155 #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
156 #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
157 #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
158 /* When "every_nth" > 0 then modulo "every_nth" commands:
159 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
160 * - a RECOVERED_ERROR is simulated on successful read and write
161 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
162 * - a TRANSPORT_ERROR is simulated on successful read and write
163 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
165 * When "every_nth" < 0 then after "- every_nth" commands:
166 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
167 * - a RECOVERED_ERROR is simulated on successful read and write
168 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
169 * - a TRANSPORT_ERROR is simulated on successful read and write
170 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
171 * This will continue until some other action occurs (e.g. the user
172 * writing a new value (other than -1 or 1) to every_nth via sysfs).
175 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
176 * priority order. In the subset implemented here lower numbers have higher
177 * priority. The UA numbers should be a sequence starting from 0 with
178 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
179 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
180 #define SDEBUG_UA_BUS_RESET 1
181 #define SDEBUG_UA_MODE_CHANGED 2
182 #define SDEBUG_NUM_UAS 3
184 /* for check_readiness() */
188 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
189 * sector on read commands: */
190 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
191 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
193 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
194 * or "peripheral device" addressing (value 0) */
195 #define SAM2_LUN_ADDRESS_METHOD 0
196 #define SAM2_WLUN_REPORT_LUNS 0xc101
198 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
199 * (for response) at one time. Can be reduced by max_queue option. Command
200 * responses are not queued when delay=0 and ndelay=0. The per-device
201 * DEF_CMD_PER_LUN can be changed via sysfs:
202 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
203 * SCSI_DEBUG_CANQUEUE. */
204 #define SCSI_DEBUG_CANQUEUE_WORDS 9 /* a WORD is bits in a long */
205 #define SCSI_DEBUG_CANQUEUE (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
206 #define DEF_CMD_PER_LUN 255
208 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
209 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
212 static int scsi_debug_add_host = DEF_NUM_HOST;
213 static int scsi_debug_ato = DEF_ATO;
214 static int scsi_debug_delay = DEF_DELAY;
215 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
216 static int scsi_debug_dif = DEF_DIF;
217 static int scsi_debug_dix = DEF_DIX;
218 static int scsi_debug_dsense = DEF_D_SENSE;
219 static int scsi_debug_every_nth = DEF_EVERY_NTH;
220 static int scsi_debug_fake_rw = DEF_FAKE_RW;
221 static unsigned int scsi_debug_guard = DEF_GUARD;
222 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
223 static int scsi_debug_max_luns = DEF_MAX_LUNS;
224 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
225 static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
226 static int scsi_debug_ndelay = DEF_NDELAY;
227 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
228 static int scsi_debug_no_uld = 0;
229 static int scsi_debug_num_parts = DEF_NUM_PARTS;
230 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
231 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
232 static int scsi_debug_opts = DEF_OPTS;
233 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
234 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
235 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
236 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
237 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
238 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
239 static unsigned int scsi_debug_lbpu = DEF_LBPU;
240 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
241 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
242 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
243 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
244 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
245 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
246 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
247 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
248 static bool scsi_debug_removable = DEF_REMOVABLE;
249 static bool scsi_debug_clustering;
250 static bool scsi_debug_host_lock = DEF_HOST_LOCK;
252 static atomic_t sdebug_cmnd_count;
253 static atomic_t sdebug_completions;
254 static atomic_t sdebug_a_tsf; /* counter of 'almost' TSFs */
256 #define DEV_READONLY(TGT) (0)
258 static unsigned int sdebug_store_sectors;
259 static sector_t sdebug_capacity; /* in sectors */
261 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
262 may still need them */
263 static int sdebug_heads; /* heads per disk */
264 static int sdebug_cylinders_per; /* cylinders per surface */
265 static int sdebug_sectors_per; /* sectors per cylinder */
267 #define SDEBUG_MAX_PARTS 4
269 #define SCSI_DEBUG_MAX_CMD_LEN 32
271 static unsigned int scsi_debug_lbp(void)
273 return ((0 == scsi_debug_fake_rw) &&
274 (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
/* Per-LUN state for one simulated logical unit.
 * NOTE(review): several members are elided in this excerpt
 * (target, lun, wlun, stopped, ...). */
277 struct sdebug_dev_info {
/* links this device into its host's dev_info_list */
278 struct list_head dev_list;
279 unsigned int channel;
/* back pointer to the owning simulated host adapter */
282 struct sdebug_host_info *sdbg_host;
/* bitmap of pending Unit Attentions; bit indices are the SDEBUG_UA_*
 * values, scanned (lowest bit first) by check_readiness() */
284 unsigned long uas_bm[1];
/* State for one simulated host adapter; all live instances are chained
 * on sdebug_host_list under sdebug_host_list_lock. */
290 struct sdebug_host_info {
/* links this adapter into the global sdebug_host_list */
291 struct list_head host_list;
/* the midlayer Scsi_Host this pseudo adapter registered */
292 struct Scsi_Host *shost;
/* all sdebug_dev_info devices belonging to this adapter */
294 struct list_head dev_info_list;
/* recover the sdebug_host_info from its embedded struct device */
297 #define to_sdebug_host(d) \
298 container_of(d, struct sdebug_host_info, dev)
300 static LIST_HEAD(sdebug_host_list);
301 static DEFINE_SPINLOCK(sdebug_host_list_lock);
304 struct sdebug_hrtimer { /* ... is derived from hrtimer */
305 struct hrtimer hrt; /* must be first element */
/* One slot per in-flight (delayed-response) command; a slot is live
 * when its bit is set in queued_in_use_bm[]. */
309 struct sdebug_queued_cmd {
310 /* in_use flagged by a bit in queued_in_use_bm[] */
/* jiffies-based timer used when delay > 0 */
311 struct timer_list *cmnd_timerp;
/* tasklet used for immediate (delay == 0) completion */
312 struct tasklet_struct *tletp;
/* hrtimer used when ndelay > 0 (nanosecond delays) */
313 struct sdebug_hrtimer *sd_hrtp;
/* the midlayer command this slot will eventually complete */
314 struct scsi_cmnd * a_cmnd;
/* fixed-size pool of response slots, and its occupancy bitmap */
316 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
317 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
320 static unsigned char * fake_storep; /* ramdisk storage */
321 static struct sd_dif_tuple *dif_storep; /* protection info */
322 static void *map_storep; /* provisioning map */
324 static unsigned long map_size;
325 static int num_aborts;
326 static int num_dev_resets;
327 static int num_target_resets;
328 static int num_bus_resets;
329 static int num_host_resets;
330 static int dix_writes;
331 static int dix_reads;
332 static int dif_errors;
334 static DEFINE_SPINLOCK(queued_arr_lock);
335 static DEFINE_RWLOCK(atomic_rw);
337 static char sdebug_proc_name[] = MY_NAME;
338 static const char *my_name = MY_NAME;
340 static struct bus_type pseudo_lld_bus;
342 static struct device_driver sdebug_driverfs_driver = {
343 .name = sdebug_proc_name,
344 .bus = &pseudo_lld_bus,
347 static const int check_condition_result =
348 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
350 static const int illegal_condition_result =
351 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
353 static const int device_qfull_result =
354 (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
356 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
357 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
359 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
361 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
364 static void *fake_store(unsigned long long lba)
366 lba = do_div(lba, sdebug_store_sectors);
368 return fake_storep + lba * scsi_debug_sector_size;
371 static struct sd_dif_tuple *dif_store(sector_t sector)
373 sector = do_div(sector, sdebug_store_sectors);
375 return dif_storep + sector;
378 static int sdebug_add_adapter(void);
379 static void sdebug_remove_adapter(void);
/* Re-derive max_id on every registered pseudo host from the
 * scsi_debug_num_tgts parameter, and (re)set the maximum LUN so the
 * REPORT LUNS well-known LUN stays addressable.
 * NOTE(review): the 'else' pairing lines 391/393 is elided here. */
381 static void sdebug_max_tgts_luns(void)
383 struct sdebug_host_info *sdbg_host;
384 struct Scsi_Host *hpnt;
386 spin_lock(&sdebug_host_list_lock);
387 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
388 hpnt = sdbg_host->shost;
/* presumably +1 leaves room for the adapter's own id — TODO confirm */
389 if ((hpnt->this_id >= 0) &&
390 (scsi_debug_num_tgts > hpnt->this_id))
391 hpnt->max_id = scsi_debug_num_tgts + 1;
393 hpnt->max_id = scsi_debug_num_tgts;
394 /* scsi_debug_max_luns; */
395 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
397 spin_unlock(&sdebug_host_list_lock);
400 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
402 /* Set in_bit to -1 to indicate no bit position of invalid field */
/* Build an ILLEGAL REQUEST "invalid field" sense in scp's sense buffer.
 * c_d selects the ASC: SDEB_IN_CDB -> INVALID_FIELD_IN_CDB, else
 * INVALID_FIELD_IN_PARAM_LIST. in_byte/in_bit locate the offending
 * field for the Sense Key Specific bytes (in_bit == -1: no bit pos).
 * NOTE(review): several lines (declarations, NULL guard return, sks[]
 * flag setup, fixed/descriptor branches) are elided in this excerpt. */
404 mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
405 int in_byte, int in_bit)
407 unsigned char *sbuff;
411 sbuff = scp->sense_buffer;
/* cannot report sense without a buffer; log and bail out */
413 sdev_printk(KERN_ERR, scp->device,
414 "%s: sense_buffer is NULL\n", __func__);
417 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
418 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
419 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
/* build the 3-byte Sense Key Specific (SKS) field */
421 memset(sks, 0, sizeof(sks));
427 sks[0] |= 0x7 & in_bit;
429 put_unaligned_be16(in_byte, sks + 1);
/* descriptor sense: SKS goes into its own descriptor; fixed sense:
 * SKS occupies bytes 15..17 */
430 if (scsi_debug_dsense) {
435 memcpy(sbuff + sl + 4, sks, 3);
437 memcpy(sbuff + 15, sks, 3);
438 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
439 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
440 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
441 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
/* Fill scp's sense buffer with the given key/asc/ascq, honouring the
 * dsense (descriptor format) module parameter; optionally log it when
 * the NOISE option is set. */
444 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
446 unsigned char *sbuff;
448 sbuff = scp->sense_buffer;
/* no sense buffer attached to this command; log and give up */
450 sdev_printk(KERN_ERR, scp->device,
451 "%s: sense_buffer is NULL\n", __func__);
454 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
456 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
458 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
459 sdev_printk(KERN_INFO, scp->device,
460 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
461 my_name, key, asc, asq);
465 mk_sense_invalid_opcode(struct scsi_cmnd *scp)
467 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
/* Decode the LBA, transfer length and (for long CDBs) the expected
 * initial logical block reference tag out of a READ/WRITE CDB.
 * Handles 32- (variable length), 16-, 12-, 10- and 6-byte layouts.
 * NOTE(review): most case labels and break statements are elided in
 * this excerpt. */
470 static void get_data_transfer_info(unsigned char *cmd,
471 unsigned long long *lba, unsigned int *num,
/* 32-byte variable-length CDB: LBA in bytes 12..19 (big endian),
 * expected initial LBA tag in 20..23, transfer length in 28..31 */
477 case VARIABLE_LENGTH_CMD:
478 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
479 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
480 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
481 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
483 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
484 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
486 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
/* 16-byte CDB: 64-bit LBA in bytes 2..9, length in 10..13 */
493 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
494 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
495 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
496 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
498 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
/* 12-byte CDB: 32-bit LBA in bytes 2..5, length in 6..9 */
503 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
506 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
/* 10-byte CDB: 32-bit LBA in bytes 2..5, 16-bit length in 7..8 */
513 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
516 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
/* 6-byte CDB: 21-bit LBA in bytes 1..3; length byte of 0 means 256 */
520 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
521 (u32)(cmd[1] & 0x1f) << 16;
522 *num = (0 == cmd[4]) ? 256 : cmd[4];
/* ioctl hook: only logs selected ioctl command codes when the NOISE
 * option is set; the actual handling is left to the midlayer (see the
 * retained comment about -ENOTTY below). */
529 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
531 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
533 sdev_printk(KERN_INFO, dev,
534 "%s: BLKFLSBUF [0x1261]\n", __func__);
535 else if (0x5331 == cmd)
536 sdev_printk(KERN_INFO, dev,
537 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
/* any other ioctl: just log its raw command code */
540 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
544 /* return -ENOTTY; // correct return but upsets fdisk */
/* Report the highest-priority pending Unit Attention (if any) on devip
 * as a CHECK CONDITION, consuming it; otherwise, when uas_only selects
 * the TEST UNIT READY style check and the unit is stopped, report NOT
 * READY. Returns 0 when ready, else check_condition_result. */
547 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
548 struct sdebug_dev_info * devip)
551 bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
/* lowest set bit == highest-priority UA (see SDEBUG_UA_* ordering) */
553 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
554 if (k != SDEBUG_NUM_UAS) {
555 const char *cp = NULL;
559 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
560 UA_RESET_ASC, POWER_ON_RESET_ASCQ);
562 cp = "power on reset";
564 case SDEBUG_UA_BUS_RESET:
565 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
566 UA_RESET_ASC, BUS_RESET_ASCQ);
570 case SDEBUG_UA_MODE_CHANGED:
571 mk_sense_buffer(SCpnt, UNIT_ATTENTION,
572 UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
574 cp = "mode parameters changed";
577 pr_warn("%s: unexpected unit attention code=%d\n",
/* a UA is a one-shot: consume it now that it has been reported */
583 clear_bit(k, devip->uas_bm);
585 sdev_printk(KERN_INFO, SCpnt->device,
586 "%s reports: Unit attention: %s\n",
588 return check_condition_result;
/* no UA pending: a stopped unit fails TUR-style readiness checks */
590 if ((UAS_TUR == uas_only) && devip->stopped) {
591 mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
594 sdev_printk(KERN_INFO, SCpnt->device,
595 "%s reports: Not ready: %s\n", my_name,
596 "initializing command required");
597 return check_condition_result;
602 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
/* Copy simulated "device" data in arr into the command's scatter list
 * (the device-to-initiator direction). */
603 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
607 struct scsi_data_buffer *sdb = scsi_in(scp);
/* only valid for commands that move data toward the initiator */
611 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
612 return (DID_ERROR << 16);
614 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
/* residual = bytes requested but not actually transferred */
616 sdb->resid = scsi_bufflen(scp) - act_len;
621 /* Returns number of bytes fetched into 'arr' or -1 if error. */
/* Copy up to arr_len bytes of the command's data-out payload into arr
 * (the initiator-to-device direction). */
622 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
/* zero-length transfer: nothing to fetch */
625 if (!scsi_bufflen(scp))
/* only valid for commands that move data toward the device */
627 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
630 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
634 static const char * inq_vendor_id = "Linux ";
635 static const char * inq_product_id = "scsi_debug ";
636 static const char *inq_product_rev = "0184"; /* version less '.' */
638 /* Device identification VPD page. Returns number of bytes placed in arr */
/* Emits a series of identification descriptors: T10 vendor id, NAA-5
 * LU id, relative target port, target port / port group / target
 * device NAA-5 ids, and a SCSI name string. NOTE(review): designator
 * header bytes between descriptors are partly elided in this excerpt. */
639 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
640 int target_dev_id, int dev_id_num,
641 const char * dev_id_str,
647 port_a = target_dev_id + 1;
648 /* T10 vendor identifier field format (faked) */
649 arr[0] = 0x2; /* ASCII */
652 memcpy(&arr[4], inq_vendor_id, 8);
653 memcpy(&arr[12], inq_product_id, 16);
654 memcpy(&arr[28], dev_id_str, dev_id_str_len);
655 num = 8 + 16 + dev_id_str_len;
/* dev_id_num < 0 means "no LU designators" (e.g. well-known LUN) */
658 if (dev_id_num >= 0) {
659 /* NAA-5, Logical unit identifier (binary) */
660 arr[num++] = 0x1; /* binary (not necessarily sas) */
661 arr[num++] = 0x3; /* PIV=0, lu, naa */
664 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
668 arr[num++] = (dev_id_num >> 24);
669 arr[num++] = (dev_id_num >> 16) & 0xff;
670 arr[num++] = (dev_id_num >> 8) & 0xff;
671 arr[num++] = dev_id_num & 0xff;
672 /* Target relative port number */
673 arr[num++] = 0x61; /* proto=sas, binary */
674 arr[num++] = 0x94; /* PIV=1, target port, rel port */
675 arr[num++] = 0x0; /* reserved */
676 arr[num++] = 0x4; /* length */
677 arr[num++] = 0x0; /* reserved */
678 arr[num++] = 0x0; /* reserved */
680 arr[num++] = 0x1; /* relative port A */
682 /* NAA-5, Target port identifier */
683 arr[num++] = 0x61; /* proto=sas, binary */
684 arr[num++] = 0x93; /* piv=1, target port, naa */
687 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
691 arr[num++] = (port_a >> 24);
692 arr[num++] = (port_a >> 16) & 0xff;
693 arr[num++] = (port_a >> 8) & 0xff;
694 arr[num++] = port_a & 0xff;
695 /* NAA-5, Target port group identifier */
696 arr[num++] = 0x61; /* proto=sas, binary */
697 arr[num++] = 0x95; /* piv=1, target port group id */
702 arr[num++] = (port_group_id >> 8) & 0xff;
703 arr[num++] = port_group_id & 0xff;
704 /* NAA-5, Target device identifier */
705 arr[num++] = 0x61; /* proto=sas, binary */
706 arr[num++] = 0xa3; /* piv=1, target device, naa */
709 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
713 arr[num++] = (target_dev_id >> 24);
714 arr[num++] = (target_dev_id >> 16) & 0xff;
715 arr[num++] = (target_dev_id >> 8) & 0xff;
716 arr[num++] = target_dev_id & 0xff;
717 /* SCSI name string: Target device identifier */
718 arr[num++] = 0x63; /* proto=sas, UTF-8 */
719 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
/* name string is "naa.52222220" + zero-padded hex of target_dev_id */
722 memcpy(arr + num, "naa.52222220", 12);
724 snprintf(b, sizeof(b), "%08X", target_dev_id);
725 memcpy(arr + num, b, 8);
727 memset(arr + num, 0, 4);
733 static unsigned char vpd84_data[] = {
734 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
735 0x22,0x22,0x22,0x0,0xbb,0x1,
736 0x22,0x22,0x22,0x0,0xbb,0x2,
739 /* Software interface identification VPD page */
740 static int inquiry_evpd_84(unsigned char * arr)
742 memcpy(arr, vpd84_data, sizeof(vpd84_data));
743 return sizeof(vpd84_data);
746 /* Management network addresses VPD page */
/* Emits two network-service descriptors (storage configuration and
 * logging URLs); each URL is NUL padded to a multiple of 4 bytes. */
747 static int inquiry_evpd_85(unsigned char * arr)
750 const char * na1 = "https://www.kernel.org/config";
751 const char * na2 = "http://www.kernel.org/log";
754 arr[num++] = 0x1; /* lu, storage config */
755 arr[num++] = 0x0; /* reserved */
/* round the padded length up to the next multiple of 4 */
760 plen = ((plen / 4) + 1) * 4;
761 arr[num++] = plen; /* length, null terminated, padded */
762 memcpy(arr + num, na1, olen);
763 memset(arr + num + olen, 0, plen - olen);
766 arr[num++] = 0x4; /* lu, logging */
767 arr[num++] = 0x0; /* reserved */
772 plen = ((plen / 4) + 1) * 4;
773 arr[num++] = plen; /* length, null terminated, padded */
774 memcpy(arr + num, na2, olen);
775 memset(arr + num + olen, 0, plen - olen);
781 /* SCSI ports VPD page */
/* Describes two relative target ports (A = primary, B = secondary),
 * each with a NAA-5 target port identifier derived from target_dev_id.
 * NOTE(review): the port_b assignment is elided in this excerpt. */
782 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
787 port_a = target_dev_id + 1;
789 arr[num++] = 0x0; /* reserved */
790 arr[num++] = 0x0; /* reserved */
792 arr[num++] = 0x1; /* relative port 1 (primary) */
793 memset(arr + num, 0, 6);
796 arr[num++] = 12; /* length tp descriptor */
797 /* naa-5 target port identifier (A) */
798 arr[num++] = 0x61; /* proto=sas, binary */
799 arr[num++] = 0x93; /* PIV=1, target port, NAA */
800 arr[num++] = 0x0; /* reserved */
801 arr[num++] = 0x8; /* length */
802 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
806 arr[num++] = (port_a >> 24);
807 arr[num++] = (port_a >> 16) & 0xff;
808 arr[num++] = (port_a >> 8) & 0xff;
809 arr[num++] = port_a & 0xff;
811 arr[num++] = 0x0; /* reserved */
812 arr[num++] = 0x0; /* reserved */
814 arr[num++] = 0x2; /* relative port 2 (secondary) */
815 memset(arr + num, 0, 6);
818 arr[num++] = 12; /* length tp descriptor */
819 /* naa-5 target port identifier (B) */
820 arr[num++] = 0x61; /* proto=sas, binary */
821 arr[num++] = 0x93; /* PIV=1, target port, NAA */
822 arr[num++] = 0x0; /* reserved */
823 arr[num++] = 0x8; /* length */
824 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
828 arr[num++] = (port_b >> 24);
829 arr[num++] = (port_b >> 16) & 0xff;
830 arr[num++] = (port_b >> 8) & 0xff;
831 arr[num++] = port_b & 0xff;
837 static unsigned char vpd89_data[] = {
838 /* from 4th byte */ 0,0,0,0,
839 'l','i','n','u','x',' ',' ',' ',
840 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
842 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
844 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
845 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
846 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
847 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
849 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
851 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
853 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
854 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
855 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
856 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
857 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
858 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
859 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
860 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
861 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
862 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
863 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
864 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
865 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
866 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
867 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
868 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
869 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
870 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
871 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
872 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
873 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
874 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
875 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
876 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
877 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
878 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
881 /* ATA Information VPD page */
882 static int inquiry_evpd_89(unsigned char * arr)
884 memcpy(arr, vpd89_data, sizeof(vpd89_data));
885 return sizeof(vpd89_data);
889 static unsigned char vpdb0_data[] = {
890 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
891 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
892 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
893 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
896 /* Block limits VPD page (SBC-3) */
/* Starts from the canned vpdb0_data template, then overwrites the
 * fields that depend on module parameters (granularity, transfer
 * lengths, unmap limits, WRITE SAME limit). */
897 static int inquiry_evpd_b0(unsigned char * arr)
901 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
903 /* Optimal transfer length granularity */
/* granularity = physical block size in logical blocks (2^physblk_exp) */
904 gran = 1 << scsi_debug_physblk_exp;
905 arr[2] = (gran >> 8) & 0xff;
906 arr[3] = gran & 0xff;
908 /* Maximum Transfer Length */
909 if (sdebug_store_sectors > 0x400) {
910 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
911 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
912 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
913 arr[7] = sdebug_store_sectors & 0xff;
916 /* Optimal Transfer Length */
917 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
/* the unmap limit fields only apply when UNMAP (lbpu) is enabled */
919 if (scsi_debug_lbpu) {
920 /* Maximum Unmap LBA Count */
921 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
923 /* Maximum Unmap Block Descriptor Count */
924 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
927 /* Unmap Granularity Alignment */
928 if (scsi_debug_unmap_alignment) {
929 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
930 arr[28] |= 0x80; /* UGAVALID */
933 /* Optimal Unmap Granularity */
934 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
936 /* Maximum WRITE SAME Length */
937 put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
939 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
941 return sizeof(vpdb0_data);
944 /* Block device characteristics VPD page (SBC-3) */
/* Advertises a fixed non-rotating (SSD-like), small form-factor device.
 * NOTE(review): the return statement is elided in this excerpt. */
945 static int inquiry_evpd_b1(unsigned char *arr)
947 memset(arr, 0, 0x3c);
949 arr[1] = 1; /* non rotating medium (e.g. solid state) */
951 arr[3] = 5; /* less than 1.8" */
956 /* Logical block provisioning VPD page (SBC-3) */
/* Reports which provisioning mechanisms (UNMAP / WRITE SAME variants /
 * read-zeros-after-unmap) the simulated device claims, driven by the
 * lbpu/lbpws/lbpws10/lbprz module parameters. NOTE(review): the flag
 * assignments guarded by these ifs are elided in this excerpt. */
957 static int inquiry_evpd_b2(unsigned char *arr)
960 arr[0] = 0; /* threshold exponent */
965 if (scsi_debug_lbpws)
968 if (scsi_debug_lbpws10)
971 if (scsi_debug_lbprz)
977 #define SDEBUG_LONG_INQ_SZ 96
978 #define SDEBUG_MAX_INQ_ARR_SZ 584
/* Respond to the INQUIRY command: either one of the supported EVPD
 * (vital product data) pages when the EVPD bit is set, or the standard
 * 96-byte inquiry response. Allocates a scratch buffer, builds the
 * response, and copies it to the initiator via fill_from_dev_buffer().
 * Returns 0, check_condition_result, or a DID_* host byte code.
 * NOTE(review): numerous lines (kzalloc NULL test, kfree/return tail,
 * several assignments) are elided in this excerpt. */
980 static int resp_inquiry(struct scsi_cmnd *scp, int target,
981 struct sdebug_dev_info * devip)
983 unsigned char pq_pdt;
985 unsigned char *cmd = scp->cmnd;
986 int alloc_len, n, ret;
/* allocation length from CDB bytes 3..4 (big endian) */
988 alloc_len = (cmd[3] << 8) + cmd[4];
989 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
991 return DID_REQUEUE << 16;
/* peripheral qualifier / device type byte */
993 pq_pdt = 0x1e; /* present, wlun */
994 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
995 pq_pdt = 0x7f; /* not present, no device type */
997 pq_pdt = (scsi_debug_ptype & 0x1f);
/* CMDDT is obsolete in SPC-4; reject it as an invalid CDB field */
999 if (0x2 & cmd[1]) { /* CMDDT bit set */
1000 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1002 return check_condition_result;
1003 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1004 int lu_id_num, port_group_id, target_dev_id, len;
1006 int host_no = devip->sdbg_host->shost->host_no;
/* synthesize stable-looking ids from host/channel/target/lun */
1008 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1009 (devip->channel & 0x7f);
1010 if (0 == scsi_debug_vpd_use_hostno)
1012 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
1013 (devip->target * 1000) + devip->lun);
1014 target_dev_id = ((host_no + 1) * 2000) +
1015 (devip->target * 1000) - 3;
1016 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1017 if (0 == cmd[2]) { /* supported vital product data pages */
1018 arr[1] = cmd[2]; /*sanity */
1020 arr[n++] = 0x0; /* this page */
1021 arr[n++] = 0x80; /* unit serial number */
1022 arr[n++] = 0x83; /* device identification */
1023 arr[n++] = 0x84; /* software interface ident. */
1024 arr[n++] = 0x85; /* management network addresses */
1025 arr[n++] = 0x86; /* extended inquiry */
1026 arr[n++] = 0x87; /* mode page policy */
1027 arr[n++] = 0x88; /* SCSI ports */
1028 arr[n++] = 0x89; /* ATA information */
1029 arr[n++] = 0xb0; /* Block limits (SBC) */
1030 arr[n++] = 0xb1; /* Block characteristics (SBC) */
1031 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1033 arr[3] = n - 4; /* number of supported VPD pages */
1034 } else if (0x80 == cmd[2]) { /* unit serial number */
1035 arr[1] = cmd[2]; /*sanity */
1037 memcpy(&arr[4], lu_id_str, len);
1038 } else if (0x83 == cmd[2]) { /* device identification */
1039 arr[1] = cmd[2]; /*sanity */
1040 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
1041 target_dev_id, lu_id_num,
1043 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1044 arr[1] = cmd[2]; /*sanity */
1045 arr[3] = inquiry_evpd_84(&arr[4]);
1046 } else if (0x85 == cmd[2]) { /* Management network addresses */
1047 arr[1] = cmd[2]; /*sanity */
1048 arr[3] = inquiry_evpd_85(&arr[4]);
1049 } else if (0x86 == cmd[2]) { /* extended inquiry */
1050 arr[1] = cmd[2]; /*sanity */
1051 arr[3] = 0x3c; /* number of following entries */
1052 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
1053 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1054 else if (scsi_debug_dif)
1055 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1057 arr[4] = 0x0; /* no protection stuff */
1058 arr[5] = 0x7; /* head of q, ordered + simple q's */
1059 } else if (0x87 == cmd[2]) { /* mode page policy */
1060 arr[1] = cmd[2]; /*sanity */
1061 arr[3] = 0x8; /* number of following entries */
1062 arr[4] = 0x2; /* disconnect-reconnect mp */
1063 arr[6] = 0x80; /* mlus, shared */
1064 arr[8] = 0x18; /* protocol specific lu */
1065 arr[10] = 0x82; /* mlus, per initiator port */
1066 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1067 arr[1] = cmd[2]; /*sanity */
1068 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1069 } else if (0x89 == cmd[2]) { /* ATA information */
1070 arr[1] = cmd[2]; /*sanity */
1071 n = inquiry_evpd_89(&arr[4]);
1073 arr[3] = (n & 0xff);
1074 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1075 arr[1] = cmd[2]; /*sanity */
1076 arr[3] = inquiry_evpd_b0(&arr[4]);
1077 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1078 arr[1] = cmd[2]; /*sanity */
1079 arr[3] = inquiry_evpd_b1(&arr[4]);
1080 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1081 arr[1] = cmd[2]; /*sanity */
1082 arr[3] = inquiry_evpd_b2(&arr[4]);
/* unknown VPD page code: invalid field in CDB byte 2 */
1084 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1086 return check_condition_result;
/* page length field + 4-byte header, clipped to allocation length */
1088 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1089 ret = fill_from_dev_buffer(scp, arr,
1090 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1094 /* drops through here for a standard inquiry */
1095 arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
1096 arr[2] = scsi_debug_scsi_level;
1097 arr[3] = 2; /* response_data_format==2 */
1098 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1099 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
1100 if (0 == scsi_debug_vpd_use_hostno)
1101 arr[5] = 0x10; /* claim: implicit TGPS */
1102 arr[6] = 0x10; /* claim: MultiP */
1103 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1104 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1105 memcpy(&arr[8], inq_vendor_id, 8);
1106 memcpy(&arr[16], inq_product_id, 16);
1107 memcpy(&arr[32], inq_product_rev, 4);
1108 /* version descriptors (2 bytes each) follow */
1109 arr[58] = 0x0; arr[59] = 0xa2; /* SAM-5 rev 4 */
1110 arr[60] = 0x4; arr[61] = 0x68; /* SPC-4 rev 37 */
/* command-set descriptor depends on the simulated peripheral type */
1112 if (scsi_debug_ptype == 0) {
1113 arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1114 } else if (scsi_debug_ptype == 1) {
1115 arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1117 arr[n++] = 0x20; arr[n++] = 0xe6; /* SPL-3 rev 7 */
1118 ret = fill_from_dev_buffer(scp, arr,
1119 min(alloc_len, SDEBUG_LONG_INQ_SZ));
/*
 * Respond to the REQUEST SENSE command.  When the Informational
 * Exceptions mode page has TEST set with MRIE==6, synthesize a
 * THRESHOLD_EXCEEDED sense response; otherwise return a copy of the
 * command's current sense buffer, converting fixed-format sense to
 * descriptor format when the CDB's DESC bit (cmd[1] bit 0) asks for it.
 * NOTE(review): several lines of this function are elided in this view;
 * comments below describe only the visible code.
 */
1124 static int resp_requests(struct scsi_cmnd * scp,
1125 struct sdebug_dev_info * devip)
1127 unsigned char * sbuff;
1128 unsigned char *cmd = scp->cmnd;
1129 unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1133 memset(arr, 0, sizeof(arr));
/* DESC bit in the CDB or the module's dsense option selects descriptor format */
1134 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
1135 sbuff = scp->sense_buffer;
/* IE mode page: TEST bit set and MRIE field == 6 -> fabricate sense */
1136 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1139 arr[1] = 0x0; /* NO_SENSE in sense_key */
1140 arr[2] = THRESHOLD_EXCEEDED;
1141 arr[3] = 0xff; /* TEST set and MRIE==6 */
1144 arr[2] = 0x0; /* NO_SENSE in sense_key */
1145 arr[7] = 0xa; /* 18 byte sense buffer */
1146 arr[12] = THRESHOLD_EXCEEDED;
1147 arr[13] = 0xff; /* TEST set and MRIE==6 */
1150 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1151 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
1152 /* DESC bit set and sense_buff in fixed format */
1153 memset(arr, 0, sizeof(arr));
/* translate fixed-format key/asc/ascq into descriptor header bytes */
1155 arr[1] = sbuff[2]; /* sense key */
1156 arr[2] = sbuff[12]; /* asc */
1157 arr[3] = sbuff[13]; /* ascq */
/* sense has been reported; clear it so the next REQUEST SENSE sees none */
1161 mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1162 return fill_from_dev_buffer(scp, arr, len);
/*
 * Respond to the START STOP UNIT command.  Rejects the (unsupported)
 * power-condition field with ILLEGAL REQUEST sense on CDB byte 4 and
 * otherwise toggles the device's simulated stopped state.
 */
1165 static int resp_start_stop(struct scsi_cmnd * scp,
1166 struct sdebug_dev_info * devip)
1168 unsigned char *cmd = scp->cmnd;
1169 int power_cond, errsts, start;
1171 errsts = check_readiness(scp, UAS_ONLY, devip);
/* POWER CONDITION field is the high nibble of CDB byte 4 */
1174 power_cond = (cmd[4] & 0xf0) >> 4;
1176 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1177 return check_condition_result;
/* flip stopped state only when the requested state differs */
1180 if (start == devip->stopped)
1181 devip->stopped = !start;
/*
 * Return the simulated medium capacity in sectors.  If the virtual_gb
 * module parameter is positive it overrides the backing-store size
 * (1073741824 == 1 GiB in bytes, divided by the sector size).
 */
1185 static sector_t get_sdebug_capacity(void)
1187 if (scsi_debug_virtual_gb > 0)
1188 return (sector_t)scsi_debug_virtual_gb *
1189 (1073741824 / scsi_debug_sector_size);
1191 return sdebug_store_sectors;
1194 #define SDEBUG_READCAP_ARR_SZ 8
/*
 * Respond to READ CAPACITY(10): 8-byte big-endian response of the last
 * LBA followed by the sector size.  A capacity of 0xffffffff or more is
 * reported as all-ones (not visible in this view), telling the initiator
 * to use READ CAPACITY(16) instead.
 */
1195 static int resp_readcap(struct scsi_cmnd * scp,
1196 struct sdebug_dev_info * devip)
1198 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1202 errsts = check_readiness(scp, UAS_ONLY, devip);
1205 /* following just in case virtual_gb changed */
1206 sdebug_capacity = get_sdebug_capacity();
1207 memset(arr, 0, SDEBUG_READCAP_ARR_SZ)
1208 if (sdebug_capacity < 0xffffffff) {
/* response carries the LAST addressable LBA, hence the "- 1" */
1209 capac = (unsigned int)sdebug_capacity - 1;
1210 arr[0] = (capac >> 24);
1211 arr[1] = (capac >> 16) & 0xff;
1212 arr[2] = (capac >> 8) & 0xff;
1213 arr[3] = capac & 0xff;
/* bytes 4..7: logical block length in bytes, big-endian */
1220 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1221 arr[7] = scsi_debug_sector_size & 0xff;
1222 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1225 #define SDEBUG_READCAP16_ARR_SZ 32
/*
 * Respond to READ CAPACITY(16) (SERVICE ACTION IN).  Returns the 64-bit
 * last LBA, sector size, physical-block exponent, lowest-aligned LBA,
 * logical block provisioning bits (LBPME/LBPRZ) and, when DIF is
 * enabled, the protection type (P_TYPE) and PROT_EN bit.
 */
1226 static int resp_readcap16(struct scsi_cmnd * scp,
1227 struct sdebug_dev_info * devip)
1229 unsigned char *cmd = scp->cmnd;
1230 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1231 unsigned long long capac;
1232 int errsts, k, alloc_len;
1234 errsts = check_readiness(scp, UAS_ONLY, devip);
/* ALLOCATION LENGTH is big-endian in CDB bytes 10..13 */
1237 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1239 /* following just in case virtual_gb changed */
1240 sdebug_capacity = get_sdebug_capacity();
1241 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
/* bytes 0..7: last addressable LBA, big-endian */
1242 capac = sdebug_capacity - 1;
1243 for (k = 0; k < 8; ++k, capac >>= 8)
1244 arr[7 - k] = capac & 0xff;
/* bytes 8..11: logical block length in bytes */
1245 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1246 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1247 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1248 arr[11] = scsi_debug_sector_size & 0xff;
1249 arr[13] = scsi_debug_physblk_exp & 0xf;
1250 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1252 if (scsi_debug_lbp()) {
1253 arr[14] |= 0x80; /* LBPME */
1254 if (scsi_debug_lbprz)
1255 arr[14] |= 0x40; /* LBPRZ */
1258 arr[15] = scsi_debug_lowest_aligned & 0xff;
1260 if (scsi_debug_dif) {
/* P_TYPE encodes DIF type 1/2/3 as 0/1/2 in bits 3:1 */
1261 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1262 arr[12] |= 1; /* PROT_EN */
1265 return fill_from_dev_buffer(scp, arr,
1266 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1269 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
/*
 * Respond to REPORT TARGET PORT GROUPS.  Builds two port groups of one
 * relative port each (matching the two-port claim of INQUIRY VPD page
 * 0x88): group A active, group B permanently unavailable.  Group ids
 * are derived from the host number and channel so each simulated host
 * reports distinct groups.
 */
1271 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1272 struct sdebug_dev_info * devip)
1274 unsigned char *cmd = scp->cmnd;
1275 unsigned char * arr;
1276 int host_no = devip->sdbg_host->shost->host_no;
1277 int n, ret, alen, rlen;
1278 int port_group_a, port_group_b, port_a, port_b;
/* ALLOCATION LENGTH: big-endian CDB bytes 6..9 */
1280 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
/* GFP_ATOMIC: may be called from non-sleeping context */
1283 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1285 return DID_REQUEUE << 16;
1287 * EVPD page 0x88 states we have two ports, one
1288 * real and a fake port with no device connected.
1289 * So we create two port groups with one port each
1290 * and set the group with port B to unavailable.
1292 port_a = 0x1; /* relative port A */
1293 port_b = 0x2; /* relative port B */
1294 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1295 (devip->channel & 0x7f);
1296 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1297 (devip->channel & 0x7f) + 0x80;
1300 * The asymmetric access state is cycled according to the host_id.
1303 if (0 == scsi_debug_vpd_use_hostno) {
1304 arr[n++] = host_no % 3; /* Asymm access state */
1305 arr[n++] = 0x0F; /* claim: all states are supported */
1307 arr[n++] = 0x0; /* Active/Optimized path */
1308 arr[n++] = 0x01; /* claim: only support active/optimized paths */
/* --- target port group descriptor for group A --- */
1310 arr[n++] = (port_group_a >> 8) & 0xff;
1311 arr[n++] = port_group_a & 0xff;
1312 arr[n++] = 0; /* Reserved */
1313 arr[n++] = 0; /* Status code */
1314 arr[n++] = 0; /* Vendor unique */
1315 arr[n++] = 0x1; /* One port per group */
1316 arr[n++] = 0; /* Reserved */
1317 arr[n++] = 0; /* Reserved */
1318 arr[n++] = (port_a >> 8) & 0xff;
1319 arr[n++] = port_a & 0xff;
/* --- target port group descriptor for group B (unavailable) --- */
1320 arr[n++] = 3; /* Port unavailable */
1321 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1322 arr[n++] = (port_group_b >> 8) & 0xff;
1323 arr[n++] = port_group_b & 0xff;
1324 arr[n++] = 0; /* Reserved */
1325 arr[n++] = 0; /* Status code */
1326 arr[n++] = 0; /* Vendor unique */
1327 arr[n++] = 0x1; /* One port per group */
1328 arr[n++] = 0; /* Reserved */
1329 arr[n++] = 0; /* Reserved */
1330 arr[n++] = (port_b >> 8) & 0xff;
1331 arr[n++] = port_b & 0xff;
/* bytes 0..3: RETURN DATA LENGTH, big-endian */
1334 arr[0] = (rlen >> 24) & 0xff;
1335 arr[1] = (rlen >> 16) & 0xff;
1336 arr[2] = (rlen >> 8) & 0xff;
1337 arr[3] = rlen & 0xff;
1340 * Return the smallest value of either
1341 * - The allocated length
1342 * - The constructed command length
1343 * - The maximum array size
1346 ret = fill_from_dev_buffer(scp, arr,
1347 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1352 /* <<Following mode page info copied from ST318451LW>> */
/*
 * Build the Read-Write Error Recovery mode page (0x1) at p.
 * pcontrol selects current/changeable/default values; the changeable
 * mask reported here is all-zero (nothing is changeable).
 * Returns the page length in bytes.
 */
1354 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1355 { /* Read-Write Error Recovery page for mode_sense */
1356 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1359 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
/* changeable-values request: zero everything after the 2-byte header */
1361 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1362 return sizeof(err_recov_pg);
/*
 * Build the Disconnect-Reconnect mode page (0x2) at p.  No field is
 * reported changeable.  Returns the page length in bytes.
 */
1365 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1366 { /* Disconnect-Reconnect page for mode_sense */
1367 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1368 0, 0, 0, 0, 0, 0, 0, 0};
1370 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
/* changeable-values request: zero everything after the 2-byte header */
1372 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1373 return sizeof(disconnect_pg);
/*
 * Build the Format Device mode page (0x3) at p, filling in the
 * sectors-per-track and bytes-per-sector from the module's geometry.
 * The removable-media bit is set to agree with the INQUIRY response.
 * Returns the page length in bytes.
 */
1376 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1377 { /* Format device page for mode_sense */
1378 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1379 0, 0, 0, 0, 0, 0, 0, 0,
1380 0, 0, 0, 0, 0x40, 0, 0, 0};
1382 memcpy(p, format_pg, sizeof(format_pg));
/* bytes 10..11: sectors per track; bytes 12..13: data bytes per sector */
1383 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1384 p[11] = sdebug_sectors_per & 0xff;
1385 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1386 p[13] = scsi_debug_sector_size & 0xff;
1387 if (scsi_debug_removable)
1388 p[20] |= 0x20; /* should agree with INQUIRY */
/* changeable-values request: zero everything after the 2-byte header */
1390 memset(p + 2, 0, sizeof(format_pg) - 2);
1391 return sizeof(format_pg);
/*
 * Build the Caching mode page (0x8) at p from the file-scope caching_pg
 * (current values, honoring the N_WCE option which forces WCE off).
 * pcontrol==1 returns the changeable mask, pcontrol==2 the defaults.
 * Returns the page length in bytes.
 */
1394 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1395 { /* Caching page for mode_sense */
1396 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1397 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1398 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1399 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1401 if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
1402 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
1403 memcpy(p, caching_pg, sizeof(caching_pg));
/* changeable mask deliberately skips the 2-byte page header */
1405 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1406 else if (2 == pcontrol)
1407 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1408 return sizeof(caching_pg);
/*
 * Build the Control mode page (0xa) at p from the file-scope ctrl_m_pg,
 * first syncing its D_SENSE bit with scsi_debug_dsense and its ATO bit
 * (application tag owner) when appropriate.  pcontrol==1 returns the
 * changeable mask, pcontrol==2 the defaults.  Returns the page length.
 */
1411 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1412 { /* Control mode page for mode_sense */
1413 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1415 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
/* keep the advertised D_SENSE bit in step with the module option */
1418 if (scsi_debug_dsense)
1419 ctrl_m_pg[2] |= 0x4;
1421 ctrl_m_pg[2] &= ~0x4;
1424 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1426 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
/* changeable mask deliberately skips the 2-byte page header */
1428 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1429 else if (2 == pcontrol)
1430 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1431 return sizeof(ctrl_m_pg);
/*
 * Build the Informational Exceptions Control mode page (0x1c) at p from
 * the file-scope iec_m_pg.  pcontrol==1 returns the changeable mask,
 * pcontrol==2 the defaults.  Returns the page length in bytes.
 */
1435 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1436 { /* Informational Exceptions control mode page for mode_sense */
1437 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1439 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1442 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
/* changeable mask deliberately skips the 2-byte page header */
1444 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1445 else if (2 == pcontrol)
1446 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1447 return sizeof(iec_m_pg);
/*
 * Build the SAS SSP protocol-specific port mode page (0x19), short
 * format, at p.  Nothing is reported changeable.  Returns page length.
 */
1450 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1451 { /* SAS SSP mode page - short format for mode_sense */
1452 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1453 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1455 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
/* changeable-values request: zero everything after the 2-byte header */
1457 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1458 return sizeof(sas_sf_m_pg);
/*
 * Build the SAS Phy Control And Discover mode subpage (0x19/0x1) at p.
 * Two phy descriptors are emitted (48 bytes apart); each gets a SAS
 * address derived from target_dev_id so ports A and B differ by one.
 * Returns the subpage length in bytes.
 */
1462 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1464 { /* SAS phy control and discover mode page for mode_sense */
1465 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1466 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1467 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1468 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1469 0x2, 0, 0, 0, 0, 0, 0, 0,
1470 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1471 0, 0, 0, 0, 0, 0, 0, 0,
1472 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1473 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1474 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1475 0x3, 0, 0, 0, 0, 0, 0, 0,
1476 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1477 0, 0, 0, 0, 0, 0, 0, 0,
/* derive two adjacent port addresses from the target device id */
1481 port_a = target_dev_id + 1;
1482 port_b = port_a + 1;
1483 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
/* patch the low 32 bits of each phy's SAS address, big-endian */
1484 p[20] = (port_a >> 24);
1485 p[21] = (port_a >> 16) & 0xff;
1486 p[22] = (port_a >> 8) & 0xff;
1487 p[23] = port_a & 0xff;
1488 p[48 + 20] = (port_b >> 24);
1489 p[48 + 21] = (port_b >> 16) & 0xff;
1490 p[48 + 22] = (port_b >> 8) & 0xff;
1491 p[48 + 23] = port_b & 0xff;
/* changeable-values request: zero everything after the 4-byte subpage header */
1493 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1494 return sizeof(sas_pcd_m_pg);
/*
 * Build the SAS SSP shared protocol-specific port mode subpage
 * (0x19/0x2) at p.  Nothing is reported changeable.  Returns length.
 */
1497 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1498 { /* SAS SSP shared protocol specific port mode subpage */
1499 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1500 0, 0, 0, 0, 0, 0, 0, 0,
1503 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
/* changeable-values request: zero everything after the 4-byte subpage header */
1505 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1506 return sizeof(sas_sha_m_pg);
1509 #define SDEBUG_MAX_MSENSE_SZ 256
/*
 * Respond to MODE SENSE(6) and MODE SENSE(10).  Decodes DBD/LLBAA/
 * PC/page/subpage from the CDB, optionally prepends an 8- or 16-byte
 * block descriptor for disk-type devices, then dispatches to the
 * resp_*_pg() helpers to build the requested page(s) at ap.
 * PC==3 (saved values) is rejected with SAVING_PARAMS_UNSUP sense.
 */
1511 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1512 struct sdebug_dev_info * devip)
1514 unsigned char dbd, llbaa;
1515 int pcontrol, pcode, subpcode, bd_len;
1516 unsigned char dev_spec;
1517 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1519 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1520 unsigned char *cmd = scp->cmnd;
1522 errsts = check_readiness(scp, UAS_ONLY, devip);
1525 dbd = !!(cmd[1] & 0x8);
1526 pcontrol = (cmd[2] & 0xc0) >> 6;
1527 pcode = cmd[2] & 0x3f;
/* LLBAA (16-byte block descriptor) only exists in the 10-byte CDB */
1529 msense_6 = (MODE_SENSE == cmd[0]);
1530 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
/* only disk-type devices without DBD get a block descriptor */
1531 if ((0 == scsi_debug_ptype) && (0 == dbd))
1532 bd_len = llbaa ? 16 : 8;
1535 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1536 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1537 if (0x3 == pcontrol) { /* Saving values not supported */
1538 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1539 return check_condition_result;
/* same id scheme as used when constructing VPD page 0x83 etc. */
1541 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1542 (devip->target * 1000) - 3;
1543 /* set DPOFUA bit for disks */
1544 if (0 == scsi_debug_ptype)
1545 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1555 arr[4] = 0x1; /* set LONGLBA bit */
1556 arr[7] = bd_len; /* assume 255 or less */
/* lazily initialize the capacity needed for the block descriptor */
1560 if ((bd_len > 0) && (!sdebug_capacity))
1561 sdebug_capacity = get_sdebug_capacity();
1564 if (sdebug_capacity > 0xfffffffe) {
/* 8-byte descriptor: 32-bit block count then block length */
1570 ap[0] = (sdebug_capacity >> 24) & 0xff;
1571 ap[1] = (sdebug_capacity >> 16) & 0xff;
1572 ap[2] = (sdebug_capacity >> 8) & 0xff;
1573 ap[3] = sdebug_capacity & 0xff;
1575 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1576 ap[7] = scsi_debug_sector_size & 0xff;
1579 } else if (16 == bd_len) {
/* 16-byte (LONGLBA) descriptor: 64-bit count, 32-bit block length */
1580 unsigned long long capac = sdebug_capacity;
1582 for (k = 0; k < 8; ++k, capac >>= 8)
1583 ap[7 - k] = capac & 0xff;
1584 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1585 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1586 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1587 ap[15] = scsi_debug_sector_size & 0xff;
/* only page 0x19 supports subpages here; reject others */
1592 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1593 /* TODO: Control Extension page */
1594 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1595 return check_condition_result;
1598 case 0x1: /* Read-Write error recovery page, direct access */
1599 len = resp_err_recov_pg(ap, pcontrol, target);
1602 case 0x2: /* Disconnect-Reconnect page, all devices */
1603 len = resp_disconnect_pg(ap, pcontrol, target);
1606 case 0x3: /* Format device page, direct access */
1607 len = resp_format_pg(ap, pcontrol, target);
1610 case 0x8: /* Caching page, direct access */
1611 len = resp_caching_pg(ap, pcontrol, target);
1614 case 0xa: /* Control Mode page, all devices */
1615 len = resp_ctrl_m_pg(ap, pcontrol, target);
1618 case 0x19: /* if spc==1 then sas phy, control+discover */
1619 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1620 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1621 return check_condition_result;
/* subpage 0xff means "all subpages" */
1624 if ((0x0 == subpcode) || (0xff == subpcode))
1625 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1626 if ((0x1 == subpcode) || (0xff == subpcode))
1627 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1629 if ((0x2 == subpcode) || (0xff == subpcode))
1630 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1633 case 0x1c: /* Informational Exceptions Mode page, all devices */
1634 len = resp_iec_m_pg(ap, pcontrol, target);
1637 case 0x3f: /* Read all Mode pages */
1638 if ((0 == subpcode) || (0xff == subpcode)) {
1639 len = resp_err_recov_pg(ap, pcontrol, target);
1640 len += resp_disconnect_pg(ap + len, pcontrol, target);
1641 len += resp_format_pg(ap + len, pcontrol, target);
1642 len += resp_caching_pg(ap + len, pcontrol, target);
1643 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1644 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1645 if (0xff == subpcode) {
1646 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1647 target, target_dev_id);
1648 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1650 len += resp_iec_m_pg(ap + len, pcontrol, target);
1652 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1653 return check_condition_result;
1658 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
1659 return check_condition_result;
/* fill in the mode data length header; 1 byte for msense_6, 2 for 10 */
1662 arr[0] = offset - 1;
1664 arr[0] = ((offset - 2) >> 8) & 0xff;
1665 arr[1] = (offset - 2) & 0xff;
1667 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1670 #define SDEBUG_MAX_MSELECT_SZ 512
/*
 * Respond to MODE SELECT(6) and MODE SELECT(10).  Fetches the
 * parameter list, validates its header and single mode page, and
 * updates the corresponding file-scope current-values page (caching,
 * control, or informational exceptions).  Successful changes raise the
 * MODE_CHANGED unit attention on the device.
 */
1672 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1673 struct sdebug_dev_info * devip)
1675 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1676 int param_len, res, errsts, mpage;
1677 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1678 unsigned char *cmd = scp->cmnd;
1680 errsts = check_readiness(scp, UAS_ONLY, devip);
1683 memset(arr, 0, sizeof(arr));
1686 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
/* require PF set, SP clear, and a parameter list that fits our buffer */
1687 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1688 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
1689 return check_condition_result;
1691 res = fetch_to_dev_buffer(scp, arr, param_len);
1693 return (DID_ERROR << 16);
1694 else if ((res < param_len) &&
1695 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1696 sdev_printk(KERN_INFO, scp->device,
1697 "%s: cdb indicated=%d, IO sent=%d bytes\n",
1698 __func__, param_len, res);
/* header layout differs between the 6- and 10-byte variants */
1699 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1700 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1702 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
1703 return check_condition_result;
/* off = start of the first (and only) mode page after the block descriptor */
1705 off = bd_len + (mselect6 ? 4 : 8);
1706 mpage = arr[off] & 0x3f;
1707 ps = !!(arr[off] & 0x80);
/* PS bit must be zero in MODE SELECT data */
1709 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
1710 return check_condition_result;
/* SPF selects the long (subpage) page format with a 2-byte length */
1712 spf = !!(arr[off] & 0x40);
1713 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1715 if ((pg_len + off) > param_len) {
1716 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1717 PARAMETER_LIST_LENGTH_ERR, 0);
1718 return check_condition_result;
1721 case 0x8: /* Caching Mode page */
/* only accept the page if its length byte matches ours */
1722 if (caching_pg[1] == arr[off + 1]) {
1723 memcpy(caching_pg + 2, arr + off + 2,
1724 sizeof(caching_pg) - 2);
1725 goto set_mode_changed_ua;
1728 case 0xa: /* Control Mode page */
1729 if (ctrl_m_pg[1] == arr[off + 1]) {
1730 memcpy(ctrl_m_pg + 2, arr + off + 2,
1731 sizeof(ctrl_m_pg) - 2);
/* track the D_SENSE bit into the module-wide dsense option */
1732 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1733 goto set_mode_changed_ua;
1736 case 0x1c: /* Informational Exceptions Mode page */
1737 if (iec_m_pg[1] == arr[off + 1]) {
1738 memcpy(iec_m_pg + 2, arr + off + 2,
1739 sizeof(iec_m_pg) - 2);
1740 goto set_mode_changed_ua;
1746 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
1747 return check_condition_result;
1748 set_mode_changed_ua:
1749 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
/*
 * Build the Temperature log page (0xd) parameters at arr: current
 * temperature (38 C) and reference temperature (65 C).
 * Returns the number of bytes written.
 */
1753 static int resp_temp_l_pg(unsigned char * arr)
1755 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1756 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1759 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1760 return sizeof(temp_l_pg);
/*
 * Build the Informational Exceptions log page (0x2f) parameters at arr.
 * When the IE mode page TEST bit is set, report THRESHOLD_EXCEEDED as
 * the most recent informational exception.  Returns bytes written.
 */
1763 static int resp_ie_l_pg(unsigned char * arr)
1765 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1768 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1769 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1770 arr[4] = THRESHOLD_EXCEEDED;
1773 return sizeof(ie_l_pg);
1776 #define SDEBUG_MAX_LSENSE_SZ 512
/*
 * Respond to LOG SENSE.  Supports the supported-pages page (0x0), the
 * temperature page (0xd) and the informational-exceptions page (0x2f),
 * plus their subpage-list forms when subpage 0xff is requested.
 */
1778 static int resp_log_sense(struct scsi_cmnd * scp,
1779 struct sdebug_dev_info * devip)
1781 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1782 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1783 unsigned char *cmd = scp->cmnd;
1785 errsts = check_readiness(scp, UAS_ONLY, devip);
1788 memset(arr, 0, sizeof(arr));
/* PPC and SP are unsupported; reject whichever bit was set */
1792 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
1793 return check_condition_result;
1795 pcontrol = (cmd[2] & 0xc0) >> 6;
1796 pcode = cmd[2] & 0x3f;
1797 subpcode = cmd[3] & 0xff;
1798 alloc_len = (cmd[7] << 8) + cmd[8];
1800 if (0 == subpcode) {
1802 case 0x0: /* Supported log pages log page */
1804 arr[n++] = 0x0; /* this page */
1805 arr[n++] = 0xd; /* Temperature */
1806 arr[n++] = 0x2f; /* Informational exceptions */
1809 case 0xd: /* Temperature log page */
1810 arr[3] = resp_temp_l_pg(arr + 4);
1812 case 0x2f: /* Informational exceptions log page */
1813 arr[3] = resp_ie_l_pg(arr + 4);
1816 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
1817 return check_condition_result;
1819 } else if (0xff == subpcode) {
1823 case 0x0: /* Supported log pages and subpages log page */
1826 arr[n++] = 0x0; /* 0,0 page */
1828 arr[n++] = 0xff; /* this page */
1830 arr[n++] = 0x0; /* Temperature */
1832 arr[n++] = 0x0; /* Informational exceptions */
1835 case 0xd: /* Temperature subpages */
1838 arr[n++] = 0x0; /* Temperature */
1841 case 0x2f: /* Informational exceptions subpages */
1844 arr[n++] = 0x0; /* Informational exceptions */
1848 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
1849 return check_condition_result;
1852 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1853 return check_condition_result;
1855 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
/* NOTE(review): clamp uses SDEBUG_MAX_INQ_ARR_SZ rather than
 * SDEBUG_MAX_LSENSE_SZ -- looks like a copy/paste from the INQUIRY
 * path; verify the intended bound. */
1856 return fill_from_dev_buffer(scp, arr,
1857 min(len, SDEBUG_MAX_INQ_ARR_SZ));
/*
 * Validate an LBA/length pair against the simulated capacity.  Returns
 * 0 on success or check_condition_result with the appropriate sense
 * (LBA out of range, or transfer length larger than the backing store).
 */
1860 static int check_device_access_params(struct scsi_cmnd *scp,
1861 unsigned long long lba, unsigned int num)
1863 if (lba + num > sdebug_capacity) {
1864 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
1865 return check_condition_result;
1867 /* transfer length excessive (tie in to block limits VPD page) */
1868 if (num > sdebug_store_sectors) {
1869 /* needs work to find which cdb byte 'num' comes from */
1870 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1871 return check_condition_result;
1876 /* Returns number of bytes copied or -1 if error. */
/*
 * Copy 'num' sectors between the command's scatter-gather list and the
 * shared fake_storep RAM store, starting at 'lba' (which wraps modulo
 * sdebug_store_sectors, with a second copy for the wrapped remainder).
 * 'write' selects direction: non-zero copies sgl -> store.
 */
1877 static int do_device_access(struct scsi_cmnd *scmd,
1878 unsigned long long lba, unsigned int num, int write)
1881 unsigned long long block, rest = 0;
1882 struct scsi_data_buffer *sdb;
1883 enum dma_data_direction dir;
1884 size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
/* select buffer, direction, and sg copy helper once, up front */
1888 sdb = scsi_out(scmd);
1889 dir = DMA_TO_DEVICE;
1890 func = sg_pcopy_to_buffer;
1892 sdb = scsi_in(scmd);
1893 dir = DMA_FROM_DEVICE;
1894 func = sg_pcopy_from_buffer;
1899 if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
/* map lba into the store; requests may wrap past the end of the store */
1902 block = do_div(lba, sdebug_store_sectors);
1903 if (block + num > sdebug_store_sectors)
1904 rest = block + num - sdebug_store_sectors;
1906 ret = func(sdb->table.sgl, sdb->table.nents,
1907 fake_storep + (block * scsi_debug_sector_size),
1908 (num - rest) * scsi_debug_sector_size, 0);
1909 if (ret != (num - rest) * scsi_debug_sector_size)
/* wrapped tail restarts at the beginning of the store */
1913 ret += func(sdb->table.sgl, sdb->table.nents,
1914 fake_storep, rest * scsi_debug_sector_size,
1915 (num - rest) * scsi_debug_sector_size);
/*
 * Compute the DIF guard for one block: IP checksum when the guard
 * module parameter selects it, otherwise the T10-DIF CRC.
 */
1921 static __be16 dif_compute_csum(const void *buf, int len)
1925 if (scsi_debug_guard)
1926 csum = (__force __be16)ip_compute_csum(buf, len);
1928 csum = cpu_to_be16(crc_t10dif(buf, len));
/*
 * Verify one sector's protection tuple against its data: guard tag
 * (recomputed checksum), then the reference tag -- against the LBA for
 * DIF type 1, or against the expected initial LBA (ei_lba) for type 2.
 * Logs and fails on any mismatch.
 */
1933 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
1934 sector_t sector, u32 ei_lba)
1936 __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
1938 if (sdt->guard_tag != csum) {
1939 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
1941 (unsigned long)sector,
1942 be16_to_cpu(sdt->guard_tag),
1946 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1947 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1948 pr_err("%s: REF check failed on sector %lu\n",
1949 __func__, (unsigned long)sector);
1952 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1953 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1954 pr_err("%s: REF check failed on sector %lu\n",
1955 __func__, (unsigned long)sector);
/*
 * Copy protection tuples between the command's protection sgl and the
 * dif_storep store for 'sectors' sectors starting at 'sector'.
 * 'read' true copies store -> sgl; false copies sgl -> store.  Handles
 * the store wrapping at sdebug_store_sectors within one sgl segment.
 */
1961 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
1962 unsigned int sectors, bool read)
1966 const void *dif_store_end = dif_storep + sdebug_store_sectors;
1967 struct sg_mapping_iter miter;
1969 /* Bytes of protection data to copy into sgl */
1970 resid = sectors * sizeof(*dif_storep);
1972 sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
1973 scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
1974 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
1976 while (sg_miter_next(&miter) && resid > 0) {
1977 size_t len = min(miter.length, resid);
1978 void *start = dif_store(sector);
/* 'rest' is the portion that wraps past the end of dif_storep */
1981 if (dif_store_end < start + len)
1982 rest = start + len - dif_store_end;
1987 memcpy(paddr, start, len - rest);
1989 memcpy(start, paddr, len - rest);
/* wrapped portion continues from the start of the store */
1993 memcpy(paddr + len - rest, dif_storep, rest);
1995 memcpy(dif_storep, paddr + len - rest, rest);
1998 sector += len / sizeof(*dif_storep);
2001 sg_miter_stop(&miter);
/*
 * For a protected READ: verify each sector's stored DIF tuple against
 * the stored data (skipping tuples whose app tag is the 0xffff escape),
 * then copy the protection data out to the command's protection sgl.
 * Returns 0 or the dif_verify() error.
 */
2004 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2005 unsigned int sectors, u32 ei_lba)
2008 struct sd_dif_tuple *sdt;
2011 for (i = 0; i < sectors; i++, ei_lba++) {
2014 sector = start_sec + i;
2015 sdt = dif_store(sector);
/* app tag 0xffff means "don't check this block" */
2017 if (sdt->app_tag == cpu_to_be16(0xffff))
2020 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2027 dif_copy_prot(SCpnt, start_sec, sectors, true);
/*
 * Respond to a READ.  Validates the access, optionally injects a
 * MEDIUM_ERROR at the fixed OPT_MEDIUM_ERR_ADDR range (when the
 * MEDIUM_ERR option is set), verifies protection information under
 * DIX, and copies the data from the fake store under the read lock.
 */
2033 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
2034 unsigned int num, u32 ei_lba)
2036 unsigned long iflags;
2039 ret = check_device_access_params(SCpnt, lba, num);
/* inject an unrecoverable read error if the request overlaps the
 * designated bad-block range */
2043 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2044 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2045 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2046 /* claim unrecoverable read error */
2047 mk_sense_buffer(SCpnt, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2048 /* set info field and valid bit for fixed descriptor */
2049 if (0x70 == (SCpnt->sense_buffer[0] & 0x7f)) {
2050 SCpnt->sense_buffer[0] |= 0x80; /* Valid bit */
/* INFORMATION field carries the first failing LBA */
2051 ret = (lba < OPT_MEDIUM_ERR_ADDR)
2052 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2053 SCpnt->sense_buffer[3] = (ret >> 24) & 0xff;
2054 SCpnt->sense_buffer[4] = (ret >> 16) & 0xff;
2055 SCpnt->sense_buffer[5] = (ret >> 8) & 0xff;
2056 SCpnt->sense_buffer[6] = ret & 0xff;
2058 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
2059 return check_condition_result;
2062 read_lock_irqsave(&atomic_rw, iflags);
/* DIX: verify protection info before handing data to the initiator */
2065 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2066 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
2069 read_unlock_irqrestore(&atomic_rw, iflags);
2070 mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, prot_ret);
2071 return illegal_condition_result;
2075 ret = do_device_access(SCpnt, lba, num, 0);
2076 read_unlock_irqrestore(&atomic_rw, iflags);
2078 return DID_ERROR << 16;
2080 scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret;
/*
 * Debug helper: hex/ASCII dump 'len' bytes of a sector buffer to the
 * kernel log, 16 bytes per line, replacing non-printable characters.
 */
2085 void dump_sector(unsigned char *buf, int len)
2089 pr_err(">>> Sector Dump <<<\n");
2090 for (i = 0 ; i < len ; i += 16) {
2093 for (j = 0, n = 0; j < 16; j++) {
2094 unsigned char c = buf[i+j];
/* printable ASCII range only; everything else is substituted */
2096 if (c >= 0x20 && c < 0x7e)
2097 n += scnprintf(b + n, sizeof(b) - n,
2100 n += scnprintf(b + n, sizeof(b) - n,
2103 pr_err("%04d: %s\n", i, b);
/*
 * For a protected WRITE: walk the protection and data scatter-gather
 * lists in lockstep, verify each sector's incoming DIF tuple against
 * its data, and on success copy the protection data into dif_storep.
 * On a verify failure the offending sector is dumped and the
 * dif_verify() error returned.
 */
2107 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2108 unsigned int sectors, u32 ei_lba)
2111 struct sd_dif_tuple *sdt;
2113 sector_t sector = start_sec;
2116 struct sg_mapping_iter diter;
2117 struct sg_mapping_iter piter;
2119 BUG_ON(scsi_sg_count(SCpnt) == 0);
2120 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2122 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2123 scsi_prot_sg_count(SCpnt),
2124 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2125 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2126 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2128 /* For each protection page */
2129 while (sg_miter_next(&piter)) {
2131 if (WARN_ON(!sg_miter_next(&diter))) {
2136 for (ppage_offset = 0; ppage_offset < piter.length;
2137 ppage_offset += sizeof(struct sd_dif_tuple)) {
2138 /* If we're at the end of the current
2139 * data page advance to the next one
2141 if (dpage_offset >= diter.length) {
2142 if (WARN_ON(!sg_miter_next(&diter))) {
2149 sdt = piter.addr + ppage_offset;
2150 daddr = diter.addr + dpage_offset;
2152 ret = dif_verify(sdt, daddr, sector, ei_lba);
2154 dump_sector(daddr, scsi_debug_sector_size);
2160 dpage_offset += scsi_debug_sector_size;
2162 diter.consumed = dpage_offset;
2163 sg_miter_stop(&diter);
2165 sg_miter_stop(&piter);
/* all tuples verified; persist them to the protection store */
2167 dif_copy_prot(SCpnt, start_sec, sectors, false);
/* error path: stop both iterators before returning */
2174 sg_miter_stop(&diter);
2175 sg_miter_stop(&piter);
/*
 * Convert an LBA to its index in the provisioning bitmap, accounting
 * for the unmap alignment offset before dividing by the granularity.
 */
2179 static unsigned long lba_to_map_index(sector_t lba)
2181 if (scsi_debug_unmap_alignment) {
2182 lba += scsi_debug_unmap_granularity -
2183 scsi_debug_unmap_alignment;
2185 do_div(lba, scsi_debug_unmap_granularity);
/*
 * Inverse of lba_to_map_index(): first LBA covered by a provisioning
 * bitmap index, undoing the alignment offset applied there.
 */
2190 static sector_t map_index_to_lba(unsigned long index)
2192 sector_t lba = index * scsi_debug_unmap_granularity;
2194 if (scsi_debug_unmap_alignment) {
2195 lba -= scsi_debug_unmap_granularity -
2196 scsi_debug_unmap_alignment;
/*
 * Report whether 'lba' is mapped (bit set in map_storep) and, via
 * *num, how many following sectors share that state -- i.e. the run
 * length up to the next state change or end of store.
 */
2202 static unsigned int map_state(sector_t lba, unsigned int *num)
2205 unsigned int mapped;
2206 unsigned long index;
2209 index = lba_to_map_index(lba);
2210 mapped = test_bit(index, map_storep);
/* find where the current mapped/unmapped run ends */
2213 next = find_next_zero_bit(map_storep, map_size, index);
2215 next = find_next_bit(map_storep, map_size, index);
2217 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
/*
 * Mark every provisioning-bitmap granule touched by [lba, lba+len)
 * as mapped.
 */
2223 static void map_region(sector_t lba, unsigned int len)
2225 sector_t end = lba + len;
2228 unsigned long index = lba_to_map_index(lba);
2230 if (index < map_size)
2231 set_bit(index, map_storep);
/* advance to the first LBA of the next granule */
2233 lba = map_index_to_lba(index + 1);
/*
 * Clear the mapped bit for every granule fully contained in
 * [lba, lba+len).  When LBPRZ is advertised the corresponding data is
 * zeroed, and any protection tuples are reset to the 0xff escape so
 * later reads skip verification on unmapped blocks.
 */
2237 static void unmap_region(sector_t lba, unsigned int len)
2239 sector_t end = lba + len;
2242 unsigned long index = lba_to_map_index(lba);
/* only whole granules that start and end within the range are unmapped */
2244 if (lba == map_index_to_lba(index) &&
2245 lba + scsi_debug_unmap_granularity <= end &&
2247 clear_bit(index, map_storep);
2248 if (scsi_debug_lbprz) {
/* LBPRZ promises zeroes after unmap */
2249 memset(fake_storep +
2250 lba * scsi_debug_sector_size, 0,
2251 scsi_debug_sector_size *
2252 scsi_debug_unmap_granularity);
2255 memset(dif_storep + lba, 0xff,
2256 sizeof(*dif_storep) *
2257 scsi_debug_unmap_granularity);
2260 lba = map_index_to_lba(index + 1);
/*
 * Respond to a WRITE.  Validates the access, verifies incoming
 * protection information under DIX, copies the data into the fake
 * store under the write lock, and marks the range mapped when logical
 * block provisioning is enabled.
 */
2264 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2265 unsigned int num, u32 ei_lba)
2267 unsigned long iflags;
2270 ret = check_device_access_params(SCpnt, lba, num);
2274 write_lock_irqsave(&atomic_rw, iflags);
/* DIX: verify protection tuples before accepting the data */
2277 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2278 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2281 write_unlock_irqrestore(&atomic_rw, iflags);
2282 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10,
2284 return illegal_condition_result;
2288 ret = do_device_access(SCpnt, lba, num, 1);
2289 if (scsi_debug_lbp())
2290 map_region(lba, num);
2291 write_unlock_irqrestore(&atomic_rw, iflags);
2293 return (DID_ERROR << 16);
/* short writes are only logged, not treated as errors */
2294 else if ((ret < (num * scsi_debug_sector_size)) &&
2295 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2296 sdev_printk(KERN_INFO, SCpnt->device,
2297 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2298 my_name, num * scsi_debug_sector_size, ret);
/*
 * Respond to WRITE SAME (10/16).  With the UNMAP bit set and LBP
 * enabled the range is simply unmapped; otherwise one logical block is
 * fetched from the initiator and replicated across the whole range,
 * which is then marked mapped.  All store updates happen under the
 * write lock.
 */
2303 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2304 unsigned int num, u32 ei_lba, unsigned int unmap)
2306 unsigned long iflags;
2307 unsigned long long i;
2310 ret = check_device_access_params(scmd, lba, num);
/* enforce the limit advertised in the Block Limits VPD page */
2314 if (num > scsi_debug_write_same_length) {
2315 mk_sense_buffer(scmd, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2317 return check_condition_result;
2320 write_lock_irqsave(&atomic_rw, iflags);
2322 if (unmap && scsi_debug_lbp()) {
2323 unmap_region(lba, num);
2327 /* Else fetch one logical block */
2328 ret = fetch_to_dev_buffer(scmd,
2329 fake_storep + (lba * scsi_debug_sector_size),
2330 scsi_debug_sector_size);
2333 write_unlock_irqrestore(&atomic_rw, iflags);
2334 return (DID_ERROR << 16);
2335 } else if ((ret < (num * scsi_debug_sector_size)) &&
2336 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2337 sdev_printk(KERN_INFO, scmd->device,
2338 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2339 my_name, "write same",
2340 num * scsi_debug_sector_size, ret);
2342 /* Copy first sector to remaining blocks */
2343 for (i = 1 ; i < num ; i++)
2344 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2345 fake_storep + (lba * scsi_debug_sector_size),
2346 scsi_debug_sector_size);
2348 if (scsi_debug_lbp())
2349 map_region(lba, num);
2351 write_unlock_irqrestore(&atomic_rw, iflags);
/* On-the-wire UNMAP block descriptor (fields not visible in this
 * listing; per SBC it carries a 64-bit lba and 32-bit block count). */
2356 struct unmap_block_desc {
/* Respond to the UNMAP command: copy the parameter list out of the
 * scatter-gather buffer, sanity-check its header lengths (BUG_ON —
 * this is a debug driver), then unmap each descriptor's range under
 * the atomic_rw write lock.
 * NOTE(review): interior lines are missing from this listing. */
2362 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2365 struct unmap_block_desc *desc;
2366 unsigned int i, payload_len, descriptors;
2368 unsigned long iflags;
2370 ret = check_readiness(scmd, UAS_ONLY, devip);
/* PARAMETER LIST LENGTH from CDB bytes 7-8. */
2374 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2375 BUG_ON(scsi_bufflen(scmd) != payload_len);
2377 descriptors = (payload_len - 8) / 16;
2379 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2381 return check_condition_result;
2383 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
/* Cross-check the parameter-list header against the CDB length. */
2385 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2386 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2388 desc = (void *)&buf[8];
2390 write_lock_irqsave(&atomic_rw, iflags);
2392 for (i = 0 ; i < descriptors ; i++) {
2393 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2394 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2396 ret = check_device_access_params(scmd, lba, num);
2400 unmap_region(lba, num);
2406 write_unlock_irqrestore(&atomic_rw, iflags);
/* Fixed response length for GET LBA STATUS (header + one descriptor). */
2412 #define SDEBUG_GET_LBA_STATUS_LEN 32
/* Respond to GET LBA STATUS: report whether the LBA (and how many
 * following blocks) are mapped or unmapped in the provisioning map.
 * NOTE(review): interior lines are missing from this listing. */
2414 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2415 struct sdebug_dev_info * devip)
2417 unsigned long long lba;
2418 unsigned int alloc_len, mapped, num;
2419 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2422 ret = check_readiness(scmd, UAS_ONLY, devip);
2426 lba = get_unaligned_be64(&scmd->cmnd[2]);
2427 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2432 ret = check_device_access_params(scmd, lba, 1);
2436 mapped = map_state(lba, &num);
2438 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2439 put_unaligned_be32(20, &arr[0]); /* Parameter Data Length */
2440 put_unaligned_be64(lba, &arr[8]); /* LBA */
2441 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2442 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2444 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
/* Size of the stack buffer used to build the REPORT LUNS response. */
2447 #define SDEBUG_RLUN_ARR_SZ 256
/* Respond to REPORT LUNS: build a LUN list for the simulated target,
 * honouring SELECT REPORT (0..2), the no_lun_0 option, and optionally
 * appending the REPORT LUNS well-known LUN.  Response is clipped to
 * what fits in the 256-byte scratch array.
 * NOTE(review): interior lines are missing from this listing. */
2449 static int resp_report_luns(struct scsi_cmnd * scp,
2450 struct sdebug_dev_info * devip)
2452 unsigned int alloc_len;
2453 int lun_cnt, i, upper, num, n, want_wlun, shortish;
2455 unsigned char *cmd = scp->cmnd;
2456 int select_report = (int)cmd[2];
2457 struct scsi_lun *one_lun;
2458 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2459 unsigned char * max_addr;
/* ALLOCATION LENGTH from CDB bytes 6-9 (big endian). */
2461 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2462 shortish = (alloc_len < 4);
2463 if (shortish || (select_report > 2)) {
2464 mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
2465 return check_condition_result;
2467 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2468 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2469 lun_cnt = scsi_debug_max_luns;
2470 if (1 == select_report)
2472 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2474 want_wlun = (select_report > 0) ? 1 : 0;
2475 num = lun_cnt + want_wlun;
/* LUN LIST LENGTH header field (bytes, 8 per LUN). */
2476 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2477 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2478 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2479 sizeof(struct scsi_lun)), num);
2484 one_lun = (struct scsi_lun *) &arr[8];
2485 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2486 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2487 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
/* SAM-2 flat-space LUN encoding: 6 address bits in byte 0. */
2489 upper = (lun >> 8) & 0x3f;
2491 one_lun[i].scsi_lun[0] =
2492 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2493 one_lun[i].scsi_lun[1] = lun & 0xff;
/* Append the REPORT LUNS well-known LUN when requested. */
2496 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2497 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2500 alloc_len = (unsigned char *)(one_lun + i) - arr;
2501 return fill_from_dev_buffer(scp, arr,
2502 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
/* Respond to XDWRITEREAD(10): XOR the incoming data-out buffer into the
 * data-in scatter-gather list in place, using an atomic sg mapping
 * iterator.  A temporary linear copy of the data-out buffer is used.
 * NOTE(review): interior lines are missing from this listing. */
2505 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2506 unsigned int num, struct sdebug_dev_info *devip)
2509 unsigned char *kaddr, *buf;
2510 unsigned int offset;
2511 struct scsi_data_buffer *sdb = scsi_in(scp);
2512 struct sg_mapping_iter miter;
2514 /* better not to use temporary buffer. */
2515 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2517 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2519 return check_condition_result;
2522 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
/* ATOMIC because this can run in interrupt/tasklet context. */
2525 sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
2526 SG_MITER_ATOMIC | SG_MITER_TO_SG);
2528 while (sg_miter_next(&miter)) {
2530 for (j = 0; j < miter.length; j++)
2531 *(kaddr + j) ^= *(buf + offset + j);
2533 offset += miter.length;
2535 sg_miter_stop(&miter);
2541 /* When timer or tasklet goes off this function is called. */
/* Completion path for delayed responses: validates the queue index,
 * detaches the command from queued_arr under queued_arr_lock, handles
 * the "user reduced max_queue" retirement bookkeeping, and finally
 * invokes the mid-layer done callback outside the lock.
 * NOTE(review): interior lines (error-path returns, some braces) are
 * missing from this listing. */
2542 static void sdebug_q_cmd_complete(unsigned long indx)
2546 unsigned long iflags;
2547 struct sdebug_queued_cmd *sqcp;
2548 struct scsi_cmnd *scp;
2549 struct sdebug_dev_info *devip;
2551 atomic_inc(&sdebug_completions);
2553 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
2554 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
2557 spin_lock_irqsave(&queued_arr_lock, iflags);
2558 sqcp = &queued_arr[qa_indx];
2561 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2562 pr_err("%s: scp is NULL\n", __func__);
2565 devip = (struct sdebug_dev_info *)scp->device->hostdata;
2567 atomic_dec(&devip->num_in_q);
2569 pr_err("%s: devip=NULL\n", __func__);
2570 if (atomic_read(&retired_max_queue) > 0)
2573 sqcp->a_cmnd = NULL;
2574 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
2575 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2576 pr_err("%s: Unexpected completion\n", __func__);
/* If the user shrank max_queue, collapse retired_max_queue once the
 * last in-flight slot above the new limit drains. */
2580 if (unlikely(retiring)) { /* user has reduced max_queue */
2583 retval = atomic_read(&retired_max_queue);
2584 if (qa_indx >= retval) {
2585 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2586 pr_err("%s: index %d too large\n", __func__, retval);
2589 k = find_last_bit(queued_in_use_bm, retval);
2590 if ((k < scsi_debug_max_queue) || (k == retval))
2591 atomic_set(&retired_max_queue, 0);
2593 atomic_set(&retired_max_queue, k + 1);
2595 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2596 scp->scsi_done(scp); /* callback to mid level */
2599 /* When high resolution timer goes off this function is called. */
/* hrtimer flavour of the completion path (used when ndelay > 0).
 * Mirrors sdebug_q_cmd_complete() but derives the queue index from the
 * embedded sdebug_hrtimer and returns HRTIMER_NORESTART (one-shot).
 * NOTE(review): interior lines are missing from this listing. */
2600 static enum hrtimer_restart
2601 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
2605 unsigned long iflags;
2606 struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
2607 struct sdebug_queued_cmd *sqcp;
2608 struct scsi_cmnd *scp;
2609 struct sdebug_dev_info *devip;
2611 atomic_inc(&sdebug_completions);
2612 qa_indx = sd_hrtp->qa_indx;
2613 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
2614 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
2617 spin_lock_irqsave(&queued_arr_lock, iflags);
2618 sqcp = &queued_arr[qa_indx];
2621 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2622 pr_err("%s: scp is NULL\n", __func__);
2625 devip = (struct sdebug_dev_info *)scp->device->hostdata;
2627 atomic_dec(&devip->num_in_q);
2629 pr_err("%s: devip=NULL\n", __func__);
2630 if (atomic_read(&retired_max_queue) > 0)
2633 sqcp->a_cmnd = NULL;
2634 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
2635 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2636 pr_err("%s: Unexpected completion\n", __func__);
/* Same max_queue retirement logic as the timer/tasklet path. */
2640 if (unlikely(retiring)) { /* user has reduced max_queue */
2643 retval = atomic_read(&retired_max_queue);
2644 if (qa_indx >= retval) {
2645 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2646 pr_err("%s: index %d too large\n", __func__, retval);
2649 k = find_last_bit(queued_in_use_bm, retval);
2650 if ((k < scsi_debug_max_queue) || (k == retval))
2651 atomic_set(&retired_max_queue, 0);
2653 atomic_set(&retired_max_queue, k + 1);
2655 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2656 scp->scsi_done(scp); /* callback to mid level */
2658 return HRTIMER_NORESTART;
/* Allocate a zeroed per-device info struct and link it onto the host's
 * device list.  Returns the new struct (NULL-check path not visible in
 * this listing). */
2661 static struct sdebug_dev_info *
2662 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2664 struct sdebug_dev_info *devip;
2666 devip = kzalloc(sizeof(*devip), flags);
2668 devip->sdbg_host = sdbg_host;
2669 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
/* Look up (or lazily create) the sdebug_dev_info for a scsi_device:
 * first try an exact channel/target/lun match on the host's list,
 * remembering any unused entry for re-use; otherwise allocate a new
 * one with GFP_ATOMIC and initialise its identity, queue counter and
 * power-on unit-attention state.
 * NOTE(review): interior lines are missing from this listing. */
2674 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2676 struct sdebug_host_info * sdbg_host;
2677 struct sdebug_dev_info * open_devip = NULL;
2678 struct sdebug_dev_info * devip =
2679 (struct sdebug_dev_info *)sdev->hostdata;
2683 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2685 pr_err("%s: Host info NULL\n", __func__);
2688 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2689 if ((devip->used) && (devip->channel == sdev->channel) &&
2690 (devip->target == sdev->id) &&
2691 (devip->lun == sdev->lun))
/* Remember the first unused slot as a re-use candidate. */
2694 if ((!devip->used) && (!open_devip))
2698 if (!open_devip) { /* try and make a new one */
2699 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC)
2701 printk(KERN_ERR "%s: out of memory at line %d\n",
2702 __func__, __LINE__);
2707 open_devip->channel = sdev->channel;
2708 open_devip->target = sdev->id;
2709 open_devip->lun = sdev->lun;
2710 open_devip->sdbg_host = sdbg_host;
2711 atomic_set(&open_devip->num_in_q, 0);
/* Fresh (or re-used) LU: raise power-on/reset unit attention. */
2712 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
2713 open_devip->used = 1;
2714 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2715 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
/* slave_alloc hook: optionally log, and mark the request queue as
 * capable of bidirectional commands (needed for XDWRITEREAD). */
2720 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2722 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2723 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
2724 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2725 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
/* slave_configure hook: attach the per-device info (created lazily by
 * devInfoReg) to sdev->hostdata, widen max_cmd_len and the queue's
 * max segment size, and honour the no_uld option.  Returns 1 when no
 * devinfo could be allocated so the mid-layer marks the LU offline. */
2729 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2731 struct sdebug_dev_info *devip;
2733 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2734 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
2735 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2736 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2737 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2738 devip = devInfoReg(sdp);
2740 return 1; /* no resources, will be marked offline */
2741 sdp->hostdata = devip;
/* -1U == effectively unlimited segment size for this fake device. */
2742 blk_queue_max_segment_size(sdp->request_queue, -1U);
2743 if (scsi_debug_no_uld)
2744 sdp->no_uld_attach = 1;
/* slave_destroy hook: release the hostdata back-pointer so the
 * sdebug_dev_info slot can be re-used by a later devInfoReg(). */
2748 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2750 struct sdebug_dev_info *devip =
2751 (struct sdebug_dev_info *)sdp->hostdata;
2753 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2754 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
2755 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2757 /* make this slot available for re-use */
2759 sdp->hostdata = NULL;
2763 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
/* Scan the in-use queue slots for the given command; when found,
 * decrement the device's in-flight count, cancel whichever delay
 * mechanism was armed (hrtimer for ndelay, timer for delay > 0,
 * tasklet for delay < 0) and free the slot.  Used by the abort EH.
 * NOTE(review): interior lines are missing from this listing. */
2764 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2766 unsigned long iflags;
2767 int k, qmax, r_qmax;
2768 struct sdebug_queued_cmd *sqcp;
2769 struct sdebug_dev_info *devip;
2771 spin_lock_irqsave(&queued_arr_lock, iflags);
2772 qmax = scsi_debug_max_queue;
2773 r_qmax = atomic_read(&retired_max_queue);
2776 for (k = 0; k < qmax; ++k) {
2777 if (test_bit(k, queued_in_use_bm)) {
2778 sqcp = &queued_arr[k];
2779 if (cmnd == sqcp->a_cmnd) {
2780 devip = (struct sdebug_dev_info *)
2781 cmnd->device->hostdata;
2783 atomic_dec(&devip->num_in_q);
2784 sqcp->a_cmnd = NULL;
/* Drop the lock before cancelling timers/tasklets. */
2785 spin_unlock_irqrestore(&queued_arr_lock,
2787 if (scsi_debug_ndelay > 0) {
2790 &sqcp->sd_hrtp->hrt);
2791 } else if (scsi_debug_delay > 0) {
2792 if (sqcp->cmnd_timerp)
2795 } else if (scsi_debug_delay < 0) {
2797 tasklet_kill(sqcp->tletp);
2799 clear_bit(k, queued_in_use_bm);
2804 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2808 /* Deletes (stops) timers or tasklets of all queued commands */
/* Bus/host-reset helper: like stop_queued_cmnd() but for every in-use
 * slot.  Note the lock is dropped around the cancel calls and
 * re-acquired before continuing the scan.
 * NOTE(review): interior lines are missing from this listing. */
2809 static void stop_all_queued(void)
2811 unsigned long iflags;
2813 struct sdebug_queued_cmd *sqcp;
2814 struct sdebug_dev_info *devip;
2816 spin_lock_irqsave(&queued_arr_lock, iflags);
2817 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
2818 if (test_bit(k, queued_in_use_bm)) {
2819 sqcp = &queued_arr[k];
2821 devip = (struct sdebug_dev_info *)
2822 sqcp->a_cmnd->device->hostdata;
2824 atomic_dec(&devip->num_in_q);
2825 sqcp->a_cmnd = NULL;
2826 spin_unlock_irqrestore(&queued_arr_lock,
2828 if (scsi_debug_ndelay > 0) {
2831 &sqcp->sd_hrtp->hrt);
2832 } else if (scsi_debug_delay > 0) {
2833 if (sqcp->cmnd_timerp)
2836 } else if (scsi_debug_delay < 0) {
2838 tasklet_kill(sqcp->tletp);
2840 clear_bit(k, queued_in_use_bm);
/* Re-take the lock before the next loop iteration. */
2841 spin_lock_irqsave(&queued_arr_lock, iflags);
2845 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2848 /* Free queued command memory on heap */
/* Module-exit helper: free the lazily-allocated timer/hrtimer (and,
 * in lines not visible here, tasklet) objects for every queue slot. */
2849 static void free_all_queued(void)
2851 unsigned long iflags;
2853 struct sdebug_queued_cmd *sqcp;
2855 spin_lock_irqsave(&queued_arr_lock, iflags);
2856 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
2857 sqcp = &queued_arr[k];
2858 kfree(sqcp->cmnd_timerp);
2859 sqcp->cmnd_timerp = NULL;
2862 kfree(sqcp->sd_hrtp);
2863 sqcp->sd_hrtp = NULL;
2865 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/* eh_abort_handler: cancel the command's pending delayed completion
 * (if any) via stop_queued_cmnd().  Return statement not visible in
 * this listing. */
2868 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
2872 if (SCpnt->device &&
2873 (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
2874 sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
2876 stop_queued_cmnd(SCpnt);
/* eh_device_reset_handler: raise a power-on/reset unit attention on
 * the addressed LU.  Return statement not visible in this listing. */
2881 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2883 struct sdebug_dev_info * devip;
2886 if (SCpnt && SCpnt->device) {
2887 struct scsi_device *sdp = SCpnt->device;
2889 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2890 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2891 devip = devInfoReg(sdp);
2893 set_bit(SDEBUG_UA_POR, devip->uas_bm);
/* eh_target_reset_handler: raise a bus-reset unit attention on every
 * LU of the addressed target on this host.
 * NOTE(review): interior lines are missing from this listing. */
2898 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
2900 struct sdebug_host_info *sdbg_host;
2901 struct sdebug_dev_info *devip;
2902 struct scsi_device *sdp;
2903 struct Scsi_Host *hp;
2906 ++num_target_resets;
2909 sdp = SCpnt->device;
2912 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2913 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2917 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2919 list_for_each_entry(devip,
2920 &sdbg_host->dev_info_list,
2922 if (devip->target == sdp->id) {
2923 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2927 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2928 sdev_printk(KERN_INFO, sdp,
2929 "%s: %d device(s) found in target\n", __func__, k);
/* eh_bus_reset_handler: raise a bus-reset unit attention on every LU
 * attached to the command's host.
 * NOTE(review): interior lines are missing from this listing. */
2934 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2936 struct sdebug_host_info *sdbg_host;
2937 struct sdebug_dev_info *devip;
2938 struct scsi_device * sdp;
2939 struct Scsi_Host * hp;
2943 if (!(SCpnt && SCpnt->device))
2945 sdp = SCpnt->device;
2946 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2947 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2950 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2952 list_for_each_entry(devip,
2953 &sdbg_host->dev_info_list,
2955 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2960 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2961 sdev_printk(KERN_INFO, sdp,
2962 "%s: %d device(s) found in host\n", __func__, k);
/* eh_host_reset_handler: walk every simulated host and raise a
 * bus-reset unit attention on every LU, under sdebug_host_list_lock.
 * NOTE(review): interior lines are missing from this listing. */
2967 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2969 struct sdebug_host_info * sdbg_host;
2970 struct sdebug_dev_info *devip;
2974 if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
2975 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
2976 spin_lock(&sdebug_host_list_lock);
2977 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2978 list_for_each_entry(devip, &sdbg_host->dev_info_list,
2980 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2984 spin_unlock(&sdebug_host_list_lock);
2986 if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2987 sdev_printk(KERN_INFO, SCpnt->device,
2988 "%s: %d device(s) found\n", __func__, k);
/* Write a classic DOS/MBR partition table into the first sector of the
 * RAM store: scsi_debug_num_parts equal-sized Linux (0x83) partitions
 * laid out on the simulated CHS geometry.  No-op for stores < 1 MB.
 * NOTE(review): interior lines are missing from this listing. */
2992 static void __init sdebug_build_parts(unsigned char *ramp,
2993 unsigned long store_size)
2995 struct partition * pp;
2996 int starts[SDEBUG_MAX_PARTS + 2];
2997 int sectors_per_part, num_sectors, k;
2998 int heads_by_sects, start_sec, end_sec;
3000 /* assume partition table already zeroed */
3001 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
3003 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
3004 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
3005 pr_warn("%s: reducing partitions to %d\n", __func__,
3008 num_sectors = (int)sdebug_store_sectors;
/* First track is reserved for the MBR itself. */
3009 sectors_per_part = (num_sectors - sdebug_sectors_per)
3010 / scsi_debug_num_parts;
3011 heads_by_sects = sdebug_heads * sdebug_sectors_per;
3012 starts[0] = sdebug_sectors_per;
3013 for (k = 1; k < scsi_debug_num_parts; ++k)
3014 starts[k] = ((k * sectors_per_part) / heads_by_sects)
3016 starts[scsi_debug_num_parts] = num_sectors;
3017 starts[scsi_debug_num_parts + 1] = 0;
3019 ramp[510] = 0x55; /* magic partition markings */
3021 pp = (struct partition *)(ramp + 0x1be);
3022 for (k = 0; starts[k + 1]; ++k, ++pp) {
3023 start_sec = starts[k];
3024 end_sec = starts[k + 1] - 1;
/* CHS encoding of start/end (sector field is 1-based). */
3027 pp->cyl = start_sec / heads_by_sects;
3028 pp->head = (start_sec - (pp->cyl * heads_by_sects))
3029 / sdebug_sectors_per;
3030 pp->sector = (start_sec % sdebug_sectors_per) + 1;
3032 pp->end_cyl = end_sec / heads_by_sects;
3033 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3034 / sdebug_sectors_per;
3035 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3037 pp->start_sect = cpu_to_le32(start_sec);
3038 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3039 pp->sys_ind = 0x83; /* plain Linux partition */
/* Core response scheduler: either complete the command immediately in
 * the submitting thread (delta_jiff == 0, or queue exhausted with the
 * right options), or park it in a queued_arr slot and arm one of three
 * delay mechanisms: a jiffies timer (delta_jiff > 0), an hrtimer
 * (ndelay > 0), or a tasklet (delay < 0, -1 selects hi-priority).
 * Also simulates TASK SET FULL, either when the device queue is
 * saturated or periodically via the every_nth/RARE_TSF option.
 * NOTE(review): interior lines are missing from this listing. */
3044 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3045 int scsi_result, int delta_jiff)
3047 unsigned long iflags;
3048 int k, num_in_q, qdepth, inject;
3049 struct sdebug_queued_cmd *sqcp = NULL;
3050 struct scsi_device *sdp = cmnd->device;
3052 if (NULL == cmnd || NULL == devip) {
3053 pr_warn("%s: called with NULL cmnd or devip pointer\n",
3055 /* no particularly good error to report back */
3056 return SCSI_MLQUEUE_HOST_BUSY;
3058 if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3059 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3060 __func__, scsi_result);
3061 if (delta_jiff == 0)
3062 goto respond_in_thread;
3064 /* schedule the response at a later time if resources permit */
3065 spin_lock_irqsave(&queued_arr_lock, iflags);
3066 num_in_q = atomic_read(&devip->num_in_q);
3067 qdepth = cmnd->device->queue_depth;
/* Device queue saturated: answer TASK SET FULL from this thread. */
3069 if ((qdepth > 0) && (num_in_q >= qdepth)) {
3071 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3072 goto respond_in_thread;
3074 scsi_result = device_qfull_result;
/* Optionally inject a rare TASK SET FULL every |every_nth| cmds. */
3075 } else if ((scsi_debug_every_nth != 0) &&
3076 (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
3077 (scsi_result == 0)) {
3078 if ((num_in_q == (qdepth - 1)) &&
3079 (atomic_inc_return(&sdebug_a_tsf) >=
3080 abs(scsi_debug_every_nth))) {
3081 atomic_set(&sdebug_a_tsf, 0);
3083 scsi_result = device_qfull_result;
3087 k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3088 if (k >= scsi_debug_max_queue) {
3089 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3091 goto respond_in_thread;
3092 else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3093 scsi_result = device_qfull_result;
3094 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3095 sdev_printk(KERN_INFO, sdp,
3096 "%s: max_queue=%d exceeded, %s\n",
3097 __func__, scsi_debug_max_queue,
3098 (scsi_result ? "status: TASK SET FULL" :
3099 "report: host busy"));
3101 goto respond_in_thread;
3103 return SCSI_MLQUEUE_HOST_BUSY;
/* Claim slot k and record the command before dropping the lock. */
3105 __set_bit(k, queued_in_use_bm);
3106 atomic_inc(&devip->num_in_q);
3107 sqcp = &queued_arr[k];
3108 sqcp->a_cmnd = cmnd;
3109 cmnd->result = scsi_result;
3110 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3111 if (delta_jiff > 0) {
/* Timer objects are allocated lazily, once per slot. */
3112 if (NULL == sqcp->cmnd_timerp) {
3113 sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
3115 if (NULL == sqcp->cmnd_timerp)
3116 return SCSI_MLQUEUE_HOST_BUSY;
3117 init_timer(sqcp->cmnd_timerp);
3119 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
3120 sqcp->cmnd_timerp->data = k;
3121 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
3122 add_timer(sqcp->cmnd_timerp);
3123 } else if (scsi_debug_ndelay > 0) {
3124 ktime_t kt = ktime_set(0, scsi_debug_ndelay);
3125 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
3127 if (NULL == sd_hp) {
3128 sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
3130 return SCSI_MLQUEUE_HOST_BUSY;
3131 sqcp->sd_hrtp = sd_hp;
3132 hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
3134 sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
3137 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
3138 } else { /* delay < 0 */
3139 if (NULL == sqcp->tletp) {
3140 sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
3142 if (NULL == sqcp->tletp)
3143 return SCSI_MLQUEUE_HOST_BUSY;
3144 tasklet_init(sqcp->tletp,
3145 sdebug_q_cmd_complete, k);
3147 if (-1 == delta_jiff)
3148 tasklet_hi_schedule(sqcp->tletp);
3150 tasklet_schedule(sqcp->tletp);
3152 if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
3153 (scsi_result == device_qfull_result))
3154 sdev_printk(KERN_INFO, sdp,
3155 "%s: num_in_q=%d +1, %s%s\n", __func__,
3156 num_in_q, (inject ? "<inject> " : ""),
3157 "status: TASK SET FULL");
3160 respond_in_thread: /* call back to mid-layer using invocation thread */
3161 cmnd->result = scsi_result;
3162 cmnd->scsi_done(cmnd);
3166 /* Note: The following macros create attribute files in the
3167 /sys/module/scsi_debug/parameters directory. Unfortunately this
3168 driver is unaware of a change and cannot trigger auxiliary actions
3169 as it can when the corresponding attribute in the
3170 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
/* Parameters marked S_IWUSR are writable at runtime through sysfs;
 * the rest are read-only after module load. */
3172 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
3173 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
3174 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
3175 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
3176 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
3177 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
3178 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
3179 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
3180 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
3181 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
3182 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
3183 module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
3184 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
3185 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
3186 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
3187 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
3188 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
3189 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
3190 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
3191 module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
3192 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
3193 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
3194 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
3195 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
3196 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
3197 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
3198 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
3199 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
3200 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
3201 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
3202 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
3203 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
3204 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
3205 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
3206 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
3207 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
3208 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
3210 module_param_named(write_same_length, scsi_debug_write_same_length, int,
3213 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
3214 MODULE_DESCRIPTION("SCSI debug adapter driver");
3215 MODULE_LICENSE("GPL");
3216 MODULE_VERSION(SCSI_DEBUG_VERSION);
/* One-line help strings shown by modinfo, kept in the same
 * alphabetical order as the module_param_named() list above. */
3218 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
3219 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
3220 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
3221 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
3222 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
3223 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
3224 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
3225 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
3226 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
3227 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
3228 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
3229 MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
3230 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
3231 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
3232 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
3233 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
3234 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
3235 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
3236 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
3237 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
3238 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
3239 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
3240 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
3241 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
3242 MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
3243 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
3244 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
3245 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
3246 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
3247 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
3248 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
3249 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
3250 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
3251 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
3252 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
3253 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
3254 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
3255 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
/* Static buffer backing the host-template .info string below. */
3257 static char sdebug_info[256];
/* .info host-template hook: format a one-line driver description.
 * Return statement not visible in this listing. */
3259 static const char * scsi_debug_info(struct Scsi_Host * shp)
3261 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
3262 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
3263 scsi_debug_version_date, scsi_debug_dev_size_mb,
3268 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
/* .write_info hook: parse a decimal opts value from the procfs write
 * (admin-only) and reset the every_nth counter when active. */
3269 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
/* Clamp the copied input so arr (not visible here) cannot overflow. */
3273 int minLen = length > 15 ? 15 : length;
3275 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
3277 memcpy(arr, buffer, minLen);
3279 if (1 != sscanf(arr, "%d", &opts))
3281 scsi_debug_opts = opts;
3282 if (scsi_debug_every_nth != 0)
3283 atomic_set(&sdebug_cmnd_count, 0);
3287 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
3288 * same for each scsi_debug host (if more than one). Some of the counters
3289 * output are not atomics so might be inaccurate in a busy system. */
/* .show_info hook: dump configuration, geometry, and event counters
 * into the seq_file, plus the busy range of the queue bitmap. */
3290 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
3295 if (scsi_debug_every_nth > 0)
3296 snprintf(b, sizeof(b), " (curr:%d)",
3297 ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
3298 atomic_read(&sdebug_a_tsf) :
3299 atomic_read(&sdebug_cmnd_count)));
3303 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
3304 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
3306 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
3307 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
3308 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
3309 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
3310 "usec_in_jiffy=%lu\n",
3311 SCSI_DEBUG_VERSION, scsi_debug_version_date,
3312 scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
3313 scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
3314 scsi_debug_max_luns, atomic_read(&sdebug_completions),
3315 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
3316 sdebug_sectors_per, num_aborts, num_dev_resets,
3317 num_target_resets, num_bus_resets, num_host_resets,
3318 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
/* Report the first/last in-use queue slots, if any are busy. */
3320 f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
3321 if (f != scsi_debug_max_queue) {
3322 l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
3323 seq_printf(m, " %s BUSY: first,last bits set: %d,%d\n",
3324 "queued_in_use_bm", f, l);
/* sysfs 'delay' attribute: show current response delay in jiffies. */
3329 static ssize_t delay_show(struct device_driver *ddp, char *buf)
3331 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
3333 /* Returns -EBUSY if delay is being changed and commands are queued */
/* Store path: changing the delay mechanism while commands are still
 * queued is refused (-EBUSY); on success ndelay is cleared so the
 * jiffies-based mechanism takes effect. */
3334 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
3339 if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
3341 if (scsi_debug_delay != delay) {
3342 unsigned long iflags;
3345 spin_lock_irqsave(&queued_arr_lock, iflags);
3346 k = find_first_bit(queued_in_use_bm,
3347 scsi_debug_max_queue);
3348 if (k != scsi_debug_max_queue)
3349 res = -EBUSY; /* have queued commands */
3351 scsi_debug_delay = delay;
3352 scsi_debug_ndelay = 0;
3354 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3360 static DRIVER_ATTR_RW(delay);
/* sysfs 'ndelay' attribute: show nanosecond response delay. */
3362 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
3364 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
3366 /* Returns -EBUSY if ndelay is being changed and commands are queued */
3367 /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
/* Store path: accepts 0 <= ndelay < 1e9; like delay_store, refuses
 * the change while commands are queued. */
3368 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
3371 unsigned long iflags;
3374 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
3375 (ndelay >= 0) && (ndelay < 1000000000)) {
3377 if (scsi_debug_ndelay != ndelay) {
3378 spin_lock_irqsave(&queued_arr_lock, iflags);
3379 k = find_first_bit(queued_in_use_bm,
3380 scsi_debug_max_queue);
3381 if (k != scsi_debug_max_queue)
3382 res = -EBUSY; /* have queued commands */
3384 scsi_debug_ndelay = ndelay;
3385 scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
3388 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3394 static DRIVER_ATTR_RW(ndelay);
/* sysfs 'opts' attribute: debug option bit-mask. Store accepts either
 * hexadecimal ("0x..." prefix) or decimal input. */
3396 static ssize_t opts_show(struct device_driver *ddp, char *buf)
3398 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
3401 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
3407 if (1 == sscanf(buf, "%10s", work)) {
3408 if (0 == strncasecmp(work,"0x", 2)) {
3409 if (1 == sscanf(&work[2], "%x", &opts))
3412 if (1 == sscanf(work, "%d", &opts))
3418 scsi_debug_opts = opts;
/* Reset the every_nth trigger counters when options change. */
3419 atomic_set(&sdebug_cmnd_count, 0);
3420 atomic_set(&sdebug_a_tsf, 0);
3423 static DRIVER_ATTR_RW(opts);
/* sysfs 'ptype' (SCSI peripheral device type) and 'dsense' (descriptor
 * sense format enable) attributes; both accept non-negative integers. */
3425 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
3427 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
3429 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
3434 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3435 scsi_debug_ptype = n;
3440 static DRIVER_ATTR_RW(ptype);
3442 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
3444 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
3446 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
3451 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3452 scsi_debug_dsense = n;
3457 static DRIVER_ATTR_RW(dsense);
/* sysfs 'fake_rw' attribute: when set, READ/WRITE commands are acknowledged
 * without touching the ramdisk. Turning fake_rw OFF may require allocating
 * the backing store (fake_storep) if it was never created. */
3459 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
3461 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
3463 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
3468 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
/* Normalize current value to 0/1 before comparing with new value. */
3470 scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
3471 if (scsi_debug_fake_rw != n) {
3472 if ((0 == n) && (NULL == fake_storep)) {
/* Lazily allocate the ramdisk (dev_size_mb megabytes) via vmalloc. */
3474 (unsigned long)scsi_debug_dev_size_mb *
3477 fake_storep = vmalloc(sz);
3478 if (NULL == fake_storep) {
3479 pr_err("%s: out of memory, 9\n",
3483 memset(fake_storep, 0, sz);
3485 scsi_debug_fake_rw = n;
3491 static DRIVER_ATTR_RW(fake_rw);
/* Simple integer sysfs attributes controlling the simulated topology:
 * no_lun_0, num_tgts, dev_size_mb (RO), num_parts (RO), every_nth and
 * max_luns. num_tgts/max_luns changes re-scan via sdebug_max_tgts_luns(). */
3493 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
3495 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
3497 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
3502 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3503 scsi_debug_no_lun_0 = n;
3508 static DRIVER_ATTR_RW(no_lun_0);
3510 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
3512 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
3514 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
3519 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3520 scsi_debug_num_tgts = n;
/* Propagate new target count to all simulated hosts. */
3521 sdebug_max_tgts_luns();
3526 static DRIVER_ATTR_RW(num_tgts);
3528 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
3530 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3532 static DRIVER_ATTR_RO(dev_size_mb);
3534 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
3536 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3538 static DRIVER_ATTR_RO(num_parts);
3540 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
3542 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3544 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
/* Note: every_nth may be negative (see queuecommand's abs() usage). */
3549 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3550 scsi_debug_every_nth = nth;
3551 atomic_set(&sdebug_cmnd_count, 0);
3556 static DRIVER_ATTR_RW(every_nth);
3558 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
3560 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3562 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
3567 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3568 scsi_debug_max_luns = n;
3569 sdebug_max_tgts_luns();
3574 static DRIVER_ATTR_RW(max_luns);
/* sysfs 'max_queue' attribute: host queue depth (1..SCSI_DEBUG_CANQUEUE). */
3576 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
3578 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3580 /* N.B. max_queue can be changed while there are queued commands. In flight
3581 * commands beyond the new max_queue will be completed. */
3582 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
3585 unsigned long iflags;
3588 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3589 (n <= SCSI_DEBUG_CANQUEUE)) {
3590 spin_lock_irqsave(&queued_arr_lock, iflags);
/* find_last_bit() locates the highest in-use slot so commands already
 * queued above the new limit can still be retired (retired_max_queue). */
3591 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
3592 scsi_debug_max_queue = n;
3593 if (SCSI_DEBUG_CANQUEUE == k)
3594 atomic_set(&retired_max_queue, 0);
3596 atomic_set(&retired_max_queue, k + 1);
3598 atomic_set(&retired_max_queue, 0);
3599 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3604 static DRIVER_ATTR_RW(max_queue);
/* no_uld (RO), scsi_level (RO) and virtual_gb (RW) attributes. Changing
 * virtual_gb recomputes the reported capacity. */
3606 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
3608 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3610 static DRIVER_ATTR_RO(no_uld);
3612 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
3614 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3616 static DRIVER_ATTR_RO(scsi_level);
3618 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
3620 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3622 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
3627 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3628 scsi_debug_virtual_gb = n;
/* Capacity depends on virtual_gb; refresh the cached value. */
3630 sdebug_capacity = get_sdebug_capacity();
3636 static DRIVER_ATTR_RW(virtual_gb);
/* sysfs 'add_host' attribute: writing a positive delta adds that many
 * simulated adapters; a negative delta removes them one at a time. */
3638 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
3640 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3643 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
3648 if (sscanf(buf, "%d", &delta_hosts) != 1)
3650 if (delta_hosts > 0) {
3652 sdebug_add_adapter();
3653 } while (--delta_hosts);
3654 } else if (delta_hosts < 0) {
3656 sdebug_remove_adapter();
3657 } while (++delta_hosts);
3661 static DRIVER_ATTR_RW(add_host);
/* Remaining attribute group: vpd_use_hostno (RW), sector_size/dix/dif/
 * guard/ato (RO, DIF/DIX protection settings), map (RO, provisioning
 * bitmap dump) and removable (RW). */
3663 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
3665 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3667 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
3672 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3673 scsi_debug_vpd_use_hostno = n;
3678 static DRIVER_ATTR_RW(vpd_use_hostno);
3680 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
3682 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3684 static DRIVER_ATTR_RO(sector_size);
3686 static ssize_t dix_show(struct device_driver *ddp, char *buf)
3688 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3690 static DRIVER_ATTR_RO(dix);
3692 static ssize_t dif_show(struct device_driver *ddp, char *buf)
3694 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3696 static DRIVER_ATTR_RO(dif);
3698 static ssize_t guard_show(struct device_driver *ddp, char *buf)
3700 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
3702 static DRIVER_ATTR_RO(guard);
3704 static ssize_t ato_show(struct device_driver *ddp, char *buf)
3706 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3708 static DRIVER_ATTR_RO(ato);
3710 static ssize_t map_show(struct device_driver *ddp, char *buf)
/* Without logical block provisioning everything is "mapped" 0..N. */
3714 if (!scsi_debug_lbp())
3715 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3716 sdebug_store_sectors);
3718 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3720 buf[count++] = '\n';
3725 static DRIVER_ATTR_RO(map);
3727 static ssize_t removable_show(struct device_driver *ddp, char *buf)
3729 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3731 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
3736 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3737 scsi_debug_removable = (n > 0);
3742 static DRIVER_ATTR_RW(removable);
/* sysfs 'host_lock' attribute: whether queuecommand runs under the
 * Scsi_Host lock (see sdebug_queuecommand_lock_or_not below). */
3744 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
3746 return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
3748 /* Returns -EBUSY if host_lock is being changed and commands are queued */
3749 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
3754 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3755 bool new_host_lock = (n > 0);
3758 if (new_host_lock != scsi_debug_host_lock) {
3759 unsigned long iflags;
/* Same busy-check pattern as delay_store(): reject while queued. */
3762 spin_lock_irqsave(&queued_arr_lock, iflags);
3763 k = find_first_bit(queued_in_use_bm,
3764 scsi_debug_max_queue);
3765 if (k != scsi_debug_max_queue)
3766 res = -EBUSY; /* have queued commands */
3768 scsi_debug_host_lock = new_host_lock;
3769 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3775 static DRIVER_ATTR_RW(host_lock);
3778 /* Note: The following array creates attribute files in the
3779 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3780 files (over those found in the /sys/module/scsi_debug/parameters
3781 directory) is that auxiliary actions can be triggered when an attribute
3782 is changed. For example see: sdebug_add_host_store() above.
/* NULL-terminated table consumed by ATTRIBUTE_GROUPS() to build the
 * driver's default sysfs attribute group. */
3785 static struct attribute *sdebug_drv_attrs[] = {
3786 &driver_attr_delay.attr,
3787 &driver_attr_opts.attr,
3788 &driver_attr_ptype.attr,
3789 &driver_attr_dsense.attr,
3790 &driver_attr_fake_rw.attr,
3791 &driver_attr_no_lun_0.attr,
3792 &driver_attr_num_tgts.attr,
3793 &driver_attr_dev_size_mb.attr,
3794 &driver_attr_num_parts.attr,
3795 &driver_attr_every_nth.attr,
3796 &driver_attr_max_luns.attr,
3797 &driver_attr_max_queue.attr,
3798 &driver_attr_no_uld.attr,
3799 &driver_attr_scsi_level.attr,
3800 &driver_attr_virtual_gb.attr,
3801 &driver_attr_add_host.attr,
3802 &driver_attr_vpd_use_hostno.attr,
3803 &driver_attr_sector_size.attr,
3804 &driver_attr_dix.attr,
3805 &driver_attr_dif.attr,
3806 &driver_attr_guard.attr,
3807 &driver_attr_ato.attr,
3808 &driver_attr_map.attr,
3809 &driver_attr_removable.attr,
3810 &driver_attr_host_lock.attr,
3811 &driver_attr_ndelay.attr,
3814 ATTRIBUTE_GROUPS(sdebug_drv);
/* Root device that parents all simulated adapters. */
3816 static struct device *pseudo_primary;
/* Module init: validate parameters, size and allocate the ramdisk and
 * optional DIF/provisioning stores, register the pseudo bus/driver and
 * create the initial adapters. */
3818 static int __init scsi_debug_init(void)
3825 atomic_set(&sdebug_cmnd_count, 0);
3826 atomic_set(&sdebug_completions, 0);
3827 atomic_set(&retired_max_queue, 0);
/* ndelay must stay below one second; non-zero ndelay overrides delay. */
3829 if (scsi_debug_ndelay >= 1000000000) {
3830 pr_warn("%s: ndelay must be less than 1 second, ignored\n",
3832 scsi_debug_ndelay = 0;
3833 } else if (scsi_debug_ndelay > 0)
3834 scsi_debug_delay = DELAY_OVERRIDDEN;
/* Only a fixed set of sector sizes is accepted (cases elided here). */
3836 switch (scsi_debug_sector_size) {
3843 pr_err("%s: invalid sector_size %d\n", __func__,
3844 scsi_debug_sector_size);
3848 switch (scsi_debug_dif) {
3850 case SD_DIF_TYPE0_PROTECTION:
3851 case SD_DIF_TYPE1_PROTECTION:
3852 case SD_DIF_TYPE2_PROTECTION:
3853 case SD_DIF_TYPE3_PROTECTION:
3857 pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
3861 if (scsi_debug_guard > 1) {
3862 pr_err("%s: guard must be 0 or 1\n", __func__);
3866 if (scsi_debug_ato > 1) {
3867 pr_err("%s: ato must be 0 or 1\n", __func__);
3871 if (scsi_debug_physblk_exp > 15) {
3872 pr_err("%s: invalid physblk_exp %u\n", __func__,
3873 scsi_debug_physblk_exp);
3877 if (scsi_debug_lowest_aligned > 0x3fff) {
3878 pr_err("%s: lowest_aligned too big: %u\n", __func__,
3879 scsi_debug_lowest_aligned);
3883 if (scsi_debug_dev_size_mb < 1)
3884 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3885 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3886 sdebug_store_sectors = sz / scsi_debug_sector_size;
3887 sdebug_capacity = get_sdebug_capacity();
3889 /* play around with geometry, don't waste too much on track 0 */
3891 sdebug_sectors_per = 32;
3892 if (scsi_debug_dev_size_mb >= 16)
3894 else if (scsi_debug_dev_size_mb >= 256)
3896 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3897 (sdebug_sectors_per * sdebug_heads);
3898 if (sdebug_cylinders_per >= 1024) {
3899 /* other LLDs do this; implies >= 1GB ram disk ... */
3901 sdebug_sectors_per = 63;
3902 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3903 (sdebug_sectors_per * sdebug_heads);
/* Allocate the backing ramdisk unless fake_rw suppresses real I/O. */
3906 if (0 == scsi_debug_fake_rw) {
3907 fake_storep = vmalloc(sz);
3908 if (NULL == fake_storep) {
3909 pr_err("%s: out of memory, 1\n", __func__);
3912 memset(fake_storep, 0, sz);
3913 if (scsi_debug_num_parts > 0)
3914 sdebug_build_parts(fake_storep, sz);
/* Optional DIX protection-information store, one tuple per sector. */
3917 if (scsi_debug_dix) {
3920 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3921 dif_storep = vmalloc(dif_size);
3923 pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
3926 if (dif_storep == NULL) {
3927 pr_err("%s: out of mem. (DIX)\n", __func__);
3932 memset(dif_storep, 0xff, dif_size);
3935 /* Logical Block Provisioning */
3936 if (scsi_debug_lbp()) {
3937 scsi_debug_unmap_max_blocks =
3938 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3940 scsi_debug_unmap_max_desc =
3941 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3943 scsi_debug_unmap_granularity =
3944 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3946 if (scsi_debug_unmap_alignment &&
3947 scsi_debug_unmap_granularity <=
3948 scsi_debug_unmap_alignment) {
3949 pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
3954 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3955 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3957 pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
3959 if (map_storep == NULL) {
3960 pr_err("%s: out of mem. (MAP)\n", __func__);
3965 bitmap_zero(map_storep, map_size);
3967 /* Map first 1KB for partition table */
3968 if (scsi_debug_num_parts)
/* Register root device, pseudo bus and driver (unwound on error). */
3972 pseudo_primary = root_device_register("pseudo_0");
3973 if (IS_ERR(pseudo_primary)) {
3974 pr_warn("%s: root_device_register() error\n", __func__);
3975 ret = PTR_ERR(pseudo_primary);
3978 ret = bus_register(&pseudo_lld_bus);
3980 pr_warn("%s: bus_register error: %d\n", __func__, ret);
3983 ret = driver_register(&sdebug_driverfs_driver);
3985 pr_warn("%s: driver_register error: %d\n", __func__, ret);
/* scsi_debug_add_host is reset then re-counted by sdebug_add_adapter(). */
3989 host_to_add = scsi_debug_add_host;
3990 scsi_debug_add_host = 0;
3992 for (k = 0; k < host_to_add; k++) {
3993 if (sdebug_add_adapter()) {
3994 pr_err("%s: sdebug_add_adapter failed k=%d\n",
4000 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
4001 pr_info("%s: built %d host(s)\n", __func__,
4002 scsi_debug_add_host);
/* Error-unwind labels (goto cleanup chain). */
4007 bus_unregister(&pseudo_lld_bus);
4009 root_device_unregister(pseudo_primary);
/* Module exit: tear down all adapters then unregister driver/bus/root
 * device in reverse order of scsi_debug_init(). */
4020 static void __exit scsi_debug_exit(void)
4022 int k = scsi_debug_add_host;
4027 sdebug_remove_adapter();
4028 driver_unregister(&sdebug_driverfs_driver);
4029 bus_unregister(&pseudo_lld_bus);
4030 root_device_unregister(pseudo_primary);
4038 device_initcall(scsi_debug_init);
4039 module_exit(scsi_debug_exit);
/* Device-model release callback for a simulated adapter; frees the
 * sdebug_host_info (kfree elided in this extract). */
4041 static void sdebug_release_adapter(struct device * dev)
4043 struct sdebug_host_info *sdbg_host;
4045 sdbg_host = to_sdebug_host(dev);
/* Create one simulated adapter: allocate host info, pre-create
 * num_tgts * max_luns device-info entries, link it into the global host
 * list and register it with the driver model. Returns 0 on success. */
4049 static int sdebug_add_adapter(void)
4051 int k, devs_per_host;
4053 struct sdebug_host_info *sdbg_host;
4054 struct sdebug_dev_info *sdbg_devinfo, *tmp;
4056 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
4057 if (NULL == sdbg_host) {
4058 printk(KERN_ERR "%s: out of memory at line %d\n",
4059 __func__, __LINE__);
4063 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
4065 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
4066 for (k = 0; k < devs_per_host; k++) {
4067 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
4068 if (!sdbg_devinfo) {
4069 printk(KERN_ERR "%s: out of memory at line %d\n",
4070 __func__, __LINE__);
4076 spin_lock(&sdebug_host_list_lock);
4077 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
4078 spin_unlock(&sdebug_host_list_lock);
4080 sdbg_host->dev.bus = &pseudo_lld_bus;
4081 sdbg_host->dev.parent = pseudo_primary;
4082 sdbg_host->dev.release = &sdebug_release_adapter;
4083 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
4085 error = device_register(&sdbg_host->dev);
4090 ++scsi_debug_add_host;
/* Error path: free any device infos created before the failure. */
4094 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4096 list_del(&sdbg_devinfo->dev_list);
4097 kfree(sdbg_devinfo);
/* Remove the most recently added simulated adapter (LIFO: takes the
 * tail of sdebug_host_list) and unregister its device. */
4104 static void sdebug_remove_adapter(void)
4106 struct sdebug_host_info * sdbg_host = NULL;
4108 spin_lock(&sdebug_host_list_lock);
4109 if (!list_empty(&sdebug_host_list)) {
4110 sdbg_host = list_entry(sdebug_host_list.prev,
4111 struct sdebug_host_info, host_list);
4112 list_del(&sdbg_host->host_list);
4114 spin_unlock(&sdebug_host_list_lock);
4119 device_unregister(&sdbg_host->dev);
4120 --scsi_debug_add_host;
/* Main command entry: decode the CDB, apply error-injection policy
 * (every_nth / opts flags), dispatch on opcode to the resp_*() handlers
 * and hand the result to schedule_resp() for (possibly delayed)
 * completion. Too fragmentary in this extract to restructure; code
 * lines kept byte-identical. */
4124 scsi_debug_queuecommand(struct scsi_cmnd *SCpnt)
4126 unsigned char *cmd = SCpnt->cmnd;
4129 unsigned long long lba;
4132 int target = SCpnt->device->id;
4133 struct sdebug_dev_info *devip = NULL;
4134 int inj_recovered = 0;
4135 int inj_transport = 0;
4139 int delay_override = 0;
4142 scsi_set_resid(SCpnt, 0);
/* Optionally hex-dump the CDB when NOISE is on and CDB noise allowed. */
4143 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) &&
4144 !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
4148 len = SCpnt->cmd_len;
4150 strcpy(b, "too long, over 32 bytes");
4152 for (k = 0, n = 0; k < len; ++k)
4153 n += scnprintf(b + n, sizeof(b) - n, "%02x ",
4154 (unsigned int)cmd[k]);
4156 sdev_printk(KERN_INFO, SCpnt->device, "%s: cmd %s\n", my_name,
/* Reject LUNs beyond max_luns except the well-known REPORT LUNS wlun. */
4160 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
4161 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
4162 return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
4163 devip = devInfoReg(SCpnt->device);
4165 return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
/* every_nth error injection: every |every_nth| commands, arm one of the
 * injection flags below according to scsi_debug_opts. */
4167 if ((scsi_debug_every_nth != 0) &&
4168 (atomic_inc_return(&sdebug_cmnd_count) >=
4169 abs(scsi_debug_every_nth))) {
4170 atomic_set(&sdebug_cmnd_count, 0);
4171 if (scsi_debug_every_nth < -1)
4172 scsi_debug_every_nth = -1;
4173 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
4174 return 0; /* ignore command causing timeout */
4175 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
4176 scsi_medium_access_command(SCpnt))
4177 return 0; /* time out reads and writes */
4178 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
4179 inj_recovered = 1; /* to reads and writes below */
4180 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
4181 inj_transport = 1; /* to reads and writes below */
4182 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
4183 inj_dif = 1; /* to reads and writes below */
4184 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
4185 inj_dix = 1; /* to reads and writes below */
4186 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts)
/* The wlun only accepts a small set of opcodes. */
4194 case TEST_UNIT_READY:
4196 break; /* only allowable wlun commands */
4198 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4199 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
4200 "not supported for wlun\n", *cmd);
4201 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4203 errsts = check_condition_result;
4204 return schedule_resp(SCpnt, devip, errsts, 0);
/* Main opcode dispatch (switch statement; some cases elided). */
4209 case INQUIRY: /* mandatory, ignore unit attention */
4211 errsts = resp_inquiry(SCpnt, target, devip);
4213 case REQUEST_SENSE: /* mandatory, ignore unit attention */
4215 errsts = resp_requests(SCpnt, devip);
4217 case REZERO_UNIT: /* actually this is REWIND for SSC */
4219 errsts = resp_start_stop(SCpnt, devip);
4221 case ALLOW_MEDIUM_REMOVAL:
4222 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4225 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4226 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
4227 cmd[4] ? "inhibited" : "enabled");
4229 case SEND_DIAGNOSTIC: /* mandatory */
4230 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4232 case TEST_UNIT_READY: /* mandatory */
4233 /* delay_override = 1; */
4234 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4237 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4240 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4243 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4246 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4249 errsts = resp_readcap(SCpnt, devip);
4251 case SERVICE_ACTION_IN:
4252 if (cmd[1] == SAI_READ_CAPACITY_16)
4253 errsts = resp_readcap16(SCpnt, devip);
4254 else if (cmd[1] == SAI_GET_LBA_STATUS) {
/* GET LBA STATUS only valid with logical block provisioning. */
4256 if (scsi_debug_lbp() == 0) {
4257 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4258 INVALID_COMMAND_OPCODE, 0);
4259 errsts = check_condition_result;
4261 errsts = resp_get_lba_status(SCpnt, devip);
4263 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4265 errsts = check_condition_result;
4268 case MAINTENANCE_IN:
4269 if (MI_REPORT_TARGET_PGS != cmd[1]) {
4270 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4272 errsts = check_condition_result;
4275 errsts = resp_report_tgtpgs(SCpnt, devip);
4280 /* READ{10,12,16} and DIF Type 2 are natural enemies */
4281 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
4283 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4284 INVALID_COMMAND_OPCODE, 0);
4285 errsts = check_condition_result;
4289 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
4290 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
4291 (cmd[1] & 0xe0) == 0)
4292 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
4297 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4300 if (scsi_debug_fake_rw)
4302 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
/* READ path: perform the read, then apply any armed injection. */
4307 errsts = resp_read(SCpnt, lba, num, ei_lba);
4308 if (inj_recovered && (0 == errsts)) {
4309 mk_sense_buffer(SCpnt, RECOVERED_ERROR,
4310 THRESHOLD_EXCEEDED, 0);
4311 errsts = check_condition_result;
4312 } else if (inj_transport && (0 == errsts)) {
4313 mk_sense_buffer(SCpnt, ABORTED_COMMAND,
4314 TRANSPORT_PROBLEM, ACK_NAK_TO);
4315 errsts = check_condition_result;
4316 } else if (inj_dif && (0 == errsts)) {
4317 /* Logical block guard check failed */
4318 mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
4319 errsts = illegal_condition_result;
4320 } else if (inj_dix && (0 == errsts)) {
4321 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
4322 errsts = illegal_condition_result;
4325 case REPORT_LUNS: /* mandatory, ignore unit attention */
4327 errsts = resp_report_luns(SCpnt, devip);
4329 case VERIFY: /* 10 byte SBC-2 command */
4330 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4335 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
4336 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
4338 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4339 INVALID_COMMAND_OPCODE, 0);
4340 errsts = check_condition_result;
4344 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
4345 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
4346 (cmd[1] & 0xe0) == 0)
4347 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
4352 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4355 if (scsi_debug_fake_rw)
4357 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
/* WRITE path: mirror of the READ path's injection handling. */
4358 errsts = resp_write(SCpnt, lba, num, ei_lba);
4359 if (inj_recovered && (0 == errsts)) {
4360 mk_sense_buffer(SCpnt, RECOVERED_ERROR,
4361 THRESHOLD_EXCEEDED, 0);
4362 errsts = check_condition_result;
4363 } else if (inj_dif && (0 == errsts)) {
4364 mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
4365 errsts = illegal_condition_result;
4366 } else if (inj_dix && (0 == errsts)) {
4367 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
4368 errsts = illegal_condition_result;
/* WRITE SAME: only when the matching LBP write-same flavor is enabled. */
4374 if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
4375 (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0) {
4376 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4377 INVALID_FIELD_IN_CDB, 0);
4378 errsts = check_condition_result;
4384 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4387 if (scsi_debug_fake_rw)
4389 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4390 errsts = resp_write_same(SCpnt, lba, num, ei_lba, unmap);
4393 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4396 if (scsi_debug_fake_rw)
/* UNMAP requires LBP-UNMAP support and a non-zero descriptor limit. */
4399 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
4400 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4401 INVALID_COMMAND_OPCODE, 0);
4402 errsts = check_condition_result;
4404 errsts = resp_unmap(SCpnt, devip);
4408 errsts = resp_mode_sense(SCpnt, target, devip);
4411 errsts = resp_mode_select(SCpnt, 1, devip);
4413 case MODE_SELECT_10:
4414 errsts = resp_mode_select(SCpnt, 0, devip);
4417 errsts = resp_log_sense(SCpnt, devip);
4419 case SYNCHRONIZE_CACHE:
4421 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4424 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4426 case XDWRITEREAD_10:
/* XDWRITEREAD is bidirectional: read + write + xor in one command. */
4427 if (!scsi_bidi_cmnd(SCpnt)) {
4428 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4429 INVALID_FIELD_IN_CDB, 0);
4430 errsts = check_condition_result;
4434 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4437 if (scsi_debug_fake_rw)
4439 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4440 errsts = resp_read(SCpnt, lba, num, ei_lba);
4443 errsts = resp_write(SCpnt, lba, num, ei_lba);
4446 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
4448 case VARIABLE_LENGTH_CMD:
4449 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
4451 if ((cmd[10] & 0xe0) == 0)
4453 "Unprotected RD/WR to DIF device\n");
4455 if (cmd[9] == READ_32) {
4456 BUG_ON(SCpnt->cmd_len < 32);
4460 if (cmd[9] == WRITE_32) {
4461 BUG_ON(SCpnt->cmd_len < 32);
4466 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4467 INVALID_FIELD_IN_CDB, 0);
4468 errsts = check_condition_result;
4471 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4472 sdev_printk(KERN_INFO, SCpnt->device,
4473 "%s: ATA PASS-THROUGH(16) not supported\n", my_name);
4474 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4476 errsts = check_condition_result;
/* default case: unknown opcode -> INVALID OPCODE sense, unless a unit
 * attention from check_readiness takes precedence. */
4479 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4480 sdev_printk(KERN_INFO, SCpnt->device,
4481 "%s: Opcode: 0x%x not supported\n",
4483 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4485 break; /* Unit attention takes precedence */
4486 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
4487 errsts = check_condition_result;
4490 return schedule_resp(SCpnt, devip, errsts,
4491 (delay_override ? 0 : scsi_debug_delay));
/* queuecommand wrapper: optionally serialize under the Scsi_Host lock
 * when the 'host_lock' sysfs attribute / module parameter is set. */
4495 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
4497 if (scsi_debug_host_lock) {
4498 unsigned long iflags;
4501 spin_lock_irqsave(shost->host_lock, iflags);
4502 rc = scsi_debug_queuecommand(cmd);
4503 spin_unlock_irqrestore(shost->host_lock, iflags);
4506 return scsi_debug_queuecommand(cmd);
/* .change_queue_depth callback: clamp the requested depth (allowing a
 * small excess over CANQUEUE for testing) and apply it to the sdev. */
4510 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4513 unsigned long iflags;
4514 struct sdebug_dev_info *devip;
4516 spin_lock_irqsave(&queued_arr_lock, iflags);
4517 devip = (struct sdebug_dev_info *)sdev->hostdata;
4518 if (NULL == devip) {
4519 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4522 num_in_q = atomic_read(&devip->num_in_q);
4523 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4527 /* allow to exceed max host queued_arr elements for testing */
4528 if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4529 qdepth = SCSI_DEBUG_CANQUEUE + 10;
4530 scsi_change_queue_depth(sdev, qdepth);
4532 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4533 sdev_printk(KERN_INFO, sdev,
4534 "%s: qdepth=%d, num_in_q=%d\n",
4535 __func__, qdepth, num_in_q);
4537 return sdev->queue_depth;
/* .change_queue_type callback: delegate to scsi_change_queue_type() and
 * optionally log the resulting tag scheme when Q_NOISE is enabled. */
4541 sdebug_change_qtype(struct scsi_device *sdev, int qtype)
4543 qtype = scsi_change_queue_type(sdev, qtype);
4544 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4551 case MSG_SIMPLE_TAG:
4554 case MSG_ORDERED_TAG:
4555 cp = "ordered tags";
4561 sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp);
/* SCSI host template wiring the sdebug_* callbacks defined above into
 * the mid-layer (can_queue/use_clustering may be overridden in probe). */
4566 static struct scsi_host_template sdebug_driver_template = {
4567 .show_info = scsi_debug_show_info,
4568 .write_info = scsi_debug_write_info,
4569 .proc_name = sdebug_proc_name,
4570 .name = "SCSI DEBUG",
4571 .info = scsi_debug_info,
4572 .slave_alloc = scsi_debug_slave_alloc,
4573 .slave_configure = scsi_debug_slave_configure,
4574 .slave_destroy = scsi_debug_slave_destroy,
4575 .ioctl = scsi_debug_ioctl,
4576 .queuecommand = sdebug_queuecommand_lock_or_not,
4577 .change_queue_depth = sdebug_change_qdepth,
4578 .change_queue_type = sdebug_change_qtype,
4579 .eh_abort_handler = scsi_debug_abort,
4580 .eh_device_reset_handler = scsi_debug_device_reset,
4581 .eh_target_reset_handler = scsi_debug_target_reset,
4582 .eh_bus_reset_handler = scsi_debug_bus_reset,
4583 .eh_host_reset_handler = scsi_debug_host_reset,
4584 .can_queue = SCSI_DEBUG_CANQUEUE,
4586 .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
4587 .cmd_per_lun = DEF_CMD_PER_LUN,
4589 .use_clustering = DISABLE_CLUSTERING,
4590 .module = THIS_MODULE,
4591 .track_queue_depth = 1,
/* Bus probe: allocate a Scsi_Host for the simulated adapter, configure
 * id/lun limits and DIF/DIX protection capabilities, then add and scan
 * the host. */
4594 static int sdebug_driver_probe(struct device * dev)
4597 struct sdebug_host_info *sdbg_host;
4598 struct Scsi_Host *hpnt;
4601 sdbg_host = to_sdebug_host(dev);
4603 sdebug_driver_template.can_queue = scsi_debug_max_queue;
4604 if (scsi_debug_clustering)
4605 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
4606 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
4608 pr_err("%s: scsi_host_alloc failed\n", __func__);
4613 sdbg_host->shost = hpnt;
4614 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
/* Leave room so the initiator's own id is not counted as a target. */
4615 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
4616 hpnt->max_id = scsi_debug_num_tgts + 1;
4618 hpnt->max_id = scsi_debug_num_tgts;
4619 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
/* Map the scsi_debug_dif parameter onto SHOST DIF/DIX capability bits. */
4623 switch (scsi_debug_dif) {
4625 case SD_DIF_TYPE1_PROTECTION:
4626 host_prot = SHOST_DIF_TYPE1_PROTECTION;
4628 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
4631 case SD_DIF_TYPE2_PROTECTION:
4632 host_prot = SHOST_DIF_TYPE2_PROTECTION;
4634 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
4637 case SD_DIF_TYPE3_PROTECTION:
4638 host_prot = SHOST_DIF_TYPE3_PROTECTION;
4640 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
4645 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
4649 scsi_host_set_prot(hpnt, host_prot);
4651 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
4652 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
4653 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
4654 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
4655 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
4656 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
4657 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
4658 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
/* guard==1 selects IP checksum guard tags, otherwise T10 CRC. */
4660 if (scsi_debug_guard == 1)
4661 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
4663 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
4665 error = scsi_add_host(hpnt, &sdbg_host->dev);
4667 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
4669 scsi_host_put(hpnt);
4671 scsi_scan_host(hpnt);
/* Bus remove: detach the Scsi_Host, free the per-device info entries and
 * drop the host reference. */
4676 static int sdebug_driver_remove(struct device * dev)
4678 struct sdebug_host_info *sdbg_host;
4679 struct sdebug_dev_info *sdbg_devinfo, *tmp;
4681 sdbg_host = to_sdebug_host(dev);
4684 printk(KERN_ERR "%s: Unable to locate host info\n",
4689 scsi_remove_host(sdbg_host->shost);
4691 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4693 list_del(&sdbg_devinfo->dev_list);
4694 kfree(sdbg_devinfo);
4697 scsi_host_put(sdbg_host->shost);
/* Bus match callback; body elided by extraction — presumably matches
 * unconditionally (single driver on the pseudo bus). TODO confirm. */
4701 static int pseudo_lld_bus_match(struct device *dev,
4702 struct device_driver *dev_driver)
4707 static struct bus_type pseudo_lld_bus = {
4709 .match = pseudo_lld_bus_match,
4710 .probe = sdebug_driver_probe,
4711 .remove = sdebug_driver_remove,
4712 .drv_groups = sdebug_drv_groups,