2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disks
10 * (or disk-like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
59 #include "scsi_logging.h"
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST 1
87 #define DEF_NUM_TGTS 1
88 #define DEF_MAX_LUNS 1
89 /* With these defaults, this driver will make 1 host with 1 target
90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
94 #define DEF_DEV_SIZE_MB 8
98 #define DEF_EVERY_NTH 0
103 #define DEF_LBPWS10 0
105 #define DEF_LOWEST_ALIGNED 0
106 #define DEF_NO_LUN_0 0
107 #define DEF_NUM_PARTS 0
109 #define DEF_OPT_BLKS 64
110 #define DEF_PHYSBLK_EXP 0
112 #define DEF_REMOVABLE false
113 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
114 #define DEF_SECTOR_SIZE 512
115 #define DEF_UNMAP_ALIGNMENT 0
116 #define DEF_UNMAP_GRANULARITY 1
117 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
118 #define DEF_UNMAP_MAX_DESC 256
119 #define DEF_VIRTUAL_GB 0
120 #define DEF_VPD_USE_HOSTNO 1
121 #define DEF_WRITESAME_LENGTH 0xFFFF
123 /* bit mask values for scsi_debug_opts */
124 #define SCSI_DEBUG_OPT_NOISE 1
125 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
126 #define SCSI_DEBUG_OPT_TIMEOUT 4
127 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
128 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
129 #define SCSI_DEBUG_OPT_DIF_ERR 32
130 #define SCSI_DEBUG_OPT_DIX_ERR 64
131 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
132 /* When "every_nth" > 0 then modulo "every_nth" commands:
133 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
134 * - a RECOVERED_ERROR is simulated on successful read and write
135 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
136 * - a TRANSPORT_ERROR is simulated on successful read and write
137 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
139 * When "every_nth" < 0 then after "- every_nth" commands:
140 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
141 * - a RECOVERED_ERROR is simulated on successful read and write
142 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
143 * - a TRANSPORT_ERROR is simulated on successful read and write
144 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
145 * This will continue until some other action occurs (e.g. the user
146 * writing a new value (other than -1 or 1) to every_nth via sysfs).
149 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
150 * sector on read commands: */
151 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
152 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
154 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
155 * or "peripheral device" addressing (value 0) */
156 #define SAM2_LUN_ADDRESS_METHOD 0
157 #define SAM2_WLUN_REPORT_LUNS 0xc101
159 /* Can queue up to this number of commands. Typically commands that
160 * that have a non-zero delay are queued. */
161 #define SCSI_DEBUG_CANQUEUE 255
/* Backing variables for the driver's module parameters; each starts at its
 * DEF_* default. Most are plain ints so they can be exposed read/write
 * through module_param machinery (registration not visible in this view). */
163 static int scsi_debug_add_host = DEF_NUM_HOST;
164 static int scsi_debug_ato = DEF_ATO;
165 static int scsi_debug_delay = DEF_DELAY;
166 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
167 static int scsi_debug_dif = DEF_DIF;
168 static int scsi_debug_dix = DEF_DIX;
169 static int scsi_debug_dsense = DEF_D_SENSE;
170 static int scsi_debug_every_nth = DEF_EVERY_NTH;
171 static int scsi_debug_fake_rw = DEF_FAKE_RW;
172 static int scsi_debug_guard = DEF_GUARD;
173 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
174 static int scsi_debug_max_luns = DEF_MAX_LUNS;
175 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
176 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
177 static int scsi_debug_no_uld = 0;
178 static int scsi_debug_num_parts = DEF_NUM_PARTS;
179 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
180 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
181 static int scsi_debug_opts = DEF_OPTS;
182 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
183 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
184 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
185 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
186 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
187 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
/* logical block provisioning (thin provisioning) controls */
188 static unsigned int scsi_debug_lbpu = DEF_LBPU;
189 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
190 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
191 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
192 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
193 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
194 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
195 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
196 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
197 static bool scsi_debug_removable = DEF_REMOVABLE;
/* running count used by the every_nth error-injection logic */
199 static int scsi_debug_cmnd_count = 0;
201 #define DEV_READONLY(TGT) (0)
203 static unsigned int sdebug_store_sectors;
204 static sector_t sdebug_capacity; /* in sectors */
206 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
207 may still need them */
208 static int sdebug_heads; /* heads per disk */
209 static int sdebug_cylinders_per; /* cylinders per surface */
210 static int sdebug_sectors_per; /* sectors per cylinder */
212 #define SDEBUG_MAX_PARTS 4
214 #define SDEBUG_SENSE_LEN 32
216 #define SCSI_DEBUG_MAX_CMD_LEN 32
/* Nonzero when any logical block provisioning mechanism (UNMAP,
 * WRITE SAME(16), WRITE SAME(10)) is enabled via module parameters.
 * NOTE(review): braces elided in this view of the function. */
218 static unsigned int scsi_debug_lbp(void)
220 return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
/* Per-logical-unit state for a simulated device. The per-device sense
 * buffer implements the "weak nexus" between commands and sense data.
 * NOTE(review): several members are elided in this view. */
223 struct sdebug_dev_info {
224 struct list_head dev_list;
225 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
226 unsigned int channel;
229 struct sdebug_host_info *sdbg_host;
/* Per simulated host adapter: linked into sdebug_host_list and owning a
 * list of its sdebug_dev_info devices plus the mid-layer Scsi_Host. */
236 struct sdebug_host_info {
237 struct list_head host_list;
238 struct Scsi_Host *shost;
240 struct list_head dev_info_list;
243 #define to_sdebug_host(d) \
244 container_of(d, struct sdebug_host_info, dev)
246 static LIST_HEAD(sdebug_host_list);
247 static DEFINE_SPINLOCK(sdebug_host_list_lock);
/* Completion callback type invoked when a (possibly delayed) command finishes. */
249 typedef void (* done_funct_t) (struct scsi_cmnd *);
/* One slot in the delayed-response queue: the pending command, its
 * completion callback and the timer that fires the response. */
251 struct sdebug_queued_cmd {
253 struct timer_list cmnd_timer;
254 done_funct_t done_funct;
255 struct scsi_cmnd * a_cmnd;
258 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
260 static unsigned char * fake_storep; /* ramdisk storage */
261 static unsigned char *dif_storep; /* protection info */
262 static void *map_storep; /* provisioning map */
264 static unsigned long map_size;
265 static int num_aborts = 0;
266 static int num_dev_resets = 0;
267 static int num_bus_resets = 0;
268 static int num_host_resets = 0;
269 static int dix_writes;
270 static int dix_reads;
271 static int dif_errors;
273 static DEFINE_SPINLOCK(queued_arr_lock);
274 static DEFINE_RWLOCK(atomic_rw);
276 static char sdebug_proc_name[] = "scsi_debug";
278 static struct bus_type pseudo_lld_bus;
/* Map a media sector to its offset within the protection-info store
 * (dif_storep). NOTE(review): body elided in this view — confirm wrap
 * behavior against the full source. */
280 static inline sector_t dif_offset(sector_t sector)
/* driver-model registration object binding this driver to the pseudo LLD bus */
285 static struct device_driver sdebug_driverfs_driver = {
286 .name = sdebug_proc_name,
287 .bus = &pseudo_lld_bus,
290 static const int check_condition_result =
291 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
293 static const int illegal_condition_result =
294 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
296 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
298 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
301 static int sdebug_add_adapter(void);
302 static void sdebug_remove_adapter(void);
/* Propagate the current scsi_debug_num_tgts setting to every registered
 * pseudo host under the host-list lock: max_id is bumped past this_id when
 * the host claims an id for itself, and max_lun is raised to cover the
 * well-known REPORT LUNS wlun. */
304 static void sdebug_max_tgts_luns(void)
306 struct sdebug_host_info *sdbg_host;
307 struct Scsi_Host *hpnt;
309 spin_lock(&sdebug_host_list_lock);
310 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
311 hpnt = sdbg_host->shost;
312 if ((hpnt->this_id >= 0) &&
313 (scsi_debug_num_tgts > hpnt->this_id))
314 hpnt->max_id = scsi_debug_num_tgts + 1;
316 hpnt->max_id = scsi_debug_num_tgts;
317 /* scsi_debug_max_luns; */
318 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
320 spin_unlock(&sdebug_host_list_lock);
/* Record sense data (key/asc/ascq) in the device's per-LUN sense buffer,
 * in descriptor or fixed format depending on scsi_debug_dsense, and log it
 * when the NOISE option is set. */
323 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
326 unsigned char *sbuff;
328 sbuff = devip->sense_buff;
329 memset(sbuff, 0, SDEBUG_SENSE_LEN);
331 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
333 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
334 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
335 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
/* Decode starting LBA, transfer length, and (where the CDB carries one)
 * the expected initial logical block reference tag from the command
 * descriptor block, switching on the CDB format/opcode. */
338 static void get_data_transfer_info(unsigned char *cmd,
339 unsigned long long *lba, unsigned int *num,
/* 32-byte variable length CDB: 8-byte LBA at bytes 12..19, expected
 * initial LBA at 20..23, transfer length at 28..31 */
345 case VARIABLE_LENGTH_CMD:
346 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
347 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
348 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
349 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
351 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
352 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
354 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
/* 16-byte CDB: 8-byte LBA at bytes 2..9, length at 10..13 */
361 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
362 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
363 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
364 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
366 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
/* 12-byte CDB: 4-byte LBA at 2..5, 4-byte length at 6..9 */
371 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
374 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
/* 10-byte CDB: 4-byte LBA at 2..5, 2-byte length at 7..8 */
381 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
384 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
/* 6-byte CDB: 21-bit LBA, length byte where 0 means 256 blocks */
388 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
389 (u32)(cmd[1] & 0x1f) << 16;
390 *num = (0 == cmd[4]) ? 256 : cmd[4];
/* Host-template ioctl hook: only logs the ioctl cmd when NOISE is set;
 * deliberately does not return -ENOTTY (see comment) to keep fdisk happy. */
397 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
399 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
400 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
403 /* return -ENOTTY; // correct return but upsets fdisk */
/* Gate-keeper run before servicing most commands: reports a power-on-reset
 * UNIT ATTENTION after a reset and, unless reset_only is set, a NOT READY
 * check condition while the unit is stopped. Returns 0 when ready, else
 * check_condition_result with sense data filled in. */
406 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
407 struct sdebug_dev_info * devip)
410 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
411 printk(KERN_INFO "scsi_debug: Reporting Unit "
412 "attention: power on reset\n");
414 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
415 return check_condition_result;
417 if ((0 == reset_only) && devip->stopped) {
418 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
419 printk(KERN_INFO "scsi_debug: Reporting Not "
420 "ready: initializing command required\n");
421 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
423 return check_condition_result;
428 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
/* Copy simulated response data from 'arr' into the command's scatter-gather
 * list (the data-in buffer for bidi commands). */
429 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
433 struct scsi_data_buffer *sdb = scsi_in(scp);
/* sanity: only valid for DMA_FROM_DEVICE or bidirectional commands */
437 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
438 return (DID_ERROR << 16);
440 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
443 sdb->resid -= act_len;
445 sdb->resid = scsi_bufflen(scp) - act_len;
450 /* Returns number of bytes fetched into 'arr' or -1 if error. */
/* Pull data-out (host-to-device) payload from the command's scatter-gather
 * list into the flat buffer 'arr', up to arr_len bytes. */
451 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
454 if (!scsi_bufflen(scp))
456 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
459 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
/* Standard INQUIRY identification strings: 8-byte vendor, 16-byte product
 * id, 4-byte revision — all space padded to their fixed field widths. */
463 static const char * inq_vendor_id = "Linux   ";
464 static const char * inq_product_id = "scsi_debug      ";
465 static const char * inq_product_rev = "0004";
/* Build the Device Identification VPD page (0x83) descriptor list in 'arr':
 * a faked T10 vendor-id designator, then (for real LUs, dev_id_num >= 0)
 * NAA-5 logical-unit, relative-port, target-port, target-port-group and
 * target-device designators plus a SCSI name string. Returns the number of
 * bytes written (per the elided tail). */
467 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
468 int target_dev_id, int dev_id_num,
469 const char * dev_id_str,
475 port_a = target_dev_id + 1;
476 /* T10 vendor identifier field format (faked) */
477 arr[0] = 0x2; /* ASCII */
480 memcpy(&arr[4], inq_vendor_id, 8);
481 memcpy(&arr[12], inq_product_id, 16);
482 memcpy(&arr[28], dev_id_str, dev_id_str_len);
483 num = 8 + 16 + dev_id_str_len;
486 if (dev_id_num >= 0) {
487 /* NAA-5, Logical unit identifier (binary) */
488 arr[num++] = 0x1; /* binary (not necessarily sas) */
489 arr[num++] = 0x3; /* PIV=0, lu, naa */
492 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
496 arr[num++] = (dev_id_num >> 24);
497 arr[num++] = (dev_id_num >> 16) & 0xff;
498 arr[num++] = (dev_id_num >> 8) & 0xff;
499 arr[num++] = dev_id_num & 0xff;
500 /* Target relative port number */
501 arr[num++] = 0x61; /* proto=sas, binary */
502 arr[num++] = 0x94; /* PIV=1, target port, rel port */
503 arr[num++] = 0x0; /* reserved */
504 arr[num++] = 0x4; /* length */
505 arr[num++] = 0x0; /* reserved */
506 arr[num++] = 0x0; /* reserved */
508 arr[num++] = 0x1; /* relative port A */
510 /* NAA-5, Target port identifier */
511 arr[num++] = 0x61; /* proto=sas, binary */
512 arr[num++] = 0x93; /* piv=1, target port, naa */
515 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
519 arr[num++] = (port_a >> 24);
520 arr[num++] = (port_a >> 16) & 0xff;
521 arr[num++] = (port_a >> 8) & 0xff;
522 arr[num++] = port_a & 0xff;
523 /* NAA-5, Target port group identifier */
524 arr[num++] = 0x61; /* proto=sas, binary */
525 arr[num++] = 0x95; /* piv=1, target port group id */
530 arr[num++] = (port_group_id >> 8) & 0xff;
531 arr[num++] = port_group_id & 0xff;
532 /* NAA-5, Target device identifier */
533 arr[num++] = 0x61; /* proto=sas, binary */
534 arr[num++] = 0xa3; /* piv=1, target device, naa */
537 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
541 arr[num++] = (target_dev_id >> 24);
542 arr[num++] = (target_dev_id >> 16) & 0xff;
543 arr[num++] = (target_dev_id >> 8) & 0xff;
544 arr[num++] = target_dev_id & 0xff;
545 /* SCSI name string: Target device identifier */
546 arr[num++] = 0x63; /* proto=sas, UTF-8 */
547 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
550 memcpy(arr + num, "naa.52222220", 12);
552 snprintf(b, sizeof(b), "%08X", target_dev_id);
553 memcpy(arr + num, b, 8);
555 memset(arr + num, 0, 4);
/* Canned Software Interface Identification VPD page (0x84) payload:
 * three 6-byte entries, starting at the page's 4th byte. */
561 static unsigned char vpd84_data[] = {
562 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
563 0x22,0x22,0x22,0x0,0xbb,0x1,
564 0x22,0x22,0x22,0x0,0xbb,0x2,
/* Software Interface Identification VPD page (0x84): copy the canned
 * payload into 'arr' and return its length. */
567 static int inquiry_evpd_84(unsigned char * arr)
569 memcpy(arr, vpd84_data, sizeof(vpd84_data));
570 return sizeof(vpd84_data);
/* Management Network Addresses VPD page (0x85): two fake URLs — a storage
 * configuration service and a logging service — each null terminated and
 * padded to a multiple of four bytes. */
573 static int inquiry_evpd_85(unsigned char * arr)
576 const char * na1 = "https://www.kernel.org/config";
577 const char * na2 = "http://www.kernel.org/log";
580 arr[num++] = 0x1; /* lu, storage config */
581 arr[num++] = 0x0; /* reserved */
586 plen = ((plen / 4) + 1) * 4;
587 arr[num++] = plen; /* length, null terminated, padded */
588 memcpy(arr + num, na1, olen);
589 memset(arr + num + olen, 0, plen - olen);
592 arr[num++] = 0x4; /* lu, logging */
593 arr[num++] = 0x0; /* reserved */
598 plen = ((plen / 4) + 1) * 4;
599 arr[num++] = plen; /* length, null terminated, padded */
600 memcpy(arr + num, na2, olen);
601 memset(arr + num + olen, 0, plen - olen);
607 /* SCSI ports VPD page */
/* Build VPD page 0x88 in 'arr': two relative ports, each carrying a 12-byte
 * NAA-5 target port identifier descriptor. Port B (target_dev_id + 2, per
 * the elided line) is the fake secondary port. */
608 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
613 port_a = target_dev_id + 1;
615 arr[num++] = 0x0; /* reserved */
616 arr[num++] = 0x0; /* reserved */
618 arr[num++] = 0x1; /* relative port 1 (primary) */
619 memset(arr + num, 0, 6);
622 arr[num++] = 12; /* length tp descriptor */
623 /* naa-5 target port identifier (A) */
624 arr[num++] = 0x61; /* proto=sas, binary */
625 arr[num++] = 0x93; /* PIV=1, target port, NAA */
626 arr[num++] = 0x0; /* reserved */
627 arr[num++] = 0x8; /* length */
628 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
632 arr[num++] = (port_a >> 24);
633 arr[num++] = (port_a >> 16) & 0xff;
634 arr[num++] = (port_a >> 8) & 0xff;
635 arr[num++] = port_a & 0xff;
637 arr[num++] = 0x0; /* reserved */
638 arr[num++] = 0x0; /* reserved */
640 arr[num++] = 0x2; /* relative port 2 (secondary) */
641 memset(arr + num, 0, 6);
644 arr[num++] = 12; /* length tp descriptor */
645 /* naa-5 target port identifier (B) */
646 arr[num++] = 0x61; /* proto=sas, binary */
647 arr[num++] = 0x93; /* PIV=1, target port, NAA */
648 arr[num++] = 0x0; /* reserved */
649 arr[num++] = 0x8; /* length */
650 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
654 arr[num++] = (port_b >> 24);
655 arr[num++] = (port_b >> 16) & 0xff;
656 arr[num++] = (port_b >> 8) & 0xff;
657 arr[num++] = port_b & 0xff;
/* Canned ATA Information VPD page (0x89) payload, starting at the page's
 * 4th byte: SAT identification strings followed by an ATA IDENTIFY DEVICE
 * style data block (faked). */
663 static unsigned char vpd89_data[] = {
664 /* from 4th byte */ 0,0,0,0,
665 'l','i','n','u','x',' ',' ',' ',
666 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
668 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
670 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
671 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
672 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
673 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
675 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
677 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
679 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
680 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
681 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
682 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
683 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
684 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
685 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
686 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
687 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
688 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
689 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
690 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
691 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
692 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
695 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
696 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
697 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
698 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
699 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
700 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
701 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
702 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
703 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
704 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
/* ATA Information VPD page (0x89): copy the canned payload into 'arr'
 * and return its length. */
707 static int inquiry_evpd_89(unsigned char * arr)
709 memcpy(arr, vpd89_data, sizeof(vpd89_data));
710 return sizeof(vpd89_data);
714 /* Block limits VPD page (SBC-3) */
/* Template payload for VPD page 0xb0, starting at the page's 4th byte;
 * fields are patched in by inquiry_evpd_b0() below. */
715 static unsigned char vpdb0_data[] = {
716 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
717 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
718 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
719 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
/* Block Limits VPD page (0xb0): start from the template then patch in the
 * transfer-length granularity, maximum/optimal transfer lengths and — when
 * logical block provisioning is enabled — the UNMAP and WRITE SAME limits.
 * Returns 0x3c (mandatory LBP page length) or the template size. */
722 static int inquiry_evpd_b0(unsigned char * arr)
726 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
728 /* Optimal transfer length granularity */
729 gran = 1 << scsi_debug_physblk_exp;
730 arr[2] = (gran >> 8) & 0xff;
731 arr[3] = gran & 0xff;
733 /* Maximum Transfer Length */
734 if (sdebug_store_sectors > 0x400) {
735 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
736 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
737 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
738 arr[7] = sdebug_store_sectors & 0xff;
741 /* Optimal Transfer Length */
742 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
744 if (scsi_debug_lbpu) {
745 /* Maximum Unmap LBA Count */
746 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
748 /* Maximum Unmap Block Descriptor Count */
749 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
752 /* Unmap Granularity Alignment */
753 if (scsi_debug_unmap_alignment) {
754 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
755 arr[28] |= 0x80; /* UGAVALID */
758 /* Optimal Unmap Granularity */
759 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
761 /* Maximum WRITE SAME Length */
762 put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
764 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
766 return sizeof(vpdb0_data);
769 /* Block device characteristics VPD page (SBC-3) */
/* Page 0xb1: advertise a small, non-rotating (SSD-like) medium. */
770 static int inquiry_evpd_b1(unsigned char *arr)
772 memset(arr, 0, 0x3c);
774 arr[1] = 1; /* non rotating medium (e.g. solid state) */
776 arr[3] = 5; /* less than 1.8" */
781 /* Logical block provisioning VPD page (SBC-3) */
/* Page 0xb2: set the LBPU/LBPWS/LBPWS10/LBPRZ bits from the corresponding
 * module parameters (bit assignments elided in this view). */
782 static int inquiry_evpd_b2(unsigned char *arr)
785 arr[0] = 0; /* threshold exponent */
790 if (scsi_debug_lbpws)
793 if (scsi_debug_lbpws10)
796 if (scsi_debug_lbprz)
802 #define SDEBUG_LONG_INQ_SZ 96
803 #define SDEBUG_MAX_INQ_ARR_SZ 584
/*
 * Respond to the SCSI INQUIRY command: standard inquiry data, or — when
 * the EVPD bit (cmd[1] & 1) is set — one of the supported Vital Product
 * Data pages dispatched on cmd[2]. CMDDT (cmd[1] & 2) is rejected as an
 * illegal request. Returns 0, check_condition_result, or DID_REQUEUE<<16
 * when the GFP_ATOMIC allocation fails.
 * NOTE(review): several lines (kfree of arr, lu_id_str decl, etc.) are
 * elided in this view of the function.
 */
805 static int resp_inquiry(struct scsi_cmnd * scp, int target,
806 struct sdebug_dev_info * devip)
808 unsigned char pq_pdt;
810 unsigned char *cmd = (unsigned char *)scp->cmnd;
811 int alloc_len, n, ret;
813 alloc_len = (cmd[3] << 8) + cmd[4];
814 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
816 return DID_REQUEUE << 16;
/* byte 0: peripheral qualifier + device type */
818 pq_pdt = 0x1e; /* present, wlun */
819 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
820 pq_pdt = 0x7f; /* not present, no device type */
822 pq_pdt = (scsi_debug_ptype & 0x1f);
824 if (0x2 & cmd[1]) { /* CMDDT bit set */
825 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
828 return check_condition_result;
829 } else if (0x1 & cmd[1]) { /* EVPD bit set */
830 int lu_id_num, port_group_id, target_dev_id, len;
832 int host_no = devip->sdbg_host->shost->host_no;
/* synthesize stable-looking ids from host/target/lun numbers */
834 port_group_id = (((host_no + 1) & 0x7f) << 8) +
835 (devip->channel & 0x7f);
836 if (0 == scsi_debug_vpd_use_hostno)
838 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
839 (devip->target * 1000) + devip->lun);
840 target_dev_id = ((host_no + 1) * 2000) +
841 (devip->target * 1000) - 3;
842 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
843 if (0 == cmd[2]) { /* supported vital product data pages */
844 arr[1] = cmd[2]; /*sanity */
846 arr[n++] = 0x0; /* this page */
847 arr[n++] = 0x80; /* unit serial number */
848 arr[n++] = 0x83; /* device identification */
849 arr[n++] = 0x84; /* software interface ident. */
850 arr[n++] = 0x85; /* management network addresses */
851 arr[n++] = 0x86; /* extended inquiry */
852 arr[n++] = 0x87; /* mode page policy */
853 arr[n++] = 0x88; /* SCSI ports */
854 arr[n++] = 0x89; /* ATA information */
855 arr[n++] = 0xb0; /* Block limits (SBC) */
856 arr[n++] = 0xb1; /* Block characteristics (SBC) */
857 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
859 arr[3] = n - 4; /* number of supported VPD pages */
860 } else if (0x80 == cmd[2]) { /* unit serial number */
861 arr[1] = cmd[2]; /*sanity */
863 memcpy(&arr[4], lu_id_str, len);
864 } else if (0x83 == cmd[2]) { /* device identification */
865 arr[1] = cmd[2]; /*sanity */
866 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
867 target_dev_id, lu_id_num,
869 } else if (0x84 == cmd[2]) { /* Software interface ident. */
870 arr[1] = cmd[2]; /*sanity */
871 arr[3] = inquiry_evpd_84(&arr[4]);
872 } else if (0x85 == cmd[2]) { /* Management network addresses */
873 arr[1] = cmd[2]; /*sanity */
874 arr[3] = inquiry_evpd_85(&arr[4]);
875 } else if (0x86 == cmd[2]) { /* extended inquiry */
876 arr[1] = cmd[2]; /*sanity */
877 arr[3] = 0x3c; /* number of following entries */
878 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
879 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
880 else if (scsi_debug_dif)
881 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
883 arr[4] = 0x0; /* no protection stuff */
884 arr[5] = 0x7; /* head of q, ordered + simple q's */
885 } else if (0x87 == cmd[2]) { /* mode page policy */
886 arr[1] = cmd[2]; /*sanity */
887 arr[3] = 0x8; /* number of following entries */
888 arr[4] = 0x2; /* disconnect-reconnect mp */
889 arr[6] = 0x80; /* mlus, shared */
890 arr[8] = 0x18; /* protocol specific lu */
891 arr[10] = 0x82; /* mlus, per initiator port */
892 } else if (0x88 == cmd[2]) { /* SCSI Ports */
893 arr[1] = cmd[2]; /*sanity */
894 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
895 } else if (0x89 == cmd[2]) { /* ATA information */
896 arr[1] = cmd[2]; /*sanity */
897 n = inquiry_evpd_89(&arr[4]);
900 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
901 arr[1] = cmd[2]; /*sanity */
902 arr[3] = inquiry_evpd_b0(&arr[4]);
903 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
904 arr[1] = cmd[2]; /*sanity */
905 arr[3] = inquiry_evpd_b1(&arr[4]);
906 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
907 arr[1] = cmd[2]; /*sanity */
908 arr[3] = inquiry_evpd_b2(&arr[4]);
910 /* Illegal request, invalid field in cdb */
911 mk_sense_buffer(devip, ILLEGAL_REQUEST,
912 INVALID_FIELD_IN_CDB, 0);
914 return check_condition_result;
/* clamp response to both allocation length and our buffer size */
916 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
917 ret = fill_from_dev_buffer(scp, arr,
918 min(len, SDEBUG_MAX_INQ_ARR_SZ));
922 /* drops through here for a standard inquiry */
923 arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
924 arr[2] = scsi_debug_scsi_level;
925 arr[3] = 2; /* response_data_format==2 */
926 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
927 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
928 if (0 == scsi_debug_vpd_use_hostno)
929 arr[5] = 0x10; /* claim: implicit TGPS */
930 arr[6] = 0x10; /* claim: MultiP */
931 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
932 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
933 memcpy(&arr[8], inq_vendor_id, 8);
934 memcpy(&arr[16], inq_product_id, 16);
935 memcpy(&arr[32], inq_product_rev, 4);
936 /* version descriptors (2 bytes each) follow */
937 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
938 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
940 if (scsi_debug_ptype == 0) {
941 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
942 } else if (scsi_debug_ptype == 1) {
943 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
945 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
946 ret = fill_from_dev_buffer(scp, arr,
947 min(alloc_len, SDEBUG_LONG_INQ_SZ));
/* Respond to REQUEST SENSE: report a synthesized THRESHOLD EXCEEDED when
 * the informational-exceptions mode page has TEST set with MRIE==6,
 * otherwise return (and then clear) the device's stored sense data,
 * converting fixed-format sense to the short descriptor form when the
 * DESC bit is set but descriptor sense is not configured. */
952 static int resp_requests(struct scsi_cmnd * scp,
953 struct sdebug_dev_info * devip)
955 unsigned char * sbuff;
956 unsigned char *cmd = (unsigned char *)scp->cmnd;
957 unsigned char arr[SDEBUG_SENSE_LEN];
961 memset(arr, 0, sizeof(arr));
962 if (devip->reset == 1)
963 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
964 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
965 sbuff = devip->sense_buff;
966 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
969 arr[1] = 0x0; /* NO_SENSE in sense_key */
970 arr[2] = THRESHOLD_EXCEEDED;
971 arr[3] = 0xff; /* TEST set and MRIE==6 */
974 arr[2] = 0x0; /* NO_SENSE in sense_key */
975 arr[7] = 0xa; /* 18 byte sense buffer */
976 arr[12] = THRESHOLD_EXCEEDED;
977 arr[13] = 0xff; /* TEST set and MRIE==6 */
980 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
981 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
982 /* DESC bit set and sense_buff in fixed format */
983 memset(arr, 0, sizeof(arr));
985 arr[1] = sbuff[2]; /* sense key */
986 arr[2] = sbuff[12]; /* asc */
987 arr[3] = sbuff[13]; /* ascq */
/* sense is consumed: reset stored sense to "no sense" */
991 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
992 return fill_from_dev_buffer(scp, arr, len);
/* Respond to START STOP UNIT: reject nonzero power conditions as an
 * illegal request (per the elided branch), otherwise toggle the device's
 * stopped state to match the START bit. */
995 static int resp_start_stop(struct scsi_cmnd * scp,
996 struct sdebug_dev_info * devip)
998 unsigned char *cmd = (unsigned char *)scp->cmnd;
999 int power_cond, errsts, start;
1001 if ((errsts = check_readiness(scp, 1, devip)))
1003 power_cond = (cmd[4] & 0xf0) >> 4;
1005 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1007 return check_condition_result;
1010 if (start == devip->stopped)
1011 devip->stopped = !start;
/* Reported capacity in sectors: virtual_gb (1 GiB units) when set, which
 * may exceed the real RAM-backed store, else the actual store size. */
1015 static sector_t get_sdebug_capacity(void)
1017 if (scsi_debug_virtual_gb > 0)
1018 return (sector_t)scsi_debug_virtual_gb *
1019 (1073741824 / scsi_debug_sector_size);
1021 return sdebug_store_sectors;
1024 #define SDEBUG_READCAP_ARR_SZ 8
/* Respond to READ CAPACITY(10): big-endian last-LBA and sector size;
 * capacities that don't fit in 32 bits report 0xffffffff (elided else
 * branch), telling the initiator to use READ CAPACITY(16). */
1025 static int resp_readcap(struct scsi_cmnd * scp,
1026 struct sdebug_dev_info * devip)
1028 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1032 if ((errsts = check_readiness(scp, 1, devip)))
1034 /* following just in case virtual_gb changed */
1035 sdebug_capacity = get_sdebug_capacity();
1036 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1037 if (sdebug_capacity < 0xffffffff) {
1038 capac = (unsigned int)sdebug_capacity - 1;
1039 arr[0] = (capac >> 24);
1040 arr[1] = (capac >> 16) & 0xff;
1041 arr[2] = (capac >> 8) & 0xff;
1042 arr[3] = capac & 0xff;
1049 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1050 arr[7] = scsi_debug_sector_size & 0xff;
1051 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1054 #define SDEBUG_READCAP16_ARR_SZ 32
/* Respond to READ CAPACITY(16): 8-byte last-LBA, sector size, physical
 * block exponent, lowest aligned LBA, the LBPME/LBPRZ provisioning bits
 * and the protection (DIF) type/enable bits. */
1055 static int resp_readcap16(struct scsi_cmnd * scp,
1056 struct sdebug_dev_info * devip)
1058 unsigned char *cmd = (unsigned char *)scp->cmnd;
1059 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1060 unsigned long long capac;
1061 int errsts, k, alloc_len;
1063 if ((errsts = check_readiness(scp, 1, devip)))
1065 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1067 /* following just in case virtual_gb changed */
1068 sdebug_capacity = get_sdebug_capacity();
1069 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1070 capac = sdebug_capacity - 1;
/* bytes 0..7: returned logical block address (big endian) */
1071 for (k = 0; k < 8; ++k, capac >>= 8)
1072 arr[7 - k] = capac & 0xff;
1073 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1074 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1075 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1076 arr[11] = scsi_debug_sector_size & 0xff;
1077 arr[13] = scsi_debug_physblk_exp & 0xf;
1078 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1080 if (scsi_debug_lbp()) {
1081 arr[14] |= 0x80; /* LBPME */
1082 if (scsi_debug_lbprz)
1083 arr[14] |= 0x40; /* LBPRZ */
1086 arr[15] = scsi_debug_lowest_aligned & 0xff;
1088 if (scsi_debug_dif) {
1089 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1090 arr[12] |= 1; /* PROT_EN */
1093 return fill_from_dev_buffer(scp, arr,
1094 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1097 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
/*
 * Build a REPORT TARGET PORT GROUPS response.  Two single-port groups
 * are fabricated: group A (usable) and group B (reported unavailable),
 * matching the two-port story told by VPD page 0x88.
 * NOTE(review): sampled listing -- some original lines are absent;
 * visible code text left untouched.
 */
1099 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1100 struct sdebug_dev_info * devip)
1102 unsigned char *cmd = (unsigned char *)scp->cmnd;
1103 unsigned char * arr;
1104 int host_no = devip->sdbg_host->shost->host_no;
1105 int n, ret, alen, rlen;
1106 int port_group_a, port_group_b, port_a, port_b;
/* allocation length: big-endian field in CDB bytes 6..9 */
1108 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
/* GFP_ATOMIC: may run in interrupt/atomic context */
1111 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC)
/* allocation failure -> ask mid-layer to requeue the command */
1113 return DID_REQUEUE << 16;
1115 * EVPD page 0x88 states we have two ports, one
1116 * real and a fake port with no device connected.
1117 * So we create two port groups with one port each
1118 * and set the group with port B to unavailable.
1120 port_a = 0x1; /* relative port A */
1121 port_b = 0x2; /* relative port B */
/* group ids encode (host_no+1) in the high byte, channel in the low */
1122 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1123 (devip->channel & 0x7f);
1124 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1125 (devip->channel & 0x7f) + 0x80;
1128 * The asymmetric access state is cycled according to the host_id.
1131 if (0 == scsi_debug_vpd_use_hostno) {
1132 arr[n++] = host_no % 3; /* Asymm access state */
1133 arr[n++] = 0x0F; /* claim: all states are supported */
1135 arr[n++] = 0x0; /* Active/Optimized path */
1136 arr[n++] = 0x01; /* claim: only support active/optimized paths */
/* ---- target port group descriptor for group A ---- */
1138 arr[n++] = (port_group_a >> 8) & 0xff;
1139 arr[n++] = port_group_a & 0xff;
1140 arr[n++] = 0; /* Reserved */
1141 arr[n++] = 0; /* Status code */
1142 arr[n++] = 0; /* Vendor unique */
1143 arr[n++] = 0x1; /* One port per group */
1144 arr[n++] = 0; /* Reserved */
1145 arr[n++] = 0; /* Reserved */
1146 arr[n++] = (port_a >> 8) & 0xff;
1147 arr[n++] = port_a & 0xff;
/* ---- target port group descriptor for group B (unavailable) ---- */
1148 arr[n++] = 3; /* Port unavailable */
1149 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1150 arr[n++] = (port_group_b >> 8) & 0xff;
1151 arr[n++] = port_group_b & 0xff;
1152 arr[n++] = 0; /* Reserved */
1153 arr[n++] = 0; /* Status code */
1154 arr[n++] = 0; /* Vendor unique */
1155 arr[n++] = 0x1; /* One port per group */
1156 arr[n++] = 0; /* Reserved */
1157 arr[n++] = 0; /* Reserved */
1158 arr[n++] = (port_b >> 8) & 0xff;
1159 arr[n++] = port_b & 0xff;
/* return data length header (big-endian, bytes 0..3) */
1162 arr[0] = (rlen >> 24) & 0xff;
1163 arr[1] = (rlen >> 16) & 0xff;
1164 arr[2] = (rlen >> 8) & 0xff;
1165 arr[3] = rlen & 0xff;
1168 * Return the smallest value of either
1169 * - The allocated length
1170 * - The constructed command length
1171 * - The maximum array size
1174 ret = fill_from_dev_buffer(scp, arr,
1175 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1180 /* <<Following mode page info copied from ST318451LW>> */
/*
 * Emit mode page 0x1 (Read-Write Error Recovery) into p.
 * Returns the page length; pcontrol selects current/changeable values.
 * NOTE(review): sampled listing -- initializer tail and the pcontrol
 * guard line are absent from view; code text left untouched.
 */
1182 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1183 { /* Read-Write Error Recovery page for mode_sense */
1184 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1187 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
/* changeable-values request: zero everything after page code/length */
1189 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1190 return sizeof(err_recov_pg);
/*
 * Emit mode page 0x2 (Disconnect-Reconnect) into p; returns page length.
 * NOTE(review): sampled listing -- the pcontrol guard line is absent;
 * code text left untouched.
 */
1193 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1194 { /* Disconnect-Reconnect page for mode_sense */
1195 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1196 0, 0, 0, 0, 0, 0, 0, 0};
1198 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
/* changeable-values request: zero everything after page code/length */
1200 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1201 return sizeof(disconnect_pg);
/*
 * Emit mode page 0x3 (Format Device) into p; returns page length.
 * Sectors-per-track and sector size are patched in from module state.
 */
1204 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1205 { /* Format device page for mode_sense */
1206 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1207 0, 0, 0, 0, 0, 0, 0, 0,
1208 0, 0, 0, 0, 0x40, 0, 0, 0};
1210 memcpy(p, format_pg, sizeof(format_pg));
/* sectors per track (big-endian) */
1211 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1212 p[11] = sdebug_sectors_per & 0xff;
/* data bytes per physical sector (big-endian) */
1213 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1214 p[13] = scsi_debug_sector_size & 0xff;
1215 if (scsi_debug_removable)
1216 p[20] |= 0x20; /* should agree with INQUIRY */
/* changeable-values request: zero everything after page code/length */
1218 memset(p + 2, 0, sizeof(format_pg) - 2);
1219 return sizeof(format_pg);
/*
 * Emit mode page 0x8 (Caching) into p; returns page length.
 */
1222 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1223 { /* Caching page for mode_sense */
1224 unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1225 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1227 memcpy(p, caching_pg, sizeof(caching_pg));
/* changeable-values request: zero everything after page code/length */
1229 memset(p + 2, 0, sizeof(caching_pg) - 2);
1230 return sizeof(caching_pg);
/*
 * Emit mode page 0xa (Control) into p; returns page length.
 * Serves current, changeable (pcontrol==1) and default (pcontrol==2)
 * flavors; the file-scope ctrl_m_pg holds the live/current values.
 */
1233 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1234 { /* Control mode page for mode_sense */
1235 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1237 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
/* keep D_SENSE bit in sync with the scsi_debug_dsense module option */
1240 if (scsi_debug_dsense)
1241 ctrl_m_pg[2] |= 0x4;
1243 ctrl_m_pg[2] &= ~0x4;
/* ATO (app tag owned) advertised when DIX/ATO is simulated */
1246 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1248 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
/* pcontrol==1: changeable mask overlaid after page code/length */
1250 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1251 else if (2 == pcontrol)
1252 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1253 return sizeof(ctrl_m_pg);
/*
 * Emit mode page 0x1c (Informational Exceptions Control) into p;
 * returns page length.  Same current/changeable/default scheme as the
 * Control page above, with iec_m_pg as the live copy.
 */
1257 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1258 { /* Informational Exceptions control mode page for mode_sense */
1259 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1261 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1264 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
/* pcontrol==1: changeable mask overlaid after page code/length */
1266 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1267 else if (2 == pcontrol)
1268 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1269 return sizeof(iec_m_pg);
/*
 * Emit mode page 0x19, short format (SAS SSP protocol-specific port
 * page) into p; returns page length.
 */
1272 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1273 { /* SAS SSP mode page - short format for mode_sense */
1274 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1275 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1277 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
/* changeable-values request: zero everything after page code/length */
1279 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1280 return sizeof(sas_sf_m_pg);
/*
 * Emit mode page 0x19 subpage 0x1 (SAS Phy Control And Discover) into
 * p; returns subpage length.  Two phys are described; their SAS
 * addresses are derived from target_dev_id (port B = port A + 1).
 * NOTE(review): sampled listing -- some lines absent; text untouched.
 */
1284 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1286 { /* SAS phy control and discover mode page for mode_sense */
1287 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1288 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1289 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1290 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1291 0x2, 0, 0, 0, 0, 0, 0, 0,
1292 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1293 0, 0, 0, 0, 0, 0, 0, 0,
1294 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1295 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1296 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1297 0x3, 0, 0, 0, 0, 0, 0, 0,
1298 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1299 0, 0, 0, 0, 0, 0, 0, 0,
1303 port_a = target_dev_id + 1;
1304 port_b = port_a + 1;
1305 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
/* low 32 bits of phy A's SAS address, big-endian at offset 20 */
1306 p[20] = (port_a >> 24);
1307 p[21] = (port_a >> 16) & 0xff;
1308 p[22] = (port_a >> 8) & 0xff;
1309 p[23] = port_a & 0xff;
/* phy B's descriptor starts 48 bytes after phy A's */
1310 p[48 + 20] = (port_b >> 24);
1311 p[48 + 21] = (port_b >> 16) & 0xff;
1312 p[48 + 22] = (port_b >> 8) & 0xff;
1313 p[48 + 23] = port_b & 0xff;
/* changeable-values request: zero after the 4-byte subpage header */
1315 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1316 return sizeof(sas_pcd_m_pg);
/*
 * Emit mode page 0x19 subpage 0x2 (SAS SSP shared port) into p;
 * returns subpage length.
 */
1319 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1320 { /* SAS SSP shared protocol specific port mode subpage */
1321 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1322 0, 0, 0, 0, 0, 0, 0, 0,
1325 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
/* changeable-values request: zero after the 4-byte subpage header */
1327 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1328 return sizeof(sas_sha_m_pg);
1331 #define SDEBUG_MAX_MSENSE_SZ 256
/*
 * Handle MODE SENSE(6) and MODE SENSE(10).  Builds the mode parameter
 * header, an optional block descriptor (8 or 16 bytes depending on
 * LLBAA), then dispatches on page code to the resp_*_pg() helpers.
 * Returns 0 or check_condition_result with sense data prepared.
 * NOTE(review): sampled listing -- several original lines (braces,
 * else-arms, offset bookkeeping) are absent; code text untouched.
 */
1333 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1334 struct sdebug_dev_info * devip)
1336 unsigned char dbd, llbaa;
1337 int pcontrol, pcode, subpcode, bd_len;
1338 unsigned char dev_spec;
1339 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1341 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1342 unsigned char *cmd = (unsigned char *)scp->cmnd;
1344 if ((errsts = check_readiness(scp, 1, devip)))
/* decode CDB: DBD bit, page control field, page code */
1346 dbd = !!(cmd[1] & 0x8);
1347 pcontrol = (cmd[2] & 0xc0) >> 6;
1348 pcode = cmd[2] & 0x3f;
1350 msense_6 = (MODE_SENSE == cmd[0]);
/* LLBAA only exists in the 10-byte CDB */
1351 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
/* disks get a block descriptor unless DBD asked to suppress it */
1352 if ((0 == scsi_debug_ptype) && (0 == dbd))
1353 bd_len = llbaa ? 16 : 8;
1356 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1357 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1358 if (0x3 == pcontrol) { /* Saving values not supported */
1359 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1361 return check_condition_result;
/* synthetic per-target device id used by the SAS phy subpage */
1363 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1364 (devip->target * 1000) - 3;
1365 /* set DPOFUA bit for disks */
1366 if (0 == scsi_debug_ptype)
1367 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
/* 10-byte header path: note LONGLBA and block descriptor length */
1377 arr[4] = 0x1; /* set LONGLBA bit */
1378 arr[7] = bd_len; /* assume 255 or less */
1382 if ((bd_len > 0) && (!sdebug_capacity))
1383 sdebug_capacity = get_sdebug_capacity();
/* 8-byte descriptor can only express capacities below 2^32 blocks */
1386 if (sdebug_capacity > 0xfffffffe) {
1392 ap[0] = (sdebug_capacity >> 24) & 0xff;
1393 ap[1] = (sdebug_capacity >> 16) & 0xff;
1394 ap[2] = (sdebug_capacity >> 8) & 0xff;
1395 ap[3] = sdebug_capacity & 0xff;
1397 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1398 ap[7] = scsi_debug_sector_size & 0xff;
/* long (16-byte) block descriptor: 8-byte LBA count + 4-byte length */
1401 } else if (16 == bd_len) {
1402 unsigned long long capac = sdebug_capacity;
1404 for (k = 0; k < 8; ++k, capac >>= 8)
1405 ap[7 - k] = capac & 0xff;
1406 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1407 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1408 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1409 ap[15] = scsi_debug_sector_size & 0xff;
/* only page 0x19 supports subpages here */
1414 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1415 /* TODO: Control Extension page */
1416 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1418 return check_condition_result;
1421 case 0x1: /* Read-Write error recovery page, direct access */
1422 len = resp_err_recov_pg(ap, pcontrol, target);
1425 case 0x2: /* Disconnect-Reconnect page, all devices */
1426 len = resp_disconnect_pg(ap, pcontrol, target);
1429 case 0x3: /* Format device page, direct access */
1430 len = resp_format_pg(ap, pcontrol, target);
1433 case 0x8: /* Caching page, direct access */
1434 len = resp_caching_pg(ap, pcontrol, target);
1437 case 0xa: /* Control Mode page, all devices */
1438 len = resp_ctrl_m_pg(ap, pcontrol, target);
1441 case 0x19: /* if spc==1 then sas phy, control+discover */
1442 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1443 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1444 INVALID_FIELD_IN_CDB, 0);
1445 return check_condition_result;
/* subpage 0xff means "all subpages of this page" */
1448 if ((0x0 == subpcode) || (0xff == subpcode))
1449 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1450 if ((0x1 == subpcode) || (0xff == subpcode))
1451 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1453 if ((0x2 == subpcode) || (0xff == subpcode))
1454 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1457 case 0x1c: /* Informational Exceptions Mode page, all devices */
1458 len = resp_iec_m_pg(ap, pcontrol, target);
1461 case 0x3f: /* Read all Mode pages */
1462 if ((0 == subpcode) || (0xff == subpcode)) {
1463 len = resp_err_recov_pg(ap, pcontrol, target);
1464 len += resp_disconnect_pg(ap + len, pcontrol, target);
1465 len += resp_format_pg(ap + len, pcontrol, target);
1466 len += resp_caching_pg(ap + len, pcontrol, target);
1467 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1468 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1469 if (0xff == subpcode) {
1470 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1471 target, target_dev_id);
1472 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1474 len += resp_iec_m_pg(ap + len, pcontrol, target);
1476 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1477 INVALID_FIELD_IN_CDB, 0);
1478 return check_condition_result;
/* unknown page code */
1483 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1485 return check_condition_result;
/* mode data length: excludes itself (1 byte for 6, 2 bytes for 10) */
1488 arr[0] = offset - 1;
1490 arr[0] = ((offset - 2) >> 8) & 0xff;
1491 arr[1] = (offset - 2) & 0xff;
1493 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1496 #define SDEBUG_MAX_MSELECT_SZ 512
/*
 * Handle MODE SELECT(6)/(10).  Fetches the parameter list from the
 * data-out buffer, validates header/descriptor lengths, and applies
 * changes to the Control (0xa) or Informational Exceptions (0x1c)
 * pages; anything else is rejected with sense data.
 * NOTE(review): sampled listing -- several lines absent; text untouched.
 */
1498 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1499 struct sdebug_dev_info * devip)
1501 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1502 int param_len, res, errsts, mpage;
1503 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1504 unsigned char *cmd = (unsigned char *)scp->cmnd;
1506 if ((errsts = check_readiness(scp, 1, devip)))
1508 memset(arr, 0, sizeof(arr));
1511 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
/* require PF=1, SP=0 and a parameter list that fits our buffer */
1512 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1513 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1514 INVALID_FIELD_IN_CDB, 0);
1515 return check_condition_result;
1517 res = fetch_to_dev_buffer(scp, arr, param_len);
1519 return (DID_ERROR << 16);
1520 else if ((res < param_len) &&
1521 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1522 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1523 " IO sent=%d bytes\n", param_len, res);
/* mode data length / block descriptor length differ between 6 and 10 */
1524 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1525 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1527 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1528 INVALID_FIELD_IN_PARAM_LIST, 0);
1529 return check_condition_result;
/* first mode page starts after header (4 or 8 bytes) + descriptors */
1531 off = bd_len + (mselect6 ? 4 : 8);
1532 mpage = arr[off] & 0x3f;
/* PS bit must not be set by the initiator */
1533 ps = !!(arr[off] & 0x80);
1535 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1536 INVALID_FIELD_IN_PARAM_LIST, 0);
1537 return check_condition_result;
/* SPF selects the long (subpage) page format */
1539 spf = !!(arr[off] & 0x40);
1540 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1542 if ((pg_len + off) > param_len) {
1543 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1544 PARAMETER_LIST_LENGTH_ERR, 0);
1545 return check_condition_result;
1548 case 0xa: /* Control Mode page */
/* only accept a page whose length matches our live copy */
1549 if (ctrl_m_pg[1] == arr[off + 1]) {
1550 memcpy(ctrl_m_pg + 2, arr + off + 2,
1551 sizeof(ctrl_m_pg) - 2);
/* propagate D_SENSE so later sense buffers use the chosen format */
1552 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1556 case 0x1c: /* Informational Exceptions Mode page */
1557 if (iec_m_pg[1] == arr[off + 1]) {
1558 memcpy(iec_m_pg + 2, arr + off + 2,
1559 sizeof(iec_m_pg) - 2);
/* unsupported page: reject */
1566 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1567 INVALID_FIELD_IN_PARAM_LIST, 0);
1568 return check_condition_result;
/*
 * Emit the Temperature log page (0xd) parameters into arr; returns the
 * number of bytes written.
 * NOTE(review): sampled listing -- the initializer tail is absent.
 */
1571 static int resp_temp_l_pg(unsigned char * arr)
1573 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1574 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1577 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1578 return sizeof(temp_l_pg);
/*
 * Emit the Informational Exceptions log page (0x2f) parameters into
 * arr; returns the number of bytes written.  If the TEST bit in the
 * IEC mode page is set, a THRESHOLD EXCEEDED condition is simulated.
 */
1581 static int resp_ie_l_pg(unsigned char * arr)
1583 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1586 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1587 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1588 arr[4] = THRESHOLD_EXCEEDED;
1591 return sizeof(ie_l_pg);
1594 #define SDEBUG_MAX_LSENSE_SZ 512
/*
 * Handle LOG SENSE.  Supports the "supported pages" page (0x0), the
 * Temperature page (0xd) and the Informational Exceptions page (0x2f),
 * with and without the 0xff "all subpages" selector.
 * NOTE(review): sampled listing -- several lines absent; text untouched.
 */
1596 static int resp_log_sense(struct scsi_cmnd * scp,
1597 struct sdebug_dev_info * devip)
1599 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1600 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1601 unsigned char *cmd = (unsigned char *)scp->cmnd;
1603 if ((errsts = check_readiness(scp, 1, devip)))
1605 memset(arr, 0, sizeof(arr));
/* PPC / SP bits are not supported -> illegal request */
1609 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1610 INVALID_FIELD_IN_CDB, 0);
1611 return check_condition_result;
1613 pcontrol = (cmd[2] & 0xc0) >> 6;
1614 pcode = cmd[2] & 0x3f;
1615 subpcode = cmd[3] & 0xff;
1616 alloc_len = (cmd[7] << 8) + cmd[8];
1618 if (0 == subpcode) {
1620 case 0x0: /* Supported log pages log page */
1622 arr[n++] = 0x0; /* this page */
1623 arr[n++] = 0xd; /* Temperature */
1624 arr[n++] = 0x2f; /* Informational exceptions */
1627 case 0xd: /* Temperature log page */
1628 arr[3] = resp_temp_l_pg(arr + 4);
1630 case 0x2f: /* Informational exceptions log page */
1631 arr[3] = resp_ie_l_pg(arr + 4);
1634 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1635 INVALID_FIELD_IN_CDB, 0);
1636 return check_condition_result;
/* subpage 0xff: enumerate page/subpage pairs */
1638 } else if (0xff == subpcode) {
1642 case 0x0: /* Supported log pages and subpages log page */
1645 arr[n++] = 0x0; /* 0,0 page */
1647 arr[n++] = 0xff; /* this page */
1649 arr[n++] = 0x0; /* Temperature */
1651 arr[n++] = 0x0; /* Informational exceptions */
1654 case 0xd: /* Temperature subpages */
1657 arr[n++] = 0x0; /* Temperature */
1660 case 0x2f: /* Informational exceptions subpages */
1663 arr[n++] = 0x0; /* Informational exceptions */
1667 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1668 INVALID_FIELD_IN_CDB, 0);
1669 return check_condition_result;
/* any other subpage value is invalid */
1672 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1673 INVALID_FIELD_IN_CDB, 0);
1674 return check_condition_result;
1676 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
/* NOTE(review): clamping with SDEBUG_MAX_INQ_ARR_SZ looks like a
 * copy/paste from the INQUIRY path; SDEBUG_MAX_LSENSE_SZ appears to be
 * the intended bound since arr is sized by it -- confirm upstream. */
1677 return fill_from_dev_buffer(scp, arr,
1678 min(len, SDEBUG_MAX_INQ_ARR_SZ));
/*
 * Validate an LBA range against the simulated capacity and the backing
 * RAM store.  Returns 0 if acceptable, otherwise sets sense data and
 * returns check_condition_result.
 * NOTE(review): sampled listing -- trailing "return 0;" not in view.
 */
1681 static int check_device_access_params(struct sdebug_dev_info *devi,
1682 unsigned long long lba, unsigned int num)
/* range must end at or before the advertised capacity */
1684 if (lba + num > sdebug_capacity) {
1685 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1686 return check_condition_result;
1688 /* transfer length excessive (tie in to block limits VPD page) */
1689 if (num > sdebug_store_sectors) {
1690 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1691 return check_condition_result;
/*
 * Move data between the command's scatter-gather list and the shared
 * RAM store (fake_storep).  The store is smaller than the advertised
 * capacity, so LBAs wrap modulo sdebug_store_sectors; a transfer that
 * runs off the end of the store wraps around to its beginning ("rest").
 * write != 0 copies toward the store, otherwise from it.
 */
1696 static int do_device_access(struct scsi_cmnd *scmd,
1697 struct sdebug_dev_info *devi,
1698 unsigned long long lba, unsigned int num, int write)
1701 unsigned long long block, rest = 0;
1702 int (*func)(struct scsi_cmnd *, unsigned char *, int);
1704 func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
/* block = lba mod store size (do_div returns the remainder) */
1706 block = do_div(lba, sdebug_store_sectors);
1707 if (block + num > sdebug_store_sectors)
1708 rest = block + num - sdebug_store_sectors;
/* first (possibly only) contiguous segment */
1710 ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1711 (num - rest) * scsi_debug_sector_size);
/* wrapped tail, if the range crossed the end of the store */
1713 ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
/*
 * DIF read-side verification: for each sector, recompute the guard
 * checksum over the data in fake_storep and compare it (plus the
 * reference tag) against the stored protection tuples in dif_storep,
 * then copy the protection data into the command's protection sgl.
 * Returns 0 on success or a DIF error code on mismatch.
 * NOTE(review): sampled listing -- several lines absent; text untouched.
 */
1718 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1719 unsigned int sectors, u32 ei_lba)
1721 unsigned int i, resid;
1722 struct scatterlist *psgl;
1723 struct sd_dif_tuple *sdt;
1725 sector_t tmp_sec = start_sec;
/* map the starting LBA into the (smaller, wrapping) backing store */
1728 start_sec = do_div(tmp_sec, sdebug_store_sectors);
1730 sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec));
1732 for (i = 0 ; i < sectors ; i++) {
/* app tag 0xffff means "do not check this sector" */
1735 if (sdt[i].app_tag == 0xffff)
1738 sector = start_sec + i;
/* guard checksum algorithm is selectable: IP checksum or CRC-T10DIF */
1740 switch (scsi_debug_guard) {
1742 csum = ip_compute_csum(fake_storep +
1743 sector * scsi_debug_sector_size,
1744 scsi_debug_sector_size);
1747 csum = crc_t10dif(fake_storep +
1748 sector * scsi_debug_sector_size,
1749 scsi_debug_sector_size);
1750 csum = cpu_to_be16(csum);
1756 if (sdt[i].guard_tag != csum) {
1757 printk(KERN_ERR "%s: GUARD check failed on sector %lu" \
1758 " rcvd 0x%04x, data 0x%04x\n", __func__,
1759 (unsigned long)sector,
1760 be16_to_cpu(sdt[i].guard_tag),
/* Type 1: reference tag must equal the low 32 bits of the LBA */
1766 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1767 be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1768 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1769 __func__, (unsigned long)sector);
/* Type 2: reference tag must equal the expected initial LBA */
1774 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1775 be32_to_cpu(sdt[i].ref_tag) != ei_lba) {
1776 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1777 __func__, (unsigned long)sector);
1785 resid = sectors * 8; /* Bytes of protection data to copy into sgl */
/* copy the 8-byte tuples out through the protection scatterlist */
1788 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1789 int len = min(psgl->length, resid);
1791 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1792 memcpy(paddr, dif_storep + dif_offset(sector), len);
/* wrap the source sector when it runs off the backing store */
1795 if (sector >= sdebug_store_sectors) {
1798 sector = do_div(tmp_sec, sdebug_store_sectors);
1801 kunmap_atomic(paddr);
/*
 * Service a READ: validate the range, optionally inject a simulated
 * MEDIUM ERROR (opts flag), run DIF verification when protection is
 * active, then copy data from the RAM store under the read lock.
 */
1809 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1810 unsigned int num, struct sdebug_dev_info *devip,
1813 unsigned long iflags;
1816 ret = check_device_access_params(devip, lba, num);
/* optional fault injection: unrecoverable read error in a fixed window */
1820 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1821 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1822 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1823 /* claim unrecoverable read error */
1824 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1825 /* set info field and valid bit for fixed descriptor */
1826 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1827 devip->sense_buff[0] |= 0x80; /* Valid bit */
/* INFO field reports the first failing LBA */
1828 ret = (lba < OPT_MEDIUM_ERR_ADDR)
1829 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
1830 devip->sense_buff[3] = (ret >> 24) & 0xff;
1831 devip->sense_buff[4] = (ret >> 16) & 0xff;
1832 devip->sense_buff[5] = (ret >> 8) & 0xff;
1833 devip->sense_buff[6] = ret & 0xff;
/* nothing transferred on the simulated error */
1835 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1836 return check_condition_result;
/* DIF/DIX: verify protection tuples before handing data back */
1840 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1841 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1844 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1845 return illegal_condition_result;
/* readers share atomic_rw; writers take it exclusively */
1849 read_lock_irqsave(&atomic_rw, iflags);
1850 ret = do_device_access(SCpnt, devip, lba, num, 0);
1851 read_unlock_irqrestore(&atomic_rw, iflags);
/*
 * Debug helper: hex/ASCII dump of a sector buffer to the kernel log,
 * 16 bytes per row (printable characters shown literally).
 */
1855 void dump_sector(unsigned char *buf, int len)
1859 printk(KERN_ERR ">>> Sector Dump <<<\n");
1861 for (i = 0 ; i < len ; i += 16) {
1862 printk(KERN_ERR "%04d: ", i);
1864 for (j = 0 ; j < 16 ; j++) {
1865 unsigned char c = buf[i+j];
/* printable ASCII range: show the character itself */
1866 if (c >= 0x20 && c < 0x7e)
1867 printk(" %c ", buf[i+j]);
1869 printk("%02x ", buf[i+j]);
/*
 * DIF write-side verification: walk the data scatterlist sector by
 * sector in lockstep with the protection scatterlist, verify each
 * guard/reference tag against the incoming data, and only then commit
 * the 8-byte tuple to dif_storep.  Returns 0 or a DIF error code.
 * NOTE(review): sampled listing -- several lines absent; text untouched.
 */
1876 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1877 unsigned int sectors, u32 ei_lba)
1880 struct sd_dif_tuple *sdt;
1881 struct scatterlist *dsgl = scsi_sglist(SCpnt);
1882 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1883 void *daddr, *paddr;
1884 sector_t tmp_sec = start_sec;
1887 unsigned short csum;
/* map starting LBA into the wrapping backing store */
1889 sector = do_div(tmp_sec, sdebug_store_sectors);
1891 BUG_ON(scsi_sg_count(SCpnt) == 0);
1892 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1894 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1897 /* For each data page */
1898 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1899 daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
1901 /* For each sector-sized chunk in data page */
1902 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
1904 /* If we're at the end of the current
1905 * protection page advance to the next one
1907 if (ppage_offset >= psgl->length) {
1908 kunmap_atomic(paddr);
1909 psgl = sg_next(psgl);
1910 BUG_ON(psgl == NULL);
1911 paddr = kmap_atomic(sg_page(psgl))
1916 sdt = paddr + ppage_offset;
/* recompute the guard over the incoming data chunk */
1918 switch (scsi_debug_guard) {
1920 csum = ip_compute_csum(daddr,
1921 scsi_debug_sector_size);
1924 csum = cpu_to_be16(crc_t10dif(daddr,
1925 scsi_debug_sector_size));
1933 if (sdt->guard_tag != csum) {
1935 "%s: GUARD check failed on sector %lu " \
1936 "rcvd 0x%04x, calculated 0x%04x\n",
1937 __func__, (unsigned long)sector,
1938 be16_to_cpu(sdt->guard_tag),
1941 dump_sector(daddr, scsi_debug_sector_size);
/* Type 1: reference tag tracks the (low 32 bits of the) LBA */
1945 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1946 be32_to_cpu(sdt->ref_tag)
1947 != (start_sec & 0xffffffff)) {
1949 "%s: REF check failed on sector %lu\n",
1950 __func__, (unsigned long)sector);
1952 dump_sector(daddr, scsi_debug_sector_size);
/* Type 2: reference tag must equal the expected initial LBA */
1956 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1957 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1959 "%s: REF check failed on sector %lu\n",
1960 __func__, (unsigned long)sector);
1962 dump_sector(daddr, scsi_debug_sector_size);
1966 /* Would be great to copy this in bigger
1967 * chunks. However, for the sake of
1968 * correctness we need to verify each sector
1969 * before writing it to "stable" storage
1971 memcpy(dif_storep + dif_offset(sector), sdt, 8);
/* wrap the destination sector at the end of the store */
1975 if (sector == sdebug_store_sectors)
1976 sector = 0; /* Force wrap */
1980 daddr += scsi_debug_sector_size;
1981 ppage_offset += sizeof(struct sd_dif_tuple);
1984 kunmap_atomic(daddr);
1987 kunmap_atomic(paddr);
/* error path: release both mappings before returning the DIF code */
1995 kunmap_atomic(daddr);
1996 kunmap_atomic(paddr);
/*
 * Convert an LBA to its index in the provisioning bitmap
 * (map_storep), honoring the configured unmap alignment and
 * granularity.  NOTE(review): sampled listing -- braces and the final
 * "return" are not in view.
 */
2000 static unsigned long lba_to_map_index(sector_t lba)
/* shift so that the first full granule after the alignment maps to 0 */
2002 if (scsi_debug_unmap_alignment) {
2003 lba += scsi_debug_unmap_granularity -
2004 scsi_debug_unmap_alignment;
/* do_div leaves the quotient in lba */
2006 do_div(lba, scsi_debug_unmap_granularity);
/*
 * Inverse of lba_to_map_index(): first LBA covered by a bitmap index.
 */
2011 static sector_t map_index_to_lba(unsigned long index)
2013 return index * scsi_debug_unmap_granularity -
2014 scsi_debug_unmap_alignment;
/*
 * Report whether lba is mapped and, via *num, how many following
 * blocks share that state (bounded by the backing store size).
 * Returns the mapped flag (1 = mapped, 0 = unmapped).
 */
2017 static unsigned int map_state(sector_t lba, unsigned int *num)
2020 unsigned int mapped;
2021 unsigned long index;
2024 index = lba_to_map_index(lba);
2025 mapped = test_bit(index, map_storep);
/* find where the current run of same-state bits ends */
2028 next = find_next_zero_bit(map_storep, map_size, index);
2030 next = find_next_bit(map_storep, map_size, index);
2032 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
/*
 * Mark every provisioning granule touched by [lba, lba+len) as mapped
 * in map_storep.
 */
2038 static void map_region(sector_t lba, unsigned int len)
2040 sector_t end = lba + len;
2043 unsigned long index = lba_to_map_index(lba);
/* guard against indexing past the bitmap */
2045 if (index < map_size)
2046 set_bit(index, map_storep);
/* advance to the first LBA of the next granule */
2048 lba = map_index_to_lba(index + 1);
/*
 * Clear the mapped bit for each granule wholly contained in
 * [lba, lba+len).  When LBPRZ is advertised, the freed blocks are
 * also zero-filled so subsequent reads return zeros.
 */
2052 static void unmap_region(sector_t lba, unsigned int len)
2054 sector_t end = lba + len;
2057 unsigned long index = lba_to_map_index(lba);
/* only unmap granules that are fully covered by the request */
2059 if (lba == map_index_to_lba(index) &&
2060 lba + scsi_debug_unmap_granularity <= end &&
2062 clear_bit(index, map_storep);
2063 if (scsi_debug_lbprz) {
2064 memset(fake_storep +
2065 lba * scsi_debug_sector_size, 0,
2066 scsi_debug_sector_size *
2067 scsi_debug_unmap_granularity);
2070 lba = map_index_to_lba(index + 1);
/*
 * Service a WRITE: validate the range, verify incoming DIF tuples when
 * protection is active, then copy data into the RAM store under the
 * write lock and mark the region mapped for thin provisioning.
 */
2074 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2075 unsigned int num, struct sdebug_dev_info *devip,
2078 unsigned long iflags;
2081 ret = check_device_access_params(devip, lba, num);
/* DIF/DIX: verify protection data before committing the write */
2086 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2087 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2090 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2091 return illegal_condition_result;
/* exclusive access while mutating the shared store */
2095 write_lock_irqsave(&atomic_rw, iflags);
2096 ret = do_device_access(SCpnt, devip, lba, num, 1);
2097 if (scsi_debug_lbp())
2098 map_region(lba, num);
2099 write_unlock_irqrestore(&atomic_rw, iflags);
2101 return (DID_ERROR << 16);
/* short transfer is only reported, not failed */
2102 else if ((ret < (num * scsi_debug_sector_size)) &&
2103 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2104 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2105 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
/*
 * Service WRITE SAME: either unmap the range (UNMAP bit with thin
 * provisioning enabled) or fetch one logical block from the initiator
 * and replicate it across the range, all under the write lock.
 */
2110 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2111 unsigned int num, struct sdebug_dev_info *devip,
2112 u32 ei_lba, unsigned int unmap)
2114 unsigned long iflags;
2115 unsigned long long i;
2118 ret = check_device_access_params(devip, lba, num);
/* enforce the advertised maximum WRITE SAME length */
2122 if (num > scsi_debug_write_same_length) {
2123 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2125 return check_condition_result;
2128 write_lock_irqsave(&atomic_rw, iflags);
/* UNMAP bit set: deallocate instead of writing */
2130 if (unmap && scsi_debug_lbp()) {
2131 unmap_region(lba, num);
2135 /* Else fetch one logical block */
2136 ret = fetch_to_dev_buffer(scmd,
2137 fake_storep + (lba * scsi_debug_sector_size),
2138 scsi_debug_sector_size);
/* fetch failure: drop the lock before erroring out */
2141 write_unlock_irqrestore(&atomic_rw, iflags);
2142 return (DID_ERROR << 16);
2143 } else if ((ret < (num * scsi_debug_sector_size)) &&
2144 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2145 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2146 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2148 /* Copy first sector to remaining blocks */
2149 for (i = 1 ; i < num ; i++)
2150 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2151 fake_storep + (lba * scsi_debug_sector_size),
2152 scsi_debug_sector_size);
2154 if (scsi_debug_lbp())
2155 map_region(lba, num);
2157 write_unlock_irqrestore(&atomic_rw, iflags);
2162 struct unmap_block_desc {
/*
 * Service UNMAP: copy the parameter list out of the data buffer,
 * validate its header against the CDB-supplied length, then deallocate
 * each block-descriptor range via unmap_region().
 * NOTE(review): sampled listing -- kfree/return tail not in view.
 */
2168 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2171 struct unmap_block_desc *desc;
2172 unsigned int i, payload_len, descriptors;
2175 ret = check_readiness(scmd, 1, devip);
/* parameter list length lives in CDB bytes 7..8 */
2179 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2180 BUG_ON(scsi_bufflen(scmd) != payload_len);
/* 8-byte header, 16 bytes per block descriptor */
2182 descriptors = (payload_len - 8) / 16;
2184 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2186 return check_condition_result;
2188 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
/* sanity-check the UNMAP parameter list header lengths */
2190 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2191 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2193 desc = (void *)&buf[8];
2195 for (i = 0 ; i < descriptors ; i++) {
2196 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2197 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2199 ret = check_device_access_params(devip, lba, num);
2203 unmap_region(lba, num);
2214 #define SDEBUG_GET_LBA_STATUS_LEN 32
/*
 * Service GET LBA STATUS: report, for the requested LBA, whether it is
 * mapped and the extent of the run sharing that state (single
 * descriptor response).
 */
2216 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2217 struct sdebug_dev_info * devip)
2219 unsigned long long lba;
2220 unsigned int alloc_len, mapped, num;
2221 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2224 ret = check_readiness(scmd, 1, devip);
/* starting LBA in CDB bytes 2..9, allocation length in 10..13 */
2228 lba = get_unaligned_be64(&scmd->cmnd[2]);
2229 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2234 ret = check_device_access_params(devip, lba, 1);
2238 mapped = map_state(lba, &num);
2240 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2241 put_unaligned_be32(20, &arr[0]); /* Parameter Data Length */
2242 put_unaligned_be64(lba, &arr[8]); /* LBA */
2243 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2244 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2246 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2249 #define SDEBUG_RLUN_ARR_SZ 256
/*
 * Service REPORT LUNS.  Lists scsi_debug_max_luns simulated LUNs
 * (optionally skipping LUN 0) plus the well-known REPORT LUNS w-lun
 * when select_report requests it.
 * NOTE(review): sampled listing -- several lines absent; text untouched.
 */
2251 static int resp_report_luns(struct scsi_cmnd * scp,
2252 struct sdebug_dev_info * devip)
2254 unsigned int alloc_len;
2255 int lun_cnt, i, upper, num, n, wlun, lun;
2256 unsigned char *cmd = (unsigned char *)scp->cmnd;
2257 int select_report = (int)cmd[2];
2258 struct scsi_lun *one_lun;
2259 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2260 unsigned char * max_addr;
/* allocation length: big-endian CDB bytes 6..9 */
2262 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2263 if ((alloc_len < 4) || (select_report > 2)) {
2264 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2266 return check_condition_result;
2268 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2269 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2270 lun_cnt = scsi_debug_max_luns;
2271 if (1 == select_report)
2273 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
/* well-known LUN included for select_report 1 and 2 */
2275 wlun = (select_report > 0) ? 1 : 0;
2276 num = lun_cnt + wlun;
/* LUN list length header (bytes 0..3; only 2..3 used here) */
2277 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2278 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2279 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2280 sizeof(struct scsi_lun)), num);
2285 one_lun = (struct scsi_lun *) &arr[8];
2286 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2287 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2288 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
/* SAM-2 flat addressing: method bits in byte 0, LUN split over 0..1 */
2290 upper = (lun >> 8) & 0x3f;
2292 one_lun[i].scsi_lun[0] =
2293 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2294 one_lun[i].scsi_lun[1] = lun & 0xff;
/* append the well-known REPORT LUNS w-lun when requested */
2297 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2298 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2301 alloc_len = (unsigned char *)(one_lun + i) - arr;
2302 return fill_from_dev_buffer(scp, arr,
2303 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
/*
 * Service XDWRITEREAD: XOR the data-out payload into the data-in
 * (bidirectional) buffer in place, page by page via kmap_atomic.
 */
2306 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2307 unsigned int num, struct sdebug_dev_info *devip)
2310 unsigned char *kaddr, *buf;
2311 unsigned int offset;
2312 struct scatterlist *sg;
/* sdb is the data-in side of this bidirectional command */
2313 struct scsi_data_buffer *sdb = scsi_in(scp);
2315 /* better not to use temporary buffer. */
2316 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
/* stage the data-out payload into a linear buffer */
2320 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2323 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2324 kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
/* XOR the staged bytes into this data-in segment */
2328 for (j = 0; j < sg->length; j++)
2329 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2331 offset += sg->length;
2332 kunmap_atomic(kaddr);
2341 /* When timer goes off this function is called. */
2342 static void timer_intr_handler(unsigned long indx)
2344 struct sdebug_queued_cmd * sqcp;
2345 unsigned long iflags;
2347 if (indx >= scsi_debug_max_queue) {
2348 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2352 spin_lock_irqsave(&queued_arr_lock, iflags);
2353 sqcp = &queued_arr[(int)indx];
2354 if (! sqcp->in_use) {
2355 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2357 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2361 if (sqcp->done_funct) {
2362 sqcp->a_cmnd->result = sqcp->scsi_result;
2363 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2365 sqcp->done_funct = NULL;
2366 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/*
 * Allocate a zeroed per-device info structure, link it to the owning
 * simulated host and append it to that host's device list.
 * NOTE(review): NULL-check/return lines are elided in this excerpt.
 */
2370 static struct sdebug_dev_info *
2371 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2373 struct sdebug_dev_info *devip;
2375 devip = kzalloc(sizeof(*devip), flags);
2377 devip->sdbg_host = sdbg_host;
2378 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
/*
 * Find (or create) the sdebug_dev_info that matches sdev's
 * <channel, target, lun> on its host.  Reuses a free ("!used") entry if
 * one exists; otherwise allocates a new one with GFP_ATOMIC.  The entry
 * is initialized with a cleared sense buffer in either descriptor (0x72)
 * or fixed (0x70) format depending on the dsense module option.
 */
2383 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2385 struct sdebug_host_info * sdbg_host;
2386 struct sdebug_dev_info * open_devip = NULL;
2387 struct sdebug_dev_info * devip =
2388 (struct sdebug_dev_info *)sdev->hostdata;
/* host private data holds a pointer to our sdebug_host_info */
2392 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2394 printk(KERN_ERR "Host info NULL\n");
2397 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2398 if ((devip->used) && (devip->channel == sdev->channel) &&
2399 (devip->target == sdev->id) &&
2400 (devip->lun == sdev->lun))
/* remember the first unused entry for possible reuse */
2403 if ((!devip->used) && (!open_devip))
2407 if (!open_devip) { /* try and make a new one */
2408 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2410 printk(KERN_ERR "%s: out of memory at line %d\n",
2411 __func__, __LINE__);
2416 open_devip->channel = sdev->channel;
2417 open_devip->target = sdev->id;
2418 open_devip->lun = sdev->lun;
2419 open_devip->sdbg_host = sdbg_host;
2420 open_devip->reset = 1;
2421 open_devip->used = 1;
2422 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
/* 0x72 = descriptor sense format, 0x70 = fixed format */
2423 if (scsi_debug_dsense)
2424 open_devip->sense_buff[0] = 0x72;
2426 open_devip->sense_buff[0] = 0x70;
2427 open_devip->sense_buff[7] = 0xa;
/* flag the well-known REPORT LUNS LU so it is handled specially */
2429 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2430 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
/*
 * Mid-layer slave_alloc hook: optionally log, and mark the request queue
 * as supporting bidirectional (BIDI) commands.
 */
2435 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2437 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2438 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2439 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2440 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
/*
 * Mid-layer slave_configure hook: bind the per-device info structure to
 * sdp->hostdata, set queue depth and max segment size, and optionally
 * suppress upper-level driver (e.g. sd) attachment.  Returns 1 (offline)
 * when no device info can be allocated.
 */
2444 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2446 struct sdebug_dev_info *devip;
2448 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2449 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2450 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2451 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2452 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2453 devip = devInfoReg(sdp);
2455 return 1; /* no resources, will be marked offline */
2456 sdp->hostdata = devip;
2457 if (sdp->host->cmd_per_lun)
2458 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2459 sdp->host->cmd_per_lun);
2460 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2461 if (scsi_debug_no_uld)
2462 sdp->no_uld_attach = 1;
/*
 * Mid-layer slave_destroy hook: release the binding between the scsi
 * device and its sdebug_dev_info so the slot can be reused later.
 */
2466 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2468 struct sdebug_dev_info *devip =
2469 (struct sdebug_dev_info *)sdp->hostdata;
2471 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2472 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2473 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2475 /* make this slot available for re-use */
2477 sdp->hostdata = NULL;
2481 /* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
/*
 * Scan the queued command array for 'cmnd'; if found, cancel its pending
 * response timer and free the slot.  Runs under queued_arr_lock.
 */
2482 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2484 unsigned long iflags;
2486 struct sdebug_queued_cmd *sqcp;
2488 spin_lock_irqsave(&queued_arr_lock, iflags);
2489 for (k = 0; k < scsi_debug_max_queue; ++k) {
2490 sqcp = &queued_arr[k];
2491 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2492 del_timer_sync(&sqcp->cmnd_timer);
2494 sqcp->a_cmnd = NULL;
2498 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/* k < max means the loop broke early, i.e. the command was found */
2499 return (k < scsi_debug_max_queue) ? 1 : 0;
2502 /* Deletes (stops) timers of all queued commands */
/*
 * Cancel the response timer of every in-use slot in the queued command
 * array and release the slots.  Used on resets/shutdown.
 */
2503 static void stop_all_queued(void)
2505 unsigned long iflags;
2507 struct sdebug_queued_cmd *sqcp;
2509 spin_lock_irqsave(&queued_arr_lock, iflags);
2510 for (k = 0; k < scsi_debug_max_queue; ++k) {
2511 sqcp = &queued_arr[k];
2512 if (sqcp->in_use && sqcp->a_cmnd) {
2513 del_timer_sync(&sqcp->cmnd_timer);
2515 sqcp->a_cmnd = NULL;
2518 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/*
 * Error-handler abort hook: stop the pending delayed response (if any)
 * for the command being aborted.
 */
2521 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2523 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2524 printk(KERN_INFO "scsi_debug: abort\n")
2526 stop_queued_cmnd(SCpnt);
/*
 * BIOS geometry hook: try to read a partition table from the block
 * device; if that yields a geometry use it, otherwise fall back to the
 * simulated heads/sectors/cylinders values.
 * info[0]=heads, info[1]=sectors per track, info[2]=cylinders.
 */
2530 static int scsi_debug_biosparam(struct scsi_device *sdev,
2531 struct block_device * bdev, sector_t capacity, int *info)
2536 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2537 printk(KERN_INFO "scsi_debug: biosparam\n");
2538 buf = scsi_bios_ptable(bdev);
2540 res = scsi_partsize(buf, capacity,
2541 &info[2], &info[0], &info[1]);
2546 info[0] = sdebug_heads;
2547 info[1] = sdebug_sectors_per;
2548 info[2] = sdebug_cylinders_per;
/*
 * Error-handler device-reset hook: look up the per-device info for the
 * failing command's device (elided lines presumably set its reset flag —
 * confirm against full source).
 */
2552 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2554 struct sdebug_dev_info * devip;
2556 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2557 printk(KERN_INFO "scsi_debug: device_reset\n");
2560 devip = devInfoReg(SCpnt->device);
/*
 * Error-handler bus-reset hook: mark every simulated device on the
 * command's host as reset (so the next command reports a unit attention).
 */
2567 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2569 struct sdebug_host_info *sdbg_host;
2570 struct sdebug_dev_info * dev_info;
2571 struct scsi_device * sdp;
2572 struct Scsi_Host * hp;
2574 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2575 printk(KERN_INFO "scsi_debug: bus_reset\n");
/* guard against NULL command/device/host before dereferencing */
2577 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2578 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2580 list_for_each_entry(dev_info,
2581 &sdbg_host->dev_info_list,
2583 dev_info->reset = 1;
/*
 * Error-handler host-reset hook: mark every device on every simulated
 * host as reset, walking the global host list under its spinlock.
 */
2589 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2591 struct sdebug_host_info * sdbg_host;
2592 struct sdebug_dev_info * dev_info;
2594 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2595 printk(KERN_INFO "scsi_debug: host_reset\n");
2597 spin_lock(&sdebug_host_list_lock);
2598 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2599 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2601 dev_info->reset = 1;
2603 spin_unlock(&sdebug_host_list_lock);
2608 /* Initializes timers in queued array */
/*
 * One-time (module init) setup: initialize the timer embedded in each
 * queued-command slot and clear its command pointer.
 */
2609 static void __init init_all_queued(void)
2611 unsigned long iflags;
2613 struct sdebug_queued_cmd * sqcp;
2615 spin_lock_irqsave(&queued_arr_lock, iflags);
2616 for (k = 0; k < scsi_debug_max_queue; ++k) {
2617 sqcp = &queued_arr[k];
2618 init_timer(&sqcp->cmnd_timer);
2620 sqcp->a_cmnd = NULL;
2622 spin_unlock_irqrestore(&queued_arr_lock, iflags);
/*
 * Write a DOS-style partition table into the first sector of the ram
 * store, dividing the usable sectors (after track 0) evenly between
 * scsi_debug_num_parts partitions.  Partition starts are rounded to
 * cylinder boundaries (heads * sectors_per_track).
 */
2625 static void __init sdebug_build_parts(unsigned char *ramp,
2626 unsigned long store_size)
2628 struct partition * pp;
2629 int starts[SDEBUG_MAX_PARTS + 2];
2630 int sectors_per_part, num_sectors, k;
2631 int heads_by_sects, start_sec, end_sec;
2633 /* assume partition table already zeroed */
/* need at least 1 MB of store before it is worth partitioning */
2634 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2636 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2637 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2638 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2639 "partitions to %d\n", SDEBUG_MAX_PARTS);
2641 num_sectors = (int)sdebug_store_sectors;
/* reserve the first track (sdebug_sectors_per) for the table itself */
2642 sectors_per_part = (num_sectors - sdebug_sectors_per)
2643 / scsi_debug_num_parts;
2644 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2645 starts[0] = sdebug_sectors_per;
2646 for (k = 1; k < scsi_debug_num_parts; ++k)
2647 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2649 starts[scsi_debug_num_parts] = num_sectors;
2650 starts[scsi_debug_num_parts + 1] = 0;	/* sentinel terminates loop below */
2652 ramp[510] = 0x55; /* magic partition markings */
/* MBR partition entries begin at offset 0x1be */
2654 pp = (struct partition *)(ramp + 0x1be);
2655 for (k = 0; starts[k + 1]; ++k, ++pp) {
2656 start_sec = starts[k];
2657 end_sec = starts[k + 1] - 1;
/* CHS encoding of the start of the partition */
2660 pp->cyl = start_sec / heads_by_sects;
2661 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2662 / sdebug_sectors_per;
2663 pp->sector = (start_sec % sdebug_sectors_per) + 1;
/* CHS encoding of the end of the partition */
2665 pp->end_cyl = end_sec / heads_by_sects;
2666 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2667 / sdebug_sectors_per;
2668 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2670 pp->start_sect = start_sec;
2671 pp->nr_sects = end_sec - start_sec + 1;
2672 pp->sys_ind = 0x83; /* plain Linux partition */
/*
 * Complete a command either immediately (delta_jiff <= 0) or later via a
 * per-slot timer.  Simulates autosense by copying the device's stored
 * sense data into the command on CHECK CONDITION.  Returns non-zero
 * (busy) when no free slot remains in the queued array.
 */
2676 static int schedule_resp(struct scsi_cmnd * cmnd,
2677 struct sdebug_dev_info * devip,
2678 done_funct_t done, int scsi_result, int delta_jiff)
2680 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2682 struct scsi_device * sdp = cmnd->device;
2684 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2685 "non-zero result=0x%x\n", sdp->host->host_no,
2686 sdp->channel, sdp->id, sdp->lun, scsi_result);
2689 if (cmnd && devip) {
2690 /* simulate autosense by this driver */
2691 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2692 memcpy(cmnd->sense_buffer, devip->sense_buff,
2693 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2694 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
/* immediate completion path: no timer involved */
2696 if (delta_jiff <= 0) {
2698 cmnd->result = scsi_result;
2703 unsigned long iflags;
2705 struct sdebug_queued_cmd * sqcp = NULL;
/* find a free slot in the queued array (search body elided here) */
2707 spin_lock_irqsave(&queued_arr_lock, iflags);
2708 for (k = 0; k < scsi_debug_max_queue; ++k) {
2709 sqcp = &queued_arr[k];
2713 if (k >= scsi_debug_max_queue) {
2714 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2715 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2716 return 1; /* report busy to mid level */
/* stash command, result and callback; arm the slot's timer */
2719 sqcp->a_cmnd = cmnd;
2720 sqcp->scsi_result = scsi_result;
2721 sqcp->done_funct = done;
2722 sqcp->cmnd_timer.function = timer_intr_handler;
2723 sqcp->cmnd_timer.data = k;
2724 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2725 add_timer(&sqcp->cmnd_timer);
2726 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2732 /* Note: The following macros create attribute files in the
2733 /sys/module/scsi_debug/parameters directory. Unfortunately this
2734 driver is unaware of a change and cannot trigger auxiliary actions
2735 as it can when the corresponding attribute in the
2736 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
/* Module parameters: each maps a "scsi_debug.<name>=" boot/modprobe
 * option onto the corresponding scsi_debug_* global.  S_IWUSR entries
 * are also writable at runtime via /sys/module/scsi_debug/parameters. */
2738 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2739 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2740 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2741 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2742 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2743 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2744 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2745 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2746 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2747 module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2748 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2749 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2750 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2751 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
2752 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2753 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2754 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2755 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2756 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2757 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2758 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2759 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2760 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2761 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2762 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2763 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
2764 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2765 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2766 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2767 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2768 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2769 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2770 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2771 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2773 module_param_named(write_same_length, scsi_debug_write_same_length, int,
/* Standard module identification */
2776 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2777 MODULE_DESCRIPTION("SCSI debug adapter driver");
2778 MODULE_LICENSE("GPL");
2779 MODULE_VERSION(SCSI_DEBUG_VERSION);
/* One-line help text per parameter, shown by modinfo */
2781 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2782 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2783 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2784 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2785 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2786 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2787 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2788 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2789 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2790 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2791 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2792 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2793 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2794 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
2795 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2796 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2797 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2798 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2799 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
2800 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2801 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2802 MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
2803 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2804 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2805 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2806 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
2807 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2808 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2809 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2810 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2811 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2812 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2813 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2814 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2815 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
/* Static buffer backing the host "info" string below (single writer). */
2817 static char sdebug_info[256];
/*
 * Host template .info hook: format a one-line description of this
 * adapter (version, ram size, current opts) into sdebug_info.
 */
2819 static const char * scsi_debug_info(struct Scsi_Host * shp)
2821 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2822 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2823 scsi_debug_version_date, scsi_debug_dev_size_mb,
2828 /* scsi_debug_proc_info
2829 * Used if the driver currently has no own support for /proc/scsi
/*
 * /proc/scsi handler.  Write path (inout set): parse an integer from the
 * user buffer into scsi_debug_opts (requires CAP_SYS_ADMIN and
 * CAP_SYS_RAWIO).  Read path: format driver state/statistics into
 * 'buffer' and adjust start/len for the requested offset window.
 */
2831 static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
2832 int length, int inout)
2834 int len, pos, begin;
2837 orig_length = length;
/* cap the copied input at 15 bytes — enough for an int in ASCII */
2841 int minLen = length > 15 ? 15 : length;
2843 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2845 memcpy(arr, buffer, minLen);
2847 if (1 != sscanf(arr, "%d", &pos))
2849 scsi_debug_opts = pos;
/* restart the every_nth error-injection counter on opts change */
2850 if (scsi_debug_every_nth != 0)
2851 scsi_debug_cmnd_count = 0;
2855 pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
2857 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2858 "every_nth=%d(curr:%d)\n"
2859 "delay=%d, max_luns=%d, scsi_level=%d\n"
2860 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2861 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2862 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2863 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2864 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2865 scsi_debug_cmnd_count, scsi_debug_delay,
2866 scsi_debug_max_luns, scsi_debug_scsi_level,
2867 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2868 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2869 num_host_resets, dix_reads, dix_writes, dif_errors);
2874 *start = buffer + (offset - begin); /* Start of wanted data */
2875 len -= (offset - begin);
/* sysfs driver attribute "delay": response delay in jiffies. */
2881 static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2883 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
/* store: accept a non-negative decimal integer; reject anything else */
2886 static ssize_t sdebug_delay_store(struct device_driver * ddp,
2887 const char * buf, size_t count)
2892 if (1 == sscanf(buf, "%10s", work)) {
2893 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2894 scsi_debug_delay = delay;
2900 DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2901 sdebug_delay_store);
/* sysfs driver attribute "opts": debug option bit mask (shown in hex). */
2903 static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2905 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
/* store: accept "0x<hex>" or plain decimal; resets every_nth counter */
2908 static ssize_t sdebug_opts_store(struct device_driver * ddp,
2909 const char * buf, size_t count)
2914 if (1 == sscanf(buf, "%10s", work)) {
2915 if (0 == strnicmp(work,"0x", 2)) {
2916 if (1 == sscanf(&work[2], "%x", &opts))
2919 if (1 == sscanf(work, "%d", &opts))
2925 scsi_debug_opts = opts;
2926 scsi_debug_cmnd_count = 0;
2929 DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
/* sysfs driver attribute "ptype": simulated SCSI peripheral type. */
2932 static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2934 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2936 static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2937 const char * buf, size_t count)
2941 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2942 scsi_debug_ptype = n;
2947 DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
/* sysfs driver attribute "dsense": descriptor vs fixed sense format. */
2949 static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2951 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2953 static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2954 const char * buf, size_t count)
2958 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2959 scsi_debug_dsense = n;
2964 DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2965 sdebug_dsense_store);
/* sysfs driver attribute "fake_rw": skip data copy on reads/writes. */
2967 static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2969 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2971 static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2972 const char * buf, size_t count)
2976 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2977 scsi_debug_fake_rw = n;
2982 DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2983 sdebug_fake_rw_store);
/* sysfs driver attribute "no_lun_0": suppress LUN 0 on each target. */
2985 static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2987 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2989 static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2990 const char * buf, size_t count)
2994 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2995 scsi_debug_no_lun_0 = n;
3000 DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
3001 sdebug_no_lun_0_store);
/* sysfs driver attribute "num_tgts": targets simulated per host.
 * Unlike the module-parameter file, this store triggers a rescan via
 * sdebug_max_tgts_luns(). */
3003 static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
3005 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
3007 static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
3008 const char * buf, size_t count)
3012 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3013 scsi_debug_num_tgts = n;
3014 sdebug_max_tgts_luns();
3019 DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
3020 sdebug_num_tgts_store);
/* Read-only sysfs attributes: ram store size and partition count are
 * fixed at module load time. */
3022 static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
3024 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3026 DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
3028 static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
3030 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3032 DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
/* sysfs driver attribute "every_nth": inject an error every nth command;
 * storing a value restarts the command counter. */
3034 static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
3036 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3038 static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
3039 const char * buf, size_t count)
3043 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3044 scsi_debug_every_nth = nth;
3045 scsi_debug_cmnd_count = 0;
3050 DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
3051 sdebug_every_nth_store);
/* sysfs driver attribute "max_luns": LUNs per target; store triggers a
 * rescan via sdebug_max_tgts_luns(). */
3053 static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
3055 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3057 static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
3058 const char * buf, size_t count)
3062 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3063 scsi_debug_max_luns = n;
3064 sdebug_max_tgts_luns();
3069 DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
3070 sdebug_max_luns_store);
/* sysfs driver attribute "max_queue": queued-command slots in use;
 * bounded to (0, SCSI_DEBUG_CANQUEUE]. */
3072 static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
3074 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3076 static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
3077 const char * buf, size_t count)
3081 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3082 (n <= SCSI_DEBUG_CANQUEUE)) {
3083 scsi_debug_max_queue = n;
3088 DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
3089 sdebug_max_queue_store);
/* Read-only sysfs attributes: ULD-attach suppression flag and the
 * simulated SCSI level. */
3091 static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
3093 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3095 DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
3097 static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
3099 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3101 DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
/* sysfs driver attribute "virtual_gb": virtual capacity in GiB; store
 * recomputes sdebug_capacity to match. */
3103 static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
3105 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3107 static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
3108 const char * buf, size_t count)
3112 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3113 scsi_debug_virtual_gb = n;
3115 sdebug_capacity = get_sdebug_capacity();
3121 DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
3122 sdebug_virtual_gb_store);
/* sysfs driver attribute "add_host": writing a positive delta adds that
 * many simulated adapters, a negative delta removes them. */
3124 static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
3126 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3129 static ssize_t sdebug_add_host_store(struct device_driver * ddp,
3130 const char * buf, size_t count)
3134 if (sscanf(buf, "%d", &delta_hosts) != 1)
3136 if (delta_hosts > 0) {
3138 sdebug_add_adapter();
3139 } while (--delta_hosts);
3140 } else if (delta_hosts < 0) {
3142 sdebug_remove_adapter();
3143 } while (++delta_hosts);
3147 DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
3148 sdebug_add_host_store);
/* sysfs driver attribute "vpd_use_hostno": whether VPD device ids
 * incorporate the host number (writable), plus the read-only logical
 * block size attribute. */
3150 static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
3153 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3155 static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
3156 const char * buf, size_t count)
3160 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3161 scsi_debug_vpd_use_hostno = n;
3166 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
3167 sdebug_vpd_use_hostno_store);
3169 static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
3171 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3173 DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
/* Read-only data-integrity sysfs attributes: DIX mask, DIF type,
 * guard-tag checksum type, and application-tag ownership. */
3175 static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
3177 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3179 DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
3181 static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
3183 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3185 DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
3187 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
3189 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
3191 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
3193 static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
3195 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3197 DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
/* sysfs driver attribute "map" (read-only): list of mapped (provisioned)
 * block ranges; with LBP disabled everything is reported mapped. */
3199 static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3203 if (!scsi_debug_lbp())
3204 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3205 sdebug_store_sectors);
3207 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3209 buf[count++] = '\n';
3214 DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
/* sysfs driver attribute "removable": claim removable media (bool). */
3216 static ssize_t sdebug_removable_show(struct device_driver *ddp,
3219 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3221 static ssize_t sdebug_removable_store(struct device_driver *ddp,
3222 const char *buf, size_t count)
3226 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3227 scsi_debug_removable = (n > 0);
3232 DRIVER_ATTR(removable, S_IRUGO | S_IWUSR, sdebug_removable_show,
3233 sdebug_removable_store);
3236 /* Note: The following function creates attribute files in the
3237 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3238 files (over those found in the /sys/module/scsi_debug/parameters
3239 directory) is that auxiliary actions can be triggered when an attribute
3240 is changed. For example see: sdebug_add_host_store() above.
/*
 * Register every driver attribute defined above.  Errors are OR-ed
 * together, so a non-zero return means at least one file failed —
 * without identifying which.
 */
3242 static int do_create_driverfs_files(void)
3246 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3247 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
3248 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3249 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3250 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3251 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3252 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3253 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3254 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3255 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3256 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3257 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3258 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3259 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3260 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_removable);
3261 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3262 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3263 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3264 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3265 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
3266 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
3267 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
3268 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3269 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
/*
 * Remove all driver attribute files, in reverse order of creation.
 * Counterpart of do_create_driverfs_files().
 */
3273 static void do_remove_driverfs_files(void)
3275 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
3276 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
3277 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
3278 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
3279 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
3280 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3281 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3282 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3283 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3284 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3285 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3286 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_removable);
3287 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3288 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3289 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3290 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3291 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3292 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3293 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3294 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3295 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3296 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3297 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
3298 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
/* Root device acting as parent for the pseudo bus ("pseudo_0"). */
3301 struct device *pseudo_primary;
/*
 * Module initialization: validate module parameters, allocate the
 * vmalloc()ed ramdisk backing store (plus optional DIF metadata and
 * logical-block-provisioning bitmap), register the pseudo root device,
 * bus and driver, then add the requested number of simulated adapters.
 * Returns 0 on success; error paths unwind the registrations in reverse.
 */
3303 static int __init scsi_debug_init(void)
/* Only a fixed whitelist of sector sizes is accepted. */
3310 switch (scsi_debug_sector_size) {
3317 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3318 scsi_debug_sector_size);
/* DIF protection type must be one of the SD_DIF_TYPE[0-3] values. */
3322 switch (scsi_debug_dif) {
3324 case SD_DIF_TYPE0_PROTECTION:
3325 case SD_DIF_TYPE1_PROTECTION:
3326 case SD_DIF_TYPE2_PROTECTION:
3327 case SD_DIF_TYPE3_PROTECTION:
3331 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
/* guard and ato are boolean module parameters. */
3335 if (scsi_debug_guard > 1) {
3336 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3340 if (scsi_debug_ato > 1) {
3341 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
/* physical-block exponent is a 4-bit field in READ CAPACITY(16). */
3345 if (scsi_debug_physblk_exp > 15) {
3346 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3347 scsi_debug_physblk_exp);
/* lowest-aligned LBA is a 14-bit field, hence the 0x3fff limit. */
3351 if (scsi_debug_lowest_aligned > 0x3fff) {
3352 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3353 scsi_debug_lowest_aligned);
/* Size the shared ramdisk: at least 1 MB, measured in sectors. */
3357 if (scsi_debug_dev_size_mb < 1)
3358 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3359 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3360 sdebug_store_sectors = sz / scsi_debug_sector_size;
3361 sdebug_capacity = get_sdebug_capacity();
3363 /* play around with geometry, don't waste too much on track 0 */
3365 sdebug_sectors_per = 32;
3366 if (scsi_debug_dev_size_mb >= 16)
3368 else if (scsi_debug_dev_size_mb >= 256)
3370 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3371 (sdebug_sectors_per * sdebug_heads);
3372 if (sdebug_cylinders_per >= 1024) {
3373 /* other LLDs do this; implies >= 1GB ram disk ... */
3375 sdebug_sectors_per = 63;
3376 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3377 (sdebug_sectors_per * sdebug_heads);
/* Allocate and zero the data store shared by all simulated devices. */
3380 fake_storep = vmalloc(sz);
3381 if (NULL == fake_storep) {
3382 printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3385 memset(fake_storep, 0, sz);
3386 if (scsi_debug_num_parts > 0)
3387 sdebug_build_parts(fake_storep, sz);
/* Optional DIF tuple store, one sd_dif_tuple per sector. */
3389 if (scsi_debug_dif) {
3392 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3393 dif_storep = vmalloc(dif_size);
3395 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3396 dif_size, dif_storep);
3398 if (dif_storep == NULL) {
3399 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
/* 0xff marks protection info as uninitialized/escape. */
3404 memset(dif_storep, 0xff, dif_size);
3407 /* Logical Block Provisioning */
3408 if (scsi_debug_lbp()) {
/* Clamp the thin-provisioning parameters to their field limits. */
3409 scsi_debug_unmap_max_blocks =
3410 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3412 scsi_debug_unmap_max_desc =
3413 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3415 scsi_debug_unmap_granularity =
3416 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3418 if (scsi_debug_unmap_alignment &&
3419 scsi_debug_unmap_granularity <=
3420 scsi_debug_unmap_alignment) {
3422 "%s: ERR: unmap_granularity <= unmap_alignment\n",
/* One bit per provisioning block; bitmap tracks mapped state. */
3427 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3428 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3430 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3433 if (map_storep == NULL) {
3434 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3439 bitmap_zero(map_storep, map_size);
3441 /* Map first 1KB for partition table */
3442 if (scsi_debug_num_parts)
/* Register pseudo root device, bus, driver and their sysfs files. */
3446 pseudo_primary = root_device_register("pseudo_0");
3447 if (IS_ERR(pseudo_primary)) {
3448 printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3449 ret = PTR_ERR(pseudo_primary);
3452 ret = bus_register(&pseudo_lld_bus);
3454 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3458 ret = driver_register(&sdebug_driverfs_driver);
3460 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3464 ret = do_create_driverfs_files();
3466 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
/*
 * scsi_debug_add_host is reset to 0 and then re-incremented by each
 * successful sdebug_add_adapter(), so afterwards it reflects the
 * number of hosts actually built.
 */
3473 host_to_add = scsi_debug_add_host;
3474 scsi_debug_add_host = 0;
3476 for (k = 0; k < host_to_add; k++) {
3477 if (sdebug_add_adapter()) {
3478 printk(KERN_ERR "scsi_debug_init: "
3479 "sdebug_add_adapter failed k=%d\n", k);
3484 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3485 printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3486 scsi_debug_add_host);
/* Error unwind: undo registrations in reverse order of creation. */
3491 do_remove_driverfs_files();
3492 driver_unregister(&sdebug_driverfs_driver);
3494 bus_unregister(&pseudo_lld_bus);
3496 root_device_unregister(pseudo_primary);
/*
 * Module teardown: remove every simulated adapter, then unwind the
 * sysfs files, driver, bus and root device registered in init.
 * NOTE(review): k counts the registered hosts; presumably a loop (not
 * shown here) calls sdebug_remove_adapter() k times -- confirm.
 */
3507 static void __exit scsi_debug_exit(void)
3509 int k = scsi_debug_add_host;
3513 sdebug_remove_adapter();
3514 do_remove_driverfs_files();
3515 driver_unregister(&sdebug_driverfs_driver);
3516 bus_unregister(&pseudo_lld_bus);
3517 root_device_unregister(pseudo_primary);
/* device_initcall allows built-in use; module_exit handles unload. */
3525 device_initcall(scsi_debug_init);
3526 module_exit(scsi_debug_exit);
/*
 * Device-model release callback for a pseudo adapter; invoked when the
 * last reference to sdbg_host->dev is dropped.
 * NOTE(review): presumably frees sdbg_host here -- confirm against the
 * kzalloc() in sdebug_add_adapter().
 */
3528 static void sdebug_release_adapter(struct device * dev)
3530 struct sdebug_host_info *sdbg_host;
3532 sdbg_host = to_sdebug_host(dev);
/*
 * Create one simulated adapter: allocate the host info, pre-create a
 * dev_info for every target/LUN combination, link the host onto the
 * global sdebug_host_list, and register it on the pseudo bus (which
 * triggers sdebug_driver_probe). Returns 0 on success; on failure the
 * partially built dev_info list is torn down.
 */
3536 static int sdebug_add_adapter(void)
3538 int k, devs_per_host;
3540 struct sdebug_host_info *sdbg_host;
3541 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3543 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3544 if (NULL == sdbg_host) {
3545 printk(KERN_ERR "%s: out of memory at line %d\n",
3546 __func__, __LINE__);
3550 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
/* One dev_info per (target, LUN) pair on this host. */
3552 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3553 for (k = 0; k < devs_per_host; k++) {
3554 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3555 if (!sdbg_devinfo) {
3556 printk(KERN_ERR "%s: out of memory at line %d\n",
3557 __func__, __LINE__);
/* Publish the host on the global list under the list spinlock. */
3563 spin_lock(&sdebug_host_list_lock);
3564 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3565 spin_unlock(&sdebug_host_list_lock);
/* Registering on the pseudo bus causes the driver core to probe it. */
3567 sdbg_host->dev.bus = &pseudo_lld_bus;
3568 sdbg_host->dev.parent = pseudo_primary;
3569 sdbg_host->dev.release = &sdebug_release_adapter;
3570 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3572 error = device_register(&sdbg_host->dev);
3577 ++scsi_debug_add_host;
/* Failure path: free every dev_info created above. */
3581 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3583 list_del(&sdbg_devinfo->dev_list);
3584 kfree(sdbg_devinfo);
/*
 * Remove the most recently added adapter: pop the tail of
 * sdebug_host_list under the lock, then unregister its device and
 * decrement the host count.
 * NOTE(review): sdbg_host may still be NULL if the list was empty;
 * presumably a NULL check (not shown) guards the unregister -- confirm.
 */
3591 static void sdebug_remove_adapter(void)
3593 struct sdebug_host_info * sdbg_host = NULL;
3595 spin_lock(&sdebug_host_list_lock);
3596 if (!list_empty(&sdebug_host_list)) {
3597 sdbg_host = list_entry(sdebug_host_list.prev,
3598 struct sdebug_host_info, host_list);
3599 list_del(&sdbg_host->host_list);
3601 spin_unlock(&sdebug_host_list_lock);
3606 device_unregister(&sdbg_host->dev);
3607 --scsi_debug_add_host;
/*
 * Core command dispatcher (locked variant, wrapped by DEF_SCSI_QCMD).
 * Validates the addressed target/LUN, optionally injects errors every
 * Nth command per scsi_debug_opts, then dispatches on the CDB opcode to
 * the resp_*() handlers. Every path completes via schedule_resp(),
 * which applies the configured response delay.
 */
3611 int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3613 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3616 unsigned long long lba;
3619 int target = SCpnt->device->id;
3620 struct sdebug_dev_info *devip = NULL;
/* Error-injection flags, set below and consumed by READ/WRITE cases. */
3621 int inj_recovered = 0;
3622 int inj_transport = 0;
3625 int delay_override = 0;
3628 scsi_set_resid(SCpnt, 0);
/* With OPT_NOISE, hex-dump the CDB bytes. */
3629 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3630 printk(KERN_INFO "scsi_debug: cmd ");
3631 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3632 printk("%02x ", (int)cmd[k]);
/* Reject commands addressed to the initiator's own id. */
3636 if (target == SCpnt->device->host->hostt->this_id) {
3637 printk(KERN_INFO "scsi_debug: initiator's id used as "
3639 return schedule_resp(SCpnt, NULL, done,
3640 DID_NO_CONNECT << 16, 0);
/* Out-of-range LUNs get DID_NO_CONNECT, except the well-known LUN. */
3643 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3644 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3645 return schedule_resp(SCpnt, NULL, done,
3646 DID_NO_CONNECT << 16, 0);
3647 devip = devInfoReg(SCpnt->device);
3649 return schedule_resp(SCpnt, NULL, done,
3650 DID_NO_CONNECT << 16, 0);
/*
 * every_nth error injection: every |every_nth|-th command either
 * times out or arms one of the inj_* flags for the R/W cases below.
 */
3652 if ((scsi_debug_every_nth != 0) &&
3653 (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3654 scsi_debug_cmnd_count = 0;
3655 if (scsi_debug_every_nth < -1)
3656 scsi_debug_every_nth = -1;
3657 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3658 return 0; /* ignore command causing timeout */
3659 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
3660 scsi_medium_access_command(SCpnt))
3661 return 0; /* time out reads and writes */
3662 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3663 inj_recovered = 1; /* to reads and writes below */
3664 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3665 inj_transport = 1; /* to reads and writes below */
3666 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3667 inj_dif = 1; /* to reads and writes below */
3668 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3669 inj_dix = 1; /* to reads and writes below */
/* The well-known REPORT LUNS LUN accepts only a few opcodes. */
3676 case TEST_UNIT_READY:
3678 break; /* only allowable wlun commands */
3680 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3681 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3682 "not supported for wlun\n", *cmd);
3683 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3685 errsts = check_condition_result;
3686 return schedule_resp(SCpnt, devip, done, errsts,
/* Main opcode dispatch. */
3692 case INQUIRY: /* mandatory, ignore unit attention */
3694 errsts = resp_inquiry(SCpnt, target, devip);
3696 case REQUEST_SENSE: /* mandatory, ignore unit attention */
3698 errsts = resp_requests(SCpnt, devip);
3700 case REZERO_UNIT: /* actually this is REWIND for SSC */
3702 errsts = resp_start_stop(SCpnt, devip);
3704 case ALLOW_MEDIUM_REMOVAL:
3705 errsts = check_readiness(SCpnt, 1, devip);
3708 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3709 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3710 cmd[4] ? "inhibited" : "enabled");
3712 case SEND_DIAGNOSTIC: /* mandatory */
3713 errsts = check_readiness(SCpnt, 1, devip);
3715 case TEST_UNIT_READY: /* mandatory */
3717 errsts = check_readiness(SCpnt, 0, devip);
3720 errsts = check_readiness(SCpnt, 1, devip);
3723 errsts = check_readiness(SCpnt, 1, devip);
3726 errsts = check_readiness(SCpnt, 1, devip);
3729 errsts = check_readiness(SCpnt, 1, devip);
3732 errsts = resp_readcap(SCpnt, devip);
/* SERVICE ACTION IN(16): READ CAPACITY(16) or GET LBA STATUS. */
3734 case SERVICE_ACTION_IN:
3735 if (cmd[1] == SAI_READ_CAPACITY_16)
3736 errsts = resp_readcap16(SCpnt, devip);
3737 else if (cmd[1] == SAI_GET_LBA_STATUS) {
/* GET LBA STATUS only makes sense with provisioning enabled. */
3739 if (scsi_debug_lbp() == 0) {
3740 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3741 INVALID_COMMAND_OPCODE, 0);
3742 errsts = check_condition_result;
3744 errsts = resp_get_lba_status(SCpnt, devip);
3746 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3748 errsts = check_condition_result;
/* Only REPORT TARGET PORT GROUPS is supported under MAINTENANCE IN. */
3751 case MAINTENANCE_IN:
3752 if (MI_REPORT_TARGET_PGS != cmd[1]) {
3753 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3755 errsts = check_condition_result;
3758 errsts = resp_report_tgtpgs(SCpnt, devip);
3763 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3764 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3766 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3767 INVALID_COMMAND_OPCODE, 0);
3768 errsts = check_condition_result;
/* Warn when RDPROTECT/WRPROTECT is 0 on a Type 1/3 DIF device. */
3772 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3773 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3774 (cmd[1] & 0xe0) == 0)
3775 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3780 errsts = check_readiness(SCpnt, 0, devip);
/* fake_rw skips touching the store; read path with error injection. */
3783 if (scsi_debug_fake_rw)
3785 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3786 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3787 if (inj_recovered && (0 == errsts)) {
3788 mk_sense_buffer(devip, RECOVERED_ERROR,
3789 THRESHOLD_EXCEEDED, 0);
3790 errsts = check_condition_result;
3791 } else if (inj_transport && (0 == errsts)) {
3792 mk_sense_buffer(devip, ABORTED_COMMAND,
3793 TRANSPORT_PROBLEM, ACK_NAK_TO);
3794 errsts = check_condition_result;
3795 } else if (inj_dif && (0 == errsts)) {
3796 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3797 errsts = illegal_condition_result;
3798 } else if (inj_dix && (0 == errsts)) {
3799 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3800 errsts = illegal_condition_result;
3803 case REPORT_LUNS: /* mandatory, ignore unit attention */
3805 errsts = resp_report_luns(SCpnt, devip);
3807 case VERIFY: /* 10 byte SBC-2 command */
3808 errsts = check_readiness(SCpnt, 0, devip);
3813 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3814 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3816 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3817 INVALID_COMMAND_OPCODE, 0);
3818 errsts = check_condition_result;
3822 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3823 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3824 (cmd[1] & 0xe0) == 0)
3825 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3830 errsts = check_readiness(SCpnt, 0, devip);
/* Write path mirrors the read path's error injection. */
3833 if (scsi_debug_fake_rw)
3835 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3836 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3837 if (inj_recovered && (0 == errsts)) {
3838 mk_sense_buffer(devip, RECOVERED_ERROR,
3839 THRESHOLD_EXCEEDED, 0);
3840 errsts = check_condition_result;
3841 } else if (inj_dif && (0 == errsts)) {
3842 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3843 errsts = illegal_condition_result;
3844 } else if (inj_dix && (0 == errsts)) {
3845 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3846 errsts = illegal_condition_result;
/* WRITE SAME is rejected unless the matching LBP mode is enabled. */
3852 if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
3853 (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0) ) {
3854 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3855 INVALID_FIELD_IN_CDB, 0);
3856 errsts = check_condition_result;
3862 errsts = check_readiness(SCpnt, 0, devip);
3865 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3866 errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3869 errsts = check_readiness(SCpnt, 0, devip);
/* UNMAP requires LBPU support and a non-zero descriptor limit. */
3873 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
3874 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3875 INVALID_COMMAND_OPCODE, 0);
3876 errsts = check_condition_result;
3878 errsts = resp_unmap(SCpnt, devip);
3882 errsts = resp_mode_sense(SCpnt, target, devip);
/* MODE SELECT: second arg 1 = 6-byte CDB form, 0 = 10-byte form. */
3885 errsts = resp_mode_select(SCpnt, 1, devip);
3887 case MODE_SELECT_10:
3888 errsts = resp_mode_select(SCpnt, 0, devip);
3891 errsts = resp_log_sense(SCpnt, devip);
3893 case SYNCHRONIZE_CACHE:
3895 errsts = check_readiness(SCpnt, 0, devip);
3898 errsts = check_readiness(SCpnt, 1, devip);
/* XDWRITEREAD(10) must arrive as a bidirectional command. */
3900 case XDWRITEREAD_10:
3901 if (!scsi_bidi_cmnd(SCpnt)) {
3902 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3903 INVALID_FIELD_IN_CDB, 0);
3904 errsts = check_condition_result;
3908 errsts = check_readiness(SCpnt, 0, devip);
3911 if (scsi_debug_fake_rw)
3913 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3914 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3917 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3920 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
/* 32-byte READ(32)/WRITE(32) CDBs used with DIF Type 2. */
3922 case VARIABLE_LENGTH_CMD:
3923 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3925 if ((cmd[10] & 0xe0) == 0)
3927 "Unprotected RD/WR to DIF device\n");
3929 if (cmd[9] == READ_32) {
3930 BUG_ON(SCpnt->cmd_len < 32);
3934 if (cmd[9] == WRITE_32) {
3935 BUG_ON(SCpnt->cmd_len < 32);
3940 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3941 INVALID_FIELD_IN_CDB, 0);
3942 errsts = check_condition_result;
/* Unknown opcode: pending unit attention wins, else INVALID OPCODE. */
3946 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3947 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3948 "supported\n", *cmd);
3949 errsts = check_readiness(SCpnt, 1, devip);
3951 break; /* Unit attention takes precedence */
3952 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3953 errsts = check_condition_result;
/* Complete via the (possibly delayed) response scheduler. */
3956 return schedule_resp(SCpnt, devip, done, errsts,
3957 (delay_override ? 0 : scsi_debug_delay));
/* Generates scsi_debug_queuecommand(), the host-lock-taking wrapper
 * around scsi_debug_queuecommand_lck(). */
3960 static DEF_SCSI_QCMD(scsi_debug_queuecommand)
/*
 * SCSI host template for the simulated adapters. Note can_queue is
 * overwritten with scsi_debug_max_queue in sdebug_driver_probe()
 * before each host is allocated.
 */
3962 static struct scsi_host_template sdebug_driver_template = {
3963 .proc_info = scsi_debug_proc_info,
3964 .proc_name = sdebug_proc_name,
3965 .name = "SCSI DEBUG",
3966 .info = scsi_debug_info,
3967 .slave_alloc = scsi_debug_slave_alloc,
3968 .slave_configure = scsi_debug_slave_configure,
3969 .slave_destroy = scsi_debug_slave_destroy,
3970 .ioctl = scsi_debug_ioctl,
3971 .queuecommand = scsi_debug_queuecommand,
3972 .eh_abort_handler = scsi_debug_abort,
3973 .eh_bus_reset_handler = scsi_debug_bus_reset,
3974 .eh_device_reset_handler = scsi_debug_device_reset,
3975 .eh_host_reset_handler = scsi_debug_host_reset,
3976 .bios_param = scsi_debug_biosparam,
3977 .can_queue = SCSI_DEBUG_CANQUEUE,
3979 .sg_tablesize = 256,
3981 .max_sectors = 0xffff,
3982 .use_clustering = DISABLE_CLUSTERING,
3983 .module = THIS_MODULE,
/*
 * Bus probe callback: allocate a Scsi_Host for the pseudo adapter
 * device, configure target/LUN limits and DIF/DIX protection
 * capabilities from the module parameters, then add and scan the host.
 */
3986 static int sdebug_driver_probe(struct device * dev)
3989 struct sdebug_host_info *sdbg_host;
3990 struct Scsi_Host *hpnt;
3993 sdbg_host = to_sdebug_host(dev);
3995 sdebug_driver_template.can_queue = scsi_debug_max_queue;
3996 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3998 printk(KERN_ERR "%s: scsi_register failed\n", __func__);
/* Cross-link host info and Scsi_Host via hostdata. */
4003 sdbg_host->shost = hpnt;
4004 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
/* Leave room above the initiator's own id when it is in range. */
4005 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
4006 hpnt->max_id = scsi_debug_num_tgts + 1;
4008 hpnt->max_id = scsi_debug_num_tgts;
4009 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
/* Build the SHOST_DI[FX]_* capability mask from scsi_debug_dif/dix. */
4013 switch (scsi_debug_dif) {
4015 case SD_DIF_TYPE1_PROTECTION:
4016 host_prot = SHOST_DIF_TYPE1_PROTECTION;
4018 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
4021 case SD_DIF_TYPE2_PROTECTION:
4022 host_prot = SHOST_DIF_TYPE2_PROTECTION;
4024 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
4027 case SD_DIF_TYPE3_PROTECTION:
4028 host_prot = SHOST_DIF_TYPE3_PROTECTION;
4030 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
4035 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
4039 scsi_host_set_prot(hpnt, host_prot);
4041 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
4042 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
4043 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
4044 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
4045 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
4046 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
4047 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
4048 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
/* guard=1 selects the IP checksum guard, otherwise T10 CRC. */
4050 if (scsi_debug_guard == 1)
4051 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
4053 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
4055 error = scsi_add_host(hpnt, &sdbg_host->dev);
4057 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
4059 scsi_host_put(hpnt);
4061 scsi_scan_host(hpnt);
/*
 * Bus remove callback: take the SCSI host offline, free every
 * sdebug_dev_info hung off the host, and drop the host reference.
 */
4067 static int sdebug_driver_remove(struct device * dev)
4069 struct sdebug_host_info *sdbg_host;
4070 struct sdebug_dev_info *sdbg_devinfo, *tmp;
4072 sdbg_host = to_sdebug_host(dev);
4075 printk(KERN_ERR "%s: Unable to locate host info\n",
4080 scsi_remove_host(sdbg_host->shost);
4082 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4084 list_del(&sdbg_devinfo->dev_list);
4085 kfree(sdbg_devinfo);
/* Final put releases the Scsi_Host allocated in probe. */
4088 scsi_host_put(sdbg_host->shost);
/*
 * Bus match callback for the pseudo bus. There is only one driver on
 * this bus; NOTE(review): presumably matches unconditionally (body not
 * visible here) -- confirm.
 */
4092 static int pseudo_lld_bus_match(struct device *dev,
4093 struct device_driver *dev_driver)
4098 static struct bus_type pseudo_lld_bus = {
4100 .match = pseudo_lld_bus_match,
4101 .probe = sdebug_driver_probe,
4102 .remove = sdebug_driver_remove,