scsi_debug: pinpoint invalid field in sense data
drivers/scsi/scsi_debug.c (firefly-linux-kernel-4.4.55.git)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  *  This version is more generic, simulating a variable number of disks
10  *  (or disk-like devices) sharing a common amount of RAM. To be more
11  *  realistic, the simulated devices have the transport attributes of
12  *  SAS disks.
13  *
14  *
15  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
16  *
17  *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18  *   dpg: work for devfs large number of disks [20010809]
19  *        forked for lk 2.5 series [20011216, 20020101]
20  *        use vmalloc() more inquiry+mode_sense [20020302]
21  *        add timers for delayed responses [20020721]
22  *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23  *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24  *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
26  */
27
28 #include <linux/module.h>
29
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/atomic.h>
48 #include <linux/hrtimer.h>
49
50 #include <net/checksum.h>
51
52 #include <asm/unaligned.h>
53
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsicam.h>
59 #include <scsi/scsi_eh.h>
60 #include <scsi/scsi_tcq.h>
61 #include <scsi/scsi_dbg.h>
62
63 #include "sd.h"
64 #include "scsi_logging.h"
65
66 #define SCSI_DEBUG_VERSION "1.85"
67 static const char *scsi_debug_version_date = "20141022";
68
69 #define MY_NAME "scsi_debug"
70
71 /* Additional Sense Code (ASC) */
72 #define NO_ADDITIONAL_SENSE 0x0
73 #define LOGICAL_UNIT_NOT_READY 0x4
74 #define UNRECOVERED_READ_ERR 0x11
75 #define PARAMETER_LIST_LENGTH_ERR 0x1a
76 #define INVALID_OPCODE 0x20
77 #define INVALID_COMMAND_OPCODE 0x20
78 #define LBA_OUT_OF_RANGE 0x21
79 #define INVALID_FIELD_IN_CDB 0x24
80 #define INVALID_FIELD_IN_PARAM_LIST 0x26
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94
95 /* Additional Sense Code Qualifier (ASCQ) */
96 #define ACK_NAK_TO 0x3
97
98
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST   1
101 #define DEF_NUM_TGTS   1
102 #define DEF_MAX_LUNS   1
103 /* With these defaults, this driver will make 1 host with 1 target
104  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
105  */
106 #define DEF_ATO 1
107 #define DEF_DELAY   1           /* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB   8
109 #define DEF_DIF 0
110 #define DEF_DIX 0
111 #define DEF_D_SENSE   0
112 #define DEF_EVERY_NTH   0
113 #define DEF_FAKE_RW     0
114 #define DEF_GUARD 0
115 #define DEF_HOST_LOCK 0
116 #define DEF_LBPU 0
117 #define DEF_LBPWS 0
118 #define DEF_LBPWS10 0
119 #define DEF_LBPRZ 1
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0   0
123 #define DEF_NUM_PARTS   0
124 #define DEF_OPTS   0
125 #define DEF_OPT_BLKS 64
126 #define DEF_PHYSBLK_EXP 0
127 #define DEF_PTYPE   0
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
132 #define DEF_UNMAP_ALIGNMENT 0
133 #define DEF_UNMAP_GRANULARITY 1
134 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
135 #define DEF_UNMAP_MAX_DESC 256
136 #define DEF_VIRTUAL_GB   0
137 #define DEF_VPD_USE_HOSTNO 1
138 #define DEF_WRITESAME_LENGTH 0xFFFF
139 #define DELAY_OVERRIDDEN -9999
140
141 /* bit mask values for scsi_debug_opts */
142 #define SCSI_DEBUG_OPT_NOISE   1
143 #define SCSI_DEBUG_OPT_MEDIUM_ERR   2
144 #define SCSI_DEBUG_OPT_TIMEOUT   4
145 #define SCSI_DEBUG_OPT_RECOVERED_ERR   8
146 #define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
147 #define SCSI_DEBUG_OPT_DIF_ERR   32
148 #define SCSI_DEBUG_OPT_DIX_ERR   64
149 #define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
150 #define SCSI_DEBUG_OPT_SHORT_TRANSFER   0x100
151 #define SCSI_DEBUG_OPT_Q_NOISE  0x200
152 #define SCSI_DEBUG_OPT_ALL_TSF  0x400
153 #define SCSI_DEBUG_OPT_RARE_TSF 0x800
154 #define SCSI_DEBUG_OPT_N_WCE    0x1000
155 #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
156 #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
157 #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
158 /* When "every_nth" > 0 then modulo "every_nth" commands:
159  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
160  *   - a RECOVERED_ERROR is simulated on successful read and write
161  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
162  *   - a TRANSPORT_ERROR is simulated on successful read and write
163  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
164  *
165  * When "every_nth" < 0 then after "- every_nth" commands:
166  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
167  *   - a RECOVERED_ERROR is simulated on successful read and write
168  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
169  *   - a TRANSPORT_ERROR is simulated on successful read and write
170  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
171  * This will continue until some other action occurs (e.g. the user
172  * writing a new value (other than -1 or 1) to every_nth via sysfs).
173  */
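/* Illustrative example (not from the original header): loading with
 * "modprobe scsi_debug every_nth=100 opts=4" should make every 100th
 * command appear to time out, since 4 is SCSI_DEBUG_OPT_TIMEOUT. */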
174
175 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
176  * priority order. In the subset implemented here lower numbers have higher
177  * priority. The UA numbers should be a sequence starting from 0 with
178  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
179 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
180 #define SDEBUG_UA_BUS_RESET 1
181 #define SDEBUG_UA_MODE_CHANGED 2
182 #define SDEBUG_NUM_UAS 3
183
184 /* for check_readiness() */
185 #define UAS_ONLY 1
186 #define UAS_TUR 0
187
188 /* when SCSI_DEBUG_OPT_MEDIUM_ERR is set in scsi_debug_opts, a medium error
189  * is simulated at this sector on read commands: */
190 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
191 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
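/* Illustrative: e.g. "modprobe scsi_debug opts=2" (SCSI_DEBUG_OPT_MEDIUM_ERR)
 * should make reads touching sectors OPT_MEDIUM_ERR_ADDR through
 * OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1 fail with a MEDIUM ERROR. */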
192
193 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
194  * or "peripheral device" addressing (value 0) */
195 #define SAM2_LUN_ADDRESS_METHOD 0
196 #define SAM2_WLUN_REPORT_LUNS 0xc101
197
198 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
199  * (for response) at one time. Can be reduced by max_queue option. Command
200  * responses are not queued when delay=0 and ndelay=0. The per-device
201  * DEF_CMD_PER_LUN can be changed via sysfs:
202  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
203  * SCSI_DEBUG_CANQUEUE. */
204 #define SCSI_DEBUG_CANQUEUE_WORDS  9    /* a WORD here is BITS_PER_LONG bits */
205 #define SCSI_DEBUG_CANQUEUE  (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
206 #define DEF_CMD_PER_LUN  255
207
208 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
209 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
210 #endif
211
212 static int scsi_debug_add_host = DEF_NUM_HOST;
213 static int scsi_debug_ato = DEF_ATO;
214 static int scsi_debug_delay = DEF_DELAY;
215 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
216 static int scsi_debug_dif = DEF_DIF;
217 static int scsi_debug_dix = DEF_DIX;
218 static int scsi_debug_dsense = DEF_D_SENSE;
219 static int scsi_debug_every_nth = DEF_EVERY_NTH;
220 static int scsi_debug_fake_rw = DEF_FAKE_RW;
221 static unsigned int scsi_debug_guard = DEF_GUARD;
222 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
223 static int scsi_debug_max_luns = DEF_MAX_LUNS;
224 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
225 static atomic_t retired_max_queue;      /* if > 0 then was prior max_queue */
226 static int scsi_debug_ndelay = DEF_NDELAY;
227 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
228 static int scsi_debug_no_uld = 0;
229 static int scsi_debug_num_parts = DEF_NUM_PARTS;
230 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
231 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
232 static int scsi_debug_opts = DEF_OPTS;
233 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
234 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
235 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
236 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
237 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
238 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
239 static unsigned int scsi_debug_lbpu = DEF_LBPU;
240 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
241 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
242 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
243 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
244 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
245 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
246 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
247 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
248 static bool scsi_debug_removable = DEF_REMOVABLE;
249 static bool scsi_debug_clustering;
250 static bool scsi_debug_host_lock = DEF_HOST_LOCK;
251
252 static atomic_t sdebug_cmnd_count;
253 static atomic_t sdebug_completions;
254 static atomic_t sdebug_a_tsf;           /* counter of 'almost' TSFs */
255
256 #define DEV_READONLY(TGT)      (0)
257
258 static unsigned int sdebug_store_sectors;
259 static sector_t sdebug_capacity;        /* in sectors */
260
261 /* old BIOS geometry: the kernel may get rid of these, but some mode sense
262    pages may still need them */
263 static int sdebug_heads;                /* heads per disk */
264 static int sdebug_cylinders_per;        /* cylinders per surface */
265 static int sdebug_sectors_per;          /* sectors per cylinder */
266
267 #define SDEBUG_MAX_PARTS 4
268
269 #define SCSI_DEBUG_MAX_CMD_LEN 32
270
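/* Logical block provisioning is considered enabled when any of the lbpu,
 * lbpws or lbpws10 parameters is set and fake_rw is off. */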
271 static unsigned int scsi_debug_lbp(void)
272 {
273         return ((0 == scsi_debug_fake_rw) &&
274                 (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
275 }
276
277 struct sdebug_dev_info {
278         struct list_head dev_list;
279         unsigned int channel;
280         unsigned int target;
281         u64 lun;
282         struct sdebug_host_info *sdbg_host;
283         u64 wlun;
284         unsigned long uas_bm[1];
285         atomic_t num_in_q;
286         char stopped;
287         char used;
288 };
289
290 struct sdebug_host_info {
291         struct list_head host_list;
292         struct Scsi_Host *shost;
293         struct device dev;
294         struct list_head dev_info_list;
295 };
296
297 #define to_sdebug_host(d)       \
298         container_of(d, struct sdebug_host_info, dev)
299
300 static LIST_HEAD(sdebug_host_list);
301 static DEFINE_SPINLOCK(sdebug_host_list_lock);
302
303
304 struct sdebug_hrtimer {         /* ... is derived from hrtimer */
305         struct hrtimer hrt;     /* must be first element */
306         int qa_indx;
307 };
308
309 struct sdebug_queued_cmd {
310         /* in_use flagged by a bit in queued_in_use_bm[] */
311         struct timer_list *cmnd_timerp;
312         struct tasklet_struct *tletp;
313         struct sdebug_hrtimer *sd_hrtp;
314         struct scsi_cmnd * a_cmnd;
315 };
316 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
317 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
318
319
320 static unsigned char * fake_storep;     /* ramdisk storage */
321 static struct sd_dif_tuple *dif_storep; /* protection info */
322 static void *map_storep;                /* provisioning map */
323
324 static unsigned long map_size;
325 static int num_aborts;
326 static int num_dev_resets;
327 static int num_target_resets;
328 static int num_bus_resets;
329 static int num_host_resets;
330 static int dix_writes;
331 static int dix_reads;
332 static int dif_errors;
333
334 static DEFINE_SPINLOCK(queued_arr_lock);
335 static DEFINE_RWLOCK(atomic_rw);
336
337 static char sdebug_proc_name[] = MY_NAME;
338 static const char *my_name = MY_NAME;
339
340 static struct bus_type pseudo_lld_bus;
341
342 static struct device_driver sdebug_driverfs_driver = {
343         .name           = sdebug_proc_name,
344         .bus            = &pseudo_lld_bus,
345 };
346
347 static const int check_condition_result =
348                 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
349
350 static const int illegal_condition_result =
351         (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
352
353 static const int device_qfull_result =
354         (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
355
356 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
357                                      0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
358                                      0, 0, 0, 0};
359 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
360                                     0, 0, 0x2, 0x4b};
361 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
362                                    0, 0, 0x0, 0x0};
363
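/* Map an LBA to its backing location in the ramdisk. The store wraps
 * modulo sdebug_store_sectors, so a capacity inflated via virtual_gb
 * reuses the same backing RAM. */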
364 static void *fake_store(unsigned long long lba)
365 {
366         lba = do_div(lba, sdebug_store_sectors);
367
368         return fake_storep + lba * scsi_debug_sector_size;
369 }
370
371 static struct sd_dif_tuple *dif_store(sector_t sector)
372 {
373         sector = do_div(sector, sdebug_store_sectors);
374
375         return dif_storep + sector;
376 }
377
378 static int sdebug_add_adapter(void);
379 static void sdebug_remove_adapter(void);
380
381 static void sdebug_max_tgts_luns(void)
382 {
383         struct sdebug_host_info *sdbg_host;
384         struct Scsi_Host *hpnt;
385
386         spin_lock(&sdebug_host_list_lock);
387         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
388                 hpnt = sdbg_host->shost;
389                 if ((hpnt->this_id >= 0) &&
390                     (scsi_debug_num_tgts > hpnt->this_id))
391                         hpnt->max_id = scsi_debug_num_tgts + 1;
392                 else
393                         hpnt->max_id = scsi_debug_num_tgts;
394                 /* scsi_debug_max_luns; */
395                 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
396         }
397         spin_unlock(&sdebug_host_list_lock);
398 }
399
400 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
401
402 /* Set in_bit to -1 to indicate no bit position of invalid field */
403 static void
404 mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
405                      int in_byte, int in_bit)
406 {
407         unsigned char *sbuff;
408         u8 sks[4];
409         int sl, asc;
410
411         sbuff = scp->sense_buffer;
412         if (!sbuff) {
413                 sdev_printk(KERN_ERR, scp->device,
414                             "%s: sense_buffer is NULL\n", __func__);
415                 return;
416         }
417         asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
418         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
419         scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
420                                 asc, 0);
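        /*
         * Build the SENSE KEY SPECIFIC field (SPC-4): byte 0 carries SKSV
         * (0x80), C/D (0x40 when the bad field is in the CDB rather than
         * the parameter list), BPV (0x08) plus the bit pointer in its low
         * three bits; bytes 1 and 2 carry the big-endian field pointer.
         */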
421         memset(sks, 0, sizeof(sks));
422         sks[0] = 0x80;
423         if (c_d)
424                 sks[0] |= 0x40;
425         if (in_bit >= 0) {
426                 sks[0] |= 0x8;
427                 sks[0] |= 0x7 & in_bit;
428         }
429         put_unaligned_be16(in_byte, sks + 1);
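        /*
         * Descriptor-format sense gets a sense-key-specific descriptor
         * (type 0x2, additional length 6) appended; fixed-format sense
         * carries the same three bytes at offsets 15..17.
         */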
430         if (scsi_debug_dsense) {
431                 sl = sbuff[7] + 8;
432                 sbuff[7] = sl;
433                 sbuff[sl] = 0x2;
434                 sbuff[sl + 1] = 0x6;
435                 memcpy(sbuff + sl + 4, sks, 3);
436         } else
437                 memcpy(sbuff + 15, sks, 3);
438         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
439                 sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
440                             "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
441                             my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
442 }
443
444 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
445 {
446         unsigned char *sbuff;
447
448         sbuff = scp->sense_buffer;
449         if (!sbuff) {
450                 sdev_printk(KERN_ERR, scp->device,
451                             "%s: sense_buffer is NULL\n", __func__);
452                 return;
453         }
454         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
455
456         scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
457
458         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
459                 sdev_printk(KERN_INFO, scp->device,
460                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
461                             my_name, key, asc, asq);
462 }
463
464 static void
465 mk_sense_invalid_opcode(struct scsi_cmnd *scp)
466 {
467         mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
468 }
469
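/* Decode the starting LBA and transfer length from the CDB; for 32 byte
 * variable-length CDBs also pick up the expected initial logical block
 * reference tag (ei_lba). Field offsets follow SBC-3. */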
470 static void get_data_transfer_info(unsigned char *cmd,
471                                    unsigned long long *lba, unsigned int *num,
472                                    u32 *ei_lba)
473 {
474         *ei_lba = 0;
475
476         switch (*cmd) {
477         case VARIABLE_LENGTH_CMD:
478                 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
479                         (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
480                         (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
481                         (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
482
483                 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
484                         (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
485
486                 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
487                         (u32)cmd[28] << 24;
488                 break;
489
490         case WRITE_SAME_16:
491         case WRITE_16:
492         case READ_16:
493                 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
494                         (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
495                         (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
496                         (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
497
498                 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
499                         (u32)cmd[10] << 24;
500                 break;
501         case WRITE_12:
502         case READ_12:
503                 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
504                         (u32)cmd[2] << 24;
505
506                 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
507                         (u32)cmd[6] << 24;
508                 break;
509         case WRITE_SAME:
510         case WRITE_10:
511         case READ_10:
512         case XDWRITEREAD_10:
513                 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
514                         (u32)cmd[2] << 24;
515
516                 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
517                 break;
518         case WRITE_6:
519         case READ_6:
520                 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
521                         (u32)(cmd[1] & 0x1f) << 16;
522                 *num = (0 == cmd[4]) ? 256 : cmd[4];
523                 break;
524         default:
525                 break;
526         }
527 }
528
529 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
530 {
531         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
532                 if (0x1261 == cmd)
533                         sdev_printk(KERN_INFO, dev,
534                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
535                 else if (0x5331 == cmd)
536                         sdev_printk(KERN_INFO, dev,
537                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
538                                     __func__);
539                 else
540                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
541                                     __func__, cmd);
542         }
543         return -EINVAL;
544         /* return -ENOTTY; // correct return but upsets fdisk */
545 }
546
547 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
548                            struct sdebug_dev_info * devip)
549 {
550         int k;
551         bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
552
553         k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
554         if (k != SDEBUG_NUM_UAS) {
555                 const char *cp = NULL;
556
557                 switch (k) {
558                 case SDEBUG_UA_POR:
559                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
560                                         UA_RESET_ASC, POWER_ON_RESET_ASCQ);
561                         if (debug)
562                                 cp = "power on reset";
563                         break;
564                 case SDEBUG_UA_BUS_RESET:
565                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
566                                         UA_RESET_ASC, BUS_RESET_ASCQ);
567                         if (debug)
568                                 cp = "bus reset";
569                         break;
570                 case SDEBUG_UA_MODE_CHANGED:
571                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
572                                         UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
573                         if (debug)
574                                 cp = "mode parameters changed";
575                         break;
576                 default:
577                         pr_warn("%s: unexpected unit attention code=%d\n",
578                                 __func__, k);
579                         if (debug)
580                                 cp = "unknown";
581                         break;
582                 }
583                 clear_bit(k, devip->uas_bm);
584                 if (debug)
585                         sdev_printk(KERN_INFO, SCpnt->device,
586                                    "%s reports: Unit attention: %s\n",
587                                    my_name, cp);
588                 return check_condition_result;
589         }
590         if ((UAS_TUR == uas_only) && devip->stopped) {
591                 mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
592                                 0x2);
593                 if (debug)
594                         sdev_printk(KERN_INFO, SCpnt->device,
595                                     "%s reports: Not ready: %s\n", my_name,
596                                     "initializing command required");
597                 return check_condition_result;
598         }
599         return 0;
600 }
601
602 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
603 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
604                                 int arr_len)
605 {
606         int act_len;
607         struct scsi_data_buffer *sdb = scsi_in(scp);
608
609         if (!sdb->length)
610                 return 0;
611         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
612                 return (DID_ERROR << 16);
613
614         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
615                                       arr, arr_len);
616         sdb->resid = scsi_bufflen(scp) - act_len;
617
618         return 0;
619 }
620
621 /* Returns number of bytes fetched into 'arr' or -1 if error. */
622 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
623                                int arr_len)
624 {
625         if (!scsi_bufflen(scp))
626                 return 0;
627         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
628                 return -1;
629
630         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
631 }
632
633
634 static const char * inq_vendor_id = "Linux   ";
635 static const char * inq_product_id = "scsi_debug      ";
636 static const char *inq_product_rev = "0184";    /* version less '.' */
637
638 /* Device identification VPD page. Returns number of bytes placed in arr */
639 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
640                            int target_dev_id, int dev_id_num,
641                            const char * dev_id_str,
642                            int dev_id_str_len)
643 {
644         int num, port_a;
645         char b[32];
646
647         port_a = target_dev_id + 1;
648         /* T10 vendor identifier field format (faked) */
649         arr[0] = 0x2;   /* ASCII */
650         arr[1] = 0x1;
651         arr[2] = 0x0;
652         memcpy(&arr[4], inq_vendor_id, 8);
653         memcpy(&arr[12], inq_product_id, 16);
654         memcpy(&arr[28], dev_id_str, dev_id_str_len);
655         num = 8 + 16 + dev_id_str_len;
656         arr[3] = num;
657         num += 4;
658         if (dev_id_num >= 0) {
659                 /* NAA-5, Logical unit identifier (binary) */
660                 arr[num++] = 0x1;       /* binary (not necessarily sas) */
661                 arr[num++] = 0x3;       /* PIV=0, lu, naa */
662                 arr[num++] = 0x0;
663                 arr[num++] = 0x8;
664                 arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
665                 arr[num++] = 0x33;
666                 arr[num++] = 0x33;
667                 arr[num++] = 0x30;
668                 arr[num++] = (dev_id_num >> 24);
669                 arr[num++] = (dev_id_num >> 16) & 0xff;
670                 arr[num++] = (dev_id_num >> 8) & 0xff;
671                 arr[num++] = dev_id_num & 0xff;
672                 /* Target relative port number */
673                 arr[num++] = 0x61;      /* proto=sas, binary */
674                 arr[num++] = 0x94;      /* PIV=1, target port, rel port */
675                 arr[num++] = 0x0;       /* reserved */
676                 arr[num++] = 0x4;       /* length */
677                 arr[num++] = 0x0;       /* reserved */
678                 arr[num++] = 0x0;       /* reserved */
679                 arr[num++] = 0x0;
680                 arr[num++] = 0x1;       /* relative port A */
681         }
682         /* NAA-5, Target port identifier */
683         arr[num++] = 0x61;      /* proto=sas, binary */
684         arr[num++] = 0x93;      /* piv=1, target port, naa */
685         arr[num++] = 0x0;
686         arr[num++] = 0x8;
687         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
688         arr[num++] = 0x22;
689         arr[num++] = 0x22;
690         arr[num++] = 0x20;
691         arr[num++] = (port_a >> 24);
692         arr[num++] = (port_a >> 16) & 0xff;
693         arr[num++] = (port_a >> 8) & 0xff;
694         arr[num++] = port_a & 0xff;
695         /* NAA-5, Target port group identifier */
696         arr[num++] = 0x61;      /* proto=sas, binary */
697         arr[num++] = 0x95;      /* piv=1, target port group id */
698         arr[num++] = 0x0;
699         arr[num++] = 0x4;
700         arr[num++] = 0;
701         arr[num++] = 0;
702         arr[num++] = (port_group_id >> 8) & 0xff;
703         arr[num++] = port_group_id & 0xff;
704         /* NAA-5, Target device identifier */
705         arr[num++] = 0x61;      /* proto=sas, binary */
706         arr[num++] = 0xa3;      /* piv=1, target device, naa */
707         arr[num++] = 0x0;
708         arr[num++] = 0x8;
709         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
710         arr[num++] = 0x22;
711         arr[num++] = 0x22;
712         arr[num++] = 0x20;
713         arr[num++] = (target_dev_id >> 24);
714         arr[num++] = (target_dev_id >> 16) & 0xff;
715         arr[num++] = (target_dev_id >> 8) & 0xff;
716         arr[num++] = target_dev_id & 0xff;
717         /* SCSI name string: Target device identifier */
718         arr[num++] = 0x63;      /* proto=sas, UTF-8 */
719         arr[num++] = 0xa8;      /* piv=1, target device, SCSI name string */
720         arr[num++] = 0x0;
721         arr[num++] = 24;
722         memcpy(arr + num, "naa.52222220", 12);
723         num += 12;
724         snprintf(b, sizeof(b), "%08X", target_dev_id);
725         memcpy(arr + num, b, 8);
726         num += 8;
727         memset(arr + num, 0, 4);
728         num += 4;
729         return num;
730 }
731
732
733 static unsigned char vpd84_data[] = {
734 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
735     0x22,0x22,0x22,0x0,0xbb,0x1,
736     0x22,0x22,0x22,0x0,0xbb,0x2,
737 };
738
739 /*  Software interface identification VPD page */
740 static int inquiry_evpd_84(unsigned char * arr)
741 {
742         memcpy(arr, vpd84_data, sizeof(vpd84_data));
743         return sizeof(vpd84_data);
744 }
745
746 /* Management network addresses VPD page */
747 static int inquiry_evpd_85(unsigned char * arr)
748 {
749         int num = 0;
750         const char * na1 = "https://www.kernel.org/config";
751         const char * na2 = "http://www.kernel.org/log";
752         int plen, olen;
753
754         arr[num++] = 0x1;       /* lu, storage config */
755         arr[num++] = 0x0;       /* reserved */
756         arr[num++] = 0x0;
757         olen = strlen(na1);
758         plen = olen + 1;
759         if (plen % 4)
760                 plen = ((plen / 4) + 1) * 4;
761         arr[num++] = plen;      /* length, null terminated, padded */
762         memcpy(arr + num, na1, olen);
763         memset(arr + num + olen, 0, plen - olen);
764         num += plen;
765
766         arr[num++] = 0x4;       /* lu, logging */
767         arr[num++] = 0x0;       /* reserved */
768         arr[num++] = 0x0;
769         olen = strlen(na2);
770         plen = olen + 1;
771         if (plen % 4)
772                 plen = ((plen / 4) + 1) * 4;
773         arr[num++] = plen;      /* length, null terminated, padded */
774         memcpy(arr + num, na2, olen);
775         memset(arr + num + olen, 0, plen - olen);
776         num += plen;
777
778         return num;
779 }
780
781 /* SCSI ports VPD page */
782 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
783 {
784         int num = 0;
785         int port_a, port_b;
786
787         port_a = target_dev_id + 1;
788         port_b = port_a + 1;
789         arr[num++] = 0x0;       /* reserved */
790         arr[num++] = 0x0;       /* reserved */
791         arr[num++] = 0x0;
792         arr[num++] = 0x1;       /* relative port 1 (primary) */
793         memset(arr + num, 0, 6);
794         num += 6;
795         arr[num++] = 0x0;
796         arr[num++] = 12;        /* length tp descriptor */
797         /* naa-5 target port identifier (A) */
798         arr[num++] = 0x61;      /* proto=sas, binary */
799         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
800         arr[num++] = 0x0;       /* reserved */
801         arr[num++] = 0x8;       /* length */
802         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
803         arr[num++] = 0x22;
804         arr[num++] = 0x22;
805         arr[num++] = 0x20;
806         arr[num++] = (port_a >> 24);
807         arr[num++] = (port_a >> 16) & 0xff;
808         arr[num++] = (port_a >> 8) & 0xff;
809         arr[num++] = port_a & 0xff;
810
811         arr[num++] = 0x0;       /* reserved */
812         arr[num++] = 0x0;       /* reserved */
813         arr[num++] = 0x0;
814         arr[num++] = 0x2;       /* relative port 2 (secondary) */
815         memset(arr + num, 0, 6);
816         num += 6;
817         arr[num++] = 0x0;
818         arr[num++] = 12;        /* length tp descriptor */
819         /* naa-5 target port identifier (B) */
820         arr[num++] = 0x61;      /* proto=sas, binary */
821         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
822         arr[num++] = 0x0;       /* reserved */
823         arr[num++] = 0x8;       /* length */
824         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
825         arr[num++] = 0x22;
826         arr[num++] = 0x22;
827         arr[num++] = 0x20;
828         arr[num++] = (port_b >> 24);
829         arr[num++] = (port_b >> 16) & 0xff;
830         arr[num++] = (port_b >> 8) & 0xff;
831         arr[num++] = port_b & 0xff;
832
833         return num;
834 }
835
836
837 static unsigned char vpd89_data[] = {
838 /* from 4th byte */ 0,0,0,0,
839 'l','i','n','u','x',' ',' ',' ',
840 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
841 '1','2','3','4',
842 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
843 0xec,0,0,0,
844 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
845 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
846 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
847 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
848 0x53,0x41,
849 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
850 0x20,0x20,
851 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
852 0x10,0x80,
853 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
854 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
855 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
856 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
857 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
858 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
859 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
860 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
861 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
862 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
863 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
864 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
865 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
866 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
867 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
868 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
869 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
870 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
871 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
872 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
873 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
874 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
875 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
876 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
877 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
878 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
879 };
880
881 /* ATA Information VPD page */
882 static int inquiry_evpd_89(unsigned char * arr)
883 {
884         memcpy(arr, vpd89_data, sizeof(vpd89_data));
885         return sizeof(vpd89_data);
886 }
887
888
889 static unsigned char vpdb0_data[] = {
890         /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
891         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
892         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
893         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
894 };
895
896 /* Block limits VPD page (SBC-3) */
897 static int inquiry_evpd_b0(unsigned char * arr)
898 {
899         unsigned int gran;
900
901         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
902
903         /* Optimal transfer length granularity */
904         gran = 1 << scsi_debug_physblk_exp;
905         arr[2] = (gran >> 8) & 0xff;
906         arr[3] = gran & 0xff;
907
908         /* Maximum Transfer Length */
909         if (sdebug_store_sectors > 0x400) {
910                 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
911                 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
912                 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
913                 arr[7] = sdebug_store_sectors & 0xff;
914         }
915
916         /* Optimal Transfer Length */
917         put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
918
919         if (scsi_debug_lbpu) {
920                 /* Maximum Unmap LBA Count */
921                 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
922
923                 /* Maximum Unmap Block Descriptor Count */
924                 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
925         }
926
927         /* Unmap Granularity Alignment */
928         if (scsi_debug_unmap_alignment) {
929                 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
930                 arr[28] |= 0x80; /* UGAVALID */
931         }
932
933         /* Optimal Unmap Granularity */
934         put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
935
936         /* Maximum WRITE SAME Length */
937         put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
938
939         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
942 }
943
944 /* Block device characteristics VPD page (SBC-3) */
945 static int inquiry_evpd_b1(unsigned char *arr)
946 {
947         memset(arr, 0, 0x3c);
948         arr[0] = 0;
949         arr[1] = 1;     /* non rotating medium (e.g. solid state) */
950         arr[2] = 0;
951         arr[3] = 5;     /* less than 1.8" */
952
953         return 0x3c;
954 }
955
956 /* Logical block provisioning VPD page (SBC-3) */
957 static int inquiry_evpd_b2(unsigned char *arr)
958 {
959         memset(arr, 0, 0x4);
960         arr[0] = 0;                     /* threshold exponent */
961
962         if (scsi_debug_lbpu)
963                 arr[1] = 1 << 7;
964
965         if (scsi_debug_lbpws)
966                 arr[1] |= 1 << 6;
967
968         if (scsi_debug_lbpws10)
969                 arr[1] |= 1 << 5;
970
971         if (scsi_debug_lbprz)
972                 arr[1] |= 1 << 2;
973
974         return 0x4;
975 }
976
977 #define SDEBUG_LONG_INQ_SZ 96
978 #define SDEBUG_MAX_INQ_ARR_SZ 584
979
980 static int resp_inquiry(struct scsi_cmnd *scp, int target,
981                         struct sdebug_dev_info * devip)
982 {
983         unsigned char pq_pdt;
984         unsigned char * arr;
985         unsigned char *cmd = scp->cmnd;
986         int alloc_len, n, ret;
987
988         alloc_len = (cmd[3] << 8) + cmd[4];
989         arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
990         if (! arr)
991                 return DID_REQUEUE << 16;
992         if (devip->wlun)
993                 pq_pdt = 0x1e;  /* present, wlun */
994         else if (scsi_debug_no_lun_0 && (0 == devip->lun))
995                 pq_pdt = 0x7f;  /* not present, no device type */
996         else
997                 pq_pdt = (scsi_debug_ptype & 0x1f);
998         arr[0] = pq_pdt;
999         if (0x2 & cmd[1]) {  /* CMDDT bit set */
1000                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1001                 kfree(arr);
1002                 return check_condition_result;
1003         } else if (0x1 & cmd[1]) {  /* EVPD bit set */
1004                 int lu_id_num, port_group_id, target_dev_id, len;
1005                 char lu_id_str[6];
1006                 int host_no = devip->sdbg_host->shost->host_no;
1007                 
1008                 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1009                     (devip->channel & 0x7f);
1010                 if (0 == scsi_debug_vpd_use_hostno)
1011                         host_no = 0;
1012                 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
1013                             (devip->target * 1000) + devip->lun);
1014                 target_dev_id = ((host_no + 1) * 2000) +
1015                                  (devip->target * 1000) - 3;
1016                 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1017                 if (0 == cmd[2]) { /* supported vital product data pages */
1018                         arr[1] = cmd[2];        /*sanity */
1019                         n = 4;
1020                         arr[n++] = 0x0;   /* this page */
1021                         arr[n++] = 0x80;  /* unit serial number */
1022                         arr[n++] = 0x83;  /* device identification */
1023                         arr[n++] = 0x84;  /* software interface ident. */
1024                         arr[n++] = 0x85;  /* management network addresses */
1025                         arr[n++] = 0x86;  /* extended inquiry */
1026                         arr[n++] = 0x87;  /* mode page policy */
1027                         arr[n++] = 0x88;  /* SCSI ports */
1028                         arr[n++] = 0x89;  /* ATA information */
1029                         arr[n++] = 0xb0;  /* Block limits (SBC) */
1030                         arr[n++] = 0xb1;  /* Block characteristics (SBC) */
1031                         if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1032                                 arr[n++] = 0xb2;
1033                         arr[3] = n - 4;   /* number of supported VPD pages */
1034                 } else if (0x80 == cmd[2]) { /* unit serial number */
1035                         arr[1] = cmd[2];        /*sanity */
1036                         arr[3] = len;
1037                         memcpy(&arr[4], lu_id_str, len);
1038                 } else if (0x83 == cmd[2]) { /* device identification */
1039                         arr[1] = cmd[2];        /*sanity */
1040                         arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
1041                                                  target_dev_id, lu_id_num,
1042                                                  lu_id_str, len);
1043                 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1044                         arr[1] = cmd[2];        /*sanity */
1045                         arr[3] = inquiry_evpd_84(&arr[4]);
1046                 } else if (0x85 == cmd[2]) { /* Management network addresses */
1047                         arr[1] = cmd[2];        /*sanity */
1048                         arr[3] = inquiry_evpd_85(&arr[4]);
1049                 } else if (0x86 == cmd[2]) { /* extended inquiry */
1050                         arr[1] = cmd[2];        /*sanity */
1051                         arr[3] = 0x3c;  /* number of following entries */
1052                         if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
1053                                 arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
1054                         else if (scsi_debug_dif)
1055                                 arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1056                         else
1057                                 arr[4] = 0x0;   /* no protection stuff */
1058                         arr[5] = 0x7;   /* head of q, ordered + simple q's */
1059                 } else if (0x87 == cmd[2]) { /* mode page policy */
1060                         arr[1] = cmd[2];        /*sanity */
1061                         arr[3] = 0x8;   /* number of following entries */
1062                         arr[4] = 0x2;   /* disconnect-reconnect mp */
1063                         arr[6] = 0x80;  /* mlus, shared */
1064                         arr[8] = 0x18;   /* protocol specific lu */
1065                         arr[10] = 0x82;  /* mlus, per initiator port */
1066                 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1067                         arr[1] = cmd[2];        /*sanity */
1068                         arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1069                 } else if (0x89 == cmd[2]) { /* ATA information */
1070                         arr[1] = cmd[2];        /*sanity */
1071                         n = inquiry_evpd_89(&arr[4]);
1072                         arr[2] = (n >> 8);
1073                         arr[3] = (n & 0xff);
1074                 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1075                         arr[1] = cmd[2];        /*sanity */
1076                         arr[3] = inquiry_evpd_b0(&arr[4]);
1077                 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1078                         arr[1] = cmd[2];        /*sanity */
1079                         arr[3] = inquiry_evpd_b1(&arr[4]);
1080                 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1081                         arr[1] = cmd[2];        /*sanity */
1082                         arr[3] = inquiry_evpd_b2(&arr[4]);
1083                 } else {
1084                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1085                         kfree(arr);
1086                         return check_condition_result;
1087                 }
1088                 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1089                 ret = fill_from_dev_buffer(scp, arr,
1090                             min(len, SDEBUG_MAX_INQ_ARR_SZ));
1091                 kfree(arr);
1092                 return ret;
1093         }
1094         /* drops through here for a standard inquiry */
1095         arr[1] = scsi_debug_removable ? 0x80 : 0;       /* Removable disk */
1096         arr[2] = scsi_debug_scsi_level;
1097         arr[3] = 2;    /* response_data_format==2 */
1098         arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1099         arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
1100         if (0 == scsi_debug_vpd_use_hostno)
1101                 arr[5] = 0x10; /* claim: implicit TGPS */
1102         arr[6] = 0x10; /* claim: MultiP */
1103         /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1104         arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1105         memcpy(&arr[8], inq_vendor_id, 8);
1106         memcpy(&arr[16], inq_product_id, 16);
1107         memcpy(&arr[32], inq_product_rev, 4);
1108         /* version descriptors (2 bytes each) follow */
1109         arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
1110         arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
1111         n = 62;
1112         if (scsi_debug_ptype == 0) {
1113                 arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1114         } else if (scsi_debug_ptype == 1) {
1115                 arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1116         }
1117         arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
1118         ret = fill_from_dev_buffer(scp, arr,
1119                             min(alloc_len, SDEBUG_LONG_INQ_SZ));
1120         kfree(arr);
1121         return ret;
1122 }
1123
1124 static int resp_requests(struct scsi_cmnd * scp,
1125                          struct sdebug_dev_info * devip)
1126 {
1127         unsigned char * sbuff;
1128         unsigned char *cmd = scp->cmnd;
1129         unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1130         int want_dsense;
1131         int len = 18;
1132
1133         memset(arr, 0, sizeof(arr));
1134         want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
1135         sbuff = scp->sense_buffer;
1136         if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1137                 if (want_dsense) {
1138                         arr[0] = 0x72;
1139                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
1140                         arr[2] = THRESHOLD_EXCEEDED;
1141                         arr[3] = 0xff;          /* TEST set and MRIE==6 */
1142                 } else {
1143                         arr[0] = 0x70;
1144                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
1145                         arr[7] = 0xa;           /* 18 byte sense buffer */
1146                         arr[12] = THRESHOLD_EXCEEDED;
1147                         arr[13] = 0xff;         /* TEST set and MRIE==6 */
1148                 }
1149         } else {
1150                 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1151                 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
1152                         /* DESC bit set and sense_buff in fixed format */
1153                         memset(arr, 0, sizeof(arr));
1154                         arr[0] = 0x72;
1155                         arr[1] = sbuff[2];     /* sense key */
1156                         arr[2] = sbuff[12];    /* asc */
1157                         arr[3] = sbuff[13];    /* ascq */
1158                         len = 8;
1159                 }
1160         }
1161         mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1162         return fill_from_dev_buffer(scp, arr, len);
1163 }
1164
1165 static int resp_start_stop(struct scsi_cmnd * scp,
1166                            struct sdebug_dev_info * devip)
1167 {
1168         unsigned char *cmd = scp->cmnd;
1169         int power_cond, errsts, start;
1170
1171         errsts = check_readiness(scp, UAS_ONLY, devip);
1172         if (errsts)
1173                 return errsts;
1174         power_cond = (cmd[4] & 0xf0) >> 4;
1175         if (power_cond) {
1176                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1177                 return check_condition_result;
1178         }
1179         start = cmd[4] & 1;
1180         if (start == devip->stopped)
1181                 devip->stopped = !start;
1182         return 0;
1183 }
1184
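/* If virtual_gb is greater than 0 the reported capacity is inflated beyond
 * the RAM store; accesses past the store then wrap around it (see
 * fake_store()). */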
1185 static sector_t get_sdebug_capacity(void)
1186 {
1187         if (scsi_debug_virtual_gb > 0)
1188                 return (sector_t)scsi_debug_virtual_gb *
1189                         (1073741824 / scsi_debug_sector_size);
1190         else
1191                 return sdebug_store_sectors;
1192 }
1193
1194 #define SDEBUG_READCAP_ARR_SZ 8
1195 static int resp_readcap(struct scsi_cmnd * scp,
1196                         struct sdebug_dev_info * devip)
1197 {
1198         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1199         unsigned int capac;
1200         int errsts;
1201
1202         errsts = check_readiness(scp, UAS_ONLY, devip);
1203         if (errsts)
1204                 return errsts;
1205         /* following just in case virtual_gb changed */
1206         sdebug_capacity = get_sdebug_capacity();
1207         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1208         if (sdebug_capacity < 0xffffffff) {
1209                 capac = (unsigned int)sdebug_capacity - 1;
1210                 arr[0] = (capac >> 24);
1211                 arr[1] = (capac >> 16) & 0xff;
1212                 arr[2] = (capac >> 8) & 0xff;
1213                 arr[3] = capac & 0xff;
1214         } else {
1215                 arr[0] = 0xff;
1216                 arr[1] = 0xff;
1217                 arr[2] = 0xff;
1218                 arr[3] = 0xff;
1219         }
1220         arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1221         arr[7] = scsi_debug_sector_size & 0xff;
1222         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1223 }
1224
1225 #define SDEBUG_READCAP16_ARR_SZ 32
1226 static int resp_readcap16(struct scsi_cmnd * scp,
1227                           struct sdebug_dev_info * devip)
1228 {
1229         unsigned char *cmd = scp->cmnd;
1230         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1231         unsigned long long capac;
1232         int errsts, k, alloc_len;
1233
1234         errsts = check_readiness(scp, UAS_ONLY, devip);
1235         if (errsts)
1236                 return errsts;
1237         alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1238                      + cmd[13]);
1239         /* following just in case virtual_gb changed */
1240         sdebug_capacity = get_sdebug_capacity();
1241         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1242         capac = sdebug_capacity - 1;
1243         for (k = 0; k < 8; ++k, capac >>= 8)
1244                 arr[7 - k] = capac & 0xff;
1245         arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1246         arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1247         arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1248         arr[11] = scsi_debug_sector_size & 0xff;
1249         arr[13] = scsi_debug_physblk_exp & 0xf;
1250         arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1251
1252         if (scsi_debug_lbp()) {
1253                 arr[14] |= 0x80; /* LBPME */
1254                 if (scsi_debug_lbprz)
1255                         arr[14] |= 0x40; /* LBPRZ */
1256         }
1257
1258         arr[15] = scsi_debug_lowest_aligned & 0xff;
1259
1260         if (scsi_debug_dif) {
1261                 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1262                 arr[12] |= 1; /* PROT_EN */
1263         }
1264
1265         return fill_from_dev_buffer(scp, arr,
1266                                     min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1267 }
1268
1269 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1270
1271 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1272                               struct sdebug_dev_info * devip)
1273 {
1274         unsigned char *cmd = scp->cmnd;
1275         unsigned char * arr;
1276         int host_no = devip->sdbg_host->shost->host_no;
1277         int n, ret, alen, rlen;
1278         int port_group_a, port_group_b, port_a, port_b;
1279
1280         alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1281                 + cmd[9]);
1282
1283         arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1284         if (! arr)
1285                 return DID_REQUEUE << 16;
1286         /*
1287          * EVPD page 0x88 states we have two ports, one
1288          * real and a fake port with no device connected.
1289          * So we create two port groups with one port each
1290          * and set the group with port B to unavailable.
1291          */
1292         port_a = 0x1; /* relative port A */
1293         port_b = 0x2; /* relative port B */
1294         port_group_a = (((host_no + 1) & 0x7f) << 8) +
1295             (devip->channel & 0x7f);
1296         port_group_b = (((host_no + 1) & 0x7f) << 8) +
1297             (devip->channel & 0x7f) + 0x80;
1298
1299         /*
1300          * The asymmetric access state is cycled according to the host number.
1301          */
1302         n = 4;
1303         if (0 == scsi_debug_vpd_use_hostno) {
1304             arr[n++] = host_no % 3; /* Asymm access state */
1305             arr[n++] = 0x0F; /* claim: all states are supported */
1306         } else {
1307             arr[n++] = 0x0; /* Active/Optimized path */
1308             arr[n++] = 0x01; /* claim: only support active/optimized paths */
1309         }
1310         arr[n++] = (port_group_a >> 8) & 0xff;
1311         arr[n++] = port_group_a & 0xff;
1312         arr[n++] = 0;    /* Reserved */
1313         arr[n++] = 0;    /* Status code */
1314         arr[n++] = 0;    /* Vendor unique */
1315         arr[n++] = 0x1;  /* One port per group */
1316         arr[n++] = 0;    /* Reserved */
1317         arr[n++] = 0;    /* Reserved */
1318         arr[n++] = (port_a >> 8) & 0xff;
1319         arr[n++] = port_a & 0xff;
1320         arr[n++] = 3;    /* Port unavailable */
1321         arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1322         arr[n++] = (port_group_b >> 8) & 0xff;
1323         arr[n++] = port_group_b & 0xff;
1324         arr[n++] = 0;    /* Reserved */
1325         arr[n++] = 0;    /* Status code */
1326         arr[n++] = 0;    /* Vendor unique */
1327         arr[n++] = 0x1;  /* One port per group */
1328         arr[n++] = 0;    /* Reserved */
1329         arr[n++] = 0;    /* Reserved */
1330         arr[n++] = (port_b >> 8) & 0xff;
1331         arr[n++] = port_b & 0xff;
1332
1333         rlen = n - 4;
1334         arr[0] = (rlen >> 24) & 0xff;
1335         arr[1] = (rlen >> 16) & 0xff;
1336         arr[2] = (rlen >> 8) & 0xff;
1337         arr[3] = rlen & 0xff;
1338
1339         /*
1340          * Return the smallest value of either
1341          * - The allocated length
1342          * - The constructed response length
1343          * - The maximum array size
1344          */
1345         rlen = min(alen,n);
1346         ret = fill_from_dev_buffer(scp, arr,
1347                                    min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1348         kfree(arr);
1349         return ret;
1350 }
1351
1352 /* <<Following mode page info copied from ST318451LW>> */
1353
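/*
 * Each resp_*_pg() helper below copies one mode page into the supplied
 * buffer and returns its length.  The pcontrol argument follows the
 * MODE SENSE PC field: 0 reports current values, 1 reports the
 * changeable mask (zeros, or the ch_* array where one exists) and 2
 * reports defaults (the d_* array where one exists); PC=3 (saved) is
 * rejected by resp_mode_sense() before these helpers run.
 */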
1354 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1355 {       /* Read-Write Error Recovery page for mode_sense */
1356         unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1357                                         5, 0, 0xff, 0xff};
1358
1359         memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1360         if (1 == pcontrol)
1361                 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1362         return sizeof(err_recov_pg);
1363 }
1364
1365 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1366 {       /* Disconnect-Reconnect page for mode_sense */
1367         unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1368                                          0, 0, 0, 0, 0, 0, 0, 0};
1369
1370         memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1371         if (1 == pcontrol)
1372                 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1373         return sizeof(disconnect_pg);
1374 }
1375
1376 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1377 {       /* Format device page for mode_sense */
1378         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1379                                      0, 0, 0, 0, 0, 0, 0, 0,
1380                                      0, 0, 0, 0, 0x40, 0, 0, 0};
1381
1382         memcpy(p, format_pg, sizeof(format_pg));
1383         p[10] = (sdebug_sectors_per >> 8) & 0xff;
1384         p[11] = sdebug_sectors_per & 0xff;
1385         p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1386         p[13] = scsi_debug_sector_size & 0xff;
1387         if (scsi_debug_removable)
1388                 p[20] |= 0x20; /* should agree with INQUIRY */
1389         if (1 == pcontrol)
1390                 memset(p + 2, 0, sizeof(format_pg) - 2);
1391         return sizeof(format_pg);
1392 }
1393
1394 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1395 {       /* Caching page for mode_sense */
1396         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1397                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1398         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1399                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1400
1401         if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
1402                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
1403         memcpy(p, caching_pg, sizeof(caching_pg));
1404         if (1 == pcontrol)
1405                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1406         else if (2 == pcontrol)
1407                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1408         return sizeof(caching_pg);
1409 }
1410
1411 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1412 {       /* Control mode page for mode_sense */
1413         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1414                                         0, 0, 0, 0};
1415         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1416                                      0, 0, 0x2, 0x4b};
1417
1418         if (scsi_debug_dsense)
1419                 ctrl_m_pg[2] |= 0x4;
1420         else
1421                 ctrl_m_pg[2] &= ~0x4;
1422
1423         if (scsi_debug_ato)
1424                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1425
1426         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1427         if (1 == pcontrol)
1428                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1429         else if (2 == pcontrol)
1430                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1431         return sizeof(ctrl_m_pg);
1432 }
1433
1434
1435 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1436 {       /* Informational Exceptions control mode page for mode_sense */
1437         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1438                                        0, 0, 0x0, 0x0};
1439         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1440                                       0, 0, 0x0, 0x0};
1441
1442         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1443         if (1 == pcontrol)
1444                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1445         else if (2 == pcontrol)
1446                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1447         return sizeof(iec_m_pg);
1448 }
1449
1450 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1451 {       /* SAS SSP mode page - short format for mode_sense */
1452         unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1453                 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1454
1455         memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1456         if (1 == pcontrol)
1457                 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1458         return sizeof(sas_sf_m_pg);
1459 }
1460
1461
1462 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1463                               int target_dev_id)
1464 {       /* SAS phy control and discover mode page for mode_sense */
1465         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1466                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1467                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1468                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1469                     0x2, 0, 0, 0, 0, 0, 0, 0,
1470                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1471                     0, 0, 0, 0, 0, 0, 0, 0,
1472                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1473                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1474                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1475                     0x3, 0, 0, 0, 0, 0, 0, 0,
1476                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1477                     0, 0, 0, 0, 0, 0, 0, 0,
1478                 };
1479         int port_a, port_b;
1480
1481         port_a = target_dev_id + 1;
1482         port_b = port_a + 1;
1483         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1484         p[20] = (port_a >> 24);
1485         p[21] = (port_a >> 16) & 0xff;
1486         p[22] = (port_a >> 8) & 0xff;
1487         p[23] = port_a & 0xff;
1488         p[48 + 20] = (port_b >> 24);
1489         p[48 + 21] = (port_b >> 16) & 0xff;
1490         p[48 + 22] = (port_b >> 8) & 0xff;
1491         p[48 + 23] = port_b & 0xff;
1492         if (1 == pcontrol)
1493                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1494         return sizeof(sas_pcd_m_pg);
1495 }
1496
1497 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1498 {       /* SAS SSP shared protocol specific port mode subpage */
1499         unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1500                     0, 0, 0, 0, 0, 0, 0, 0,
1501                 };
1502
1503         memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1504         if (1 == pcontrol)
1505                 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1506         return sizeof(sas_sha_m_pg);
1507 }
1508
1509 #define SDEBUG_MAX_MSENSE_SZ 256
1510
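/*
 * Build a MODE SENSE (6 or 10 byte) response: mode parameter header,
 * an optional short (8 byte) or long (16 byte, LLBAA) block descriptor
 * for disk-type devices when DBD is clear, then the requested mode
 * page(s).  fill_from_dev_buffer() truncates the result to the
 * allocation length.
 */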
1511 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1512                            struct sdebug_dev_info * devip)
1513 {
1514         unsigned char dbd, llbaa;
1515         int pcontrol, pcode, subpcode, bd_len;
1516         unsigned char dev_spec;
1517         int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1518         unsigned char * ap;
1519         unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1520         unsigned char *cmd = scp->cmnd;
1521
1522         errsts = check_readiness(scp, UAS_ONLY, devip);
1523         if (errsts)
1524                 return errsts;
1525         dbd = !!(cmd[1] & 0x8);
1526         pcontrol = (cmd[2] & 0xc0) >> 6;
1527         pcode = cmd[2] & 0x3f;
1528         subpcode = cmd[3];
1529         msense_6 = (MODE_SENSE == cmd[0]);
1530         llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1531         if ((0 == scsi_debug_ptype) && (0 == dbd))
1532                 bd_len = llbaa ? 16 : 8;
1533         else
1534                 bd_len = 0;
1535         alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1536         memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1537         if (0x3 == pcontrol) {  /* Saving values not supported */
1538                 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1539                 return check_condition_result;
1540         }
1541         target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1542                         (devip->target * 1000) - 3;
1543         /* set DPOFUA bit for disks */
1544         if (0 == scsi_debug_ptype)
1545                 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1546         else
1547                 dev_spec = 0x0;
1548         if (msense_6) {
1549                 arr[2] = dev_spec;
1550                 arr[3] = bd_len;
1551                 offset = 4;
1552         } else {
1553                 arr[3] = dev_spec;
1554                 if (16 == bd_len)
1555                         arr[4] = 0x1;   /* set LONGLBA bit */
1556                 arr[7] = bd_len;        /* assume 255 or less */
1557                 offset = 8;
1558         }
1559         ap = arr + offset;
1560         if ((bd_len > 0) && (!sdebug_capacity))
1561                 sdebug_capacity = get_sdebug_capacity();
1562
1563         if (8 == bd_len) {
1564                 if (sdebug_capacity > 0xfffffffe) {
1565                         ap[0] = 0xff;
1566                         ap[1] = 0xff;
1567                         ap[2] = 0xff;
1568                         ap[3] = 0xff;
1569                 } else {
1570                         ap[0] = (sdebug_capacity >> 24) & 0xff;
1571                         ap[1] = (sdebug_capacity >> 16) & 0xff;
1572                         ap[2] = (sdebug_capacity >> 8) & 0xff;
1573                         ap[3] = sdebug_capacity & 0xff;
1574                 }
1575                 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1576                 ap[7] = scsi_debug_sector_size & 0xff;
1577                 offset += bd_len;
1578                 ap = arr + offset;
1579         } else if (16 == bd_len) {
1580                 unsigned long long capac = sdebug_capacity;
1581
1582                 for (k = 0; k < 8; ++k, capac >>= 8)
1583                         ap[7 - k] = capac & 0xff;
1584                 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1585                 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1586                 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1587                 ap[15] = scsi_debug_sector_size & 0xff;
1588                 offset += bd_len;
1589                 ap = arr + offset;
1590         }
1591
1592         if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1593                 /* TODO: Control Extension page */
1594                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1595                 return check_condition_result;
1596         }
1597         switch (pcode) {
1598         case 0x1:       /* Read-Write error recovery page, direct access */
1599                 len = resp_err_recov_pg(ap, pcontrol, target);
1600                 offset += len;
1601                 break;
1602         case 0x2:       /* Disconnect-Reconnect page, all devices */
1603                 len = resp_disconnect_pg(ap, pcontrol, target);
1604                 offset += len;
1605                 break;
1606         case 0x3:       /* Format device page, direct access */
1607                 len = resp_format_pg(ap, pcontrol, target);
1608                 offset += len;
1609                 break;
1610         case 0x8:       /* Caching page, direct access */
1611                 len = resp_caching_pg(ap, pcontrol, target);
1612                 offset += len;
1613                 break;
1614         case 0xa:       /* Control Mode page, all devices */
1615                 len = resp_ctrl_m_pg(ap, pcontrol, target);
1616                 offset += len;
1617                 break;
1618         case 0x19:      /* protocol specific port; subpage 1: SAS phy control+discover */
1619                 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1620                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1621                         return check_condition_result;
1622                 }
1623                 len = 0;
1624                 if ((0x0 == subpcode) || (0xff == subpcode))
1625                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1626                 if ((0x1 == subpcode) || (0xff == subpcode))
1627                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1628                                                   target_dev_id);
1629                 if ((0x2 == subpcode) || (0xff == subpcode))
1630                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
1631                 offset += len;
1632                 break;
1633         case 0x1c:      /* Informational Exceptions Mode page, all devices */
1634                 len = resp_iec_m_pg(ap, pcontrol, target);
1635                 offset += len;
1636                 break;
1637         case 0x3f:      /* Read all Mode pages */
1638                 if ((0 == subpcode) || (0xff == subpcode)) {
1639                         len = resp_err_recov_pg(ap, pcontrol, target);
1640                         len += resp_disconnect_pg(ap + len, pcontrol, target);
1641                         len += resp_format_pg(ap + len, pcontrol, target);
1642                         len += resp_caching_pg(ap + len, pcontrol, target);
1643                         len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1644                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1645                         if (0xff == subpcode) {
1646                                 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1647                                                   target, target_dev_id);
1648                                 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1649                         }
1650                         len += resp_iec_m_pg(ap + len, pcontrol, target);
1651                 } else {
1652                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1653                         return check_condition_result;
1654                 }
1655                 offset += len;
1656                 break;
1657         default:
1658                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
1659                 return check_condition_result;
1660         }
1661         if (msense_6)
1662                 arr[0] = offset - 1;
1663         else {
1664                 arr[0] = ((offset - 2) >> 8) & 0xff;
1665                 arr[1] = (offset - 2) & 0xff;
1666         }
1667         return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1668 }
1669
1670 #define SDEBUG_MAX_MSELECT_SZ 512
1671
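/*
 * Handle MODE SELECT (6 or 10 byte).  Only the first page in the
 * parameter list is examined; for the Caching, Control and
 * Informational Exceptions pages its contents are copied into the
 * corresponding static current-values page and a MODE PARAMETERS
 * CHANGED unit attention is queued.  Other pages, or a page length
 * mismatch, draw a CHECK CONDITION.
 */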
1672 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1673                             struct sdebug_dev_info * devip)
1674 {
1675         int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1676         int param_len, res, errsts, mpage;
1677         unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1678         unsigned char *cmd = scp->cmnd;
1679
1680         errsts = check_readiness(scp, UAS_ONLY, devip);
1681         if (errsts)
1682                 return errsts;
1683         memset(arr, 0, sizeof(arr));
1684         pf = cmd[1] & 0x10;
1685         sp = cmd[1] & 0x1;
1686         param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1687         if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1688                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
1689                 return check_condition_result;
1690         }
1691         res = fetch_to_dev_buffer(scp, arr, param_len);
1692         if (-1 == res)
1693                 return (DID_ERROR << 16);
1694         else if ((res < param_len) &&
1695                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1696                 sdev_printk(KERN_INFO, scp->device,
1697                             "%s: cdb indicated=%d, IO sent=%d bytes\n",
1698                             __func__, param_len, res);
1699         md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1700         bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1701         if (md_len > 2) {
1702                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
1703                 return check_condition_result;
1704         }
1705         off = bd_len + (mselect6 ? 4 : 8);
1706         mpage = arr[off] & 0x3f;
1707         ps = !!(arr[off] & 0x80);
1708         if (ps) {
1709                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
1710                 return check_condition_result;
1711         }
1712         spf = !!(arr[off] & 0x40);
1713         pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1714                        (arr[off + 1] + 2);
1715         if ((pg_len + off) > param_len) {
1716                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1717                                 PARAMETER_LIST_LENGTH_ERR, 0);
1718                 return check_condition_result;
1719         }
1720         switch (mpage) {
1721         case 0x8:      /* Caching Mode page */
1722                 if (caching_pg[1] == arr[off + 1]) {
1723                         memcpy(caching_pg + 2, arr + off + 2,
1724                                sizeof(caching_pg) - 2);
1725                         goto set_mode_changed_ua;
1726                 }
1727                 break;
1728         case 0xa:      /* Control Mode page */
1729                 if (ctrl_m_pg[1] == arr[off + 1]) {
1730                         memcpy(ctrl_m_pg + 2, arr + off + 2,
1731                                sizeof(ctrl_m_pg) - 2);
1732                         scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1733                         goto set_mode_changed_ua;
1734                 }
1735                 break;
1736         case 0x1c:      /* Informational Exceptions Mode page */
1737                 if (iec_m_pg[1] == arr[off + 1]) {
1738                         memcpy(iec_m_pg + 2, arr + off + 2,
1739                                sizeof(iec_m_pg) - 2);
1740                         goto set_mode_changed_ua;
1741                 }
1742                 break;
1743         default:
1744                 break;
1745         }
1746         mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
1747         return check_condition_result;
1748 set_mode_changed_ua:
1749         set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
1750         return 0;
1751 }
1752
1753 static int resp_temp_l_pg(unsigned char * arr)
1754 {
1755         unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1756                                      0x0, 0x1, 0x3, 0x2, 0x0, 65,
1757                 };
1758
1759         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1760         return sizeof(temp_l_pg);
1761 }
1762
1763 static int resp_ie_l_pg(unsigned char * arr)
1764 {
1765         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1766                 };
1767
1768         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1769         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
1770                 arr[4] = THRESHOLD_EXCEEDED;
1771                 arr[5] = 0xff;
1772         }
1773         return sizeof(ie_l_pg);
1774 }
1775
1776 #define SDEBUG_MAX_LSENSE_SZ 512
1777
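/*
 * Handle LOG SENSE.  With subpage 0 the supported pages (0x00),
 * temperature (0x0d) and informational exceptions (0x2f) pages are
 * available; with subpage 0xff the corresponding page/subpage lists
 * are returned instead.  The PPC and SP bits must be zero.
 */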
1778 static int resp_log_sense(struct scsi_cmnd * scp,
1779                           struct sdebug_dev_info * devip)
1780 {
1781         int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1782         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1783         unsigned char *cmd = scp->cmnd;
1784
1785         errsts = check_readiness(scp, UAS_ONLY, devip);
1786         if (errsts)
1787                 return errsts;
1788         memset(arr, 0, sizeof(arr));
1789         ppc = cmd[1] & 0x2;
1790         sp = cmd[1] & 0x1;
1791         if (ppc || sp) {
1792                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
1793                 return check_condition_result;
1794         }
1795         pcontrol = (cmd[2] & 0xc0) >> 6;
1796         pcode = cmd[2] & 0x3f;
1797         subpcode = cmd[3] & 0xff;
1798         alloc_len = (cmd[7] << 8) + cmd[8];
1799         arr[0] = pcode;
1800         if (0 == subpcode) {
1801                 switch (pcode) {
1802                 case 0x0:       /* Supported log pages log page */
1803                         n = 4;
1804                         arr[n++] = 0x0;         /* this page */
1805                         arr[n++] = 0xd;         /* Temperature */
1806                         arr[n++] = 0x2f;        /* Informational exceptions */
1807                         arr[3] = n - 4;
1808                         break;
1809                 case 0xd:       /* Temperature log page */
1810                         arr[3] = resp_temp_l_pg(arr + 4);
1811                         break;
1812                 case 0x2f:      /* Informational exceptions log page */
1813                         arr[3] = resp_ie_l_pg(arr + 4);
1814                         break;
1815                 default:
1816                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
1817                         return check_condition_result;
1818                 }
1819         } else if (0xff == subpcode) {
1820                 arr[0] |= 0x40;
1821                 arr[1] = subpcode;
1822                 switch (pcode) {
1823                 case 0x0:       /* Supported log pages and subpages log page */
1824                         n = 4;
1825                         arr[n++] = 0x0;
1826                         arr[n++] = 0x0;         /* 0,0 page */
1827                         arr[n++] = 0x0;
1828                         arr[n++] = 0xff;        /* this page */
1829                         arr[n++] = 0xd;
1830                         arr[n++] = 0x0;         /* Temperature */
1831                         arr[n++] = 0x2f;
1832                         arr[n++] = 0x0; /* Informational exceptions */
1833                         arr[3] = n - 4;
1834                         break;
1835                 case 0xd:       /* Temperature subpages */
1836                         n = 4;
1837                         arr[n++] = 0xd;
1838                         arr[n++] = 0x0;         /* Temperature */
1839                         arr[3] = n - 4;
1840                         break;
1841                 case 0x2f:      /* Informational exceptions subpages */
1842                         n = 4;
1843                         arr[n++] = 0x2f;
1844                         arr[n++] = 0x0;         /* Informational exceptions */
1845                         arr[3] = n - 4;
1846                         break;
1847                 default:
1848                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
1849                         return check_condition_result;
1850                 }
1851         } else {
1852                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1853                 return check_condition_result;
1854         }
1855         len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1856         return fill_from_dev_buffer(scp, arr,
1857                     min(len, SDEBUG_MAX_LSENSE_SZ));
1858 }
1859
1860 static int check_device_access_params(struct scsi_cmnd *scp,
1861                                       unsigned long long lba, unsigned int num)
1862 {
1863         if (lba + num > sdebug_capacity) {
1864                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
1865                 return check_condition_result;
1866         }
1867         /* transfer length excessive (tie in to block limits VPD page) */
1868         if (num > sdebug_store_sectors) {
1869                 /* needs work to find which cdb byte 'num' comes from */
1870                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1871                 return check_condition_result;
1872         }
1873         return 0;
1874 }
1875
1876 /* Returns number of bytes copied or -1 if error. */
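/*
 * The backing ram store holds sdebug_store_sectors blocks, which may be
 * fewer than the advertised capacity (e.g. when virtual_gb is set), so
 * the LBA is reduced modulo sdebug_store_sectors and a transfer that
 * runs off the end of the store wraps around to its start (the second
 * copy below).
 */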
1877 static int do_device_access(struct scsi_cmnd *scmd,
1878                             unsigned long long lba, unsigned int num, int write)
1879 {
1880         int ret;
1881         unsigned long long block, rest = 0;
1882         struct scsi_data_buffer *sdb;
1883         enum dma_data_direction dir;
1884         size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
1885                        off_t);
1886
1887         if (write) {
1888                 sdb = scsi_out(scmd);
1889                 dir = DMA_TO_DEVICE;
1890                 func = sg_pcopy_to_buffer;
1891         } else {
1892                 sdb = scsi_in(scmd);
1893                 dir = DMA_FROM_DEVICE;
1894                 func = sg_pcopy_from_buffer;
1895         }
1896
1897         if (!sdb->length)
1898                 return 0;
1899         if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
1900                 return -1;
1901
1902         block = do_div(lba, sdebug_store_sectors);
1903         if (block + num > sdebug_store_sectors)
1904                 rest = block + num - sdebug_store_sectors;
1905
1906         ret = func(sdb->table.sgl, sdb->table.nents,
1907                    fake_storep + (block * scsi_debug_sector_size),
1908                    (num - rest) * scsi_debug_sector_size, 0);
1909         if (ret != (num - rest) * scsi_debug_sector_size)
1910                 return ret;
1911
1912         if (rest) {
1913                 ret += func(sdb->table.sgl, sdb->table.nents,
1914                             fake_storep, rest * scsi_debug_sector_size,
1915                             (num - rest) * scsi_debug_sector_size);
1916         }
1917
1918         return ret;
1919 }
1920
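/*
 * Protection information checks: the guard tag is either an IP
 * checksum or a T10 CRC depending on scsi_debug_guard, and the
 * reference tag is checked against the low 32 bits of the LBA for DIF
 * type 1 or against the expected initial LBA for type 2.  A non-zero
 * return value is used by the callers as the ASCQ accompanying
 * ASC 0x10.
 */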
1921 static __be16 dif_compute_csum(const void *buf, int len)
1922 {
1923         __be16 csum;
1924
1925         if (scsi_debug_guard)
1926                 csum = (__force __be16)ip_compute_csum(buf, len);
1927         else
1928                 csum = cpu_to_be16(crc_t10dif(buf, len));
1929
1930         return csum;
1931 }
1932
1933 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
1934                       sector_t sector, u32 ei_lba)
1935 {
1936         __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
1937
1938         if (sdt->guard_tag != csum) {
1939                 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
1940                         __func__,
1941                         (unsigned long)sector,
1942                         be16_to_cpu(sdt->guard_tag),
1943                         be16_to_cpu(csum));
1944                 return 0x01;
1945         }
1946         if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1947             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1948                 pr_err("%s: REF check failed on sector %lu\n",
1949                         __func__, (unsigned long)sector);
1950                 return 0x03;
1951         }
1952         if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1953             be32_to_cpu(sdt->ref_tag) != ei_lba) {
1954                 pr_err("%s: REF check failed on sector %lu\n",
1955                         __func__, (unsigned long)sector);
1956                 return 0x03;
1957         }
1958         return 0;
1959 }
1960
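/*
 * Copy protection tuples between dif_storep and the command's
 * protection scatter-gather list (into the sgl on reads, out of it on
 * writes), wrapping around when the run crosses the end of the store.
 */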
1961 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
1962                           unsigned int sectors, bool read)
1963 {
1964         size_t resid;
1965         void *paddr;
1966         const void *dif_store_end = dif_storep + sdebug_store_sectors;
1967         struct sg_mapping_iter miter;
1968
1969         /* Bytes of protection data to copy into sgl */
1970         resid = sectors * sizeof(*dif_storep);
1971
1972         sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
1973                         scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
1974                         (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
1975
1976         while (sg_miter_next(&miter) && resid > 0) {
1977                 size_t len = min(miter.length, resid);
1978                 void *start = dif_store(sector);
1979                 size_t rest = 0;
1980
1981                 if (dif_store_end < start + len)
1982                         rest = start + len - dif_store_end;
1983
1984                 paddr = miter.addr;
1985
1986                 if (read)
1987                         memcpy(paddr, start, len - rest);
1988                 else
1989                         memcpy(start, paddr, len - rest);
1990
1991                 if (rest) {
1992                         if (read)
1993                                 memcpy(paddr + len - rest, dif_storep, rest);
1994                         else
1995                                 memcpy(dif_storep, paddr + len - rest, rest);
1996                 }
1997
1998                 sector += len / sizeof(*dif_storep);
1999                 resid -= len;
2000         }
2001         sg_miter_stop(&miter);
2002 }
2003
2004 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2005                             unsigned int sectors, u32 ei_lba)
2006 {
2007         unsigned int i;
2008         struct sd_dif_tuple *sdt;
2009         sector_t sector;
2010
2011         for (i = 0; i < sectors; i++, ei_lba++) {
2012                 int ret;
2013
2014                 sector = start_sec + i;
2015                 sdt = dif_store(sector);
2016
2017                 if (sdt->app_tag == cpu_to_be16(0xffff))
2018                         continue;
2019
2020                 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2021                 if (ret) {
2022                         dif_errors++;
2023                         return ret;
2024                 }
2025         }
2026
2027         dif_copy_prot(SCpnt, start_sec, sectors, true);
2028         dix_reads++;
2029
2030         return 0;
2031 }
2032
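/*
 * READ path: an unrecovered read error is injected for LBAs that
 * overlap the OPT_MEDIUM_ERR_ADDR window when SCSI_DEBUG_OPT_MEDIUM_ERR
 * is set; otherwise protection data is verified (when DIX is active)
 * and the requested blocks are copied out of the ram store.
 */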
2033 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
2034                      unsigned int num, u32 ei_lba)
2035 {
2036         unsigned long iflags;
2037         int ret;
2038
2039         ret = check_device_access_params(SCpnt, lba, num);
2040         if (ret)
2041                 return ret;
2042
2043         if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2044             (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2045             ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2046                 /* claim unrecoverable read error */
2047                 mk_sense_buffer(SCpnt, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2048                 /* set info field and valid bit for fixed format sense */
2049                 if (0x70 == (SCpnt->sense_buffer[0] & 0x7f)) {
2050                         SCpnt->sense_buffer[0] |= 0x80; /* Valid bit */
2051                         ret = (lba < OPT_MEDIUM_ERR_ADDR)
2052                               ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2053                         SCpnt->sense_buffer[3] = (ret >> 24) & 0xff;
2054                         SCpnt->sense_buffer[4] = (ret >> 16) & 0xff;
2055                         SCpnt->sense_buffer[5] = (ret >> 8) & 0xff;
2056                         SCpnt->sense_buffer[6] = ret & 0xff;
2057                 }
2058                 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
2059                 return check_condition_result;
2060         }
2061
2062         read_lock_irqsave(&atomic_rw, iflags);
2063
2064         /* DIX + T10 DIF */
2065         if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2066                 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
2067
2068                 if (prot_ret) {
2069                         read_unlock_irqrestore(&atomic_rw, iflags);
2070                         mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, prot_ret);
2071                         return illegal_condition_result;
2072                 }
2073         }
2074
2075         ret = do_device_access(SCpnt, lba, num, 0);
2076         read_unlock_irqrestore(&atomic_rw, iflags);
2077         if (ret == -1)
2078                 return DID_ERROR << 16;
2079
2080         scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret;
2081
2082         return 0;
2083 }
2084
2085 static void dump_sector(unsigned char *buf, int len)
2086 {
2087         int i, j, n;
2088
2089         pr_err(">>> Sector Dump <<<\n");
2090         for (i = 0 ; i < len ; i += 16) {
2091                 char b[128];
2092
2093                 for (j = 0, n = 0; j < 16; j++) {
2094                         unsigned char c = buf[i+j];
2095
2096                         if (c >= 0x20 && c < 0x7e)
2097                                 n += scnprintf(b + n, sizeof(b) - n,
2098                                                " %c ", buf[i+j]);
2099                         else
2100                                 n += scnprintf(b + n, sizeof(b) - n,
2101                                                "%02x ", buf[i+j]);
2102                 }
2103                 pr_err("%04d: %s\n", i, b);
2104         }
2105 }
2106
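/*
 * Walk the data and protection scatter-gather lists in lockstep,
 * verifying one protection tuple per logical block; on success the
 * tuples are copied into dif_storep before the caller writes the data
 * itself.
 */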
2107 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2108                              unsigned int sectors, u32 ei_lba)
2109 {
2110         int ret;
2111         struct sd_dif_tuple *sdt;
2112         void *daddr;
2113         sector_t sector = start_sec;
2114         int ppage_offset;
2115         int dpage_offset;
2116         struct sg_mapping_iter diter;
2117         struct sg_mapping_iter piter;
2118
2119         BUG_ON(scsi_sg_count(SCpnt) == 0);
2120         BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2121
2122         sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2123                         scsi_prot_sg_count(SCpnt),
2124                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2125         sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2126                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2127
2128         /* For each protection page */
2129         while (sg_miter_next(&piter)) {
2130                 dpage_offset = 0;
2131                 if (WARN_ON(!sg_miter_next(&diter))) {
2132                         ret = 0x01;
2133                         goto out;
2134                 }
2135
2136                 for (ppage_offset = 0; ppage_offset < piter.length;
2137                      ppage_offset += sizeof(struct sd_dif_tuple)) {
2138                         /* If we're at the end of the current
2139                          * data page, advance to the next one.
2140                          */
2141                         if (dpage_offset >= diter.length) {
2142                                 if (WARN_ON(!sg_miter_next(&diter))) {
2143                                         ret = 0x01;
2144                                         goto out;
2145                                 }
2146                                 dpage_offset = 0;
2147                         }
2148
2149                         sdt = piter.addr + ppage_offset;
2150                         daddr = diter.addr + dpage_offset;
2151
2152                         ret = dif_verify(sdt, daddr, sector, ei_lba);
2153                         if (ret) {
2154                                 dump_sector(daddr, scsi_debug_sector_size);
2155                                 goto out;
2156                         }
2157
2158                         sector++;
2159                         ei_lba++;
2160                         dpage_offset += scsi_debug_sector_size;
2161                 }
2162                 diter.consumed = dpage_offset;
2163                 sg_miter_stop(&diter);
2164         }
2165         sg_miter_stop(&piter);
2166
2167         dif_copy_prot(SCpnt, start_sec, sectors, false);
2168         dix_writes++;
2169
2170         return 0;
2171
2172 out:
2173         dif_errors++;
2174         sg_miter_stop(&diter);
2175         sg_miter_stop(&piter);
2176         return ret;
2177 }
2178
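/*
 * Logical block provisioning state is tracked in map_storep with one
 * bit per scsi_debug_unmap_granularity blocks; lba_to_map_index() and
 * map_index_to_lba() convert between LBAs and bit indices, taking the
 * configured unmap alignment into account.
 */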
2179 static unsigned long lba_to_map_index(sector_t lba)
2180 {
2181         if (scsi_debug_unmap_alignment) {
2182                 lba += scsi_debug_unmap_granularity -
2183                         scsi_debug_unmap_alignment;
2184         }
2185         do_div(lba, scsi_debug_unmap_granularity);
2186
2187         return lba;
2188 }
2189
2190 static sector_t map_index_to_lba(unsigned long index)
2191 {
2192         sector_t lba = index * scsi_debug_unmap_granularity;
2193
2194         if (scsi_debug_unmap_alignment) {
2195                 lba -= scsi_debug_unmap_granularity -
2196                         scsi_debug_unmap_alignment;
2197         }
2198
2199         return lba;
2200 }
2201
2202 static unsigned int map_state(sector_t lba, unsigned int *num)
2203 {
2204         sector_t end;
2205         unsigned int mapped;
2206         unsigned long index;
2207         unsigned long next;
2208
2209         index = lba_to_map_index(lba);
2210         mapped = test_bit(index, map_storep);
2211
2212         if (mapped)
2213                 next = find_next_zero_bit(map_storep, map_size, index);
2214         else
2215                 next = find_next_bit(map_storep, map_size, index);
2216
2217         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2218         *num = end - lba;
2219
2220         return mapped;
2221 }
2222
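/*
 * map_region() marks every granularity chunk touched by a write as
 * mapped.  unmap_region() below clears a chunk only when the request
 * covers it entirely, optionally zero-filling the data (lbprz) and
 * resetting its protection tuples to 0xff.
 */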
2223 static void map_region(sector_t lba, unsigned int len)
2224 {
2225         sector_t end = lba + len;
2226
2227         while (lba < end) {
2228                 unsigned long index = lba_to_map_index(lba);
2229
2230                 if (index < map_size)
2231                         set_bit(index, map_storep);
2232
2233                 lba = map_index_to_lba(index + 1);
2234         }
2235 }
2236
2237 static void unmap_region(sector_t lba, unsigned int len)
2238 {
2239         sector_t end = lba + len;
2240
2241         while (lba < end) {
2242                 unsigned long index = lba_to_map_index(lba);
2243
2244                 if (lba == map_index_to_lba(index) &&
2245                     lba + scsi_debug_unmap_granularity <= end &&
2246                     index < map_size) {
2247                         clear_bit(index, map_storep);
2248                         if (scsi_debug_lbprz) {
2249                                 memset(fake_storep +
2250                                        lba * scsi_debug_sector_size, 0,
2251                                        scsi_debug_sector_size *
2252                                        scsi_debug_unmap_granularity);
2253                         }
2254                         if (dif_storep) {
2255                                 memset(dif_storep + lba, 0xff,
2256                                        sizeof(*dif_storep) *
2257                                        scsi_debug_unmap_granularity);
2258                         }
2259                 }
2260                 lba = map_index_to_lba(index + 1);
2261         }
2262 }
2263
2264 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2265                       unsigned int num, u32 ei_lba)
2266 {
2267         unsigned long iflags;
2268         int ret;
2269
2270         ret = check_device_access_params(SCpnt, lba, num);
2271         if (ret)
2272                 return ret;
2273
2274         write_lock_irqsave(&atomic_rw, iflags);
2275
2276         /* DIX + T10 DIF */
2277         if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2278                 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2279
2280                 if (prot_ret) {
2281                         write_unlock_irqrestore(&atomic_rw, iflags);
2282                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10,
2283                                         prot_ret);
2284                         return illegal_condition_result;
2285                 }
2286         }
2287
2288         ret = do_device_access(SCpnt, lba, num, 1);
2289         if (scsi_debug_lbp())
2290                 map_region(lba, num);
2291         write_unlock_irqrestore(&atomic_rw, iflags);
2292         if (-1 == ret)
2293                 return (DID_ERROR << 16);
2294         else if ((ret < (num * scsi_debug_sector_size)) &&
2295                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2296                 sdev_printk(KERN_INFO, SCpnt->device,
2297                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2298                             my_name, num * scsi_debug_sector_size, ret);
2299
2300         return 0;
2301 }
2302
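/*
 * WRITE SAME: with the UNMAP bit set and logical block provisioning
 * enabled the range is simply unmapped; otherwise one logical block is
 * fetched from the data-out buffer and replicated across the range.
 */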
2303 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2304                       unsigned int num, u32 ei_lba, unsigned int unmap)
2305 {
2306         unsigned long iflags;
2307         unsigned long long i;
2308         int ret;
2309
2310         ret = check_device_access_params(scmd, lba, num);
2311         if (ret)
2312                 return ret;
2313
2314         if (num > scsi_debug_write_same_length) {
2315                 mk_sense_buffer(scmd, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2316                                 0);
2317                 return check_condition_result;
2318         }
2319
2320         write_lock_irqsave(&atomic_rw, iflags);
2321
2322         if (unmap && scsi_debug_lbp()) {
2323                 unmap_region(lba, num);
2324                 goto out;
2325         }
2326
2327         /* Else fetch one logical block */
2328         ret = fetch_to_dev_buffer(scmd,
2329                                   fake_storep + (lba * scsi_debug_sector_size),
2330                                   scsi_debug_sector_size);
2331
2332         if (-1 == ret) {
2333                 write_unlock_irqrestore(&atomic_rw, iflags);
2334                 return (DID_ERROR << 16);
2335         } else if ((ret < (num * scsi_debug_sector_size)) &&
2336                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2337                 sdev_printk(KERN_INFO, scmd->device,
2338                             "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2339                             my_name, "write same",
2340                             num * scsi_debug_sector_size, ret);
2341
2342         /* Copy first sector to remaining blocks */
2343         for (i = 1 ; i < num ; i++)
2344                 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2345                        fake_storep + (lba * scsi_debug_sector_size),
2346                        scsi_debug_sector_size);
2347
2348         if (scsi_debug_lbp())
2349                 map_region(lba, num);
2350 out:
2351         write_unlock_irqrestore(&atomic_rw, iflags);
2352
2353         return 0;
2354 }
2355
2356 struct unmap_block_desc {
2357         __be64  lba;
2358         __be32  blocks;
2359         __be32  __reserved;
2360 };
2361
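/*
 * UNMAP: the data-out buffer carries an 8 byte parameter list header
 * followed by 16 byte block descriptors (struct unmap_block_desc
 * above); each descriptor's range is validated and then unmapped.
 */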
2362 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2363 {
2364         unsigned char *buf;
2365         struct unmap_block_desc *desc;
2366         unsigned int i, payload_len, descriptors;
2367         int ret;
2368         unsigned long iflags;
2369
2370         ret = check_readiness(scmd, UAS_ONLY, devip);
2371         if (ret)
2372                 return ret;
2373
2374         payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2375         BUG_ON(scsi_bufflen(scmd) != payload_len);
2376
2377         descriptors = (payload_len - 8) / 16;
2378
2379         buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2380         if (!buf)
2381                 return check_condition_result;
2382
2383         scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2384
2385         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2386         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2387
2388         desc = (void *)&buf[8];
2389
2390         write_lock_irqsave(&atomic_rw, iflags);
2391
2392         for (i = 0 ; i < descriptors ; i++) {
2393                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2394                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2395
2396                 ret = check_device_access_params(scmd, lba, num);
2397                 if (ret)
2398                         goto out;
2399
2400                 unmap_region(lba, num);
2401         }
2402
2403         ret = 0;
2404
2405 out:
2406         write_unlock_irqrestore(&atomic_rw, iflags);
2407         kfree(buf);
2408
2409         return ret;
2410 }
2411
2412 #define SDEBUG_GET_LBA_STATUS_LEN 32
2413
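/*
 * GET LBA STATUS: returns a single LBA status descriptor covering the
 * run of consecutive blocks, starting at the requested LBA, that share
 * the same mapped/unmapped state as computed by map_state().
 */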
2414 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2415                                struct sdebug_dev_info * devip)
2416 {
2417         unsigned long long lba;
2418         unsigned int alloc_len, mapped, num;
2419         unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2420         int ret;
2421
2422         ret = check_readiness(scmd, UAS_ONLY, devip);
2423         if (ret)
2424                 return ret;
2425
2426         lba = get_unaligned_be64(&scmd->cmnd[2]);
2427         alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2428
2429         if (alloc_len < 24)
2430                 return 0;
2431
2432         ret = check_device_access_params(scmd, lba, 1);
2433         if (ret)
2434                 return ret;
2435
2436         mapped = map_state(lba, &num);
2437
2438         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2439         put_unaligned_be32(20, &arr[0]);        /* Parameter Data Length */
2440         put_unaligned_be64(lba, &arr[8]);       /* LBA */
2441         put_unaligned_be32(num, &arr[16]);      /* Number of blocks */
2442         arr[20] = !mapped;                      /* mapped = 0, unmapped = 1 */
2443
2444         return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2445 }
2446
2447 #define SDEBUG_RLUN_ARR_SZ 256
2448
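/*
 * REPORT LUNS: SELECT REPORT 0 lists the ordinary LUNs (up to
 * scsi_debug_max_luns, with LUN 0 optionally suppressed), 1 lists only
 * the REPORT LUNS well known LUN and 2 lists both; the response is
 * capped at SDEBUG_RLUN_ARR_SZ bytes.
 */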
2449 static int resp_report_luns(struct scsi_cmnd * scp,
2450                             struct sdebug_dev_info * devip)
2451 {
2452         unsigned int alloc_len;
2453         int lun_cnt, i, upper, num, n, want_wlun, shortish;
2454         u64 lun;
2455         unsigned char *cmd = scp->cmnd;
2456         int select_report = (int)cmd[2];
2457         struct scsi_lun *one_lun;
2458         unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2459         unsigned char * max_addr;
2460
2461         alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2462         shortish = (alloc_len < 4);
2463         if (shortish || (select_report > 2)) {
2464                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
2465                 return check_condition_result;
2466         }
2467         /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2468         memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2469         lun_cnt = scsi_debug_max_luns;
2470         if (1 == select_report)
2471                 lun_cnt = 0;
2472         else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2473                 --lun_cnt;
2474         want_wlun = (select_report > 0) ? 1 : 0;
2475         num = lun_cnt + want_wlun;
2476         arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2477         arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2478         n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2479                             sizeof(struct scsi_lun)), num);
2480         if (n < num) {
2481                 want_wlun = 0;
2482                 lun_cnt = n;
2483         }
2484         one_lun = (struct scsi_lun *) &arr[8];
2485         max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2486         for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2487              ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2488              i++, lun++) {
2489                 upper = (lun >> 8) & 0x3f;
2490                 if (upper)
2491                         one_lun[i].scsi_lun[0] =
2492                             (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2493                 one_lun[i].scsi_lun[1] = lun & 0xff;
2494         }
2495         if (want_wlun) {
2496                 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2497                 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2498                 i++;
2499         }
2500         alloc_len = (unsigned char *)(one_lun + i) - arr;
2501         return fill_from_dev_buffer(scp, arr,
2502                                     min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2503 }
2504
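/*
 * XDWRITEREAD(10) helper: XORs the command's data-out buffer into its
 * data-in buffer in place; the caller is expected to have performed
 * the read and write of the medium beforehand.
 */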
2505 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2506                             unsigned int num, struct sdebug_dev_info *devip)
2507 {
2508         int j;
2509         unsigned char *kaddr, *buf;
2510         unsigned int offset;
2511         struct scsi_data_buffer *sdb = scsi_in(scp);
2512         struct sg_mapping_iter miter;
2513
2514         /* Ideally a temporary buffer would be avoided; using one keeps the XOR simple. */
2515         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2516         if (!buf) {
2517                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2518                                 INSUFF_RES_ASCQ);
2519                 return check_condition_result;
2520         }
2521
2522         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2523
2524         offset = 0;
2525         sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
2526                         SG_MITER_ATOMIC | SG_MITER_TO_SG);
2527
2528         while (sg_miter_next(&miter)) {
2529                 kaddr = miter.addr;
2530                 for (j = 0; j < miter.length; j++)
2531                         *(kaddr + j) ^= *(buf + offset + j);
2532
2533                 offset += miter.length;
2534         }
2535         sg_miter_stop(&miter);
2536         kfree(buf);
2537
2538         return 0;
2539 }
2540
2541 /* Called when the delay timer or tasklet for a queued command fires. */
2542 static void sdebug_q_cmd_complete(unsigned long indx)
2543 {
2544         int qa_indx;
2545         int retiring = 0;
2546         unsigned long iflags;
2547         struct sdebug_queued_cmd *sqcp;
2548         struct scsi_cmnd *scp;
2549         struct sdebug_dev_info *devip;
2550
2551         atomic_inc(&sdebug_completions);
2552         qa_indx = indx;
2553         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
2554                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
2555                 return;
2556         }
2557         spin_lock_irqsave(&queued_arr_lock, iflags);
2558         sqcp = &queued_arr[qa_indx];
2559         scp = sqcp->a_cmnd;
2560         if (NULL == scp) {
2561                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2562                 pr_err("%s: scp is NULL\n", __func__);
2563                 return;
2564         }
2565         devip = (struct sdebug_dev_info *)scp->device->hostdata;
2566         if (devip)
2567                 atomic_dec(&devip->num_in_q);
2568         else
2569                 pr_err("%s: devip=NULL\n", __func__);
2570         if (atomic_read(&retired_max_queue) > 0)
2571                 retiring = 1;
2572
2573         sqcp->a_cmnd = NULL;
2574         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
2575                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2576                 pr_err("%s: Unexpected completion\n", __func__);
2577                 return;
2578         }
2579
2580         if (unlikely(retiring)) {       /* user has reduced max_queue */
2581                 int k, retval;
2582
2583                 retval = atomic_read(&retired_max_queue);
2584                 if (qa_indx >= retval) {
2585                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2586                         pr_err("%s: index %d too large\n", __func__, retval);
2587                         return;
2588                 }
2589                 k = find_last_bit(queued_in_use_bm, retval);
2590                 if ((k < scsi_debug_max_queue) || (k == retval))
2591                         atomic_set(&retired_max_queue, 0);
2592                 else
2593                         atomic_set(&retired_max_queue, k + 1);
2594         }
2595         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2596         scp->scsi_done(scp); /* callback to mid level */
2597 }
2598
2599 /* Called when the high resolution timer for a queued command fires. */
2600 static enum hrtimer_restart
2601 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
2602 {
2603         int qa_indx;
2604         int retiring = 0;
2605         unsigned long iflags;
2606         struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
2607         struct sdebug_queued_cmd *sqcp;
2608         struct scsi_cmnd *scp;
2609         struct sdebug_dev_info *devip;
2610
2611         atomic_inc(&sdebug_completions);
2612         qa_indx = sd_hrtp->qa_indx;
2613         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
2614                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
2615                 goto the_end;
2616         }
2617         spin_lock_irqsave(&queued_arr_lock, iflags);
2618         sqcp = &queued_arr[qa_indx];
2619         scp = sqcp->a_cmnd;
2620         if (NULL == scp) {
2621                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2622                 pr_err("%s: scp is NULL\n", __func__);
2623                 goto the_end;
2624         }
2625         devip = (struct sdebug_dev_info *)scp->device->hostdata;
2626         if (devip)
2627                 atomic_dec(&devip->num_in_q);
2628         else
2629                 pr_err("%s: devip=NULL\n", __func__);
2630         if (atomic_read(&retired_max_queue) > 0)
2631                 retiring = 1;
2632
2633         sqcp->a_cmnd = NULL;
2634         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
2635                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2636                 pr_err("%s: Unexpected completion\n", __func__);
2637                 goto the_end;
2638         }
2639
2640         if (unlikely(retiring)) {       /* user has reduced max_queue */
2641                 int k, retval;
2642
2643                 retval = atomic_read(&retired_max_queue);
2644                 if (qa_indx >= retval) {
2645                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2646                         pr_err("%s: index %d too large\n", __func__, retval);
2647                         goto the_end;
2648                 }
2649                 k = find_last_bit(queued_in_use_bm, retval);
2650                 if ((k < scsi_debug_max_queue) || (k == retval))
2651                         atomic_set(&retired_max_queue, 0);
2652                 else
2653                         atomic_set(&retired_max_queue, k + 1);
2654         }
2655         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2656         scp->scsi_done(scp); /* callback to mid level */
2657 the_end:
2658         return HRTIMER_NORESTART;
2659 }
2660
2661 static struct sdebug_dev_info *
2662 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2663 {
2664         struct sdebug_dev_info *devip;
2665
2666         devip = kzalloc(sizeof(*devip), flags);
2667         if (devip) {
2668                 devip->sdbg_host = sdbg_host;
2669                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2670         }
2671         return devip;
2672 }
2673
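/*
 * Return the sdebug_dev_info already associated with a scsi_device, or
 * bind one by reusing an unused entry on the host's list (allocating a
 * fresh one if necessary) and initializing it for this device.
 */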
2674 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2675 {
2676         struct sdebug_host_info * sdbg_host;
2677         struct sdebug_dev_info * open_devip = NULL;
2678         struct sdebug_dev_info * devip =
2679                         (struct sdebug_dev_info *)sdev->hostdata;
2680
2681         if (devip)
2682                 return devip;
2683         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2684         if (!sdbg_host) {
2685                 pr_err("%s: Host info NULL\n", __func__);
2686                 return NULL;
2687         }
2688         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2689                 if ((devip->used) && (devip->channel == sdev->channel) &&
2690                     (devip->target == sdev->id) &&
2691                     (devip->lun == sdev->lun))
2692                         return devip;
2693                 else {
2694                         if ((!devip->used) && (!open_devip))
2695                                 open_devip = devip;
2696                 }
2697         }
2698         if (!open_devip) { /* try to make a new one */
2699                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2700                 if (!open_devip) {
2701                         printk(KERN_ERR "%s: out of memory at line %d\n",
2702                                 __func__, __LINE__);
2703                         return NULL;
2704                 }
2705         }
2706
2707         open_devip->channel = sdev->channel;
2708         open_devip->target = sdev->id;
2709         open_devip->lun = sdev->lun;
2710         open_devip->sdbg_host = sdbg_host;
2711         atomic_set(&open_devip->num_in_q, 0);
2712         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
2713         open_devip->used = 1;
2714         if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2715                 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2716
2717         return open_devip;
2718 }
2719
2720 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2721 {
2722         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2723                 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
2724                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2725         queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2726         return 0;
2727 }
2728
2729 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2730 {
2731         struct sdebug_dev_info *devip;
2732
2733         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2734                 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
2735                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2736         if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2737                 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2738         devip = devInfoReg(sdp);
2739         if (NULL == devip)
2740                 return 1;       /* no resources, will be marked offline */
2741         sdp->hostdata = devip;
2742         blk_queue_max_segment_size(sdp->request_queue, -1U);
2743         if (scsi_debug_no_uld)
2744                 sdp->no_uld_attach = 1;
2745         return 0;
2746 }
2747
2748 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2749 {
2750         struct sdebug_dev_info *devip =
2751                 (struct sdebug_dev_info *)sdp->hostdata;
2752
2753         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2754                 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
2755                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2756         if (devip) {
2757                 /* make this slot available for re-use */
2758                 devip->used = 0;
2759                 sdp->hostdata = NULL;
2760         }
2761 }
2762
2763 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
2764 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2765 {
2766         unsigned long iflags;
2767         int k, qmax, r_qmax;
2768         struct sdebug_queued_cmd *sqcp;
2769         struct sdebug_dev_info *devip;
2770
2771         spin_lock_irqsave(&queued_arr_lock, iflags);
2772         qmax = scsi_debug_max_queue;
2773         r_qmax = atomic_read(&retired_max_queue);
2774         if (r_qmax > qmax)
2775                 qmax = r_qmax;
2776         for (k = 0; k < qmax; ++k) {
2777                 if (test_bit(k, queued_in_use_bm)) {
2778                         sqcp = &queued_arr[k];
2779                         if (cmnd == sqcp->a_cmnd) {
2780                                 devip = (struct sdebug_dev_info *)
2781                                         cmnd->device->hostdata;
2782                                 if (devip)
2783                                         atomic_dec(&devip->num_in_q);
2784                                 sqcp->a_cmnd = NULL;
2785                                 spin_unlock_irqrestore(&queued_arr_lock,
2786                                                        iflags);
2787                                 if (scsi_debug_ndelay > 0) {
2788                                         if (sqcp->sd_hrtp)
2789                                                 hrtimer_cancel(
2790                                                         &sqcp->sd_hrtp->hrt);
2791                                 } else if (scsi_debug_delay > 0) {
2792                                         if (sqcp->cmnd_timerp)
2793                                                 del_timer_sync(
2794                                                         sqcp->cmnd_timerp);
2795                                 } else if (scsi_debug_delay < 0) {
2796                                         if (sqcp->tletp)
2797                                                 tasklet_kill(sqcp->tletp);
2798                                 }
2799                                 clear_bit(k, queued_in_use_bm);
2800                                 return 1;
2801                         }
2802                 }
2803         }
2804         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2805         return 0;
2806 }
2807
2808 /* Deletes (stops) timers or tasklets of all queued commands */
2809 static void stop_all_queued(void)
2810 {
2811         unsigned long iflags;
2812         int k;
2813         struct sdebug_queued_cmd *sqcp;
2814         struct sdebug_dev_info *devip;
2815
2816         spin_lock_irqsave(&queued_arr_lock, iflags);
2817         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
2818                 if (test_bit(k, queued_in_use_bm)) {
2819                         sqcp = &queued_arr[k];
2820                         if (sqcp->a_cmnd) {
2821                                 devip = (struct sdebug_dev_info *)
2822                                         sqcp->a_cmnd->device->hostdata;
2823                                 if (devip)
2824                                         atomic_dec(&devip->num_in_q);
2825                                 sqcp->a_cmnd = NULL;
2826                                 spin_unlock_irqrestore(&queued_arr_lock,
2827                                                        iflags);
2828                                 if (scsi_debug_ndelay > 0) {
2829                                         if (sqcp->sd_hrtp)
2830                                                 hrtimer_cancel(
2831                                                         &sqcp->sd_hrtp->hrt);
2832                                 } else if (scsi_debug_delay > 0) {
2833                                         if (sqcp->cmnd_timerp)
2834                                                 del_timer_sync(
2835                                                         sqcp->cmnd_timerp);
2836                                 } else if (scsi_debug_delay < 0) {
2837                                         if (sqcp->tletp)
2838                                                 tasklet_kill(sqcp->tletp);
2839                                 }
2840                                 clear_bit(k, queued_in_use_bm);
2841                                 spin_lock_irqsave(&queued_arr_lock, iflags);
2842                         }
2843                 }
2844         }
2845         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2846 }
2847
2848 /* Free queued command memory on heap */
2849 static void free_all_queued(void)
2850 {
2851         unsigned long iflags;
2852         int k;
2853         struct sdebug_queued_cmd *sqcp;
2854
2855         spin_lock_irqsave(&queued_arr_lock, iflags);
2856         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
2857                 sqcp = &queued_arr[k];
2858                 kfree(sqcp->cmnd_timerp);
2859                 sqcp->cmnd_timerp = NULL;
2860                 kfree(sqcp->tletp);
2861                 sqcp->tletp = NULL;
2862                 kfree(sqcp->sd_hrtp);
2863                 sqcp->sd_hrtp = NULL;
2864         }
2865         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2866 }
2867
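     /* Error handling callbacks follow. Each bumps its counter, raises a
      * unit attention where appropriate and returns SUCCESS to the mid
      * level, since this pseudo adapter has nothing real to reset. */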
2868 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
2869 {
2870         ++num_aborts;
2871         if (SCpnt) {
2872                 if (SCpnt->device &&
2873                     (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
2874                         sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
2875                                     __func__);
2876                 stop_queued_cmnd(SCpnt);
2877         }
2878         return SUCCESS;
2879 }
2880
2881 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2882 {
2883         struct sdebug_dev_info * devip;
2884
2885         ++num_dev_resets;
2886         if (SCpnt && SCpnt->device) {
2887                 struct scsi_device *sdp = SCpnt->device;
2888
2889                 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2890                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2891                 devip = devInfoReg(sdp);
2892                 if (devip)
2893                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
2894         }
2895         return SUCCESS;
2896 }
2897
2898 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
2899 {
2900         struct sdebug_host_info *sdbg_host;
2901         struct sdebug_dev_info *devip;
2902         struct scsi_device *sdp;
2903         struct Scsi_Host *hp;
2904         int k = 0;
2905
2906         ++num_target_resets;
2907         if (!SCpnt)
2908                 goto lie;
2909         sdp = SCpnt->device;
2910         if (!sdp)
2911                 goto lie;
2912         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2913                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2914         hp = sdp->host;
2915         if (!hp)
2916                 goto lie;
2917         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2918         if (sdbg_host) {
2919                 list_for_each_entry(devip,
2920                                     &sdbg_host->dev_info_list,
2921                                     dev_list)
2922                         if (devip->target == sdp->id) {
2923                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2924                                 ++k;
2925                         }
2926         }
2927         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2928                 sdev_printk(KERN_INFO, sdp,
2929                             "%s: %d device(s) found in target\n", __func__, k);
2930 lie:
2931         return SUCCESS;
2932 }
2933
2934 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2935 {
2936         struct sdebug_host_info *sdbg_host;
2937         struct sdebug_dev_info *devip;
2938         struct scsi_device * sdp;
2939         struct Scsi_Host * hp;
2940         int k = 0;
2941
2942         ++num_bus_resets;
2943         if (!(SCpnt && SCpnt->device))
2944                 goto lie;
2945         sdp = SCpnt->device;
2946         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2947                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2948         hp = sdp->host;
2949         if (hp) {
2950                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2951                 if (sdbg_host) {
2952                         list_for_each_entry(devip,
2953                                             &sdbg_host->dev_info_list,
2954                                             dev_list) {
2955                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2956                                 ++k;
2957                         }
2958                 }
2959         }
2960         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2961                 sdev_printk(KERN_INFO, sdp,
2962                             "%s: %d device(s) found in host\n", __func__, k);
2963 lie:
2964         return SUCCESS;
2965 }
2966
2967 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2968 {
2969         struct sdebug_host_info * sdbg_host;
2970         struct sdebug_dev_info *devip;
2971         int k = 0;
2972
2973         ++num_host_resets;
2974         if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
2975                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
2976         spin_lock(&sdebug_host_list_lock);
2977         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2978                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
2979                                     dev_list) {
2980                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2981                         ++k;
2982                 }
2983         }
2984         spin_unlock(&sdebug_host_list_lock);
2985         stop_all_queued();
2986         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2987                 sdev_printk(KERN_INFO, SCpnt->device,
2988                             "%s: %d device(s) found\n", __func__, k);
2989         return SUCCESS;
2990 }
2991
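     /* Write a DOS style partition table into the first sector of the
      * ramdisk: 0x55AA signature at offsets 510-511 and up to
      * SDEBUG_MAX_PARTS type 0x83 entries starting at offset 0x1be. */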
2992 static void __init sdebug_build_parts(unsigned char *ramp,
2993                                       unsigned long store_size)
2994 {
2995         struct partition * pp;
2996         int starts[SDEBUG_MAX_PARTS + 2];
2997         int sectors_per_part, num_sectors, k;
2998         int heads_by_sects, start_sec, end_sec;
2999
3000         /* assume partition table already zeroed */
3001         if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
3002                 return;
3003         if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
3004                 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
3005                 pr_warn("%s: reducing partitions to %d\n", __func__,
3006                         SDEBUG_MAX_PARTS);
3007         }
3008         num_sectors = (int)sdebug_store_sectors;
3009         sectors_per_part = (num_sectors - sdebug_sectors_per)
3010                            / scsi_debug_num_parts;
3011         heads_by_sects = sdebug_heads * sdebug_sectors_per;
3012         starts[0] = sdebug_sectors_per;
3013         for (k = 1; k < scsi_debug_num_parts; ++k)
3014                 starts[k] = ((k * sectors_per_part) / heads_by_sects)
3015                             * heads_by_sects;
3016         starts[scsi_debug_num_parts] = num_sectors;
3017         starts[scsi_debug_num_parts + 1] = 0;
3018
3019         ramp[510] = 0x55;       /* magic partition markings */
3020         ramp[511] = 0xAA;
3021         pp = (struct partition *)(ramp + 0x1be);
3022         for (k = 0; starts[k + 1]; ++k, ++pp) {
3023                 start_sec = starts[k];
3024                 end_sec = starts[k + 1] - 1;
3025                 pp->boot_ind = 0;
3026
3027                 pp->cyl = start_sec / heads_by_sects;
3028                 pp->head = (start_sec - (pp->cyl * heads_by_sects))
3029                            / sdebug_sectors_per;
3030                 pp->sector = (start_sec % sdebug_sectors_per) + 1;
3031
3032                 pp->end_cyl = end_sec / heads_by_sects;
3033                 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3034                                / sdebug_sectors_per;
3035                 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3036
3037                 pp->start_sect = cpu_to_le32(start_sec);
3038                 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3039                 pp->sys_ind = 0x83;     /* plain Linux partition */
3040         }
3041 }
3042
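     /* Complete the command either in the caller's thread (delta_jiff == 0)
      * or later via a timer (delay > 0), hrtimer (ndelay > 0) or tasklet
      * (delay < 0). May substitute TASK SET FULL or report host busy when
      * the queue array or the device queue is exhausted. */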
3043 static int
3044 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3045               int scsi_result, int delta_jiff)
3046 {
3047         unsigned long iflags;
3048         int k, num_in_q, qdepth, inject;
3049         struct sdebug_queued_cmd *sqcp = NULL;
3050         struct scsi_device *sdp;
3051 
3052         if (NULL == cmnd || NULL == devip) {
3053                 pr_warn("%s: called with NULL cmnd or devip pointer\n", __func__);
3054                 /* no particularly good error to report back */
3055                 return SCSI_MLQUEUE_HOST_BUSY;
3056         }
3057         sdp = cmnd->device;
3058         if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3059                 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3060                             __func__, scsi_result);
3061         if (delta_jiff == 0)
3062                 goto respond_in_thread;
3063
3064         /* schedule the response at a later time if resources permit */
3065         spin_lock_irqsave(&queued_arr_lock, iflags);
3066         num_in_q = atomic_read(&devip->num_in_q);
3067         qdepth = cmnd->device->queue_depth;
3068         inject = 0;
3069         if ((qdepth > 0) && (num_in_q >= qdepth)) {
3070                 if (scsi_result) {
3071                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3072                         goto respond_in_thread;
3073                 } else
3074                         scsi_result = device_qfull_result;
3075         } else if ((scsi_debug_every_nth != 0) &&
3076                    (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
3077                    (scsi_result == 0)) {
3078                 if ((num_in_q == (qdepth - 1)) &&
3079                     (atomic_inc_return(&sdebug_a_tsf) >=
3080                      abs(scsi_debug_every_nth))) {
3081                         atomic_set(&sdebug_a_tsf, 0);
3082                         inject = 1;
3083                         scsi_result = device_qfull_result;
3084                 }
3085         }
3086
3087         k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3088         if (k >= scsi_debug_max_queue) {
3089                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3090                 if (scsi_result)
3091                         goto respond_in_thread;
3092                 else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3093                         scsi_result = device_qfull_result;
3094                 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3095                         sdev_printk(KERN_INFO, sdp,
3096                                     "%s: max_queue=%d exceeded, %s\n",
3097                                     __func__, scsi_debug_max_queue,
3098                                     (scsi_result ?  "status: TASK SET FULL" :
3099                                                     "report: host busy"));
3100                 if (scsi_result)
3101                         goto respond_in_thread;
3102                 else
3103                         return SCSI_MLQUEUE_HOST_BUSY;
3104         }
3105         __set_bit(k, queued_in_use_bm);
3106         atomic_inc(&devip->num_in_q);
3107         sqcp = &queued_arr[k];
3108         sqcp->a_cmnd = cmnd;
3109         cmnd->result = scsi_result;
3110         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3111         if (delta_jiff > 0) {
3112                 if (NULL == sqcp->cmnd_timerp) {
3113                         sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
3114                                                     GFP_ATOMIC);
3115                         if (NULL == sqcp->cmnd_timerp)
3116                                 return SCSI_MLQUEUE_HOST_BUSY;
3117                         init_timer(sqcp->cmnd_timerp);
3118                 }
3119                 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
3120                 sqcp->cmnd_timerp->data = k;
3121                 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
3122                 add_timer(sqcp->cmnd_timerp);
3123         } else if (scsi_debug_ndelay > 0) {
3124                 ktime_t kt = ktime_set(0, scsi_debug_ndelay);
3125                 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
3126
3127                 if (NULL == sd_hp) {
3128                         sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
3129                         if (NULL == sd_hp)
3130                                 return SCSI_MLQUEUE_HOST_BUSY;
3131                         sqcp->sd_hrtp = sd_hp;
3132                         hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
3133                                      HRTIMER_MODE_REL);
3134                         sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
3135                         sd_hp->qa_indx = k;
3136                 }
3137                 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
3138         } else {        /* delay < 0 */
3139                 if (NULL == sqcp->tletp) {
3140                         sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
3141                                               GFP_ATOMIC);
3142                         if (NULL == sqcp->tletp)
3143                                 return SCSI_MLQUEUE_HOST_BUSY;
3144                         tasklet_init(sqcp->tletp,
3145                                      sdebug_q_cmd_complete, k);
3146                 }
3147                 if (-1 == delta_jiff)
3148                         tasklet_hi_schedule(sqcp->tletp);
3149                 else
3150                         tasklet_schedule(sqcp->tletp);
3151         }
3152         if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
3153             (scsi_result == device_qfull_result))
3154                 sdev_printk(KERN_INFO, sdp,
3155                             "%s: num_in_q=%d +1, %s%s\n", __func__,
3156                             num_in_q, (inject ? "<inject> " : ""),
3157                             "status: TASK SET FULL");
3158         return 0;
3159
3160 respond_in_thread:      /* call back to mid-layer using invocation thread */
3161         cmnd->result = scsi_result;
3162         cmnd->scsi_done(cmnd);
3163         return 0;
3164 }
3165
3166 /* Note: The following macros create attribute files in the
3167    /sys/module/scsi_debug/parameters directory. Unfortunately this
3168    driver is not notified when one of those files changes, so it cannot
3169    trigger auxiliary actions as it can when the corresponding attribute
3170    in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
3171  */
3172 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
3173 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
3174 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
3175 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
3176 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
3177 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
3178 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
3179 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
3180 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
3181 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
3182 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
3183 module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
3184 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
3185 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
3186 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
3187 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
3188 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
3189 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
3190 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
3191 module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
3192 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
3193 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
3194 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
3195 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
3196 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
3197 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
3198 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
3199 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
3200 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
3201 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
3202 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
3203 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
3204 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
3205 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
3206 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
3207 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
3208 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
3209                    S_IRUGO | S_IWUSR);
3210 module_param_named(write_same_length, scsi_debug_write_same_length, int,
3211                    S_IRUGO | S_IWUSR);
3212
3213 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
3214 MODULE_DESCRIPTION("SCSI debug adapter driver");
3215 MODULE_LICENSE("GPL");
3216 MODULE_VERSION(SCSI_DEBUG_VERSION);
3217
3218 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
3219 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
3220 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
3221 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
3222 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
3223 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
3224 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
3225 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
3226 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
3227 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
3228 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
3229 MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
3230 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
3231 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
3232 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
3233 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
3234 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
3235 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
3236 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
3237 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
3238 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
3239 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
3240 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
3241 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
3242 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
3243 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
3244 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
3245 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
3246 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
3247 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
3248 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
3249 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
3250 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
3251 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
3252 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
3253 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
3254 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
3255 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
3256
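     /* Build a one line description of this pseudo adapter for the mid level */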
3257 static char sdebug_info[256];
3258
3259 static const char * scsi_debug_info(struct Scsi_Host * shp)
3260 {
3261         sprintf(sdebug_info, "scsi_debug, version %s [%s], "
3262                 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
3263                 scsi_debug_version_date, scsi_debug_dev_size_mb,
3264                 scsi_debug_opts);
3265         return sdebug_info;
3266 }
3267
3268 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
3269 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
3270 {
3271         char arr[16];
3272         int opts;
3273         int minLen = length > 15 ? 15 : length;
3274
3275         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
3276                 return -EACCES;
3277         memcpy(arr, buffer, minLen);
3278         arr[minLen] = '\0';
3279         if (1 != sscanf(arr, "%d", &opts))
3280                 return -EINVAL;
3281         scsi_debug_opts = opts;
3282         if (scsi_debug_every_nth != 0)
3283                 atomic_set(&sdebug_cmnd_count, 0);
3284         return length;
3285 }
3286
3287 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
3288  * same for each scsi_debug host (if more than one). Some of the counters
3289  * shown are not atomic so they may be inaccurate on a busy system. */
3290 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
3291 {
3292         int f, l;
3293         char b[32];
3294
3295         if (scsi_debug_every_nth > 0)
3296                 snprintf(b, sizeof(b), " (curr:%d)",
3297                          ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
3298                                 atomic_read(&sdebug_a_tsf) :
3299                                 atomic_read(&sdebug_cmnd_count)));
3300         else
3301                 b[0] = '\0';
3302
3303         seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
3304                 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
3305                 "every_nth=%d%s\n"
3306                 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
3307                 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
3308                 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
3309                 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
3310                 "usec_in_jiffy=%lu\n",
3311                 SCSI_DEBUG_VERSION, scsi_debug_version_date,
3312                 scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
3313                 scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
3314                 scsi_debug_max_luns, atomic_read(&sdebug_completions),
3315                 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
3316                 sdebug_sectors_per, num_aborts, num_dev_resets,
3317                 num_target_resets, num_bus_resets, num_host_resets,
3318                 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
3319
3320         f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
3321         if (f != scsi_debug_max_queue) {
3322                 l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
3323                 seq_printf(m, "   %s BUSY: first,last bits set: %d,%d\n",
3324                            "queued_in_use_bm", f, l);
3325         }
3326         return 0;
3327 }
3328
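     /* The driver attributes below back the files in the
      * /sys/bus/pseudo/drivers/scsi_debug directory, e.g.
      *     echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay
      * makes responses immediate; the write is refused with EBUSY while
      * commands are still queued. */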
3329 static ssize_t delay_show(struct device_driver *ddp, char *buf)
3330 {
3331         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
3332 }
3333 /* Returns -EBUSY if delay is being changed and commands are queued */
3334 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
3335                            size_t count)
3336 {
3337         int delay, res;
3338
3339         if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
3340                 res = count;
3341                 if (scsi_debug_delay != delay) {
3342                         unsigned long iflags;
3343                         int k;
3344
3345                         spin_lock_irqsave(&queued_arr_lock, iflags);
3346                         k = find_first_bit(queued_in_use_bm,
3347                                            scsi_debug_max_queue);
3348                         if (k != scsi_debug_max_queue)
3349                                 res = -EBUSY;   /* have queued commands */
3350                         else {
3351                                 scsi_debug_delay = delay;
3352                                 scsi_debug_ndelay = 0;
3353                         }
3354                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3355                 }
3356                 return res;
3357         }
3358         return -EINVAL;
3359 }
3360 static DRIVER_ATTR_RW(delay);
3361
3362 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
3363 {
3364         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
3365 }
3366 /* Returns -EBUSY if ndelay is being changed and commands are queued */
3367 /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
3368 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
3369                            size_t count)
3370 {
3371         unsigned long iflags;
3372         int ndelay, res, k;
3373
3374         if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
3375             (ndelay >= 0) && (ndelay < 1000000000)) {
3376                 res = count;
3377                 if (scsi_debug_ndelay != ndelay) {
3378                         spin_lock_irqsave(&queued_arr_lock, iflags);
3379                         k = find_first_bit(queued_in_use_bm,
3380                                            scsi_debug_max_queue);
3381                         if (k != scsi_debug_max_queue)
3382                                 res = -EBUSY;   /* have queued commands */
3383                         else {
3384                                 scsi_debug_ndelay = ndelay;
3385                                 scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
3386                                                           : DEF_DELAY;
3387                         }
3388                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3389                 }
3390                 return res;
3391         }
3392         return -EINVAL;
3393 }
3394 static DRIVER_ATTR_RW(ndelay);
3395
3396 static ssize_t opts_show(struct device_driver *ddp, char *buf)
3397 {
3398         return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
3399 }
3400
3401 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
3402                           size_t count)
3403 {
3404         int opts;
3405         char work[20];
3406
3407         if (1 == sscanf(buf, "%10s", work)) {
3408                 if (0 == strncasecmp(work,"0x", 2)) {
3409                         if (1 == sscanf(&work[2], "%x", &opts))
3410                                 goto opts_done;
3411                 } else {
3412                         if (1 == sscanf(work, "%d", &opts))
3413                                 goto opts_done;
3414                 }
3415         }
3416         return -EINVAL;
3417 opts_done:
3418         scsi_debug_opts = opts;
3419         atomic_set(&sdebug_cmnd_count, 0);
3420         atomic_set(&sdebug_a_tsf, 0);
3421         return count;
3422 }
3423 static DRIVER_ATTR_RW(opts);
3424
3425 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
3426 {
3427         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
3428 }
3429 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
3430                            size_t count)
3431 {
3432         int n;
3433
3434         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3435                 scsi_debug_ptype = n;
3436                 return count;
3437         }
3438         return -EINVAL;
3439 }
3440 static DRIVER_ATTR_RW(ptype);
3441
3442 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
3443 {
3444         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
3445 }
3446 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
3447                             size_t count)
3448 {
3449         int n;
3450
3451         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3452                 scsi_debug_dsense = n;
3453                 return count;
3454         }
3455         return -EINVAL;
3456 }
3457 static DRIVER_ATTR_RW(dsense);
3458
3459 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
3460 {
3461         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
3462 }
3463 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
3464                              size_t count)
3465 {
3466         int n;
3467
3468         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3469                 n = (n > 0);
3470                 scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
3471                 if (scsi_debug_fake_rw != n) {
3472                         if ((0 == n) && (NULL == fake_storep)) {
3473                                 unsigned long sz =
3474                                         (unsigned long)scsi_debug_dev_size_mb *
3475                                         1048576;
3476
3477                                 fake_storep = vmalloc(sz);
3478                                 if (NULL == fake_storep) {
3479                                         pr_err("%s: out of memory, 9\n",
3480                                                __func__);
3481                                         return -ENOMEM;
3482                                 }
3483                                 memset(fake_storep, 0, sz);
3484                         }
3485                         scsi_debug_fake_rw = n;
3486                 }
3487                 return count;
3488         }
3489         return -EINVAL;
3490 }
3491 static DRIVER_ATTR_RW(fake_rw);
3492
3493 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
3494 {
3495         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
3496 }
3497 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
3498                               size_t count)
3499 {
3500         int n;
3501
3502         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3503                 scsi_debug_no_lun_0 = n;
3504                 return count;
3505         }
3506         return -EINVAL;
3507 }
3508 static DRIVER_ATTR_RW(no_lun_0);
3509
3510 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
3511 {
3512         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
3513 }
3514 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
3515                               size_t count)
3516 {
3517         int n;
3518
3519         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3520                 scsi_debug_num_tgts = n;
3521                 sdebug_max_tgts_luns();
3522                 return count;
3523         }
3524         return -EINVAL;
3525 }
3526 static DRIVER_ATTR_RW(num_tgts);
3527
3528 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
3529 {
3530         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3531 }
3532 static DRIVER_ATTR_RO(dev_size_mb);
3533
3534 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
3535 {
3536         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3537 }
3538 static DRIVER_ATTR_RO(num_parts);
3539
3540 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
3541 {
3542         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3543 }
3544 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
3545                                size_t count)
3546 {
3547         int nth;
3548
3549         if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3550                 scsi_debug_every_nth = nth;
3551                 atomic_set(&sdebug_cmnd_count, 0);
3552                 return count;
3553         }
3554         return -EINVAL;
3555 }
3556 static DRIVER_ATTR_RW(every_nth);
3557
3558 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
3559 {
3560         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3561 }
3562 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
3563                               size_t count)
3564 {
3565         int n;
3566
3567         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3568                 scsi_debug_max_luns = n;
3569                 sdebug_max_tgts_luns();
3570                 return count;
3571         }
3572         return -EINVAL;
3573 }
3574 static DRIVER_ATTR_RW(max_luns);
3575
3576 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
3577 {
3578         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3579 }
3580 /* N.B. max_queue can be changed while there are queued commands. In flight
3581  * commands beyond the new max_queue will be completed. */
3582 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
3583                                size_t count)
3584 {
3585         unsigned long iflags;
3586         int n, k;
3587
3588         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3589             (n <= SCSI_DEBUG_CANQUEUE)) {
3590                 spin_lock_irqsave(&queued_arr_lock, iflags);
3591                 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
3592                 scsi_debug_max_queue = n;
3593                 if (SCSI_DEBUG_CANQUEUE == k)
3594                         atomic_set(&retired_max_queue, 0);
3595                 else if (k >= n)
3596                         atomic_set(&retired_max_queue, k + 1);
3597                 else
3598                         atomic_set(&retired_max_queue, 0);
3599                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3600                 return count;
3601         }
3602         return -EINVAL;
3603 }
3604 static DRIVER_ATTR_RW(max_queue);
3605
3606 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
3607 {
3608         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3609 }
3610 static DRIVER_ATTR_RO(no_uld);
3611
3612 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
3613 {
3614         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3615 }
3616 static DRIVER_ATTR_RO(scsi_level);
3617
3618 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
3619 {
3620         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3621 }
3622 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
3623                                 size_t count)
3624 {
3625         int n;
3626
3627         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3628                 scsi_debug_virtual_gb = n;
3629
3630                 sdebug_capacity = get_sdebug_capacity();
3631
3632                 return count;
3633         }
3634         return -EINVAL;
3635 }
3636 static DRIVER_ATTR_RW(virtual_gb);
3637
3638 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
3639 {
3640         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3641 }
3642
3643 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
3644                               size_t count)
3645 {
3646         int delta_hosts;
3647
3648         if (sscanf(buf, "%d", &delta_hosts) != 1)
3649                 return -EINVAL;
3650         if (delta_hosts > 0) {
3651                 do {
3652                         sdebug_add_adapter();
3653                 } while (--delta_hosts);
3654         } else if (delta_hosts < 0) {
3655                 do {
3656                         sdebug_remove_adapter();
3657                 } while (++delta_hosts);
3658         }
3659         return count;
3660 }
3661 static DRIVER_ATTR_RW(add_host);
3662
3663 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
3664 {
3665         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3666 }
3667 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
3668                                     size_t count)
3669 {
3670         int n;
3671
3672         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3673                 scsi_debug_vpd_use_hostno = n;
3674                 return count;
3675         }
3676         return -EINVAL;
3677 }
3678 static DRIVER_ATTR_RW(vpd_use_hostno);
3679
3680 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
3681 {
3682         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3683 }
3684 static DRIVER_ATTR_RO(sector_size);
3685
3686 static ssize_t dix_show(struct device_driver *ddp, char *buf)
3687 {
3688         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3689 }
3690 static DRIVER_ATTR_RO(dix);
3691
3692 static ssize_t dif_show(struct device_driver *ddp, char *buf)
3693 {
3694         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3695 }
3696 static DRIVER_ATTR_RO(dif);
3697
3698 static ssize_t guard_show(struct device_driver *ddp, char *buf)
3699 {
3700         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
3701 }
3702 static DRIVER_ATTR_RO(guard);
3703
3704 static ssize_t ato_show(struct device_driver *ddp, char *buf)
3705 {
3706         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3707 }
3708 static DRIVER_ATTR_RO(ato);
3709
3710 static ssize_t map_show(struct device_driver *ddp, char *buf)
3711 {
3712         ssize_t count;
3713
3714         if (!scsi_debug_lbp())
3715                 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3716                                  sdebug_store_sectors);
3717
3718         count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3719
3720         buf[count++] = '\n';
3721         buf[count++] = 0;
3722
3723         return count;
3724 }
3725 static DRIVER_ATTR_RO(map);
3726
3727 static ssize_t removable_show(struct device_driver *ddp, char *buf)
3728 {
3729         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3730 }
3731 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
3732                                size_t count)
3733 {
3734         int n;
3735
3736         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3737                 scsi_debug_removable = (n > 0);
3738                 return count;
3739         }
3740         return -EINVAL;
3741 }
3742 static DRIVER_ATTR_RW(removable);
3743
3744 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
3745 {
3746         return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
3747 }
3748 /* Returns -EBUSY if host_lock is being changed and commands are queued */
3749 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
3750                                size_t count)
3751 {
3752         int n, res;
3753
3754         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3755                 bool new_host_lock = (n > 0);
3756
3757                 res = count;
3758                 if (new_host_lock != scsi_debug_host_lock) {
3759                         unsigned long iflags;
3760                         int k;
3761
3762                         spin_lock_irqsave(&queued_arr_lock, iflags);
3763                         k = find_first_bit(queued_in_use_bm,
3764                                            scsi_debug_max_queue);
3765                         if (k != scsi_debug_max_queue)
3766                                 res = -EBUSY;   /* have queued commands */
3767                         else
3768                                 scsi_debug_host_lock = new_host_lock;
3769                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3770                 }
3771                 return res;
3772         }
3773         return -EINVAL;
3774 }
3775 static DRIVER_ATTR_RW(host_lock);
3776
3777
3778 /* Note: The following array creates attribute files in the
3779    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3780    files (over those found in the /sys/module/scsi_debug/parameters
3781    directory) is that auxiliary actions can be triggered when an attribute
3782    is changed. For example see: add_host_store() above.
3783  */
3784
3785 static struct attribute *sdebug_drv_attrs[] = {
3786         &driver_attr_delay.attr,
3787         &driver_attr_opts.attr,
3788         &driver_attr_ptype.attr,
3789         &driver_attr_dsense.attr,
3790         &driver_attr_fake_rw.attr,
3791         &driver_attr_no_lun_0.attr,
3792         &driver_attr_num_tgts.attr,
3793         &driver_attr_dev_size_mb.attr,
3794         &driver_attr_num_parts.attr,
3795         &driver_attr_every_nth.attr,
3796         &driver_attr_max_luns.attr,
3797         &driver_attr_max_queue.attr,
3798         &driver_attr_no_uld.attr,
3799         &driver_attr_scsi_level.attr,
3800         &driver_attr_virtual_gb.attr,
3801         &driver_attr_add_host.attr,
3802         &driver_attr_vpd_use_hostno.attr,
3803         &driver_attr_sector_size.attr,
3804         &driver_attr_dix.attr,
3805         &driver_attr_dif.attr,
3806         &driver_attr_guard.attr,
3807         &driver_attr_ato.attr,
3808         &driver_attr_map.attr,
3809         &driver_attr_removable.attr,
3810         &driver_attr_host_lock.attr,
3811         &driver_attr_ndelay.attr,
3812         NULL,
3813 };
3814 ATTRIBUTE_GROUPS(sdebug_drv);
3815
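     /* Root device ("pseudo_0") that all simulated adapters hang off */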
3816 static struct device *pseudo_primary;
3817
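     /* Module init: validate parameters, size and allocate the ramdisk
      * (plus optional DIF and provisioning stores), register the pseudo
      * bus and driver, then add 'add_host' simulated adapters. */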
3818 static int __init scsi_debug_init(void)
3819 {
3820         unsigned long sz;
3821         int host_to_add;
3822         int k;
3823         int ret;
3824
3825         atomic_set(&sdebug_cmnd_count, 0);
3826         atomic_set(&sdebug_completions, 0);
3827         atomic_set(&retired_max_queue, 0);
3828
3829         if (scsi_debug_ndelay >= 1000000000) {
3830                 pr_warn("%s: ndelay must be less than 1 second, ignored\n",
3831                         __func__);
3832                 scsi_debug_ndelay = 0;
3833         } else if (scsi_debug_ndelay > 0)
3834                 scsi_debug_delay = DELAY_OVERRIDDEN;
3835
3836         switch (scsi_debug_sector_size) {
3837         case  512:
3838         case 1024:
3839         case 2048:
3840         case 4096:
3841                 break;
3842         default:
3843                 pr_err("%s: invalid sector_size %d\n", __func__,
3844                        scsi_debug_sector_size);
3845                 return -EINVAL;
3846         }
3847
3848         switch (scsi_debug_dif) {
3849
3850         case SD_DIF_TYPE0_PROTECTION:
3851         case SD_DIF_TYPE1_PROTECTION:
3852         case SD_DIF_TYPE2_PROTECTION:
3853         case SD_DIF_TYPE3_PROTECTION:
3854                 break;
3855
3856         default:
3857                 pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
3858                 return -EINVAL;
3859         }
3860
3861         if (scsi_debug_guard > 1) {
3862                 pr_err("%s: guard must be 0 or 1\n", __func__);
3863                 return -EINVAL;
3864         }
3865
3866         if (scsi_debug_ato > 1) {
3867                 pr_err("%s: ato must be 0 or 1\n", __func__);
3868                 return -EINVAL;
3869         }
3870
3871         if (scsi_debug_physblk_exp > 15) {
3872                 pr_err("%s: invalid physblk_exp %u\n", __func__,
3873                        scsi_debug_physblk_exp);
3874                 return -EINVAL;
3875         }
3876
3877         if (scsi_debug_lowest_aligned > 0x3fff) {
3878                 pr_err("%s: lowest_aligned too big: %u\n", __func__,
3879                        scsi_debug_lowest_aligned);
3880                 return -EINVAL;
3881         }
3882
3883         if (scsi_debug_dev_size_mb < 1)
3884                 scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
3885         sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3886         sdebug_store_sectors = sz / scsi_debug_sector_size;
3887         sdebug_capacity = get_sdebug_capacity();
3888
3889         /* play around with geometry, don't waste too much on track 0 */
3890         sdebug_heads = 8;
3891         sdebug_sectors_per = 32;
3892         if (scsi_debug_dev_size_mb >= 256)
3893                 sdebug_heads = 64;
3894         else if (scsi_debug_dev_size_mb >= 16)
3895                 sdebug_heads = 32;
3896         sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3897                                (sdebug_sectors_per * sdebug_heads);
3898         if (sdebug_cylinders_per >= 1024) {
3899                 /* other LLDs do this; implies >= 1GB ram disk ... */
3900                 sdebug_heads = 255;
3901                 sdebug_sectors_per = 63;
3902                 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3903                                (sdebug_sectors_per * sdebug_heads);
3904         }
3905
3906         if (0 == scsi_debug_fake_rw) {
3907                 fake_storep = vmalloc(sz);
3908                 if (NULL == fake_storep) {
3909                         pr_err("%s: out of memory, 1\n", __func__);
3910                         return -ENOMEM;
3911                 }
3912                 memset(fake_storep, 0, sz);
3913                 if (scsi_debug_num_parts > 0)
3914                         sdebug_build_parts(fake_storep, sz);
3915         }
3916
3917         if (scsi_debug_dix) {
3918                 int dif_size;
3919
3920                 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3921                 dif_storep = vmalloc(dif_size);
3922
3923                 pr_info("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
3924                         dif_storep);
3925
3926                 if (dif_storep == NULL) {
3927                         pr_err("%s: out of mem. (DIX)\n", __func__);
3928                         ret = -ENOMEM;
3929                         goto free_vm;
3930                 }
3931
3932                 memset(dif_storep, 0xff, dif_size);
3933         }
3934
3935         /* Logical Block Provisioning */
3936         if (scsi_debug_lbp()) {
3937                 scsi_debug_unmap_max_blocks =
3938                         clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3939
3940                 scsi_debug_unmap_max_desc =
3941                         clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3942
3943                 scsi_debug_unmap_granularity =
3944                         clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3945
3946                 if (scsi_debug_unmap_alignment &&
3947                     scsi_debug_unmap_granularity <=
3948                     scsi_debug_unmap_alignment) {
3949                         pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
3950                                __func__);
3951                         return -EINVAL;
3952                 }
3953
3954                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3955                 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3956
3957                 pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
3958
3959                 if (map_storep == NULL) {
3960                         pr_err("%s: out of mem. (MAP)\n", __func__);
3961                         ret = -ENOMEM;
3962                         goto free_vm;
3963                 }
3964
3965                 bitmap_zero(map_storep, map_size);
3966
3967                 /* Map first 1KB for partition table */
3968                 if (scsi_debug_num_parts)
3969                         map_region(0, 2);
3970         }
3971
3972         pseudo_primary = root_device_register("pseudo_0");
3973         if (IS_ERR(pseudo_primary)) {
3974                 pr_warn("%s: root_device_register() error\n", __func__);
3975                 ret = PTR_ERR(pseudo_primary);
3976                 goto free_vm;
3977         }
3978         ret = bus_register(&pseudo_lld_bus);
3979         if (ret < 0) {
3980                 pr_warn("%s: bus_register error: %d\n", __func__, ret);
3981                 goto dev_unreg;
3982         }
3983         ret = driver_register(&sdebug_driverfs_driver);
3984         if (ret < 0) {
3985                 pr_warn("%s: driver_register error: %d\n", __func__, ret);
3986                 goto bus_unreg;
3987         }
3988
3989         host_to_add = scsi_debug_add_host;
3990         scsi_debug_add_host = 0;
3991
3992         for (k = 0; k < host_to_add; k++) {
3993                 if (sdebug_add_adapter()) {
3994                         pr_err("%s: sdebug_add_adapter failed k=%d\n",
3995                                 __func__, k);
3996                         break;
3997                 }
3998         }
3999
4000         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
4001                 pr_info("%s: built %d host(s)\n", __func__,
4002                         scsi_debug_add_host);
4003         }
4004         return 0;
4005
4006 bus_unreg:
4007         bus_unregister(&pseudo_lld_bus);
4008 dev_unreg:
4009         root_device_unregister(pseudo_primary);
4010 free_vm:
4011         vfree(map_storep);      /* vfree() ignores NULL */
4012         vfree(dif_storep);
4015         vfree(fake_storep);
4016
4017         return ret;
4018 }
4019
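     /*
      * Module teardown: stop and free any queued commands, remove every
      * emulated adapter, unregister the driver, bus and root device, and
      * release the backing stores allocated in scsi_debug_init().
      */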
4020 static void __exit scsi_debug_exit(void)
4021 {
4022         int k = scsi_debug_add_host;
4023
4024         stop_all_queued();
4025         free_all_queued();
4026         for (; k; k--)
4027                 sdebug_remove_adapter();
4028         driver_unregister(&sdebug_driverfs_driver);
4029         bus_unregister(&pseudo_lld_bus);
4030         root_device_unregister(pseudo_primary);
4031
4032         vfree(map_storep);      /* also release the provisioning bitmap */
4033         vfree(dif_storep);
4034
4035         vfree(fake_storep);
4036 }
4037
4038 device_initcall(scsi_debug_init);
4039 module_exit(scsi_debug_exit);
4040
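     /* Release callback: free the adapter's host info when its refcount drops. */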
4041 static void sdebug_release_adapter(struct device *dev)
4042 {
4043         struct sdebug_host_info *sdbg_host;
4044
4045         sdbg_host = to_sdebug_host(dev);
4046         kfree(sdbg_host);
4047 }
4048
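     /*
      * Create one emulated adapter: allocate its host info, populate
      * num_tgts * max_luns fake device entries, put it on the global host
      * list and register it as "adapter<N>" on the pseudo bus so that
      * sdebug_driver_probe() attaches a Scsi_Host to it.
      */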
4049 static int sdebug_add_adapter(void)
4050 {
4051         int k, devs_per_host;
4052         int error = 0;
4053         struct sdebug_host_info *sdbg_host;
4054         struct sdebug_dev_info *sdbg_devinfo, *tmp;
4055
4056         sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
4057         if (NULL == sdbg_host) {
4058                 printk(KERN_ERR "%s: out of memory at line %d\n",
4059                        __func__, __LINE__);
4060                 return -ENOMEM;
4061         }
4062
4063         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
4064
4065         devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
4066         for (k = 0; k < devs_per_host; k++) {
4067                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
4068                 if (!sdbg_devinfo) {
4069                         printk(KERN_ERR "%s: out of memory at line %d\n",
4070                                __func__, __LINE__);
4071                         error = -ENOMEM;
4072                         goto clean;
4073                 }
4074         }
4075
4076         spin_lock(&sdebug_host_list_lock);
4077         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
4078         spin_unlock(&sdebug_host_list_lock);
4079
4080         sdbg_host->dev.bus = &pseudo_lld_bus;
4081         sdbg_host->dev.parent = pseudo_primary;
4082         sdbg_host->dev.release = &sdebug_release_adapter;
4083         dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
4084
4085         error = device_register(&sdbg_host->dev);
4086         if (error) {
4087                 /* undo the list_add above so "clean" doesn't leave a stale entry */
4088                 spin_lock(&sdebug_host_list_lock);
                     list_del(&sdbg_host->host_list);
                     spin_unlock(&sdebug_host_list_lock);
                     goto clean;
             }
4089
4090         ++scsi_debug_add_host;
4091         return error;
4092
4093 clean:
4094         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4095                                  dev_list) {
4096                 list_del(&sdbg_devinfo->dev_list);
4097                 kfree(sdbg_devinfo);
4098         }
4099
4100         kfree(sdbg_host);
4101         return error;
4102 }
4103
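     /*
      * Remove the most recently added adapter: unlink it from the host list
      * and unregister its device; the release callback frees the host info.
      */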
4104 static void sdebug_remove_adapter(void)
4105 {
4106         struct sdebug_host_info *sdbg_host = NULL;
4107
4108         spin_lock(&sdebug_host_list_lock);
4109         if (!list_empty(&sdebug_host_list)) {
4110                 sdbg_host = list_entry(sdebug_host_list.prev,
4111                                        struct sdebug_host_info, host_list);
4112                 list_del(&sdbg_host->host_list);
4113         }
4114         spin_unlock(&sdebug_host_list_lock);
4115
4116         if (!sdbg_host)
4117                 return;
4118
4119         device_unregister(&sdbg_host->dev);
4120         --scsi_debug_add_host;
4121 }
4122
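     /*
      * Main CDB dispatcher.  Optionally logs the CDB, validates the LUN,
      * applies "every_nth" error injection, then emulates the opcode
      * (building CHECK CONDITION sense data for unsupported or invalid
      * requests) and hands the status to schedule_resp(), which completes
      * the command after the configured delay.
      */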
4123 static int
4124 scsi_debug_queuecommand(struct scsi_cmnd *SCpnt)
4125 {
4126         unsigned char *cmd = SCpnt->cmnd;
4127         int len, k;
4128         unsigned int num;
4129         unsigned long long lba;
4130         u32 ei_lba;
4131         int errsts = 0;
4132         int target = SCpnt->device->id;
4133         struct sdebug_dev_info *devip = NULL;
4134         int inj_recovered = 0;
4135         int inj_transport = 0;
4136         int inj_dif = 0;
4137         int inj_dix = 0;
4138         int inj_short = 0;
4139         int delay_override = 0;
4140         int unmap = 0;
4141
4142         scsi_set_resid(SCpnt, 0);
4143         if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) &&
4144             !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
4145                 char b[120];
4146                 int n;
4147
4148                 len = SCpnt->cmd_len;
4149                 if (len > 32)
4150                         strcpy(b, "too long, over 32 bytes");
4151                 else {
4152                         for (k = 0, n = 0; k < len; ++k)
4153                                 n += scnprintf(b + n, sizeof(b) - n, "%02x ",
4154                                                (unsigned int)cmd[k]);
4155                 }
4156                 sdev_printk(KERN_INFO, SCpnt->device, "%s: cmd %s\n", my_name,
4157                             b);
4158         }
4159
4160         if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
4161             (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
4162                 return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
4163         devip = devInfoReg(SCpnt->device);
4164         if (NULL == devip)
4165                 return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
4166
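             /*
              * Error injection: once every abs(scsi_debug_every_nth) commands
              * the opts bitmask either drops the command (forcing a timeout)
              * or flags a recovered/transport/DIF/DIX/short-transfer error to
              * be applied by the read/write handling below.
              */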
4167         if ((scsi_debug_every_nth != 0) &&
4168             (atomic_inc_return(&sdebug_cmnd_count) >=
4169              abs(scsi_debug_every_nth))) {
4170                 atomic_set(&sdebug_cmnd_count, 0);
4171                 if (scsi_debug_every_nth < -1)
4172                         scsi_debug_every_nth = -1;
4173                 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
4174                         return 0; /* ignore command causing timeout */
4175                 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
4176                          scsi_medium_access_command(SCpnt))
4177                         return 0; /* time out reads and writes */
4178                 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
4179                         inj_recovered = 1; /* to reads and writes below */
4180                 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
4181                         inj_transport = 1; /* to reads and writes below */
4182                 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
4183                         inj_dif = 1; /* to reads and writes below */
4184                 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
4185                         inj_dix = 1; /* to reads and writes below */
4186                 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts)
4187                         inj_short = 1;
4188         }
4189
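             /*
              * The REPORT LUNS well known LU only accepts INQUIRY, REQUEST
              * SENSE, TEST UNIT READY and REPORT LUNS; any other opcode gets
              * ILLEGAL REQUEST / INVALID OPCODE sense.
              */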
4190         if (devip->wlun) {
4191                 switch (*cmd) {
4192                 case INQUIRY:
4193                 case REQUEST_SENSE:
4194                 case TEST_UNIT_READY:
4195                 case REPORT_LUNS:
4196                         break;  /* only allowable wlun commands */
4197                 default:
4198                         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4199                                 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
4200                                        "not supported for wlun\n", *cmd);
4201                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4202                                         INVALID_OPCODE, 0);
4203                         errsts = check_condition_result;
4204                         return schedule_resp(SCpnt, devip, errsts, 0);
4205                 }
4206         }
4207
4208         switch (*cmd) {
4209         case INQUIRY:     /* mandatory, ignore unit attention */
4210                 delay_override = 1;
4211                 errsts = resp_inquiry(SCpnt, target, devip);
4212                 break;
4213         case REQUEST_SENSE:     /* mandatory, ignore unit attention */
4214                 delay_override = 1;
4215                 errsts = resp_requests(SCpnt, devip);
4216                 break;
4217         case REZERO_UNIT:       /* actually this is REWIND for SSC */
4218         case START_STOP:
4219                 errsts = resp_start_stop(SCpnt, devip);
4220                 break;
4221         case ALLOW_MEDIUM_REMOVAL:
4222                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4223                 if (errsts)
4224                         break;
4225                 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4226                         printk(KERN_INFO "scsi_debug: Medium removal %s\n",
4227                                cmd[4] ? "inhibited" : "enabled");
4228                 break;
4229         case SEND_DIAGNOSTIC:     /* mandatory */
4230                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4231                 break;
4232         case TEST_UNIT_READY:     /* mandatory */
4233                 /* delay_override = 1; */
4234                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4235                 break;
4236         case RESERVE:
4237                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4238                 break;
4239         case RESERVE_10:
4240                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4241                 break;
4242         case RELEASE:
4243                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4244                 break;
4245         case RELEASE_10:
4246                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4247                 break;
4248         case READ_CAPACITY:
4249                 errsts = resp_readcap(SCpnt, devip);
4250                 break;
4251         case SERVICE_ACTION_IN:
4252                 if (cmd[1] == SAI_READ_CAPACITY_16)
4253                         errsts = resp_readcap16(SCpnt, devip);
4254                 else if (cmd[1] == SAI_GET_LBA_STATUS) {
4255
4256                         if (scsi_debug_lbp() == 0) {
4257                                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4258                                                 INVALID_COMMAND_OPCODE, 0);
4259                                 errsts = check_condition_result;
4260                         } else
4261                                 errsts = resp_get_lba_status(SCpnt, devip);
4262                 } else {
4263                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4264                                         INVALID_OPCODE, 0);
4265                         errsts = check_condition_result;
4266                 }
4267                 break;
4268         case MAINTENANCE_IN:
4269                 if (MI_REPORT_TARGET_PGS != cmd[1]) {
4270                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4271                                         INVALID_OPCODE, 0);
4272                         errsts = check_condition_result;
4273                         break;
4274                 }
4275                 errsts = resp_report_tgtpgs(SCpnt, devip);
4276                 break;
4277         case READ_16:
4278         case READ_12:
4279         case READ_10:
4280                 /* READ{10,12,16} and DIF Type 2 are natural enemies */
4281                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
4282                     cmd[1] & 0xe0) {
4283                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4284                                         INVALID_COMMAND_OPCODE, 0);
4285                         errsts = check_condition_result;
4286                         break;
4287                 }
4288
4289                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
4290                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
4291                     (cmd[1] & 0xe0) == 0)
4292                         printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
4293
4294                 /* fall through */
4295         case READ_6:
4296 read:
4297                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4298                 if (errsts)
4299                         break;
4300                 if (scsi_debug_fake_rw)
4301                         break;
4302                 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4303
4304                 if (inj_short)
4305                         num /= 2;
4306
4307                 errsts = resp_read(SCpnt, lba, num, ei_lba);
4308                 if (inj_recovered && (0 == errsts)) {
4309                         mk_sense_buffer(SCpnt, RECOVERED_ERROR,
4310                                         THRESHOLD_EXCEEDED, 0);
4311                         errsts = check_condition_result;
4312                 } else if (inj_transport && (0 == errsts)) {
4313                         mk_sense_buffer(SCpnt, ABORTED_COMMAND,
4314                                         TRANSPORT_PROBLEM, ACK_NAK_TO);
4315                         errsts = check_condition_result;
4316                 } else if (inj_dif && (0 == errsts)) {
4317                         /* Logical block guard check failed */
4318                         mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
4319                         errsts = illegal_condition_result;
4320                 } else if (inj_dix && (0 == errsts)) {
4321                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
4322                         errsts = illegal_condition_result;
4323                 }
4324                 break;
4325         case REPORT_LUNS:       /* mandatory, ignore unit attention */
4326                 delay_override = 1;
4327                 errsts = resp_report_luns(SCpnt, devip);
4328                 break;
4329         case VERIFY:            /* 10 byte SBC-2 command */
4330                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4331                 break;
4332         case WRITE_16:
4333         case WRITE_12:
4334         case WRITE_10:
4335                 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
4336                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
4337                     cmd[1] & 0xe0) {
4338                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4339                                         INVALID_COMMAND_OPCODE, 0);
4340                         errsts = check_condition_result;
4341                         break;
4342                 }
4343
4344                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
4345                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
4346                     (cmd[1] & 0xe0) == 0)
4347                         printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
4348
4349                 /* fall through */
4350         case WRITE_6:
4351 write:
4352                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4353                 if (errsts)
4354                         break;
4355                 if (scsi_debug_fake_rw)
4356                         break;
4357                 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4358                 errsts = resp_write(SCpnt, lba, num, ei_lba);
4359                 if (inj_recovered && (0 == errsts)) {
4360                         mk_sense_buffer(SCpnt, RECOVERED_ERROR,
4361                                         THRESHOLD_EXCEEDED, 0);
4362                         errsts = check_condition_result;
4363                 } else if (inj_dif && (0 == errsts)) {
4364                         mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
4365                         errsts = illegal_condition_result;
4366                 } else if (inj_dix && (0 == errsts)) {
4367                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
4368                         errsts = illegal_condition_result;
4369                 }
4370                 break;
4371         case WRITE_SAME_16:
4372         case WRITE_SAME:
4373                 if (cmd[1] & 0x8) {
4374                         if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
4375                             (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
4376                                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4377                                                 INVALID_FIELD_IN_CDB, 0);
4378                                 errsts = check_condition_result;
4379                         } else
4380                                 unmap = 1;
4381                 }
4382                 if (errsts)
4383                         break;
4384                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4385                 if (errsts)
4386                         break;
4387                 if (scsi_debug_fake_rw)
4388                         break;
4389                 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4390                 errsts = resp_write_same(SCpnt, lba, num, ei_lba, unmap);
4391                 break;
4392         case UNMAP:
4393                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4394                 if (errsts)
4395                         break;
4396                 if (scsi_debug_fake_rw)
4397                         break;
4398
4399                 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
4400                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4401                                         INVALID_COMMAND_OPCODE, 0);
4402                         errsts = check_condition_result;
4403                 } else
4404                         errsts = resp_unmap(SCpnt, devip);
4405                 break;
4406         case MODE_SENSE:
4407         case MODE_SENSE_10:
4408                 errsts = resp_mode_sense(SCpnt, target, devip);
4409                 break;
4410         case MODE_SELECT:
4411                 errsts = resp_mode_select(SCpnt, 1, devip);
4412                 break;
4413         case MODE_SELECT_10:
4414                 errsts = resp_mode_select(SCpnt, 0, devip);
4415                 break;
4416         case LOG_SENSE:
4417                 errsts = resp_log_sense(SCpnt, devip);
4418                 break;
4419         case SYNCHRONIZE_CACHE:
4420                 delay_override = 1;
4421                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4422                 break;
4423         case WRITE_BUFFER:
4424                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4425                 break;
4426         case XDWRITEREAD_10:
4427                 if (!scsi_bidi_cmnd(SCpnt)) {
4428                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4429                                         INVALID_FIELD_IN_CDB, 0);
4430                         errsts = check_condition_result;
4431                         break;
4432                 }
4433
4434                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4435                 if (errsts)
4436                         break;
4437                 if (scsi_debug_fake_rw)
4438                         break;
4439                 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4440                 errsts = resp_read(SCpnt, lba, num, ei_lba);
4441                 if (errsts)
4442                         break;
4443                 errsts = resp_write(SCpnt, lba, num, ei_lba);
4444                 if (errsts)
4445                         break;
4446                 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
4447                 break;
4448         case VARIABLE_LENGTH_CMD:
4449                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
4450
4451                         if ((cmd[10] & 0xe0) == 0)
4452                                 printk(KERN_ERR
4453                                        "Unprotected RD/WR to DIF device\n");
4454
4455                         if (cmd[9] == READ_32) {
4456                                 BUG_ON(SCpnt->cmd_len < 32);
4457                                 goto read;
4458                         }
4459
4460                         if (cmd[9] == WRITE_32) {
4461                                 BUG_ON(SCpnt->cmd_len < 32);
4462                                 goto write;
4463                         }
4464                 }
4465
4466                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4467                                 INVALID_FIELD_IN_CDB, 0);
4468                 errsts = check_condition_result;
4469                 break;
4470         case 0x85:
4471                 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4472                         sdev_printk(KERN_INFO, SCpnt->device,
4473                         "%s: ATA PASS-THROUGH(16) not supported\n", my_name);
4474                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4475                                 INVALID_OPCODE, 0);
4476                 errsts = check_condition_result;
4477                 break;
4478         default:
4479                 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4480                         sdev_printk(KERN_INFO, SCpnt->device,
4481                                     "%s: Opcode: 0x%x not supported\n",
4482                                     my_name, *cmd);
4483                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4484                 if (errsts)
4485                         break;  /* Unit attention takes precedence */
4486                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
4487                 errsts = check_condition_result;
4488                 break;
4489         }
4490         return schedule_resp(SCpnt, devip, errsts,
4491                              (delay_override ? 0 : scsi_debug_delay));
4492 }
4493
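     /*
      * queuecommand entry point: when scsi_debug_host_lock is set the command
      * is processed under host_lock with interrupts disabled (exercising the
      * host-locked path); otherwise it runs lock-free.
      */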
4494 static int
4495 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
4496 {
4497         if (scsi_debug_host_lock) {
4498                 unsigned long iflags;
4499                 int rc;
4500
4501                 spin_lock_irqsave(shost->host_lock, iflags);
4502                 rc = scsi_debug_queuecommand(cmd);
4503                 spin_unlock_irqrestore(shost->host_lock, iflags);
4504                 return rc;
4505         } else
4506                 return scsi_debug_queuecommand(cmd);
4507 }
4508
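     /*
      * ->change_queue_depth: clamp the requested depth to
      * [1, SCSI_DEBUG_CANQUEUE + 10] and, when SCSI_DEBUG_OPT_Q_NOISE is set,
      * report how many commands the device currently has queued.
      */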
4509 static int
4510 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4511 {
4512         int num_in_q = 0;
4513         unsigned long iflags;
4514         struct sdebug_dev_info *devip;
4515
4516         spin_lock_irqsave(&queued_arr_lock, iflags);
4517         devip = (struct sdebug_dev_info *)sdev->hostdata;
4518         if (NULL == devip) {
4519                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4520                 return  -ENODEV;
4521         }
4522         num_in_q = atomic_read(&devip->num_in_q);
4523         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4524
4525         if (qdepth < 1)
4526                 qdepth = 1;
4527         /* allow the depth to exceed the host's queued_arr limit, for testing */
4528         if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4529                 qdepth = SCSI_DEBUG_CANQUEUE + 10;
4530         scsi_change_queue_depth(sdev, qdepth);
4531
4532         if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4533                 sdev_printk(KERN_INFO, sdev,
4534                             "%s: qdepth=%d, num_in_q=%d\n",
4535                             __func__, qdepth, num_in_q);
4536         }
4537         return sdev->queue_depth;
4538 }
4539
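     /*
      * ->change_queue_type: switch between untagged, simple and ordered
      * tagging and optionally log the new mode.
      */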
4540 static int
4541 sdebug_change_qtype(struct scsi_device *sdev, int qtype)
4542 {
4543         qtype = scsi_change_queue_type(sdev, qtype);
4544         if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4545                 const char *cp;
4546
4547                 switch (qtype) {
4548                 case 0:
4549                         cp = "untagged";
4550                         break;
4551                 case MSG_SIMPLE_TAG:
4552                         cp = "simple tags";
4553                         break;
4554                 case MSG_ORDERED_TAG:
4555                         cp = "ordered tags";
4556                         break;
4557                 default:
4558                         cp = "unknown";
4559                         break;
4560                 }
4561                 sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp);
4562         }
4563         return qtype;
4564 }
4565
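     /*
      * Host template wiring up the emulation entry points.  can_queue is
      * overridden in sdebug_driver_probe() from scsi_debug_max_queue, and
      * use_clustering is enabled there when scsi_debug_clustering is set.
      */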
4566 static struct scsi_host_template sdebug_driver_template = {
4567         .show_info =            scsi_debug_show_info,
4568         .write_info =           scsi_debug_write_info,
4569         .proc_name =            sdebug_proc_name,
4570         .name =                 "SCSI DEBUG",
4571         .info =                 scsi_debug_info,
4572         .slave_alloc =          scsi_debug_slave_alloc,
4573         .slave_configure =      scsi_debug_slave_configure,
4574         .slave_destroy =        scsi_debug_slave_destroy,
4575         .ioctl =                scsi_debug_ioctl,
4576         .queuecommand =         sdebug_queuecommand_lock_or_not,
4577         .change_queue_depth =   sdebug_change_qdepth,
4578         .change_queue_type =    sdebug_change_qtype,
4579         .eh_abort_handler =     scsi_debug_abort,
4580         .eh_device_reset_handler = scsi_debug_device_reset,
4581         .eh_target_reset_handler = scsi_debug_target_reset,
4582         .eh_bus_reset_handler = scsi_debug_bus_reset,
4583         .eh_host_reset_handler = scsi_debug_host_reset,
4584         .can_queue =            SCSI_DEBUG_CANQUEUE,
4585         .this_id =              7,
4586         .sg_tablesize =         SCSI_MAX_SG_CHAIN_SEGMENTS,
4587         .cmd_per_lun =          DEF_CMD_PER_LUN,
4588         .max_sectors =          -1U,
4589         .use_clustering =       DISABLE_CLUSTERING,
4590         .module =               THIS_MODULE,
4591         .track_queue_depth =    1,
4592 };
4593
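     /*
      * Bus probe: allocate a Scsi_Host whose hostdata holds only a pointer
      * back to the sdebug_host_info, advertise DIF/DIX protection according
      * to scsi_debug_dif/scsi_debug_dix, pick the guard type (IP checksum or
      * T10 CRC), then add and scan the host.
      */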
4594 static int sdebug_driver_probe(struct device *dev)
4595 {
4596         int error = 0;
4597         struct sdebug_host_info *sdbg_host;
4598         struct Scsi_Host *hpnt;
4599         int host_prot;
4600
4601         sdbg_host = to_sdebug_host(dev);
4602
4603         sdebug_driver_template.can_queue = scsi_debug_max_queue;
4604         if (scsi_debug_clustering)
4605                 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
4606         hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
4607         if (NULL == hpnt) {
4608                 pr_err("%s: scsi_host_alloc failed\n", __func__);
4609                 error = -ENODEV;
4610                 return error;
4611         }
4612
4613         sdbg_host->shost = hpnt;
4614         *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
4615         if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
4616                 hpnt->max_id = scsi_debug_num_tgts + 1;
4617         else
4618                 hpnt->max_id = scsi_debug_num_tgts;
4619         hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;  /* = scsi_debug_max_luns; */
4620
4621         host_prot = 0;
4622
4623         switch (scsi_debug_dif) {
4624
4625         case SD_DIF_TYPE1_PROTECTION:
4626                 host_prot = SHOST_DIF_TYPE1_PROTECTION;
4627                 if (scsi_debug_dix)
4628                         host_prot |= SHOST_DIX_TYPE1_PROTECTION;
4629                 break;
4630
4631         case SD_DIF_TYPE2_PROTECTION:
4632                 host_prot = SHOST_DIF_TYPE2_PROTECTION;
4633                 if (scsi_debug_dix)
4634                         host_prot |= SHOST_DIX_TYPE2_PROTECTION;
4635                 break;
4636
4637         case SD_DIF_TYPE3_PROTECTION:
4638                 host_prot = SHOST_DIF_TYPE3_PROTECTION;
4639                 if (scsi_debug_dix)
4640                         host_prot |= SHOST_DIX_TYPE3_PROTECTION;
4641                 break;
4642
4643         default:
4644                 if (scsi_debug_dix)
4645                         host_prot |= SHOST_DIX_TYPE0_PROTECTION;
4646                 break;
4647         }
4648
4649         scsi_host_set_prot(hpnt, host_prot);
4650
4651         printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
4652                (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
4653                (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
4654                (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
4655                (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
4656                (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
4657                (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
4658                (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
4659
4660         if (scsi_debug_guard == 1)
4661                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
4662         else
4663                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
4664
4665         error = scsi_add_host(hpnt, &sdbg_host->dev);
4666         if (error) {
4667                 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
4668                 error = -ENODEV;
4669                 scsi_host_put(hpnt);
4670         } else
4671                 scsi_scan_host(hpnt);
4672
4673         return error;
4674 }
4675
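     /*
      * Bus remove: detach and release the Scsi_Host and free the fake device
      * entries attached to this adapter.
      */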
4676 static int sdebug_driver_remove(struct device *dev)
4677 {
4678         struct sdebug_host_info *sdbg_host;
4679         struct sdebug_dev_info *sdbg_devinfo, *tmp;
4680
4681         sdbg_host = to_sdebug_host(dev);
4682
4683         if (!sdbg_host) {
4684                 printk(KERN_ERR "%s: Unable to locate host info\n",
4685                        __func__);
4686                 return -ENODEV;
4687         }
4688
4689         scsi_remove_host(sdbg_host->shost);
4690
4691         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4692                                  dev_list) {
4693                 list_del(&sdbg_devinfo->dev_list);
4694                 kfree(sdbg_devinfo);
4695         }
4696
4697         scsi_host_put(sdbg_host->shost);
4698         return 0;
4699 }
4700
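     /* Every device on the pseudo bus matches, so each "adapterN" gets probed. */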
4701 static int pseudo_lld_bus_match(struct device *dev,
4702                                 struct device_driver *dev_driver)
4703 {
4704         return 1;
4705 }
4706
4707 static struct bus_type pseudo_lld_bus = {
4708         .name = "pseudo",
4709         .match = pseudo_lld_bus_match,
4710         .probe = sdebug_driver_probe,
4711         .remove = sdebug_driver_remove,
4712         .drv_groups = sdebug_drv_groups,
4713 };