/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};
MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C6103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array", &SA5_access},
	{0x21CB103C, "Smart Array", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart Array", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
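
/*
 * Illustrative sketch (not part of the upstream driver): each board_id
 * above packs the PCI subsystem device ID into the high 16 bits and the
 * subsystem vendor ID into the low 16 bits, matching the entries in
 * hpsa_pci_device_id, e.g. 0x3241103C = device 0x3241, vendor 0x103C (HP).
 */
#if 0
static u32 example_make_board_id(u16 subsys_device, u16 subsys_vendor)
{
	/* 0x3241 and 0x103C would yield 0x3241103C ("Smart Array P212") */
	return ((u32) subsys_device << 16) | subsys_vendor;
}
#endif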

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
static void lock_and_start_io(struct ctlr_info *h);
static void start_io(struct ctlr_info *h, unsigned long *flags);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
			"changed, action required\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"1(ADM)", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN	(ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};

/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long flags;

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
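
/*
 * Illustrative sketch of the normal performant-mode encoding described
 * above (not part of the upstream driver): with a block fetch table
 * entry of 3, the low bits of the bus address become 1 | (3 << 1) = 0x7,
 * i.e. bit 0 set for performant mode and bits 1-3 carrying the fetch
 * table entry.  set_performant_mode() below does this for real commands.
 */
#if 0
static u32 example_performant_tag(u32 busaddr, u8 block_fetch_entry)
{
	/* bit 0 = performant mode, bits 1-3 = block fetch table entry */
	return busaddr | 1 | ((u32) block_fetch_entry << 1);
}
#endif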

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		break;
	default:
		set_performant_mode(h, c);
	}
	dial_down_lockup_detection_during_fw_flash(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h, &flags);
	spin_unlock_irqrestore(&h->lock, flags);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
		scsi_device_type(device->devtype), hostno,
		device->bus, device->target, device->lun);
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed. */
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->raid_map = new_entry->raid_map;

	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
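
/*
 * Illustrative use of SCSI3ADDR_EQ (a sketch, not part of the upstream
 * driver): because the macro expands to a byte-by-byte comparison of all
 * eight bytes, it can compare two 8-byte SCSI-3 address arrays without
 * a memcmp() call.
 */
#if 0
static int example_same_scsi3addr(unsigned char a[8], unsigned char b[8])
{
	return SCSI3ADDR_EQ(a, b); /* 1 if all 8 bytes match, else 0 */
}
#endif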

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 * first.
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
				h->scsi_host->host_no,
				sd[i]->bus, sd[i]->target, sd[i]->lun);
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr.lower = 0;
		chain_sg->Addr.upper = 0;
		return -1;
	}
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2)
{
	int data_len;
	int retry = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			dev_warn(&h->pdev->dev,
				"%s: task complete with check condition.\n",
				"HP SSD Smart Path");
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT) {
				memset(cmd->sense_buffer, 0,
					SCSI_SENSE_BUFFERSIZE);
				break;
			}
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			dev_warn(&h->pdev->dev,
				"%s: task complete with BUSY status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			dev_warn(&h->pdev->dev,
				"%s: task complete with reservation conflict.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			/* Make scsi midlayer do unlimited retries */
			cmd->result = DID_IMM_RETRY << 16;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			dev_warn(&h->pdev->dev,
				"%s: task complete with aborted status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		default:
			dev_warn(&h->pdev->dev,
				"%s: task complete with unrecognized status: 0x%02x\n",
				"HP SSD Smart Path", c2->error_data.status);
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		/* don't expect to get here. */
		dev_warn(&h->pdev->dev,
			"unexpected delivery or target failure, status = 0x%02x\n",
			c2->error_data.status);
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		dev_warn(&h->pdev->dev, "task management function rejected.\n");
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Unrecognized server response: 0x%02x\n",
			"HP SSD Smart Path",
			c2->error_data.serv_response);
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}

static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	int raid_retry = 0;

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0)) {
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}

	/* Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		dev->offload_enabled = 0;
		h->drv_req_rescan = 1;	/* schedule controller for a rescan */
		cmd->result = DID_SOFT_ERROR << 16;
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}
	raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
	/* If error found, disable Smart Path, schedule a rescan,
	 * and force a retry on the standard path.
	 */
	if (raid_retry) {
		dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
			"HP SSD Smart Path");
		dev->offload_enabled = 0; /* Disable Smart Path */
		h->drv_req_rescan = 1;	  /* schedule controller rescan */
		cmd->result = DID_SOFT_ERROR << 16;
	}
	cmd_free(h, c);
	cmd->scsi_done(cmd);
}
1683 static void complete_scsi_command(struct CommandList *cp)
1685 struct scsi_cmnd *cmd;
1686 struct ctlr_info *h;
1687 struct ErrorInfo *ei;
1688 struct hpsa_scsi_dev_t *dev;
1690 unsigned char sense_key;
1691 unsigned char asc; /* additional sense code */
1692 unsigned char ascq; /* additional sense code qualifier */
1693 unsigned long sense_data_size;
1696 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
1698 dev = cmd->device->hostdata;
1700 scsi_dma_unmap(cmd); /* undo the DMA mappings */
1701 if ((cp->cmd_type == CMD_SCSI) &&
1702 (cp->Header.SGTotal > h->max_cmd_sg_entries))
1703 hpsa_unmap_sg_chain_block(h, cp);
1705 cmd->result = (DID_OK << 16); /* host byte */
1706 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1708 if (cp->cmd_type == CMD_IOACCEL2)
1709 return process_ioaccel2_completion(h, cp, cmd, dev);
1711 cmd->result |= ei->ScsiStatus;
1713 scsi_set_resid(cmd, ei->ResidualCnt);
1714 if (ei->CommandStatus == 0) {
1716 cmd->scsi_done(cmd);
1720 /* copy the sense data */
1721 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1722 sense_data_size = SCSI_SENSE_BUFFERSIZE;
1724 sense_data_size = sizeof(ei->SenseInfo);
1725 if (ei->SenseLen < sense_data_size)
1726 sense_data_size = ei->SenseLen;
1728 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
1730 /* For I/O accelerator commands, copy over some fields to the normal
1731 * CISS header used below for error handling.
1733 if (cp->cmd_type == CMD_IOACCEL1) {
1734 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1735 cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
1736 cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
1737 cp->Header.Tag.lower = c->Tag.lower;
1738 cp->Header.Tag.upper = c->Tag.upper;
1739 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1740 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
1742 /* Any RAID offload error results in retry which will use
1743 * the normal I/O path so the controller can handle whatever's
1746 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1747 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1748 dev->offload_enabled = 0;
1749 cmd->result = DID_SOFT_ERROR << 16;
1751 cmd->scsi_done(cmd);
1756 /* an error has occurred */
1757 switch (ei->CommandStatus) {
1759 case CMD_TARGET_STATUS:
1760 if (ei->ScsiStatus) {
1762 sense_key = 0xf & ei->SenseInfo[2];
1763 /* Get additional sense code */
1764 asc = ei->SenseInfo[12];
1765 /* Get addition sense code qualifier */
1766 ascq = ei->SenseInfo[13];
1769 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1770 if (check_for_unit_attention(h, cp))
1772 if (sense_key == ILLEGAL_REQUEST) {
1774 * SCSI REPORT_LUNS is commonly unsupported on
1775 * Smart Array. Suppress noisy complaint.
1777 if (cp->Request.CDB[0] == REPORT_LUNS)
1780 /* If ASC/ASCQ indicate Logical Unit
1781 * Not Supported condition,
1783 if ((asc == 0x25) && (ascq == 0x0)) {
1784 dev_warn(&h->pdev->dev, "cp %p "
1785 "has check condition\n", cp);
1790 if (sense_key == NOT_READY) {
1791 /* If Sense is Not Ready, Logical Unit
1792 * Not ready, Manual Intervention
1795 if ((asc == 0x04) && (ascq == 0x03)) {
1796 dev_warn(&h->pdev->dev, "cp %p "
1797 "has check condition: unit "
1798 "not ready, manual "
1799 "intervention required\n", cp);
1803 if (sense_key == ABORTED_COMMAND) {
1804 /* Aborted command is retryable */
1805 dev_warn(&h->pdev->dev, "cp %p "
1806 "has check condition: aborted command: "
1807 "ASC: 0x%x, ASCQ: 0x%x\n",
1809 cmd->result |= DID_SOFT_ERROR << 16;
1812 /* Must be some other type of check condition */
1813 dev_dbg(&h->pdev->dev, "cp %p has check condition: "
1815 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1816 "Returning result: 0x%x, "
1817 "cmd=[%02x %02x %02x %02x %02x "
1818 "%02x %02x %02x %02x %02x %02x "
1819 "%02x %02x %02x %02x %02x]\n",
1820 cp, sense_key, asc, ascq,
1822 cmd->cmnd[0], cmd->cmnd[1],
1823 cmd->cmnd[2], cmd->cmnd[3],
1824 cmd->cmnd[4], cmd->cmnd[5],
1825 cmd->cmnd[6], cmd->cmnd[7],
1826 cmd->cmnd[8], cmd->cmnd[9],
1827 cmd->cmnd[10], cmd->cmnd[11],
1828 cmd->cmnd[12], cmd->cmnd[13],
1829 cmd->cmnd[14], cmd->cmnd[15]);
1834 /* Problem was not a check condition
1835 * Pass it up to the upper layers...
1837 if (ei->ScsiStatus) {
1838 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1839 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1840 "Returning result: 0x%x\n",
1842 sense_key, asc, ascq,
1844 } else { /* scsi status is zero??? How??? */
1845 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1846 "Returning no connection.\n", cp),
1848 /* Ordinarily, this case should never happen,
1849 * but there is a bug in some released firmware
1850 * revisions that allows it to happen if, for
1851 * example, a 4100 backplane loses power and
1852 * the tape drive is in it. We assume that
1853 * it's a fatal error of some kind because we
1854 * can't show that it wasn't. We will make it
1855 * look like selection timeout since that is
1856 * the most common reason for this to occur,
1857 * and it's severe enough.
1860 cmd->result = DID_NO_CONNECT << 16;
1864 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1866 case CMD_DATA_OVERRUN:
1867 dev_warn(&h->pdev->dev, "cp %p has"
1868 " completed with data overrun "
1872 /* print_bytes(cp, sizeof(*cp), 1, 0);
1874 /* We get CMD_INVALID if you address a non-existent device
1875 * instead of a selection timeout (no response). You will
1876 * see this if you yank out a drive, then try to access it.
1877 * This is kind of a shame because it means that any other
1878 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1879 * missing target. */
1880 cmd->result = DID_NO_CONNECT << 16;
1883 case CMD_PROTOCOL_ERR:
1884 cmd->result = DID_ERROR << 16;
1885 dev_warn(&h->pdev->dev, "cp %p has "
1886 "protocol error\n", cp);
1888 case CMD_HARDWARE_ERR:
1889 cmd->result = DID_ERROR << 16;
1890 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1892 case CMD_CONNECTION_LOST:
1893 cmd->result = DID_ERROR << 16;
1894 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1897 cmd->result = DID_ABORT << 16;
1898 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1899 cp, ei->ScsiStatus);
1901 case CMD_ABORT_FAILED:
1902 cmd->result = DID_ERROR << 16;
1903 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1905 case CMD_UNSOLICITED_ABORT:
1906 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1907 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1908 "abort\n", cp);
1910 case CMD_TIMEOUT:
1911 cmd->result = DID_TIME_OUT << 16;
1912 dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1914 case CMD_UNABORTABLE:
1915 cmd->result = DID_ERROR << 16;
1916 dev_warn(&h->pdev->dev, "Command unabortable\n");
1918 case CMD_IOACCEL_DISABLED:
1919 /* This only handles the direct pass-through case since RAID
1920 * offload is handled above. Just attempt a retry.
1922 cmd->result = DID_SOFT_ERROR << 16;
1923 dev_warn(&h->pdev->dev,
1924 "cp %p had HP SSD Smart Path error\n", cp);
1926 default:
1927 cmd->result = DID_ERROR << 16;
1928 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1929 cp, ei->CommandStatus);
1932 cmd->scsi_done(cmd);
1935 static void hpsa_pci_unmap(struct pci_dev *pdev,
1936 struct CommandList *c, int sg_used, int data_direction)
1939 union u64bit addr64;
1941 for (i = 0; i < sg_used; i++) {
1942 addr64.val32.lower = c->SG[i].Addr.lower;
1943 addr64.val32.upper = c->SG[i].Addr.upper;
1944 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1945 data_direction);
1949 static int hpsa_map_one(struct pci_dev *pdev,
1950 struct CommandList *cp,
1951 unsigned char *buf,
1952 size_t buflen,
1953 int data_direction)
1957 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1958 cp->Header.SGList = 0;
1959 cp->Header.SGTotal = 0;
1960 return 0;
1963 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
1964 if (dma_mapping_error(&pdev->dev, addr64)) {
1965 /* Prevent subsequent unmap of something never mapped */
1966 cp->Header.SGList = 0;
1967 cp->Header.SGTotal = 0;
1968 return -1;
1970 cp->SG[0].Addr.lower =
1971 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1972 cp->SG[0].Addr.upper =
1973 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1974 cp->SG[0].Len = buflen;
1975 cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
1976 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
1977 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
1978 return 0;
1981 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1982 struct CommandList *c)
1984 DECLARE_COMPLETION_ONSTACK(wait);
1986 c->waiting = &wait;
1987 enqueue_cmd_and_start_io(h, c);
1988 wait_for_completion(&wait);
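/*
 * The interrupt handler completes c->waiting when the controller
 * finishes the command, which is what wait_for_completion() above
 * blocks on, so the caller sees a fully synchronous command.
 */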
1991 static u32 lockup_detected(struct ctlr_info *h)
1993 int cpu;
1994 u32 rc, *lockup_detected;
1996 cpu = get_cpu();
1997 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
1998 rc = *lockup_detected;
1999 put_cpu();
2000 return rc;
2003 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
2004 struct CommandList *c)
2006 /* If controller lockup detected, fake a hardware error. */
2007 if (unlikely(lockup_detected(h)))
2008 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
2009 else
2010 hpsa_scsi_do_simple_cmd_core(h, c);
2013 #define MAX_DRIVER_CMD_RETRIES 25
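/*
 * Retry policy for the helper below: the first three attempts are
 * retried immediately; after that each retry sleeps, doubling the
 * delay from 10 ms up to a 1 s cap, and we give up after
 * MAX_DRIVER_CMD_RETRIES attempts. Only unit-attention and busy
 * outcomes are treated as retryable.
 */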
2014 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2015 struct CommandList *c, int data_direction)
2017 int backoff_time = 10, retry_count = 0;
2019 do {
2020 memset(c->err_info, 0, sizeof(*c->err_info));
2021 hpsa_scsi_do_simple_cmd_core(h, c);
2022 retry_count++;
2023 if (retry_count > 3) {
2024 msleep(backoff_time);
2025 if (backoff_time < 1000)
2026 backoff_time *= 2;
2028 } while ((check_for_unit_attention(h, c) ||
2029 check_for_busy(h, c)) &&
2030 retry_count <= MAX_DRIVER_CMD_RETRIES);
2031 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2034 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2035 struct CommandList *c)
2037 const u8 *cdb = c->Request.CDB;
2038 const u8 *lun = c->Header.LUN.LunAddrBytes;
2040 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2041 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2042 txt, lun[0], lun[1], lun[2], lun[3],
2043 lun[4], lun[5], lun[6], lun[7],
2044 cdb[0], cdb[1], cdb[2], cdb[3],
2045 cdb[4], cdb[5], cdb[6], cdb[7],
2046 cdb[8], cdb[9], cdb[10], cdb[11],
2047 cdb[12], cdb[13], cdb[14], cdb[15]);
2050 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2051 struct CommandList *cp)
2053 const struct ErrorInfo *ei = cp->err_info;
2054 struct device *d = &cp->h->pdev->dev;
2055 const u8 *sd = ei->SenseInfo;
2057 switch (ei->CommandStatus) {
2058 case CMD_TARGET_STATUS:
2059 hpsa_print_cmd(h, "SCSI status", cp);
2060 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2061 dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
2062 sd[2] & 0x0f, sd[12], sd[13]);
2063 else
2064 dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
2065 if (ei->ScsiStatus == 0)
2066 dev_warn(d, "SCSI status is abnormally zero. "
2067 "(probably indicates selection timeout "
2068 "reported incorrectly due to a known "
2069 "firmware bug, circa July, 2001.)\n");
2071 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2073 case CMD_DATA_OVERRUN:
2074 hpsa_print_cmd(h, "overrun condition", cp);
2076 case CMD_INVALID: {
2077 /* controller unfortunately reports SCSI passthru's
2078 * to non-existent targets as invalid commands.
2080 hpsa_print_cmd(h, "invalid command", cp);
2081 dev_warn(d, "probably means device no longer present\n");
2084 case CMD_PROTOCOL_ERR:
2085 hpsa_print_cmd(h, "protocol error", cp);
2087 case CMD_HARDWARE_ERR:
2088 hpsa_print_cmd(h, "hardware error", cp);
2090 case CMD_CONNECTION_LOST:
2091 hpsa_print_cmd(h, "connection lost", cp);
2093 case CMD_ABORTED:
2094 hpsa_print_cmd(h, "aborted", cp);
2096 case CMD_ABORT_FAILED:
2097 hpsa_print_cmd(h, "abort failed", cp);
2099 case CMD_UNSOLICITED_ABORT:
2100 hpsa_print_cmd(h, "unsolicited abort", cp);
2102 case CMD_TIMEOUT:
2103 hpsa_print_cmd(h, "timed out", cp);
2105 case CMD_UNABORTABLE:
2106 hpsa_print_cmd(h, "unabortable", cp);
2108 default:
2109 hpsa_print_cmd(h, "unknown status", cp);
2110 dev_warn(d, "Unknown command status %x\n",
2111 ei->CommandStatus);
2115 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2116 u16 page, unsigned char *buf,
2117 unsigned char bufsize)
2120 struct CommandList *c;
2121 struct ErrorInfo *ei;
2123 c = cmd_special_alloc(h);
2125 if (c == NULL) { /* trouble... */
2126 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2127 return -ENOMEM;
2128 }
2130 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2131 page, scsi3addr, TYPE_CMD)) {
2132 rc = -1;
2133 goto out;
2134 }
2135 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2136 ei = c->err_info;
2137 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2138 hpsa_scsi_interpret_error(h, c);
2139 rc = -1;
2140 }
2141 out:
2142 cmd_special_free(h, c);
2143 return rc;
2146 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2147 unsigned char *scsi3addr, unsigned char page,
2148 struct bmic_controller_parameters *buf, size_t bufsize)
2151 struct CommandList *c;
2152 struct ErrorInfo *ei;
2154 c = cmd_special_alloc(h);
2156 if (c == NULL) { /* trouble... */
2157 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2158 return -ENOMEM;
2159 }
2161 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2162 page, scsi3addr, TYPE_CMD)) {
2163 rc = -1;
2164 goto out;
2165 }
2166 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2167 ei = c->err_info;
2168 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2169 hpsa_scsi_interpret_error(h, c);
2170 rc = -1;
2171 }
2172 out:
2173 cmd_special_free(h, c);
2174 return rc;
2177 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2178 u8 reset_type)
2180 int rc = IO_OK;
2181 struct CommandList *c;
2182 struct ErrorInfo *ei;
2184 c = cmd_special_alloc(h);
2186 if (c == NULL) { /* trouble... */
2187 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2188 return -ENOMEM;
2189 }
2191 /* fill_cmd can't fail here, no data buffer to map. */
2192 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2193 scsi3addr, TYPE_MSG);
2194 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
2195 hpsa_scsi_do_simple_cmd_core(h, c);
2196 /* no unmap needed here because no data xfer. */
2198 ei = c->err_info;
2199 if (ei->CommandStatus != 0) {
2200 hpsa_scsi_interpret_error(h, c);
2201 rc = -1;
2202 }
2203 cmd_special_free(h, c);
2204 return rc;
2207 static void hpsa_get_raid_level(struct ctlr_info *h,
2208 unsigned char *scsi3addr, unsigned char *raid_level)
2213 *raid_level = RAID_UNKNOWN;
2214 buf = kzalloc(64, GFP_KERNEL);
2215 if (!buf)
2216 return;
2217 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2218 if (rc == 0)
2219 *raid_level = buf[8];
2220 if (*raid_level > RAID_UNKNOWN)
2221 *raid_level = RAID_UNKNOWN;
2222 kfree(buf);
2223 return;
2226 #define HPSA_MAP_DEBUG
2227 #ifdef HPSA_MAP_DEBUG
2228 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2229 struct raid_map_data *map_buff)
2231 struct raid_map_disk_data *dd = &map_buff->data[0];
2233 u16 map_cnt, row_cnt, disks_per_row;
2238 /* Show details only if debugging has been activated. */
2239 if (h->raid_offload_debug < 2)
2242 dev_info(&h->pdev->dev, "structure_size = %u\n",
2243 le32_to_cpu(map_buff->structure_size));
2244 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2245 le32_to_cpu(map_buff->volume_blk_size));
2246 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2247 le64_to_cpu(map_buff->volume_blk_cnt));
2248 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2249 map_buff->phys_blk_shift);
2250 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2251 map_buff->parity_rotation_shift);
2252 dev_info(&h->pdev->dev, "strip_size = %u\n",
2253 le16_to_cpu(map_buff->strip_size));
2254 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2255 le64_to_cpu(map_buff->disk_starting_blk));
2256 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2257 le64_to_cpu(map_buff->disk_blk_cnt));
2258 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2259 le16_to_cpu(map_buff->data_disks_per_row));
2260 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2261 le16_to_cpu(map_buff->metadata_disks_per_row));
2262 dev_info(&h->pdev->dev, "row_cnt = %u\n",
2263 le16_to_cpu(map_buff->row_cnt));
2264 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2265 le16_to_cpu(map_buff->layout_map_count));
2266 dev_info(&h->pdev->dev, "flags = %u\n",
2267 le16_to_cpu(map_buff->flags));
2268 if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
2269 dev_info(&h->pdev->dev, "encryption = ON\n");
2270 else
2271 dev_info(&h->pdev->dev, "encryption = OFF\n");
2272 dev_info(&h->pdev->dev, "dekindex = %u\n",
2273 le16_to_cpu(map_buff->dekindex));
2275 map_cnt = le16_to_cpu(map_buff->layout_map_count);
2276 for (map = 0; map < map_cnt; map++) {
2277 dev_info(&h->pdev->dev, "Map%u:\n", map);
2278 row_cnt = le16_to_cpu(map_buff->row_cnt);
2279 for (row = 0; row < row_cnt; row++) {
2280 dev_info(&h->pdev->dev, " Row%u:\n", row);
2281 disks_per_row =
2282 le16_to_cpu(map_buff->data_disks_per_row);
2283 for (col = 0; col < disks_per_row; col++, dd++)
2284 dev_info(&h->pdev->dev,
2285 " D%02u: h=0x%04x xor=%u,%u\n",
2286 col, dd->ioaccel_handle,
2287 dd->xor_mult[0], dd->xor_mult[1]);
2288 disks_per_row =
2289 le16_to_cpu(map_buff->metadata_disks_per_row);
2290 for (col = 0; col < disks_per_row; col++, dd++)
2291 dev_info(&h->pdev->dev,
2292 " M%02u: h=0x%04x xor=%u,%u\n",
2293 col, dd->ioaccel_handle,
2294 dd->xor_mult[0], dd->xor_mult[1]);
2299 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2300 __attribute__((unused)) int rc,
2301 __attribute__((unused)) struct raid_map_data *map_buff)
2306 static int hpsa_get_raid_map(struct ctlr_info *h,
2307 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2310 struct CommandList *c;
2311 struct ErrorInfo *ei;
2313 c = cmd_special_alloc(h);
2314 if (c == NULL) { /* trouble... */
2315 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2316 return -ENOMEM;
2317 }
2318 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2319 sizeof(this_device->raid_map), 0,
2320 scsi3addr, TYPE_CMD)) {
2321 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
2322 cmd_special_free(h, c);
2323 return -ENOMEM;
2324 }
2325 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2327 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2328 hpsa_scsi_interpret_error(h, c);
2329 cmd_special_free(h, c);
2330 return -1;
2331 }
2332 cmd_special_free(h, c);
2334 /* @todo in the future, dynamically allocate RAID map memory */
2335 if (le32_to_cpu(this_device->raid_map.structure_size) >
2336 sizeof(this_device->raid_map)) {
2337 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2338 rc = -1;
2339 }
2340 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2341 return rc;
2344 static int hpsa_vpd_page_supported(struct ctlr_info *h,
2345 unsigned char scsi3addr[], u8 page)
2350 unsigned char *buf, bufsize;
2352 buf = kzalloc(256, GFP_KERNEL);
2353 if (!buf)
2354 return 0;
2356 /* Get the size of the page list first */
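/*
 * Per SPC, the Supported VPD Pages page (0x00) stores the number of
 * supported page codes in byte 3 and the page code list itself
 * starting at byte 4, so a header-sized read is enough to size the
 * full request that follows.
 */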
2357 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2358 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2359 buf, HPSA_VPD_HEADER_SZ);
2360 if (rc != 0)
2361 goto exit_unsupported;
2362 pages = buf[3];
2363 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2364 bufsize = pages + HPSA_VPD_HEADER_SZ;
2365 else
2366 bufsize = 255;
2368 /* Get the whole VPD page list */
2369 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2370 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2371 buf, bufsize);
2372 if (rc != 0)
2373 goto exit_unsupported;
2375 pages = buf[3];
2376 for (i = 1; i <= pages; i++)
2377 if (buf[3 + i] == page)
2378 goto exit_supported;
2387 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2388 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2394 this_device->offload_config = 0;
2395 this_device->offload_enabled = 0;
2397 buf = kzalloc(64, GFP_KERNEL);
2398 if (!buf)
2399 return;
2400 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2401 goto out;
2402 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2403 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
2404 if (rc != 0)
2405 goto out;
2407 #define IOACCEL_STATUS_BYTE 4
2408 #define OFFLOAD_CONFIGURED_BIT 0x01
2409 #define OFFLOAD_ENABLED_BIT 0x02
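/*
 * Example: an ioaccel_status byte of 0x03 means the volume is both
 * configured for offload (bit 0) and currently enabled (bit 1). The
 * enabled bit is only honored below when the configured bit is set
 * and a valid RAID map can be fetched for the volume.
 */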
2410 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2411 this_device->offload_config =
2412 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2413 if (this_device->offload_config) {
2414 this_device->offload_enabled =
2415 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2416 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2417 this_device->offload_enabled = 0;
2418 }
2419 out:
2420 kfree(buf);
2421 return;
2424 /* Get the device id from inquiry page 0x83 */
2425 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2426 unsigned char *device_id, int buflen)
2433 buf = kzalloc(64, GFP_KERNEL);
2434 if (!buf)
2435 return -ENOMEM;
2436 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
2437 if (rc == 0)
2438 memcpy(device_id, &buf[8], buflen);
2439 kfree(buf);
2440 return rc != 0;
2443 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2444 struct ReportLUNdata *buf, int bufsize,
2445 int extended_response)
2448 struct CommandList *c;
2449 unsigned char scsi3addr[8];
2450 struct ErrorInfo *ei;
2452 c = cmd_special_alloc(h);
2453 if (c == NULL) { /* trouble... */
2454 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2457 /* address the controller */
2458 memset(scsi3addr, 0, sizeof(scsi3addr));
2459 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2460 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2461 rc = -1;
2462 goto out;
2463 }
2464 if (extended_response)
2465 c->Request.CDB[1] = extended_response;
2466 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2467 ei = c->err_info;
2468 if (ei->CommandStatus != 0 &&
2469 ei->CommandStatus != CMD_DATA_UNDERRUN) {
2470 hpsa_scsi_interpret_error(h, c);
2471 rc = -1;
2472 } else {
2473 if (buf->extended_response_flag != extended_response) {
2474 dev_err(&h->pdev->dev,
2475 "report luns requested format %u, got %u\n",
2476 extended_response,
2477 buf->extended_response_flag);
2478 rc = -1;
2479 }
2480 }
2481 out:
2482 cmd_special_free(h, c);
2483 return rc;
2486 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2487 struct ReportLUNdata *buf,
2488 int bufsize, int extended_response)
2490 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
2493 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2494 struct ReportLUNdata *buf, int bufsize)
2496 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2499 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2500 int bus, int target, int lun)
2502 device->bus = bus;
2503 device->target = target;
2504 device->lun = lun;
2507 /* Use VPD inquiry to get details of volume status */
2508 static int hpsa_get_volume_status(struct ctlr_info *h,
2509 unsigned char scsi3addr[])
2516 buf = kzalloc(64, GFP_KERNEL);
2517 if (!buf)
2518 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2520 /* Does controller have VPD for logical volume status? */
2521 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
2522 goto exit_failed;
2524 /* Get the size of the VPD return buffer */
2525 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2526 buf, HPSA_VPD_HEADER_SZ);
2527 if (rc != 0)
2528 goto exit_failed;
2529 size = buf[3];
2531 /* Now get the whole VPD buffer */
2532 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2533 buf, size + HPSA_VPD_HEADER_SZ);
2534 if (rc != 0)
2535 goto exit_failed;
2536 status = buf[4]; /* status byte */
2537 kfree(buf);
2538 return status;
2539 exit_failed:
2540 kfree(buf);
2541 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2545 /* Determine offline status of a volume.
2546 * Return value:
2547 * 0 (not offline)
2548 * 0xff (offline for unknown reasons)
2549 * # (integer code indicating one of several NOT READY states
2550 * describing why a volume is to be kept offline)
2552 static int hpsa_volume_offline(struct ctlr_info *h,
2553 unsigned char scsi3addr[])
2555 struct CommandList *c;
2556 unsigned char *sense, sense_key, asc, ascq;
2560 #define ASC_LUN_NOT_READY 0x04
2561 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
2562 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2567 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2568 hpsa_scsi_do_simple_cmd_core(h, c);
2569 sense = c->err_info->SenseInfo;
2570 sense_key = sense[2];
2571 asc = sense[12];
2572 ascq = sense[13];
2573 cmd_status = c->err_info->CommandStatus;
2574 scsi_status = c->err_info->ScsiStatus;
2576 /* Is the volume 'not ready'? */
2577 if (cmd_status != CMD_TARGET_STATUS ||
2578 scsi_status != SAM_STAT_CHECK_CONDITION ||
2579 sense_key != NOT_READY ||
2580 asc != ASC_LUN_NOT_READY) {
2581 return 0;
2582 }
2584 /* Determine the reason for not ready state */
2585 ldstat = hpsa_get_volume_status(h, scsi3addr);
2587 /* Keep volume offline in certain cases: */
2588 switch (ldstat) {
2589 case HPSA_LV_UNDERGOING_ERASE:
2590 case HPSA_LV_UNDERGOING_RPI:
2591 case HPSA_LV_PENDING_RPI:
2592 case HPSA_LV_ENCRYPTED_NO_KEY:
2593 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
2594 case HPSA_LV_UNDERGOING_ENCRYPTION:
2595 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
2596 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
2597 return ldstat;
2598 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
2599 /* If VPD status page isn't available,
2600 * use ASC/ASCQ to determine state
2602 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
2603 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
2604 return ldstat;
2605 break;
2606 default:
2607 break;
2608 }
2609 return 0;
2612 static int hpsa_update_device_info(struct ctlr_info *h,
2613 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
2614 unsigned char *is_OBDR_device)
2617 #define OBDR_SIG_OFFSET 43
2618 #define OBDR_TAPE_SIG "$DR-10"
2619 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
2620 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
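/*
 * A standard INQUIRY response is only 36 bytes; the OBDR signature
 * sits beyond that, so OBDR_TAPE_INQ_SIZE (43 + 6 = 49 bytes) is the
 * minimum allocation length needed to see "$DR-10" at offset 43.
 */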
2622 unsigned char *inq_buff;
2623 unsigned char *obdr_sig;
2625 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
2626 if (!inq_buff)
2627 goto bail_out;
2629 /* Do an inquiry to the device to see what it is. */
2630 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
2631 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
2632 /* Inquiry failed (msg printed already) */
2633 dev_err(&h->pdev->dev,
2634 "hpsa_update_device_info: inquiry failed\n");
2638 this_device->devtype = (inq_buff[0] & 0x1f);
2639 memcpy(this_device->scsi3addr, scsi3addr, 8);
2640 memcpy(this_device->vendor, &inq_buff[8],
2641 sizeof(this_device->vendor));
2642 memcpy(this_device->model, &inq_buff[16],
2643 sizeof(this_device->model));
2644 memset(this_device->device_id, 0,
2645 sizeof(this_device->device_id));
2646 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
2647 sizeof(this_device->device_id));
2649 if (this_device->devtype == TYPE_DISK &&
2650 is_logical_dev_addr_mode(scsi3addr)) {
2651 int volume_offline;
2653 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
2654 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2655 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
2656 volume_offline = hpsa_volume_offline(h, scsi3addr);
2657 if (volume_offline < 0 || volume_offline > 0xff)
2658 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
2659 this_device->volume_offline = volume_offline & 0xff;
2660 } else {
2661 this_device->raid_level = RAID_UNKNOWN;
2662 this_device->offload_config = 0;
2663 this_device->offload_enabled = 0;
2664 this_device->volume_offline = 0;
2667 if (is_OBDR_device) {
2668 /* See if this is a One-Button-Disaster-Recovery device
2669 * by looking for "$DR-10" at offset 43 in inquiry data.
2671 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
2672 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
2673 strncmp(obdr_sig, OBDR_TAPE_SIG,
2674 OBDR_SIG_LEN) == 0);
2675 }
2676 kfree(inq_buff);
2677 return 0;
2678 bail_out:
2679 kfree(inq_buff);
2680 return 1;
2685 static unsigned char *ext_target_model[] = {
2695 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
2699 for (i = 0; ext_target_model[i]; i++)
2700 if (strncmp(device->model, ext_target_model[i],
2701 strlen(ext_target_model[i])) == 0)
2702 return 1;
2703 return 0;
2706 /* Helper function to assign bus, target, lun mapping of devices.
2707 * Puts non-external target logical volumes on bus 0, external target logical
2708 * volumes on bus 1, physical devices on bus 2. and the hba on bus 3.
2709 * Logical drive target and lun are assigned at this time, but
2710 * physical device lun and target assignment are deferred (assigned
2711 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
2713 static void figure_bus_target_lun(struct ctlr_info *h,
2714 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
2716 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
2718 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
2719 /* physical device, target and lun filled in later */
2720 if (is_hba_lunid(lunaddrbytes))
2721 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
2722 else
2723 /* defer target, lun assignment for physical devices */
2724 hpsa_set_bus_target_lun(device, 2, -1, -1);
2725 return;
2726 }
2727 /* It's a logical device */
2728 if (is_ext_target(h, device)) {
2729 /* external target way, put logicals on bus 1
2730 * and match target/lun numbers box
2731 * reports, other smart array, bus 0, target 0, match lunid
2733 hpsa_set_bus_target_lun(device,
2734 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
2735 return;
2736 }
2737 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
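/*
 * Example of the resulting mapping: a local logical volume with
 * lunid 0x0001 lands at bus 0, target 0, lun 1; an external-target
 * volume keeps the box's own target/lun numbering on bus 1; a bare
 * physical disk waits on bus 2 for its target/lun assignment in
 * hpsa_find_target_lun; and the controller itself is bus 3.
 */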
2741 * If there is no lun 0 on a target, linux won't find any devices.
2742 * For the external targets (arrays), we have to manually detect the enclosure
2743 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
2744 * it for some reason. *tmpdevice is the target we're adding,
2745 * this_device is a pointer into the current element of currentsd[]
2746 * that we're building up in update_scsi_devices(), below.
2747 * lunzerobits is a bitmap that tracks which targets already have a
2748 * lun 0 present.
2749 * Returns 1 if an enclosure was added, 0 if not.
2751 static int add_ext_target_dev(struct ctlr_info *h,
2752 struct hpsa_scsi_dev_t *tmpdevice,
2753 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
2754 unsigned long lunzerobits[], int *n_ext_target_devs)
2756 unsigned char scsi3addr[8];
2758 if (test_bit(tmpdevice->target, lunzerobits))
2759 return 0; /* There is already a lun 0 on this target. */
2761 if (!is_logical_dev_addr_mode(lunaddrbytes))
2762 return 0; /* It's the logical targets that may lack lun 0. */
2764 if (!is_ext_target(h, tmpdevice))
2765 return 0; /* Only external target devices have this problem. */
2767 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
2768 return 0;
2770 memset(scsi3addr, 0, 8);
2771 scsi3addr[3] = tmpdevice->target;
2772 if (is_hba_lunid(scsi3addr))
2773 return 0; /* Don't add the RAID controller here. */
2775 if (is_scsi_rev_5(h))
2776 return 0; /* p1210m doesn't need to do this. */
2778 if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
2779 dev_warn(&h->pdev->dev, "Maximum number of external "
2780 "target devices exceeded. Check your hardware "
2785 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
2787 (*n_ext_target_devs)++;
2788 hpsa_set_bus_target_lun(this_device,
2789 tmpdevice->bus, tmpdevice->target, 0);
2790 set_bit(tmpdevice->target, lunzerobits);
2791 return 1;
2795 * Get address of physical disk used for an ioaccel2 mode command:
2796 * 1. Extract ioaccel2 handle from the command.
2797 * 2. Find a matching ioaccel2 handle from list of physical disks.
2798 * Return:
2799 * 1 and set scsi3addr to address of matching physical disk, or
2800 * 0 if no matching physical disk was found.
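 *
 * An ioaccel2 command carries only the controller's 32-bit device
 * handle rather than a SCSI address, so aborting one means walking
 * the extended REPORT PHYSICAL LUNS response below to translate the
 * handle back into the 8-byte scsi3addr the abort message needs.
 */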
2802 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2803 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
2805 struct ReportExtendedLUNdata *physicals = NULL;
2806 int responsesize = 24; /* size of physical extended response */
2807 int extended = 2; /* flag forces reporting 'other dev info'. */
2808 int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
2809 u32 nphysicals = 0; /* number of reported physical devs */
2810 int found = 0; /* found match (1) or not (0) */
2811 u32 find; /* handle we need to match */
2813 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
2814 struct hpsa_scsi_dev_t *d; /* device of request being aborted */
2815 struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
2816 u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2817 u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2819 if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
2820 return 0; /* no match */
2822 /* point to the ioaccel2 device handle */
2823 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
2824 if (c2a == NULL)
2825 return 0; /* no match */
2827 scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
2828 if (scmd == NULL)
2829 return 0; /* no match */
2831 d = scmd->device->hostdata;
2832 if (d == NULL)
2833 return 0; /* no match */
2835 it_nexus = cpu_to_le32((u32) d->ioaccel_handle);
2836 scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus);
2837 find = c2a->scsi_nexus;
2839 if (h->raid_offload_debug > 0)
2840 dev_info(&h->pdev->dev,
2841 "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
2842 __func__, scsi_nexus,
2843 d->device_id[0], d->device_id[1], d->device_id[2],
2844 d->device_id[3], d->device_id[4], d->device_id[5],
2845 d->device_id[6], d->device_id[7], d->device_id[8],
2846 d->device_id[9], d->device_id[10], d->device_id[11],
2847 d->device_id[12], d->device_id[13], d->device_id[14],
2848 d->device_id[15]);
2850 /* Get the list of physical devices */
2851 physicals = kzalloc(reportsize, GFP_KERNEL);
2852 if (physicals == NULL)
2853 return 0;
2854 if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
2855 reportsize, extended)) {
2856 dev_err(&h->pdev->dev,
2857 "Can't lookup %s device handle: report physical LUNs failed.\n",
2858 "HP SSD Smart Path");
2862 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2865 /* find ioaccel2 handle in list of physicals: */
2866 for (i = 0; i < nphysicals; i++) {
2867 struct ext_report_lun_entry *entry = &physicals->LUN[i];
2869 /* handle is in bytes 28-31 of each lun */
2870 if (entry->ioaccel_handle != find)
2871 continue; /* didn't match */
2873 memcpy(scsi3addr, entry->lunid, 8);
2874 if (h->raid_offload_debug > 0)
2875 dev_info(&h->pdev->dev,
2876 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
2877 __func__, find,
2878 entry->ioaccel_handle, scsi3addr);
2879 break; /* found it */
2890 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
2891 * logdev. The number of luns in physdev and logdev are returned in
2892 * *nphysicals and *nlogicals, respectively.
2893 * Returns 0 on success, -1 otherwise.
2895 static int hpsa_gather_lun_info(struct ctlr_info *h,
2896 int reportlunsize,
2897 struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
2898 struct ReportLUNdata *logdev, u32 *nlogicals)
2900 int physical_entry_size = 8;
2904 /* For I/O accelerator mode we need to read physical device handles */
2905 if (h->transMethod & CFGTBL_Trans_io_accel1 ||
2906 h->transMethod & CFGTBL_Trans_io_accel2) {
2907 *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
2908 physical_entry_size = 24;
2910 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize,
2911 *physical_mode)) {
2912 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
2913 return -1;
2914 }
2915 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
2916 physical_entry_size;
2917 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
2918 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
2919 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2920 *nphysicals - HPSA_MAX_PHYS_LUN);
2921 *nphysicals = HPSA_MAX_PHYS_LUN;
2923 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
2924 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
2925 return -1;
2926 }
2927 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
2928 /* Reject Logicals in excess of our max capability. */
2929 if (*nlogicals > HPSA_MAX_LUN) {
2930 dev_warn(&h->pdev->dev,
2931 "maximum logical LUNs (%d) exceeded. "
2932 "%d LUNs ignored.\n", HPSA_MAX_LUN,
2933 *nlogicals - HPSA_MAX_LUN);
2934 *nlogicals = HPSA_MAX_LUN;
2936 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
2937 dev_warn(&h->pdev->dev,
2938 "maximum logical + physical LUNs (%d) exceeded. "
2939 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2940 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
2941 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
2942 }
2943 return 0;
2946 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
2947 int i, int nphysicals, int nlogicals,
2948 struct ReportExtendedLUNdata *physdev_list,
2949 struct ReportLUNdata *logdev_list)
2951 /* Helper function, figure out where the LUN ID info is coming from
2952 * given index i, lists of physical and logical devices, where in
2953 * the list the raid controller is supposed to appear (first or last)
2956 int logicals_start = nphysicals + (raid_ctlr_position == 0);
2957 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
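/*
 * Layout of the combined list, e.g. with the controller reported
 * last (raid_ctlr_position == nphysicals + nlogicals):
 *   [0 .. nphysicals-1]                     physical device LUN IDs
 *   [nphysicals .. nphysicals+nlogicals-1]  logical volume LUN IDs
 *   [nphysicals+nlogicals]                  the RAID controller
 * On SCSI rev 5 controllers the controller is entry 0 instead, which
 * is what the (raid_ctlr_position == 0) offsets below account for.
 */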
2959 if (i == raid_ctlr_position)
2960 return RAID_CTLR_LUNID;
2962 if (i < logicals_start)
2963 return &physdev_list->LUN[i -
2964 (raid_ctlr_position == 0)].lunid[0];
2966 if (i < last_device)
2967 return &logdev_list->LUN[i - nphysicals -
2968 (raid_ctlr_position == 0)][0];
2973 static int hpsa_hba_mode_enabled(struct ctlr_info *h)
2976 int hba_mode_enabled;
2977 struct bmic_controller_parameters *ctlr_params;
2978 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
2979 GFP_KERNEL);
2980 if (!ctlr_params)
2981 return -ENOMEM;
2983 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
2984 sizeof(struct bmic_controller_parameters));
2985 if (rc) {
2986 kfree(ctlr_params);
2987 return rc;
2988 }
2990 hba_mode_enabled =
2991 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
2992 kfree(ctlr_params);
2993 return hba_mode_enabled;
2996 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
2998 /* the idea here is we could get notified
2999 * that some devices have changed, so we do a report
3000 * physical luns and report logical luns cmd, and adjust
3001 * our list of devices accordingly.
3003 * The scsi3addr's of devices won't change so long as the
3004 * adapter is not reset. That means we can rescan and
3005 * tell which devices we already know about, vs. new
3006 * devices, vs. disappearing devices.
3008 struct ReportExtendedLUNdata *physdev_list = NULL;
3009 struct ReportLUNdata *logdev_list = NULL;
3012 int physical_mode = 0;
3013 u32 ndev_allocated = 0;
3014 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3015 int ncurrent = 0;
3016 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
3017 int i, n_ext_target_devs, ndevs_to_allocate;
3018 int raid_ctlr_position;
3019 int rescan_hba_mode;
3020 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3022 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3023 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
3024 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
3025 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3027 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
3028 dev_err(&h->pdev->dev, "out of memory\n");
3029 goto out;
3030 }
3031 memset(lunzerobits, 0, sizeof(lunzerobits));
3033 rescan_hba_mode = hpsa_hba_mode_enabled(h);
3034 if (rescan_hba_mode < 0)
3035 goto out;
3037 if (!h->hba_mode_enabled && rescan_hba_mode)
3038 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3039 else if (h->hba_mode_enabled && !rescan_hba_mode)
3040 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3042 h->hba_mode_enabled = rescan_hba_mode;
3044 if (hpsa_gather_lun_info(h, reportlunsize,
3045 (struct ReportLUNdata *) physdev_list, &nphysicals,
3046 &physical_mode, logdev_list, &nlogicals))
3049 /* We might see up to the maximum number of logical and physical disks
3050 * plus external target devices, and a device for the local RAID
3051 * controller.
3052 */
3053 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
3055 /* Allocate the per device structures */
3056 for (i = 0; i < ndevs_to_allocate; i++) {
3057 if (i >= HPSA_MAX_DEVICES) {
3058 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3059 " %d devices ignored.\n", HPSA_MAX_DEVICES,
3060 ndevs_to_allocate - HPSA_MAX_DEVICES);
3061 break;
3062 }
3064 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3065 if (!currentsd[i]) {
3066 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3067 __FILE__, __LINE__);
3068 goto out;
3069 }
3070 ndev_allocated++;
3073 if (is_scsi_rev_5(h))
3074 raid_ctlr_position = 0;
3076 raid_ctlr_position = nphysicals + nlogicals;
3078 /* adjust our table of devices */
3079 n_ext_target_devs = 0;
3080 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
3081 u8 *lunaddrbytes, is_OBDR = 0;
3083 /* Figure out where the LUN ID info is coming from */
3084 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3085 i, nphysicals, nlogicals, physdev_list, logdev_list);
3086 /* skip masked physical devices. */
3087 if (lunaddrbytes[3] & 0xC0 &&
3088 i < nphysicals + (raid_ctlr_position == 0))
3089 continue;
3091 /* Get device type, vendor, model, device id */
3092 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3093 &is_OBDR))
3094 continue; /* skip it if we can't talk to it. */
3095 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3096 this_device = currentsd[ncurrent];
3099 * For external target devices, we have to insert a LUN 0 which
3100 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3101 * is nonetheless an enclosure device there. We have to
3102 * present that otherwise linux won't find anything if
3103 * there is no lun 0.
3105 if (add_ext_target_dev(h, tmpdevice, this_device,
3106 lunaddrbytes, lunzerobits,
3107 &n_ext_target_devs)) {
3108 ncurrent++;
3109 this_device = currentsd[ncurrent];
3112 *this_device = *tmpdevice;
3114 switch (this_device->devtype) {
3115 case TYPE_ROM:
3116 /* We don't *really* support actual CD-ROM devices,
3117 * just "One Button Disaster Recovery" tape drive
3118 * which temporarily pretends to be a CD-ROM drive.
3119 * So we check that the device is really an OBDR tape
3120 * device by checking for "$DR-10" in bytes 43-48 of
3121 * the inquiry data.
3122 */
3123 if (is_OBDR)
3124 ncurrent++;
3125 break;
3126 case TYPE_DISK:
3127 if (h->hba_mode_enabled) {
3128 /* never use raid mapper in HBA mode */
3129 this_device->offload_enabled = 0;
3130 ncurrent++;
3131 break;
3132 } else if (h->acciopath_status) {
3133 if (i >= nphysicals) {
3134 ncurrent++;
3135 break;
3136 }
3137 } else {
3138 if (i < nphysicals)
3139 break;
3140 ncurrent++;
3141 break;
3142 }
3143 if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
3144 memcpy(&this_device->ioaccel_handle,
3145 &lunaddrbytes[20],
3146 sizeof(this_device->ioaccel_handle));
3147 ncurrent++;
3148 }
3149 break;
3150 case TYPE_TAPE:
3151 case TYPE_MEDIUM_CHANGER:
3152 ncurrent++;
3153 break;
3154 case TYPE_RAID:
3155 /* Only present the Smartarray HBA as a RAID controller.
3156 * If it's a RAID controller other than the HBA itself
3157 * (an external RAID controller, MSA500 or similar)
3160 if (!is_hba_lunid(lunaddrbytes))
3161 break;
3162 ncurrent++;
3163 break;
3164 default:
3165 break;
3166 }
3167 if (ncurrent >= HPSA_MAX_DEVICES)
3168 break;
3169 }
3170 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3171 out:
3172 kfree(tmpdevice);
3173 for (i = 0; i < ndev_allocated; i++)
3174 kfree(currentsd[i]);
3175 kfree(currentsd);
3176 kfree(physdev_list);
3177 kfree(logdev_list);
3180 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
3181 * dma mapping and fills in the scatter gather entries of the
3182 * hpsa command, cp.
3183 */
3184 static int hpsa_scatter_gather(struct ctlr_info *h,
3185 struct CommandList *cp,
3186 struct scsi_cmnd *cmd)
3189 struct scatterlist *sg;
3191 int use_sg, i, sg_index, chained;
3192 struct SGDescriptor *curr_sg;
3194 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3196 use_sg = scsi_dma_map(cmd);
3197 if (use_sg < 0)
3198 return use_sg;
3200 if (!use_sg)
3201 goto sglist_finished;
3206 scsi_for_each_sg(cmd, sg, use_sg, i) {
3207 if (i == h->max_cmd_sg_entries - 1 &&
3208 use_sg > h->max_cmd_sg_entries) {
3209 chained = 1;
3210 curr_sg = h->cmd_sg_list[cp->cmdindex];
3211 sg_index = 0;
3212 }
3213 addr64 = (u64) sg_dma_address(sg);
3214 len = sg_dma_len(sg);
3215 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
3216 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
3217 curr_sg->Len = len;
3218 curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
3219 curr_sg++;
3220 }
3222 if (use_sg + chained > h->maxSG)
3223 h->maxSG = use_sg + chained;
3225 if (chained) {
3226 cp->Header.SGList = h->max_cmd_sg_entries;
3227 cp->Header.SGTotal = (u16) (use_sg + 1);
3228 if (hpsa_map_sg_chain_block(h, cp)) {
3229 scsi_dma_unmap(cmd);
3230 return -1;
3231 }
3232 return 0;
3233 }
3235 sglist_finished:
3237 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
3238 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
3239 return 0;
3240 }
3242 #define IO_ACCEL_INELIGIBLE (1)
3243 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3249 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
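/*
 * For example, a 6-byte READ of 8 blocks at LBA 0x1234
 * (08 00 12 34 08 00) is rewritten below as the 10-byte
 * READ(10) 28 00 00 00 12 34 00 00 08 00. Only transfers of more
 * than 0xffff blocks cannot be expressed this way and are sent back
 * to the normal I/O path as ineligible.
 */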
3250 switch (cdb[0]) {
3251 case WRITE_6:
3252 case WRITE_12:
3253 is_write = 1;
3254 case READ_6:
3255 case READ_12:
3256 if (*cdb_len == 6) {
3257 block = (((u32) cdb[2]) << 8) | cdb[3];
3258 block_cnt = cdb[4];
3259 } else {
3260 BUG_ON(*cdb_len != 12);
3261 block = (((u32) cdb[2]) << 24) |
3262 (((u32) cdb[3]) << 16) |
3263 (((u32) cdb[4]) << 8) |
3264 cdb[5];
3265 block_cnt =
3266 (((u32) cdb[6]) << 24) |
3267 (((u32) cdb[7]) << 16) |
3268 (((u32) cdb[8]) << 8) |
3269 cdb[9];
3270 }
3271 if (block_cnt > 0xffff)
3272 return IO_ACCEL_INELIGIBLE;
3274 cdb[0] = is_write ? WRITE_10 : READ_10;
3275 cdb[1] = 0;
3276 cdb[2] = (u8) (block >> 24);
3277 cdb[3] = (u8) (block >> 16);
3278 cdb[4] = (u8) (block >> 8);
3279 cdb[5] = (u8) (block);
3280 cdb[6] = 0;
3281 cdb[7] = (u8) (block_cnt >> 8);
3282 cdb[8] = (u8) (block_cnt);
3283 cdb[9] = 0;
3284 *cdb_len = 10;
3285 break;
3286 default:
3287 break;
3288 }
3289 return 0;
3290 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
3291 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3294 struct scsi_cmnd *cmd = c->scsi_cmd;
3295 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3297 unsigned int total_len = 0;
3298 struct scatterlist *sg;
3301 struct SGDescriptor *curr_sg;
3302 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3304 /* TODO: implement chaining support */
3305 if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3306 return IO_ACCEL_INELIGIBLE;
3308 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3310 if (fixup_ioaccel_cdb(cdb, &cdb_len))
3311 return IO_ACCEL_INELIGIBLE;
3313 c->cmd_type = CMD_IOACCEL1;
3315 /* Adjust the DMA address to point to the accelerated command buffer */
3316 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3317 (c->cmdindex * sizeof(*cp));
3318 BUG_ON(c->busaddr & 0x0000007F);
3320 use_sg = scsi_dma_map(cmd);
3321 if (use_sg < 0)
3322 return use_sg;
3324 if (use_sg) {
3325 curr_sg = cp->SG;
3326 scsi_for_each_sg(cmd, sg, use_sg, i) {
3327 addr64 = (u64) sg_dma_address(sg);
3328 len = sg_dma_len(sg);
3329 total_len += len;
3330 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
3331 curr_sg->Addr.upper =
3332 (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
3333 curr_sg->Len = len;
3335 if (i == (scsi_sg_count(cmd) - 1))
3336 curr_sg->Ext = HPSA_SG_LAST;
3338 curr_sg->Ext = 0; /* we are not chaining */
3339 curr_sg++;
3340 }
3342 switch (cmd->sc_data_direction) {
3343 case DMA_TO_DEVICE:
3344 control |= IOACCEL1_CONTROL_DATA_OUT;
3345 break;
3346 case DMA_FROM_DEVICE:
3347 control |= IOACCEL1_CONTROL_DATA_IN;
3348 break;
3349 case DMA_NONE:
3350 control |= IOACCEL1_CONTROL_NODATAXFER;
3351 break;
3352 default:
3353 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3354 cmd->sc_data_direction);
3355 BUG();
3356 break;
3357 }
3358 } else {
3359 control |= IOACCEL1_CONTROL_NODATAXFER;
3360 }
3362 c->Header.SGList = use_sg;
3363 /* Fill out the command structure to submit */
3364 cp->dev_handle = ioaccel_handle & 0xFFFF;
3365 cp->transfer_len = total_len;
3366 cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
3367 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
3368 cp->control = control;
3369 memcpy(cp->CDB, cdb, cdb_len);
3370 memcpy(cp->CISS_LUN, scsi3addr, 8);
3371 /* Tag was already set at init time. */
3372 enqueue_cmd_and_start_io(h, c);
3373 return 0;
3374 }
3377 * Queue a command directly to a device behind the controller using the
3378 * I/O accelerator path.
3380 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3381 struct CommandList *c)
3383 struct scsi_cmnd *cmd = c->scsi_cmd;
3384 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3386 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3387 cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
3391 * Set encryption parameters for the ioaccel2 request
3393 static void set_encrypt_ioaccel2(struct ctlr_info *h,
3394 struct CommandList *c, struct io_accel2_cmd *cp)
3396 struct scsi_cmnd *cmd = c->scsi_cmd;
3397 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3398 struct raid_map_data *map = &dev->raid_map;
3401 BUG_ON(!(dev->offload_config && dev->offload_enabled));
3403 /* Are we doing encryption on this device */
3404 if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
3406 /* Set the data encryption key index. */
3407 cp->dekindex = map->dekindex;
3409 /* Set the encryption enable flag, encoded into direction field. */
3410 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3412 /* Set encryption tweak values based on logical block address
3413 * If block size is 512, tweak value is LBA.
3414 * For other block sizes, tweak is (LBA * block size) / 512.
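 * Example: on a volume with 4096-byte blocks a request at LBA 10
 * gets tweak (10 * 4096) / 512 = 80; the tweak is thus always
 * expressed in 512-byte units regardless of the volume block size.
 */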
3416 switch (cmd->cmnd[0]) {
3417 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3420 if (map->volume_blk_size == 512) {
3421 cp->tweak_lower =
3422 (((u32) cmd->cmnd[2]) << 8) |
3423 cmd->cmnd[3];
3424 cp->tweak_upper = 0;
3425 } else {
3426 first_block =
3427 (((u64) cmd->cmnd[2]) << 8) |
3428 cmd->cmnd[3];
3429 first_block = (first_block * map->volume_blk_size)/512;
3430 cp->tweak_lower = (u32)first_block;
3431 cp->tweak_upper = (u32)(first_block >> 32);
3432 }
3433 break;
3434 case WRITE_10:
3435 case READ_10:
3436 if (map->volume_blk_size == 512) {
3437 cp->tweak_lower =
3438 (((u32) cmd->cmnd[2]) << 24) |
3439 (((u32) cmd->cmnd[3]) << 16) |
3440 (((u32) cmd->cmnd[4]) << 8) |
3441 cmd->cmnd[5];
3442 cp->tweak_upper = 0;
3443 } else {
3444 first_block =
3445 (((u64) cmd->cmnd[2]) << 24) |
3446 (((u64) cmd->cmnd[3]) << 16) |
3447 (((u64) cmd->cmnd[4]) << 8) |
3448 cmd->cmnd[5];
3449 first_block = (first_block * map->volume_blk_size)/512;
3450 cp->tweak_lower = (u32)first_block;
3451 cp->tweak_upper = (u32)(first_block >> 32);
3452 }
3453 break;
3454 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3455 case WRITE_12:
3456 case READ_12:
3457 if (map->volume_blk_size == 512) {
3458 cp->tweak_lower =
3459 (((u32) cmd->cmnd[2]) << 24) |
3460 (((u32) cmd->cmnd[3]) << 16) |
3461 (((u32) cmd->cmnd[4]) << 8) |
3462 cmd->cmnd[5];
3463 cp->tweak_upper = 0;
3464 } else {
3465 first_block =
3466 (((u64) cmd->cmnd[2]) << 24) |
3467 (((u64) cmd->cmnd[3]) << 16) |
3468 (((u64) cmd->cmnd[4]) << 8) |
3469 cmd->cmnd[5];
3470 first_block = (first_block * map->volume_blk_size)/512;
3471 cp->tweak_lower = (u32)first_block;
3472 cp->tweak_upper = (u32)(first_block >> 32);
3473 }
3474 break;
3475 case WRITE_16:
3476 case READ_16:
3477 if (map->volume_blk_size == 512) {
3478 cp->tweak_lower =
3479 (((u32) cmd->cmnd[6]) << 24) |
3480 (((u32) cmd->cmnd[7]) << 16) |
3481 (((u32) cmd->cmnd[8]) << 8) |
3482 cmd->cmnd[9];
3483 cp->tweak_upper =
3484 (((u32) cmd->cmnd[2]) << 24) |
3485 (((u32) cmd->cmnd[3]) << 16) |
3486 (((u32) cmd->cmnd[4]) << 8) |
3487 cmd->cmnd[5];
3488 } else {
3489 first_block =
3490 (((u64) cmd->cmnd[2]) << 56) |
3491 (((u64) cmd->cmnd[3]) << 48) |
3492 (((u64) cmd->cmnd[4]) << 40) |
3493 (((u64) cmd->cmnd[5]) << 32) |
3494 (((u64) cmd->cmnd[6]) << 24) |
3495 (((u64) cmd->cmnd[7]) << 16) |
3496 (((u64) cmd->cmnd[8]) << 8) |
3497 cmd->cmnd[9];
3498 first_block = (first_block * map->volume_blk_size)/512;
3499 cp->tweak_lower = (u32)first_block;
3500 cp->tweak_upper = (u32)(first_block >> 32);
3501 }
3502 break;
3503 default:
3504 dev_err(&h->pdev->dev,
3505 "ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
3506 __func__);
3507 BUG();
3508 break;
3509 }
3512 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3513 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3516 struct scsi_cmnd *cmd = c->scsi_cmd;
3517 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3518 struct ioaccel2_sg_element *curr_sg;
3520 struct scatterlist *sg;
3525 if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3526 return IO_ACCEL_INELIGIBLE;
3528 if (fixup_ioaccel_cdb(cdb, &cdb_len))
3529 return IO_ACCEL_INELIGIBLE;
3530 c->cmd_type = CMD_IOACCEL2;
3531 /* Adjust the DMA address to point to the accelerated command buffer */
3532 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3533 (c->cmdindex * sizeof(*cp));
3534 BUG_ON(c->busaddr & 0x0000007F);
3536 memset(cp, 0, sizeof(*cp));
3537 cp->IU_type = IOACCEL2_IU_TYPE;
3539 use_sg = scsi_dma_map(cmd);
3540 if (use_sg < 0)
3541 return use_sg;
3542 if (use_sg) {
3543 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
3544 curr_sg = cp->sg;
3546 scsi_for_each_sg(cmd, sg, use_sg, i) {
3547 addr64 = (u64) sg_dma_address(sg);
3548 len = sg_dma_len(sg);
3549 total_len += len;
3550 curr_sg->address = cpu_to_le64(addr64);
3551 curr_sg->length = cpu_to_le32(len);
3552 curr_sg->reserved[0] = 0;
3553 curr_sg->reserved[1] = 0;
3554 curr_sg->reserved[2] = 0;
3555 curr_sg->chain_indicator = 0;
3556 curr_sg++;
3557 }
3559 switch (cmd->sc_data_direction) {
3560 case DMA_TO_DEVICE:
3561 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3562 cp->direction |= IOACCEL2_DIR_DATA_OUT;
3563 break;
3564 case DMA_FROM_DEVICE:
3565 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3566 cp->direction |= IOACCEL2_DIR_DATA_IN;
3567 break;
3568 case DMA_NONE:
3569 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3570 cp->direction |= IOACCEL2_DIR_NO_DATA;
3571 break;
3572 default:
3573 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3574 cmd->sc_data_direction);
3575 BUG();
3576 break;
3577 }
3578 } else {
3579 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3580 cp->direction |= IOACCEL2_DIR_NO_DATA;
3581 }
3583 /* Set encryption parameters, if necessary */
3584 set_encrypt_ioaccel2(h, c, cp);
3586 cp->scsi_nexus = ioaccel_handle;
3587 cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
3588 DIRECT_LOOKUP_BIT;
3589 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
3591 /* fill in sg elements */
3592 cp->sg_count = (u8) use_sg;
3594 cp->data_len = cpu_to_le32(total_len);
3595 cp->err_ptr = cpu_to_le64(c->busaddr +
3596 offsetof(struct io_accel2_cmd, error_data));
3597 cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));
3599 enqueue_cmd_and_start_io(h, c);
3600 return 0;
3601 }
3604 * Queue a command to the correct I/O accelerator path.
3606 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
3607 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3610 if (h->transMethod & CFGTBL_Trans_io_accel1)
3611 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
3612 cdb, cdb_len, scsi3addr);
3613 else
3614 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
3615 cdb, cdb_len, scsi3addr);
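/*
 * Helper for the R1-ADM (triple-mirror) case below: walk the mirror
 * groups round-robin starting from the group this device used last,
 * stepping *map_index forward one row-width (data_disks_per_row) per
 * group so consecutive requests spread reads across all the copies.
 */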
3618 static void raid_map_helper(struct raid_map_data *map,
3619 int offload_to_mirror, u32 *map_index, u32 *current_group)
3621 if (offload_to_mirror == 0) {
3622 /* use physical disk in the first mirrored group. */
3623 *map_index %= map->data_disks_per_row;
3624 return;
3625 }
3626 do {
3627 /* determine mirror group that *map_index indicates */
3628 *current_group = *map_index / map->data_disks_per_row;
3629 if (offload_to_mirror == *current_group)
3630 return;
3631 if (*current_group < (map->layout_map_count - 1)) {
3632 /* select map index from next group */
3633 *map_index += map->data_disks_per_row;
3634 (*current_group)++;
3635 } else {
3636 /* select map index from first group */
3637 *map_index %= map->data_disks_per_row;
3638 *current_group = 0;
3639 }
3640 } while (offload_to_mirror != *current_group);
3644 * Attempt to perform offload RAID mapping for a logical volume I/O.
3646 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3647 struct CommandList *c)
3649 struct scsi_cmnd *cmd = c->scsi_cmd;
3650 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3651 struct raid_map_data *map = &dev->raid_map;
3652 struct raid_map_disk_data *dd = &map->data[0];
3655 u64 first_block, last_block;
3658 u64 first_row, last_row;
3659 u32 first_row_offset, last_row_offset;
3660 u32 first_column, last_column;
3661 u64 r0_first_row, r0_last_row;
3662 u32 r5or6_blocks_per_row;
3663 u64 r5or6_first_row, r5or6_last_row;
3664 u32 r5or6_first_row_offset, r5or6_last_row_offset;
3665 u32 r5or6_first_column, r5or6_last_column;
3666 u32 total_disks_per_row;
3668 u32 first_group, last_group, current_group;
3675 #if BITS_PER_LONG == 32
3678 int offload_to_mirror;
3680 BUG_ON(!(dev->offload_config && dev->offload_enabled));
3682 /* check for valid opcode, get LBA and block count */
3683 switch (cmd->cmnd[0]) {
3684 case WRITE_6:
3685 is_write = 1;
3686 case READ_6:
3687 first_block =
3688 (((u64) cmd->cmnd[2]) << 8) |
3689 cmd->cmnd[3];
3690 block_cnt = cmd->cmnd[4];
3691 break;
3694 case WRITE_10:
3695 is_write = 1;
3696 case READ_10:
3697 first_block =
3698 (((u64) cmd->cmnd[2]) << 24) |
3699 (((u64) cmd->cmnd[3]) << 16) |
3700 (((u64) cmd->cmnd[4]) << 8) |
3701 cmd->cmnd[5];
3702 block_cnt =
3703 (((u32) cmd->cmnd[7]) << 8) |
3704 cmd->cmnd[8];
3705 break;
3706 case WRITE_12:
3707 is_write = 1;
3708 case READ_12:
3709 first_block =
3710 (((u64) cmd->cmnd[2]) << 24) |
3711 (((u64) cmd->cmnd[3]) << 16) |
3712 (((u64) cmd->cmnd[4]) << 8) |
3713 cmd->cmnd[5];
3714 block_cnt =
3715 (((u32) cmd->cmnd[6]) << 24) |
3716 (((u32) cmd->cmnd[7]) << 16) |
3717 (((u32) cmd->cmnd[8]) << 8) |
3718 cmd->cmnd[9];
3719 break;
3720 case WRITE_16:
3721 is_write = 1;
3722 case READ_16:
3723 first_block =
3724 (((u64) cmd->cmnd[2]) << 56) |
3725 (((u64) cmd->cmnd[3]) << 48) |
3726 (((u64) cmd->cmnd[4]) << 40) |
3727 (((u64) cmd->cmnd[5]) << 32) |
3728 (((u64) cmd->cmnd[6]) << 24) |
3729 (((u64) cmd->cmnd[7]) << 16) |
3730 (((u64) cmd->cmnd[8]) << 8) |
3731 cmd->cmnd[9];
3732 block_cnt =
3733 (((u32) cmd->cmnd[10]) << 24) |
3734 (((u32) cmd->cmnd[11]) << 16) |
3735 (((u32) cmd->cmnd[12]) << 8) |
3736 cmd->cmnd[13];
3737 break;
3738 default:
3739 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
3740 }
3741 last_block = first_block + block_cnt - 1;
3743 /* check for write to non-RAID-0 */
3744 if (is_write && dev->raid_level != 0)
3745 return IO_ACCEL_INELIGIBLE;
3747 /* check for invalid block or wraparound */
3748 if (last_block >= map->volume_blk_cnt || last_block < first_block)
3749 return IO_ACCEL_INELIGIBLE;
3751 /* calculate stripe information for the request */
3752 blocks_per_row = map->data_disks_per_row * map->strip_size;
3753 #if BITS_PER_LONG == 32
3754 tmpdiv = first_block;
3755 (void) do_div(tmpdiv, blocks_per_row);
3756 first_row = tmpdiv;
3757 tmpdiv = last_block;
3758 (void) do_div(tmpdiv, blocks_per_row);
3759 last_row = tmpdiv;
3760 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3761 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3762 tmpdiv = first_row_offset;
3763 (void) do_div(tmpdiv, map->strip_size);
3764 first_column = tmpdiv;
3765 tmpdiv = last_row_offset;
3766 (void) do_div(tmpdiv, map->strip_size);
3767 last_column = tmpdiv;
3768 #else
3769 first_row = first_block / blocks_per_row;
3770 last_row = last_block / blocks_per_row;
3771 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3772 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3773 first_column = first_row_offset / map->strip_size;
3774 last_column = last_row_offset / map->strip_size;
3775 #endif
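/*
 * Worked example: with strip_size = 128 blocks and 3 data disks per
 * row, blocks_per_row = 384. A request at LBA 500 then maps to
 * first_row = 500 / 384 = 1, first_row_offset = 500 - 384 = 116, and
 * first_column = 116 / 128 = 0, i.e. the first disk of row 1.
 */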
3777 /* if this isn't a single row/column then give to the controller */
3778 if ((first_row != last_row) || (first_column != last_column))
3779 return IO_ACCEL_INELIGIBLE;
3781 /* proceeding with driver mapping */
3782 total_disks_per_row = map->data_disks_per_row +
3783 map->metadata_disks_per_row;
3784 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3785 map->row_cnt;
3786 map_index = (map_row * total_disks_per_row) + first_column;
3788 switch (dev->raid_level) {
3789 case HPSA_RAID_0:
3790 break; /* nothing special to do */
3791 case HPSA_RAID_1:
3792 /* Handles load balance across RAID 1 members.
3793 * (2-drive R1 and R10 with even # of drives.)
3794 * Appropriate for SSDs, not optimal for HDDs
3796 BUG_ON(map->layout_map_count != 2);
3797 if (dev->offload_to_mirror)
3798 map_index += map->data_disks_per_row;
3799 dev->offload_to_mirror = !dev->offload_to_mirror;
3800 break;
3801 case HPSA_RAID_ADM:
3802 /* Handles N-way mirrors (R1-ADM)
3803 * and R10 with # of drives divisible by 3.)
3805 BUG_ON(map->layout_map_count != 3);
3807 offload_to_mirror = dev->offload_to_mirror;
3808 raid_map_helper(map, offload_to_mirror,
3809 &map_index, ¤t_group);
3810 /* set mirror group to use next time */
3811 offload_to_mirror =
3812 (offload_to_mirror >= map->layout_map_count - 1)
3813 ? 0 : offload_to_mirror + 1;
3814 dev->offload_to_mirror = offload_to_mirror;
3815 /* Avoid direct use of dev->offload_to_mirror within this
3816 * function since multiple threads might simultaneously
3817 * increment it beyond the range of dev->layout_map_count -1.
3818 */
3819 break;
3820 case HPSA_RAID_5:
3821 case HPSA_RAID_6:
3822 if (map->layout_map_count <= 1)
3823 break;
3825 /* Verify first and last block are in same RAID group */
3826 r5or6_blocks_per_row =
3827 map->strip_size * map->data_disks_per_row;
3828 BUG_ON(r5or6_blocks_per_row == 0);
3829 stripesize = r5or6_blocks_per_row * map->layout_map_count;
3830 #if BITS_PER_LONG == 32
3831 tmpdiv = first_block;
3832 first_group = do_div(tmpdiv, stripesize);
3833 tmpdiv = first_group;
3834 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3835 first_group = tmpdiv;
3836 tmpdiv = last_block;
3837 last_group = do_div(tmpdiv, stripesize);
3838 tmpdiv = last_group;
3839 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3840 last_group = tmpdiv;
3842 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
3843 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
3845 if (first_group != last_group)
3846 return IO_ACCEL_INELIGIBLE;
3848 /* Verify request is in a single row of RAID 5/6 */
3849 #if BITS_PER_LONG == 32
3850 tmpdiv = first_block;
3851 (void) do_div(tmpdiv, stripesize);
3852 first_row = r5or6_first_row = r0_first_row = tmpdiv;
3853 tmpdiv = last_block;
3854 (void) do_div(tmpdiv, stripesize);
3855 r5or6_last_row = r0_last_row = tmpdiv;
3857 first_row = r5or6_first_row = r0_first_row =
3858 first_block / stripesize;
3859 r5or6_last_row = r0_last_row = last_block / stripesize;
3861 if (r5or6_first_row != r5or6_last_row)
3862 return IO_ACCEL_INELIGIBLE;
3865 /* Verify request is in a single column */
3866 #if BITS_PER_LONG == 32
3867 tmpdiv = first_block;
3868 first_row_offset = do_div(tmpdiv, stripesize);
3869 tmpdiv = first_row_offset;
3870 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
3871 r5or6_first_row_offset = first_row_offset;
3872 tmpdiv = last_block;
3873 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
3874 tmpdiv = r5or6_last_row_offset;
3875 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
3876 tmpdiv = r5or6_first_row_offset;
3877 (void) do_div(tmpdiv, map->strip_size);
3878 first_column = r5or6_first_column = tmpdiv;
3879 tmpdiv = r5or6_last_row_offset;
3880 (void) do_div(tmpdiv, map->strip_size);
3881 r5or6_last_column = tmpdiv;
3883 first_row_offset = r5or6_first_row_offset =
3884 (u32)((first_block % stripesize) %
3885 r5or6_blocks_per_row);
3887 r5or6_last_row_offset =
3888 (u32)((last_block % stripesize) %
3889 r5or6_blocks_per_row);
3891 first_column = r5or6_first_column =
3892 r5or6_first_row_offset / map->strip_size;
3893 r5or6_last_column =
3894 r5or6_last_row_offset / map->strip_size;
3896 if (r5or6_first_column != r5or6_last_column)
3897 return IO_ACCEL_INELIGIBLE;
3899 /* Request is eligible */
3900 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3901 map->row_cnt;
3903 map_index = (first_group *
3904 (map->row_cnt * total_disks_per_row)) +
3905 (map_row * total_disks_per_row) + first_column;
3906 break;
3907 default:
3908 return IO_ACCEL_INELIGIBLE;
3909 }
3911 disk_handle = dd[map_index].ioaccel_handle;
3912 disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
3913 (first_row_offset - (first_column * map->strip_size));
3914 disk_block_cnt = block_cnt;
3916 /* handle differing logical/physical block sizes */
3917 if (map->phys_blk_shift) {
3918 disk_block <<= map->phys_blk_shift;
3919 disk_block_cnt <<= map->phys_blk_shift;
3921 BUG_ON(disk_block_cnt > 0xffff);
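/*
 * Example: phys_blk_shift == 3 means each volume block spans
 * 2^3 = 8 physical blocks, so both the starting disk block and the
 * block count were scaled by 8 above before the CDB is built below.
 */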
3923 /* build the new CDB for the physical disk I/O */
3924 if (disk_block > 0xffffffff) {
3925 cdb[0] = is_write ? WRITE_16 : READ_16;
3926 cdb[1] = 0;
3927 cdb[2] = (u8) (disk_block >> 56);
3928 cdb[3] = (u8) (disk_block >> 48);
3929 cdb[4] = (u8) (disk_block >> 40);
3930 cdb[5] = (u8) (disk_block >> 32);
3931 cdb[6] = (u8) (disk_block >> 24);
3932 cdb[7] = (u8) (disk_block >> 16);
3933 cdb[8] = (u8) (disk_block >> 8);
3934 cdb[9] = (u8) (disk_block);
3935 cdb[10] = (u8) (disk_block_cnt >> 24);
3936 cdb[11] = (u8) (disk_block_cnt >> 16);
3937 cdb[12] = (u8) (disk_block_cnt >> 8);
3938 cdb[13] = (u8) (disk_block_cnt);
3939 cdb[14] = 0;
3940 cdb[15] = 0;
3941 cdb_len = 16;
3942 } else {
3943 cdb[0] = is_write ? WRITE_10 : READ_10;
3944 cdb[1] = 0;
3945 cdb[2] = (u8) (disk_block >> 24);
3946 cdb[3] = (u8) (disk_block >> 16);
3947 cdb[4] = (u8) (disk_block >> 8);
3948 cdb[5] = (u8) (disk_block);
3949 cdb[6] = 0;
3950 cdb[7] = (u8) (disk_block_cnt >> 8);
3951 cdb[8] = (u8) (disk_block_cnt);
3952 cdb[9] = 0;
3953 cdb_len = 10;
3954 }
3955 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
3956 dev->scsi3addr);
3959 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
3960 void (*done)(struct scsi_cmnd *))
3962 struct ctlr_info *h;
3963 struct hpsa_scsi_dev_t *dev;
3964 unsigned char scsi3addr[8];
3965 struct CommandList *c;
3966 int rc = 0;
3968 /* Get the ptr to our adapter structure out of cmd->host. */
3969 h = sdev_to_hba(cmd->device);
3970 dev = cmd->device->hostdata;
3971 if (!dev) {
3972 cmd->result = DID_NO_CONNECT << 16;
3973 done(cmd);
3974 return 0;
3975 }
3976 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
3978 if (unlikely(lockup_detected(h))) {
3979 cmd->result = DID_ERROR << 16;
3980 done(cmd);
3981 return 0;
3982 }
3983 c = cmd_alloc(h);
3984 if (c == NULL) { /* trouble... */
3985 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
3986 return SCSI_MLQUEUE_HOST_BUSY;
3989 /* Fill in the command list header */
3991 cmd->scsi_done = done; /* save this for use by completion code */
3993 /* save c in case we have to abort it */
3994 cmd->host_scribble = (unsigned char *) c;
3996 c->cmd_type = CMD_SCSI;
3997 c->scsi_cmd = cmd;
3999 /* Call alternate submit routine for I/O accelerated commands.
4000 * Retries always go down the normal I/O path.
4002 if (likely(cmd->retries == 0 &&
4003 cmd->request->cmd_type == REQ_TYPE_FS &&
4004 h->acciopath_status)) {
4005 if (dev->offload_enabled) {
4006 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4007 if (rc == 0)
4008 return 0; /* Sent on ioaccel path */
4009 if (rc < 0) { /* scsi_dma_map failed. */
4010 cmd_free(h, c);
4011 return SCSI_MLQUEUE_HOST_BUSY;
4012 }
4013 } else if (dev->ioaccel_handle) {
4014 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4015 if (rc == 0)
4016 return 0; /* Sent on direct map path */
4017 if (rc < 0) { /* scsi_dma_map failed. */
4018 cmd_free(h, c);
4019 return SCSI_MLQUEUE_HOST_BUSY;
4020 }
4021 }
4022 }
4024 c->Header.ReplyQueue = 0; /* unused in simple mode */
4025 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
4026 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
4027 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
4029 /* Fill in the request block... */
4031 c->Request.Timeout = 0;
4032 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4033 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4034 c->Request.CDBLen = cmd->cmd_len;
4035 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4036 c->Request.Type.Type = TYPE_CMD;
4037 c->Request.Type.Attribute = ATTR_SIMPLE;
4038 switch (cmd->sc_data_direction) {
4039 case DMA_TO_DEVICE:
4040 c->Request.Type.Direction = XFER_WRITE;
4041 break;
4042 case DMA_FROM_DEVICE:
4043 c->Request.Type.Direction = XFER_READ;
4044 break;
4045 case DMA_NONE:
4046 c->Request.Type.Direction = XFER_NONE;
4047 break;
4048 case DMA_BIDIRECTIONAL:
4049 /* This can happen if a buggy application does a scsi passthru
4050 * and sets both inlen and outlen to non-zero. (See
4051 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command().)
4052 */
4054 c->Request.Type.Direction = XFER_RSVD;
4055 /* This is technically wrong, and hpsa controllers should
4056 * reject it with CMD_INVALID, which is the most correct
4057 * response, but non-fibre backends appear to let it
4058 * slide by, and give the same results as if this field
4059 * were set correctly. Either way is acceptable for
4060 * our purposes here.
4061 */
4063 break;
4065 default:
4066 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4067 cmd->sc_data_direction);
4068 BUG();
4069 break;
4070 }
4072 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
4073 cmd_free(h, c);
4074 return SCSI_MLQUEUE_HOST_BUSY;
4075 }
4076 enqueue_cmd_and_start_io(h, c);
4077 /* the cmd'll come back via intr handler in complete_scsi_command() */
4078 return 0;
4079 }
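/*
 * Illustration (not from the driver): the ioaccel dispatch above can be
 * read as a single predicate. A hedged sketch reusing the same fields:
 *
 *	static bool try_ioaccel_sketch(struct ctlr_info *h,
 *				       struct scsi_cmnd *cmd,
 *				       struct hpsa_scsi_dev_t *dev)
 *	{
 *		return cmd->retries == 0 &&		// first attempt only
 *		       cmd->request->cmd_type == REQ_TYPE_FS &&	// filesystem I/O
 *		       h->acciopath_status &&		// accelerated path enabled
 *		       (dev->offload_enabled || dev->ioaccel_handle);
 *	}
 *
 * Retries deliberately take the normal RAID path, which has the full
 * error-recovery machinery.
 */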
4081 static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
4083 static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
4085 unsigned long flags;
4087 /*
4088 * Don't let rescans be initiated on a controller known
4089 * to be locked up. If the controller locks up *during*
4090 * a rescan, that thread is probably hosed, but at least
4091 * we can prevent new rescan threads from piling up on a
4092 * locked up controller.
4093 */
4094 if (unlikely(lockup_detected(h))) {
4095 spin_lock_irqsave(&h->scan_lock, flags);
4096 h->scan_finished = 1;
4097 wake_up_all(&h->scan_wait_queue);
4098 spin_unlock_irqrestore(&h->scan_lock, flags);
4099 return 1;
4100 }
4101 return 0;
4102 }
4104 static void hpsa_scan_start(struct Scsi_Host *sh)
4106 struct ctlr_info *h = shost_to_hba(sh);
4107 unsigned long flags;
4109 if (do_not_scan_if_controller_locked_up(h))
4110 return;
4112 /* wait until any scan already in progress is finished. */
4113 while (1) {
4114 spin_lock_irqsave(&h->scan_lock, flags);
4115 if (h->scan_finished)
4116 break;
4117 spin_unlock_irqrestore(&h->scan_lock, flags);
4118 wait_event(h->scan_wait_queue, h->scan_finished);
4119 /* Note: We don't need to worry about a race between this
4120 * thread and driver unload because the midlayer will
4121 * have incremented the reference count, so unload won't
4122 * happen if we're in here.
4123 */
4124 }
4125 h->scan_finished = 0; /* mark scan as in progress */
4126 spin_unlock_irqrestore(&h->scan_lock, flags);
4128 if (do_not_scan_if_controller_locked_up(h))
4129 return;
4131 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4133 spin_lock_irqsave(&h->scan_lock, flags);
4134 h->scan_finished = 1; /* mark scan as finished. */
4135 wake_up_all(&h->scan_wait_queue);
4136 spin_unlock_irqrestore(&h->scan_lock, flags);
4137 }
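/*
 * Illustration (not from the driver): scan_finished plus a wait queue is
 * the classic single-worker/many-waiters pattern. Hedged generic sketch:
 *
 *	spin_lock_irqsave(&lock, flags);
 *	while (!finished) {			// someone else is scanning
 *		spin_unlock_irqrestore(&lock, flags);
 *		wait_event(waitq, finished);
 *		spin_lock_irqsave(&lock, flags);
 *	}
 *	finished = 0;				// claim the scan
 *	spin_unlock_irqrestore(&lock, flags);
 *	...do the scan...
 *	spin_lock_irqsave(&lock, flags);
 *	finished = 1;				// release and wake waiters
 *	wake_up_all(&waitq);
 *	spin_unlock_irqrestore(&lock, flags);
 */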
4139 static int hpsa_scan_finished(struct Scsi_Host *sh,
4140 unsigned long elapsed_time)
4142 struct ctlr_info *h = shost_to_hba(sh);
4143 unsigned long flags;
4144 int finished;
4146 spin_lock_irqsave(&h->scan_lock, flags);
4147 finished = h->scan_finished;
4148 spin_unlock_irqrestore(&h->scan_lock, flags);
4149 return finished;
4150 }
4152 static int hpsa_change_queue_depth(struct scsi_device *sdev,
4153 int qdepth, int reason)
4155 struct ctlr_info *h = sdev_to_hba(sdev);
4157 if (reason != SCSI_QDEPTH_DEFAULT)
4158 return -ENOTSUPP;
4163 if (qdepth > h->nr_cmds)
4164 qdepth = h->nr_cmds;
4165 scsi_adjust_queue_depth(sdev, qdepth);
4166 return sdev->queue_depth;
4167 }
4169 static void hpsa_unregister_scsi(struct ctlr_info *h)
4171 /* we are being forcibly unloaded, and may not refuse. */
4172 scsi_remove_host(h->scsi_host);
4173 scsi_host_put(h->scsi_host);
4174 h->scsi_host = NULL;
4177 static int hpsa_register_scsi(struct ctlr_info *h)
4179 struct Scsi_Host *sh;
4182 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4183 if (sh == NULL)
4184 goto fail;
4186 sh->io_port = 0;
4187 sh->n_io_port = 0;
4188 sh->this_id = -1;
4189 sh->max_channel = 3;
4190 sh->max_cmd_len = MAX_COMMAND_SIZE;
4191 sh->max_lun = HPSA_MAX_LUN;
4192 sh->max_id = HPSA_MAX_LUN;
4193 sh->can_queue = h->nr_cmds;
4194 if (h->hba_mode_enabled)
4195 sh->cmd_per_lun = 7;
4196 else
4197 sh->cmd_per_lun = h->nr_cmds;
4198 sh->sg_tablesize = h->maxsgentries;
4200 sh->hostdata[0] = (unsigned long) h;
4201 sh->irq = h->intr[h->intr_mode];
4202 sh->unique_id = sh->irq;
4203 error = scsi_add_host(sh, &h->pdev->dev);
4204 if (error)
4205 goto fail_host_put;
4206 scsi_scan_host(sh);
4207 return 0;
4209 fail_host_put:
4210 dev_err(&h->pdev->dev, "%s: scsi_add_host failed for controller %d\n",
4211 __func__, h->ctlr);
4212 scsi_host_put(sh);
4213 return error;
4214 fail:
4215 dev_err(&h->pdev->dev, "%s: scsi_host_alloc failed for controller %d\n",
4216 __func__, h->ctlr);
4217 return -ENOMEM;
4218 }
4220 static int wait_for_device_to_become_ready(struct ctlr_info *h,
4221 unsigned char lunaddr[])
4223 int rc = 0;
4224 int count = 0;
4225 int waittime = 1; /* seconds */
4226 struct CommandList *c;
4228 c = cmd_special_alloc(h);
4229 if (c == NULL) {
4230 dev_warn(&h->pdev->dev,
4231 "out of memory in wait_for_device_to_become_ready.\n");
4232 return IO_ERROR;
4233 }
4235 /* Send test unit ready until device ready, or give up. */
4236 while (count < HPSA_TUR_RETRY_LIMIT) {
4238 /* Wait for a bit. do this first, because if we send
4239 * the TUR right away, the reset will just abort it.
4241 msleep(1000 * waittime);
4242 count++;
4243 rc = 0; /* Device ready. */
4245 /* Increase wait time with each try, up to a point. */
4246 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4247 waittime = waittime * 2;
4249 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4250 (void) fill_cmd(c, TEST_UNIT_READY, h,
4251 NULL, 0, 0, lunaddr, TYPE_CMD);
4252 hpsa_scsi_do_simple_cmd_core(h, c);
4253 /* no unmap needed here because no data xfer. */
4255 if (c->err_info->CommandStatus == CMD_SUCCESS)
4256 break;
4258 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4259 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4260 (c->err_info->SenseInfo[2] == NO_SENSE ||
4261 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4262 break;
4264 dev_warn(&h->pdev->dev,
4265 "waiting %d secs for device to become ready.\n", waittime);
4266 rc = 1; /* device not ready. */
4267 }
4269 if (rc)
4270 dev_warn(&h->pdev->dev, "giving up on device.\n");
4271 else
4272 dev_warn(&h->pdev->dev, "device is ready.\n");
4274 cmd_special_free(h, c);
4275 return rc;
4276 }
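/*
 * Illustration (not from the driver): the TUR loop above is capped
 * exponential backoff; delays run 1, 2, 4, ... seconds, clamped at
 * HPSA_MAX_WAIT_INTERVAL_SECS. Hedged sketch with hypothetical names:
 *
 *	int delay = 1;
 *	while (retries--) {
 *		msleep(1000 * delay);
 *		if (device_ready())		// hypothetical probe
 *			return 0;
 *		if (delay < max_delay)
 *			delay *= 2;
 *	}
 *	return -ETIMEDOUT;
 */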
4278 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
4279 * complaining. Doing a host- or bus-reset can't do anything good here.
4280 */
4281 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4284 struct ctlr_info *h;
4285 struct hpsa_scsi_dev_t *dev;
4287 /* find the controller to which the command to be aborted was sent */
4288 h = sdev_to_hba(scsicmd->device);
4289 if (h == NULL) /* paranoia */
4290 return FAILED;
4291 dev = scsicmd->device->hostdata;
4292 if (dev == NULL) {
4293 dev_err(&h->pdev->dev,
4294 "hpsa_eh_device_reset_handler: device lookup failed.\n");
4295 return FAILED;
4296 }
4297 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
4298 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4299 /* send a reset to the SCSI LUN which the command was sent to */
4300 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
4301 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
4302 return SUCCESS;
4304 dev_warn(&h->pdev->dev, "resetting device failed.\n");
4305 return FAILED;
4306 }
4308 static void swizzle_abort_tag(u8 *tag)
4310 u8 original_tag[8];
4312 memcpy(original_tag, tag, 8);
4313 tag[0] = original_tag[3];
4314 tag[1] = original_tag[2];
4315 tag[2] = original_tag[1];
4316 tag[3] = original_tag[0];
4317 tag[4] = original_tag[7];
4318 tag[5] = original_tag[6];
4319 tag[6] = original_tag[5];
4320 tag[7] = original_tag[4];
4321 }
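/*
 * Illustration (not from the driver): the swizzle is a byte reversal of
 * each 32-bit half of the 8-byte tag, i.e. an endianness swap of two
 * u32s. A hedged equivalent, assuming 4-byte alignment of the buffer:
 *
 *	u32 *words = (u32 *)tag;
 *	words[0] = swab32(words[0]);	// bytes 0-3 reversed
 *	words[1] = swab32(words[1]);	// bytes 4-7 reversed
 *
 * The open-coded version above has no alignment requirement.
 */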
4323 static void hpsa_get_tag(struct ctlr_info *h,
4324 struct CommandList *c, u32 *taglower, u32 *tagupper)
4326 if (c->cmd_type == CMD_IOACCEL1) {
4327 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4328 &h->ioaccel_cmd_pool[c->cmdindex];
4329 *tagupper = cm1->Tag.upper;
4330 *taglower = cm1->Tag.lower;
4331 return;
4332 }
4333 if (c->cmd_type == CMD_IOACCEL2) {
4334 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
4335 &h->ioaccel2_cmd_pool[c->cmdindex];
4336 /* upper tag not used in ioaccel2 mode */
4337 memset(tagupper, 0, sizeof(*tagupper));
4338 *taglower = cm2->Tag;
4339 return;
4340 }
4341 *tagupper = c->Header.Tag.upper;
4342 *taglower = c->Header.Tag.lower;
4343 }
4346 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4347 struct CommandList *abort, int swizzle)
4349 int rc = IO_OK;
4350 struct CommandList *c;
4351 struct ErrorInfo *ei;
4352 u32 tagupper, taglower;
4354 c = cmd_special_alloc(h);
4355 if (c == NULL) { /* trouble... */
4356 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
4357 return -ENOMEM;
4358 }
4360 /* fill_cmd can't fail here, no buffer to map */
4361 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
4362 0, 0, scsi3addr, TYPE_MSG);
4363 if (swizzle)
4364 swizzle_abort_tag(&c->Request.CDB[4]);
4365 hpsa_scsi_do_simple_cmd_core(h, c);
4366 hpsa_get_tag(h, abort, &taglower, &tagupper);
4367 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
4368 __func__, tagupper, taglower);
4369 /* no unmap needed here because no data xfer. */
4371 ei = c->err_info;
4372 switch (ei->CommandStatus) {
4373 case CMD_SUCCESS:
4374 break;
4375 case CMD_UNABORTABLE: /* Very common, don't make noise. */
4376 rc = -1;
4377 break;
4378 default:
4379 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
4380 __func__, tagupper, taglower);
4381 hpsa_scsi_interpret_error(h, c);
4382 rc = -1;
4383 break;
4384 }
4385 cmd_special_free(h, c);
4386 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
4387 __func__, tagupper, taglower);
4388 return rc;
4389 }
4392 * hpsa_find_cmd_in_queue
4394 * Used to determine whether a command (find) is still present
4395 * in queue_head. Optionally excludes the last element of queue_head.
4397 * This is used to avoid unnecessary aborts. Commands in h->reqQ have
4398 * not yet been submitted, and so can be aborted by the driver without
4399 * sending an abort to the hardware.
4401 * Returns pointer to command if found in queue, NULL otherwise.
4402 */
4403 static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
4404 struct scsi_cmnd *find, struct list_head *queue_head)
4406 unsigned long flags;
4407 struct CommandList *c = NULL; /* ptr into cmpQ */
4411 spin_lock_irqsave(&h->lock, flags);
4412 list_for_each_entry(c, queue_head, list) {
4413 if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
4414 continue;
4415 if (c->scsi_cmd == find) {
4416 spin_unlock_irqrestore(&h->lock, flags);
4417 return c;
4418 }
4419 }
4420 spin_unlock_irqrestore(&h->lock, flags);
4421 return NULL;
4422 }
4424 static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
4425 u8 *tag, struct list_head *queue_head)
4427 unsigned long flags;
4428 struct CommandList *c;
4430 spin_lock_irqsave(&h->lock, flags);
4431 list_for_each_entry(c, queue_head, list) {
4432 if (memcmp(&c->Header.Tag, tag, 8) != 0)
4433 continue;
4434 spin_unlock_irqrestore(&h->lock, flags);
4435 return c;
4436 }
4437 spin_unlock_irqrestore(&h->lock, flags);
4438 return NULL;
4439 }
4441 /* ioaccel2 path firmware cannot handle abort task requests.
4442 * Change abort requests to physical target reset, and send to the
4443 * address of the physical disk used for the ioaccel 2 command.
4444 * Return 0 on success (IO_OK), -1 on failure (IO_ERROR).
4445 */
4448 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4449 unsigned char *scsi3addr, struct CommandList *abort)
4451 int rc = IO_OK;
4452 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
4453 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
4454 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
4455 unsigned char *psa = &phys_scsi3addr[0];
4457 /* Get a pointer to the hpsa logical device. */
4458 scmd = (struct scsi_cmnd *) abort->scsi_cmd;
4459 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
4460 if (dev == NULL) {
4461 dev_warn(&h->pdev->dev,
4462 "Cannot abort: no device pointer for command.\n");
4463 return -1; /* not abortable */
4464 }
4466 if (h->raid_offload_debug > 0)
4467 dev_info(&h->pdev->dev,
4468 "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4469 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
4470 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4471 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4473 if (!dev->offload_enabled) {
4474 dev_warn(&h->pdev->dev,
4475 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
4476 return -1; /* not abortable */
4477 }
4479 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
4480 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
4481 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
4482 return -1; /* not abortable */
4483 }
4485 /* send the reset */
4486 if (h->raid_offload_debug > 0)
4487 dev_info(&h->pdev->dev,
4488 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4489 psa[0], psa[1], psa[2], psa[3],
4490 psa[4], psa[5], psa[6], psa[7]);
4491 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
4492 if (rc != 0) {
4493 dev_warn(&h->pdev->dev,
4494 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4495 psa[0], psa[1], psa[2], psa[3],
4496 psa[4], psa[5], psa[6], psa[7]);
4497 return rc; /* failed to reset */
4498 }
4500 /* wait for device to recover */
4501 if (wait_for_device_to_become_ready(h, psa) != 0) {
4502 dev_warn(&h->pdev->dev,
4503 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4504 psa[0], psa[1], psa[2], psa[3],
4505 psa[4], psa[5], psa[6], psa[7]);
4506 return -1; /* failed to recover */
4507 }
4509 /* device recovered */
4510 dev_info(&h->pdev->dev,
4511 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4512 psa[0], psa[1], psa[2], psa[3],
4513 psa[4], psa[5], psa[6], psa[7]);
4515 return rc; /* success */
4516 }
4518 /* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
4519 * tell which kind we're dealing with, so we send the abort both ways. There
4520 * shouldn't be any collisions between swizzled and unswizzled tags due to the
4521 * way we construct our tags but we check anyway in case the assumptions which
4522 * make this true someday become false.
4523 */
4524 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
4525 unsigned char *scsi3addr, struct CommandList *abort)
4527 u8 swizzled_tag[8];
4528 struct CommandList *c;
4529 int rc = 0, rc2 = 0;
4531 /* ioaccel mode 2 commands should be aborted via the
4532 * accelerated path, since RAID path is unaware of these commands,
4533 * but underlying firmware can't handle abort TMF.
4534 * Change abort to physical device reset.
4535 */
4536 if (abort->cmd_type == CMD_IOACCEL2)
4537 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
4539 /* we do not expect to find the swizzled tag in our queue, but
4540 * check anyway just to be sure the assumptions which make this
4541 * the case haven't become wrong.
4542 */
4543 memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
4544 swizzle_abort_tag(swizzled_tag);
4545 c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
4546 if (c != NULL) {
4547 dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
4548 return hpsa_send_abort(h, scsi3addr, abort, 0); /* no swizzle */
4549 }
4550 rc = hpsa_send_abort(h, scsi3addr, abort, 0);
4552 /* if the command is still in our queue, we can't conclude that it was
4553 * aborted (it might have just completed normally) but in any case
4554 * we don't need to try to abort it another way.
4555 */
4556 c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
4557 if (c)
4558 rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
4559 return rc && rc2;
4560 }
4562 /* Send an abort for the specified command.
4563 * If the device and controller support it,
4564 * send a task abort request.
4565 */
4566 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4569 int i, rc;
4570 struct ctlr_info *h;
4571 struct hpsa_scsi_dev_t *dev;
4572 struct CommandList *abort; /* pointer to command to be aborted */
4573 struct CommandList *found;
4574 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
4575 char msg[256]; /* For debug messaging. */
4576 int ml = 0;
4577 u32 tagupper, taglower;
4579 /* Find the controller of the command to be aborted */
4580 h = sdev_to_hba(sc->device);
4582 "ABORT REQUEST FAILED, Controller lookup failed.\n"))
4585 /* Check that controller supports some kind of task abort */
4586 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
4587 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
4588 return FAILED;
4590 memset(msg, 0, sizeof(msg));
4591 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ",
4592 h->scsi_host->host_no, sc->device->channel,
4593 sc->device->id, sc->device->lun);
4595 /* Find the device of the command to be aborted */
4596 dev = sc->device->hostdata;
4597 if (!dev) {
4598 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
4599 msg);
4600 return FAILED;
4601 }
4603 /* Get SCSI command to be aborted */
4604 abort = (struct CommandList *) sc->host_scribble;
4605 if (abort == NULL) {
4606 dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
4607 msg);
4608 return FAILED;
4609 }
4610 hpsa_get_tag(h, abort, &taglower, &tagupper);
4611 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
4612 as = (struct scsi_cmnd *) abort->scsi_cmd;
4613 if (as != NULL)
4614 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
4615 as->cmnd[0], as->serial_number);
4616 dev_dbg(&h->pdev->dev, "%s\n", msg);
4617 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
4618 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4620 /* Search reqQ to see if command is queued but not submitted;
4621 * if so, complete the command with aborted status and remove
4622 * it from the reqQ.
4623 */
4624 found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
4625 if (found) {
4626 found->err_info->CommandStatus = CMD_ABORTED;
4627 finish_cmd(found);
4628 dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
4629 msg);
4630 return SUCCESS;
4631 }
4633 /* not in reqQ, if also not in cmpQ, must have already completed */
4634 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
4635 if (!found) {
4636 dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
4637 msg);
4638 return SUCCESS;
4639 }
4642 * Command is in flight, or possibly already completed
4643 * by the firmware (but not to the scsi mid layer) but we can't
4644 * distinguish which. Send the abort down.
4645 */
4646 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
4647 if (rc != 0) {
4648 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
4649 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
4650 h->scsi_host->host_no,
4651 dev->bus, dev->target, dev->lun);
4652 return FAILED;
4653 }
4654 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
4656 /* If the abort(s) above completed and actually aborted the
4657 * command, then the command to be aborted should already be
4658 * completed. If not, wait around a bit more to see if they
4659 * manage to complete normally.
4661 #define ABORT_COMPLETE_WAIT_SECS 30
4662 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
4663 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
4664 if (!found)
4665 return SUCCESS;
4666 msleep(100);
4667 }
4668 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
4669 msg, ABORT_COMPLETE_WAIT_SECS);
4670 return FAILED;
4671 }
4674 /*
4675 * For operations that cannot sleep, a command block is allocated at init,
4676 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
4677 * which ones are free or in use. Lock must be held when calling this.
4678 * cmd_free() is the complement.
4679 */
4680 static struct CommandList *cmd_alloc(struct ctlr_info *h)
4682 struct CommandList *c;
4683 int i;
4684 union u64bit temp64;
4685 dma_addr_t cmd_dma_handle, err_dma_handle;
4686 unsigned long flags;
4688 spin_lock_irqsave(&h->lock, flags);
4689 do {
4690 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
4691 if (i == h->nr_cmds) {
4692 spin_unlock_irqrestore(&h->lock, flags);
4693 return NULL;
4694 }
4695 } while (test_and_set_bit
4696 (i & (BITS_PER_LONG - 1),
4697 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
4698 spin_unlock_irqrestore(&h->lock, flags);
4700 c = h->cmd_pool + i;
4701 memset(c, 0, sizeof(*c));
4702 cmd_dma_handle = h->cmd_pool_dhandle
4703 + i * sizeof(*c);
4704 c->err_info = h->errinfo_pool + i;
4705 memset(c->err_info, 0, sizeof(*c->err_info));
4706 err_dma_handle = h->errinfo_pool_dhandle
4707 + i * sizeof(*c->err_info);
4709 c->cmdindex = i;
4711 INIT_LIST_HEAD(&c->list);
4712 c->busaddr = (u32) cmd_dma_handle;
4713 temp64.val = (u64) err_dma_handle;
4714 c->ErrDesc.Addr.lower = temp64.val32.lower;
4715 c->ErrDesc.Addr.upper = temp64.val32.upper;
4716 c->ErrDesc.Len = sizeof(*c->err_info);
4718 c->h = h;
4719 return c;
4720 }
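/*
 * Illustration (not from the driver): cmd_alloc() is a fixed-pool
 * allocator over a bitmap. The core pattern:
 *
 *	do {
 *		i = find_first_zero_bit(bits, nr);	// candidate slot
 *		if (i == nr)
 *			return NULL;			// pool exhausted
 *	} while (test_and_set_bit(i % BITS_PER_LONG,
 *				  bits + i / BITS_PER_LONG));
 *
 * Under h->lock the test_and_set_bit() retry cannot actually lose a
 * race; it is kept as cheap insurance.
 */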
4722 /* For operations that can wait for kmalloc to possibly sleep,
4723 * this routine can be called. Lock need not be held to call
4724 * cmd_special_alloc. cmd_special_free() is the complement.
4725 */
4726 static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
4728 struct CommandList *c;
4729 union u64bit temp64;
4730 dma_addr_t cmd_dma_handle, err_dma_handle;
4732 c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
4733 if (c == NULL)
4734 return NULL;
4736 c->cmd_type = CMD_SCSI;
4737 c->cmdindex = -1;
4739 c->err_info = pci_zalloc_consistent(h->pdev, sizeof(*c->err_info),
4740 &err_dma_handle);
4742 if (c->err_info == NULL) {
4743 pci_free_consistent(h->pdev,
4744 sizeof(*c), c, cmd_dma_handle);
4745 return NULL;
4746 }
4748 INIT_LIST_HEAD(&c->list);
4749 c->busaddr = (u32) cmd_dma_handle;
4750 temp64.val = (u64) err_dma_handle;
4751 c->ErrDesc.Addr.lower = temp64.val32.lower;
4752 c->ErrDesc.Addr.upper = temp64.val32.upper;
4753 c->ErrDesc.Len = sizeof(*c->err_info);
4755 c->h = h;
4756 return c;
4757 }
4759 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
4761 int i;
4762 unsigned long flags;
4764 i = c - h->cmd_pool;
4765 spin_lock_irqsave(&h->lock, flags);
4766 clear_bit(i & (BITS_PER_LONG - 1),
4767 h->cmd_pool_bits + (i / BITS_PER_LONG));
4768 spin_unlock_irqrestore(&h->lock, flags);
4769 }
4771 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
4773 union u64bit temp64;
4775 temp64.val32.lower = c->ErrDesc.Addr.lower;
4776 temp64.val32.upper = c->ErrDesc.Addr.upper;
4777 pci_free_consistent(h->pdev, sizeof(*c->err_info),
4778 c->err_info, (dma_addr_t) temp64.val);
4779 pci_free_consistent(h->pdev, sizeof(*c),
4780 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
4781 }
4783 #ifdef CONFIG_COMPAT
4785 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
4788 IOCTL32_Command_struct __user *arg32 =
4789 (IOCTL32_Command_struct __user *) arg;
4790 IOCTL_Command_struct arg64;
4791 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
4792 int err;
4793 u32 cp;
4795 memset(&arg64, 0, sizeof(arg64));
4796 err = 0;
4797 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4798 sizeof(arg64.LUN_info));
4799 err |= copy_from_user(&arg64.Request, &arg32->Request,
4800 sizeof(arg64.Request));
4801 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4802 sizeof(arg64.error_info));
4803 err |= get_user(arg64.buf_size, &arg32->buf_size);
4804 err |= get_user(cp, &arg32->buf);
4805 arg64.buf = compat_ptr(cp);
4806 err |= copy_to_user(p, &arg64, sizeof(arg64));
4807 if (err)
4808 return -EFAULT;
4811 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
4812 if (err)
4813 return err;
4814 err |= copy_in_user(&arg32->error_info, &p->error_info,
4815 sizeof(arg32->error_info));
4816 if (err)
4817 return -EFAULT;
4818 return err;
4819 }
4821 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
4822 int cmd, void __user *arg)
4824 BIG_IOCTL32_Command_struct __user *arg32 =
4825 (BIG_IOCTL32_Command_struct __user *) arg;
4826 BIG_IOCTL_Command_struct arg64;
4827 BIG_IOCTL_Command_struct __user *p =
4828 compat_alloc_user_space(sizeof(arg64));
4829 int err;
4830 u32 cp;
4832 memset(&arg64, 0, sizeof(arg64));
4833 err = 0;
4834 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4835 sizeof(arg64.LUN_info));
4836 err |= copy_from_user(&arg64.Request, &arg32->Request,
4837 sizeof(arg64.Request));
4838 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4839 sizeof(arg64.error_info));
4840 err |= get_user(arg64.buf_size, &arg32->buf_size);
4841 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
4842 err |= get_user(cp, &arg32->buf);
4843 arg64.buf = compat_ptr(cp);
4844 err |= copy_to_user(p, &arg64, sizeof(arg64));
4845 if (err)
4846 return -EFAULT;
4849 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
4850 if (err)
4851 return err;
4852 err |= copy_in_user(&arg32->error_info, &p->error_info,
4853 sizeof(arg32->error_info));
4854 if (err)
4855 return -EFAULT;
4856 return err;
4857 }
4859 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
4861 switch (cmd) {
4862 case CCISS_GETPCIINFO:
4863 case CCISS_GETINTINFO:
4864 case CCISS_SETINTINFO:
4865 case CCISS_GETNODENAME:
4866 case CCISS_SETNODENAME:
4867 case CCISS_GETHEARTBEAT:
4868 case CCISS_GETBUSTYPES:
4869 case CCISS_GETFIRMVER:
4870 case CCISS_GETDRIVVER:
4871 case CCISS_REVALIDVOLS:
4872 case CCISS_DEREGDISK:
4873 case CCISS_REGNEWDISK:
4874 case CCISS_REGNEWD:
4875 case CCISS_RESCANDISK:
4876 case CCISS_GETLUNINFO:
4877 return hpsa_ioctl(dev, cmd, arg);
4879 case CCISS_PASSTHRU32:
4880 return hpsa_ioctl32_passthru(dev, cmd, arg);
4881 case CCISS_BIG_PASSTHRU32:
4882 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
4884 default:
4885 return -ENOIOCTLCMD;
4886 }
4887 }
4890 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
4892 struct hpsa_pci_info pciinfo;
4894 if (!argp)
4895 return -EINVAL;
4896 pciinfo.domain = pci_domain_nr(h->pdev->bus);
4897 pciinfo.bus = h->pdev->bus->number;
4898 pciinfo.dev_fn = h->pdev->devfn;
4899 pciinfo.board_id = h->board_id;
4900 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
4901 return -EFAULT;
4902 return 0;
4903 }
4905 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
4907 DriverVer_type DriverVer;
4908 unsigned char vmaj, vmin, vsubmin;
4909 int rc;
4911 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
4912 &vmaj, &vmin, &vsubmin);
4913 if (rc != 3) {
4914 dev_info(&h->pdev->dev, "driver version string '%s' unrecognized.",
4915 HPSA_DRIVER_VERSION);
4916 vmaj = 0;
4917 vmin = 0;
4918 vsubmin = 0;
4919 }
4920 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
4921 if (!argp)
4922 return -EINVAL;
4923 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
4924 return -EFAULT;
4925 return 0;
4926 }
4928 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4930 IOCTL_Command_struct iocommand;
4931 struct CommandList *c;
4932 char *buff = NULL;
4933 union u64bit temp64;
4934 int rc = 0;
4936 if (!argp)
4937 return -EINVAL;
4938 if (!capable(CAP_SYS_RAWIO))
4939 return -EPERM;
4940 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
4941 return -EFAULT;
4942 if ((iocommand.buf_size < 1) &&
4943 (iocommand.Request.Type.Direction != XFER_NONE)) {
4944 return -EINVAL;
4945 }
4946 if (iocommand.buf_size > 0) {
4947 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
4948 if (buff == NULL)
4949 return -EFAULT;
4950 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4951 /* Copy the data into the buffer we created */
4952 if (copy_from_user(buff, iocommand.buf,
4953 iocommand.buf_size)) {
4954 rc = -EFAULT;
4955 goto out_kfree;
4956 }
4957 } else {
4958 memset(buff, 0, iocommand.buf_size);
4959 }
4960 }
4961 c = cmd_special_alloc(h);
4962 if (c == NULL) {
4963 rc = -ENOMEM;
4964 goto out_kfree;
4965 }
4966 /* Fill in the command type */
4967 c->cmd_type = CMD_IOCTL_PEND;
4968 /* Fill in Command Header */
4969 c->Header.ReplyQueue = 0; /* unused in simple mode */
4970 if (iocommand.buf_size > 0) { /* buffer to fill */
4971 c->Header.SGList = 1;
4972 c->Header.SGTotal = 1;
4973 } else { /* no buffers to fill */
4974 c->Header.SGList = 0;
4975 c->Header.SGTotal = 0;
4977 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
4978 /* use the kernel address of the cmd block for tag */
4979 c->Header.Tag.lower = c->busaddr;
4981 /* Fill in Request block */
4982 memcpy(&c->Request, &iocommand.Request,
4983 sizeof(c->Request));
4985 /* Fill in the scatter gather information */
4986 if (iocommand.buf_size > 0) {
4987 temp64.val = pci_map_single(h->pdev, buff,
4988 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4989 if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
4990 c->SG[0].Addr.lower = 0;
4991 c->SG[0].Addr.upper = 0;
4992 c->SG[0].Len = 0;
4993 rc = -ENOMEM;
4994 goto out;
4995 }
4996 c->SG[0].Addr.lower = temp64.val32.lower;
4997 c->SG[0].Addr.upper = temp64.val32.upper;
4998 c->SG[0].Len = iocommand.buf_size;
4999 c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/
5001 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
5002 if (iocommand.buf_size > 0)
5003 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
5004 check_ioctl_unit_attention(h, c);
5006 /* Copy the error information out */
5007 memcpy(&iocommand.error_info, c->err_info,
5008 sizeof(iocommand.error_info));
5009 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
5010 rc = -EFAULT;
5011 goto out;
5012 }
5013 if ((iocommand.Request.Type.Direction & XFER_READ) &&
5014 iocommand.buf_size > 0) {
5015 /* Copy the data out of the buffer we created */
5016 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
5017 rc = -EFAULT;
5018 goto out;
5019 }
5020 }
5021 out:
5022 cmd_special_free(h, c);
5023 out_kfree:
5024 kfree(buff);
5025 return rc;
5026 }
5028 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5030 BIG_IOCTL_Command_struct *ioc;
5031 struct CommandList *c;
5032 unsigned char **buff = NULL;
5033 int *buff_size = NULL;
5034 union u64bit temp64;
5035 BYTE sg_used = 0;
5036 int status = 0;
5037 int i;
5038 u32 left;
5039 u32 sz;
5040 BYTE __user *data_ptr;
5042 if (!argp)
5043 return -EINVAL;
5044 if (!capable(CAP_SYS_RAWIO))
5045 return -EPERM;
5046 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
5048 if (!ioc) {
5049 status = -ENOMEM;
5050 goto cleanup1;
5051 }
5052 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
5053 status = -EFAULT;
5054 goto cleanup1;
5055 }
5056 if ((ioc->buf_size < 1) &&
5057 (ioc->Request.Type.Direction != XFER_NONE)) {
5058 status = -EINVAL;
5059 goto cleanup1;
5060 }
5061 /* Check kmalloc limits using all SGs */
5062 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
5063 status = -EINVAL;
5064 goto cleanup1;
5065 }
5066 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
5067 status = -EINVAL;
5068 goto cleanup1;
5069 }
5070 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
5071 if (!buff) {
5072 status = -ENOMEM;
5073 goto cleanup1;
5074 }
5075 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
5076 if (!buff_size) {
5077 status = -ENOMEM;
5078 goto cleanup1;
5079 }
5080 left = ioc->buf_size;
5081 data_ptr = ioc->buf;
5082 while (left) {
5083 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
5084 buff_size[sg_used] = sz;
5085 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
5086 if (buff[sg_used] == NULL) {
5087 status = -ENOMEM;
5088 goto cleanup1;
5089 }
5090 if (ioc->Request.Type.Direction & XFER_WRITE) {
5091 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
5092 status = -EFAULT;
5093 goto cleanup1;
5094 }
5095 } else
5096 memset(buff[sg_used], 0, sz);
5097 left -= sz;
5098 data_ptr += sz;
5099 sg_used++;
5100 }
5101 c = cmd_special_alloc(h);
5102 if (c == NULL) {
5103 status = -ENOMEM;
5104 goto cleanup1;
5105 }
5106 c->cmd_type = CMD_IOCTL_PEND;
5107 c->Header.ReplyQueue = 0;
5108 c->Header.SGList = c->Header.SGTotal = sg_used;
5109 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
5110 c->Header.Tag.lower = c->busaddr;
5111 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5112 if (ioc->buf_size > 0) {
5114 for (i = 0; i < sg_used; i++) {
5115 temp64.val = pci_map_single(h->pdev, buff[i],
5116 buff_size[i], PCI_DMA_BIDIRECTIONAL);
5117 if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
5118 c->SG[i].Addr.lower = 0;
5119 c->SG[i].Addr.upper = 0;
5120 c->SG[i].Len = 0;
5121 hpsa_pci_unmap(h->pdev, c, i,
5122 PCI_DMA_BIDIRECTIONAL);
5123 status = -ENOMEM;
5124 goto cleanup0;
5125 }
5126 c->SG[i].Addr.lower = temp64.val32.lower;
5127 c->SG[i].Addr.upper = temp64.val32.upper;
5128 c->SG[i].Len = buff_size[i];
5129 c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
5130 }
5131 }
5132 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
5133 if (sg_used)
5134 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
5135 check_ioctl_unit_attention(h, c);
5136 /* Copy the error information out */
5137 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5138 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
5139 status = -EFAULT;
5140 goto cleanup0;
5141 }
5142 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
5143 /* Copy the data out of the buffer we created */
5144 BYTE __user *ptr = ioc->buf;
5145 for (i = 0; i < sg_used; i++) {
5146 if (copy_to_user(ptr, buff[i], buff_size[i])) {
5147 status = -EFAULT;
5148 goto cleanup0;
5149 }
5150 ptr += buff_size[i];
5151 }
5152 }
5153 status = 0;
5154 cleanup0:
5155 cmd_special_free(h, c);
5156 cleanup1:
5157 if (buff) {
5158 for (i = 0; i < sg_used; i++)
5159 kfree(buff[i]);
5160 kfree(buff);
5161 }
5162 kfree(buff_size);
5163 kfree(ioc);
5164 return status;
5165 }
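/*
 * Illustration (not from the driver): the big-passthru path chops the
 * user buffer into at most SG_ENTRIES_IN_CMD chunks of malloc_size
 * bytes. The chunking arithmetic, sketched:
 *
 *	u32 left = total;
 *	while (left) {
 *		u32 sz = (left > chunk) ? chunk : left;	// last may be short
 *		...allocate sz bytes, copy sz in...
 *		left -= sz;
 *	}
 */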
5167 static void check_ioctl_unit_attention(struct ctlr_info *h,
5168 struct CommandList *c)
5170 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5171 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
5172 (void) check_for_unit_attention(h, c);
5173 }
5175 static int increment_passthru_count(struct ctlr_info *h)
5177 unsigned long flags;
5179 spin_lock_irqsave(&h->passthru_count_lock, flags);
5180 if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) {
5181 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5182 return -1;
5183 }
5184 h->passthru_count++;
5185 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5186 return 0;
5187 }
5189 static void decrement_passthru_count(struct ctlr_info *h)
5191 unsigned long flags;
5193 spin_lock_irqsave(&h->passthru_count_lock, flags);
5194 if (h->passthru_count <= 0) {
5195 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5196 /* not expecting to get here. */
5197 dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n");
5198 return;
5199 }
5200 h->passthru_count--;
5201 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5202 }
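/*
 * Illustration (not from the driver): the passthru counter is a
 * hand-rolled try-acquire counting semaphore. A hedged sketch of the
 * same policy using atomics (hypothetical field):
 *
 *	if (atomic_inc_return(&passthru) > HPSA_MAX_CONCURRENT_PASSTHRUS) {
 *		atomic_dec(&passthru);		// over the limit, back off
 *		return -1;
 *	}
 *	...perform the passthru...
 *	atomic_dec(&passthru);			// release
 */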
5207 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5209 struct ctlr_info *h;
5210 void __user *argp = (void __user *)arg;
5211 int rc;
5213 h = sdev_to_hba(dev);
5215 switch (cmd) {
5216 case CCISS_DEREGDISK:
5217 case CCISS_REGNEWDISK:
5218 case CCISS_REVALIDVOLS:
5219 hpsa_scan_start(h->scsi_host);
5220 return 0;
5221 case CCISS_GETPCIINFO:
5222 return hpsa_getpciinfo_ioctl(h, argp);
5223 case CCISS_GETDRIVVER:
5224 return hpsa_getdrivver_ioctl(h, argp);
5225 case CCISS_PASSTHRU:
5226 if (increment_passthru_count(h))
5227 return -EAGAIN;
5228 rc = hpsa_passthru_ioctl(h, argp);
5229 decrement_passthru_count(h);
5230 return rc;
5231 case CCISS_BIG_PASSTHRU:
5232 if (increment_passthru_count(h))
5233 return -EAGAIN;
5234 rc = hpsa_big_passthru_ioctl(h, argp);
5235 decrement_passthru_count(h);
5236 return rc;
5237 default:
5238 return -ENOTTY;
5239 }
5240 }
5242 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5243 u8 reset_type)
5245 struct CommandList *c;
5246 c = cmd_alloc(h);
5247 if (!c)
5248 return -ENOMEM;
5250 /* fill_cmd can't fail here, no data buffer to map */
5251 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
5252 RAID_CTLR_LUNID, TYPE_MSG);
5253 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
5255 enqueue_cmd_and_start_io(h, c);
5256 /* Don't wait for completion, the reset won't complete. Don't free
5257 * the command either. This is the last command we will send before
5258 * re-initializing everything, so it doesn't matter and won't leak.
5259 */
5260 return 0;
5261 }
5263 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5264 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
5267 int pci_dir = XFER_NONE;
5268 struct CommandList *a; /* for commands to be aborted */
5270 c->cmd_type = CMD_IOCTL_PEND;
5271 c->Header.ReplyQueue = 0;
5272 if (buff != NULL && size > 0) {
5273 c->Header.SGList = 1;
5274 c->Header.SGTotal = 1;
5276 c->Header.SGList = 0;
5277 c->Header.SGTotal = 0;
5279 c->Header.Tag.lower = c->busaddr;
5280 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
5282 c->Request.Type.Type = cmd_type;
5283 if (cmd_type == TYPE_CMD) {
5284 switch (cmd) {
5285 case HPSA_INQUIRY:
5286 /* are we trying to read a vital product page */
5287 if (page_code & VPD_PAGE) {
5288 c->Request.CDB[1] = 0x01;
5289 c->Request.CDB[2] = (page_code & 0xff);
5290 }
5291 c->Request.CDBLen = 6;
5292 c->Request.Type.Attribute = ATTR_SIMPLE;
5293 c->Request.Type.Direction = XFER_READ;
5294 c->Request.Timeout = 0;
5295 c->Request.CDB[0] = HPSA_INQUIRY;
5296 c->Request.CDB[4] = size & 0xFF;
5297 break;
5298 case HPSA_REPORT_LOG:
5299 case HPSA_REPORT_PHYS:
5300 /* Talking to the controller, so it's a physical command:
5301 * mode = 00, target = 0. Nothing to write.
5302 */
5303 c->Request.CDBLen = 12;
5304 c->Request.Type.Attribute = ATTR_SIMPLE;
5305 c->Request.Type.Direction = XFER_READ;
5306 c->Request.Timeout = 0;
5307 c->Request.CDB[0] = cmd;
5308 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5309 c->Request.CDB[7] = (size >> 16) & 0xFF;
5310 c->Request.CDB[8] = (size >> 8) & 0xFF;
5311 c->Request.CDB[9] = size & 0xFF;
5312 break;
5313 case HPSA_CACHE_FLUSH:
5314 c->Request.CDBLen = 12;
5315 c->Request.Type.Attribute = ATTR_SIMPLE;
5316 c->Request.Type.Direction = XFER_WRITE;
5317 c->Request.Timeout = 0;
5318 c->Request.CDB[0] = BMIC_WRITE;
5319 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
5320 c->Request.CDB[7] = (size >> 8) & 0xFF;
5321 c->Request.CDB[8] = size & 0xFF;
5322 break;
5323 case TEST_UNIT_READY:
5324 c->Request.CDBLen = 6;
5325 c->Request.Type.Attribute = ATTR_SIMPLE;
5326 c->Request.Type.Direction = XFER_NONE;
5327 c->Request.Timeout = 0;
5328 break;
5329 case HPSA_GET_RAID_MAP:
5330 c->Request.CDBLen = 12;
5331 c->Request.Type.Attribute = ATTR_SIMPLE;
5332 c->Request.Type.Direction = XFER_READ;
5333 c->Request.Timeout = 0;
5334 c->Request.CDB[0] = HPSA_CISS_READ;
5335 c->Request.CDB[1] = cmd;
5336 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5337 c->Request.CDB[7] = (size >> 16) & 0xFF;
5338 c->Request.CDB[8] = (size >> 8) & 0xFF;
5339 c->Request.CDB[9] = size & 0xFF;
5340 break;
5341 case BMIC_SENSE_CONTROLLER_PARAMETERS:
5342 c->Request.CDBLen = 10;
5343 c->Request.Type.Attribute = ATTR_SIMPLE;
5344 c->Request.Type.Direction = XFER_READ;
5345 c->Request.Timeout = 0;
5346 c->Request.CDB[0] = BMIC_READ;
5347 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
5348 c->Request.CDB[7] = (size >> 16) & 0xFF;
5349 c->Request.CDB[8] = (size >> 8) & 0xFF;
5350 break;
5351 default:
5352 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
5353 BUG();
5354 return -1;
5355 }
5356 } else if (cmd_type == TYPE_MSG) {
5358 switch (cmd) {
5359 case HPSA_DEVICE_RESET_MSG:
5360 c->Request.CDBLen = 16;
5361 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
5362 c->Request.Type.Attribute = ATTR_SIMPLE;
5363 c->Request.Type.Direction = XFER_NONE;
5364 c->Request.Timeout = 0; /* Don't time out */
5365 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
5366 c->Request.CDB[0] = cmd;
5367 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
5368 /* If bytes 4-7 are zero, it means reset the */
5369 /* LunID device */
5370 c->Request.CDB[4] = 0x00;
5371 c->Request.CDB[5] = 0x00;
5372 c->Request.CDB[6] = 0x00;
5373 c->Request.CDB[7] = 0x00;
5374 break;
5375 case HPSA_ABORT_MSG:
5376 a = buff; /* point to command to be aborted */
5377 dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
5378 a->Header.Tag.upper, a->Header.Tag.lower,
5379 c->Header.Tag.upper, c->Header.Tag.lower);
5380 c->Request.CDBLen = 16;
5381 c->Request.Type.Type = TYPE_MSG;
5382 c->Request.Type.Attribute = ATTR_SIMPLE;
5383 c->Request.Type.Direction = XFER_WRITE;
5384 c->Request.Timeout = 0; /* Don't time out */
5385 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
5386 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
5387 c->Request.CDB[2] = 0x00; /* reserved */
5388 c->Request.CDB[3] = 0x00; /* reserved */
5389 /* Tag to abort goes in CDB[4]-CDB[11] */
5390 c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
5391 c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
5392 c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
5393 c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
5394 c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
5395 c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
5396 c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
5397 c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
5398 c->Request.CDB[12] = 0x00; /* reserved */
5399 c->Request.CDB[13] = 0x00; /* reserved */
5400 c->Request.CDB[14] = 0x00; /* reserved */
5401 c->Request.CDB[15] = 0x00; /* reserved */
5402 break;
5403 default:
5404 dev_warn(&h->pdev->dev, "unknown message type %d\n",
5405 cmd);
5406 BUG();
5407 }
5408 } else {
5409 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
5410 BUG();
5411 }
5413 switch (c->Request.Type.Direction) {
5414 case XFER_READ:
5415 pci_dir = PCI_DMA_FROMDEVICE;
5416 break;
5417 case XFER_WRITE:
5418 pci_dir = PCI_DMA_TODEVICE;
5419 break;
5420 case XFER_NONE:
5421 pci_dir = PCI_DMA_NONE;
5422 break;
5423 default:
5424 pci_dir = PCI_DMA_BIDIRECTIONAL;
5425 }
5426 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
5427 return -1;
5428 return 0;
5429 }
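/*
 * Illustration (not from the driver): typical fill_cmd() usage, as in
 * wait_for_device_to_become_ready() above. A hedged sketch:
 *
 *	struct CommandList *c = cmd_special_alloc(h);
 *	if (!c)
 *		return -ENOMEM;
 *	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0,
 *			lunaddr, TYPE_CMD);	// no data buffer to map
 *	hpsa_scsi_do_simple_cmd_core(h, c);
 *	status = c->err_info->CommandStatus;	// inspect the result
 *	cmd_special_free(h, c);
 */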
5431 /*
5432 * Map (physical) PCI mem into (virtual) kernel space
5433 */
5434 static void __iomem *remap_pci_mem(ulong base, ulong size)
5436 ulong page_base = ((ulong) base) & PAGE_MASK;
5437 ulong page_offs = ((ulong) base) - page_base;
5438 void __iomem *page_remapped = ioremap_nocache(page_base,
5439 page_offs + size);
5441 return page_remapped ? (page_remapped + page_offs) : NULL;
5442 }
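/*
 * Illustration (not from the driver): the math above rounds the base
 * down to a page boundary and maps enough to cover the whole range.
 * With 4 KiB pages, base = 0x1234 and size = 0x100 give:
 *
 *	page_base = 0x1000, page_offs = 0x234
 *	ioremap_nocache(0x1000, 0x334), return mapping + 0x234
 */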
5444 /* Takes cmds off the submission queue and sends them to the hardware,
5445 * then puts them on the queue of cmds waiting for completion.
5446 * Assumes h->lock is held
5447 */
5448 static void start_io(struct ctlr_info *h, unsigned long *flags)
5450 struct CommandList *c;
5452 while (!list_empty(&h->reqQ)) {
5453 c = list_entry(h->reqQ.next, struct CommandList, list);
5454 /* can't do anything if fifo is full */
5455 if ((h->access.fifo_full(h))) {
5456 h->fifo_recently_full = 1;
5457 dev_warn(&h->pdev->dev, "fifo full\n");
5460 h->fifo_recently_full = 0;
5462 /* Get the first entry from the Request Q */
5463 removeQ(c);
5464 h->Qdepth--;
5466 /* Put job onto the completed Q */
5467 addQ(&h->cmpQ, c);
5469 /* Must increment commands_outstanding before unlocking
5470 * and submitting, to avoid racing with the fifo-full check.
5471 */
5473 h->commands_outstanding++;
5475 /* Tell the controller execute command */
5476 spin_unlock_irqrestore(&h->lock, *flags);
5477 h->access.submit_command(h, c);
5478 spin_lock_irqsave(&h->lock, *flags);
5479 }
5480 }
5482 static void lock_and_start_io(struct ctlr_info *h)
5484 unsigned long flags;
5486 spin_lock_irqsave(&h->lock, flags);
5487 start_io(h, &flags);
5488 spin_unlock_irqrestore(&h->lock, flags);
5489 }
5491 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
5493 return h->access.command_completed(h, q);
5496 static inline bool interrupt_pending(struct ctlr_info *h)
5498 return h->access.intr_pending(h);
5501 static inline long interrupt_not_for_us(struct ctlr_info *h)
5503 return (h->access.intr_pending(h) == 0) ||
5504 (h->interrupts_enabled == 0);
5507 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
5508 u32 raw_tag)
5510 if (unlikely(tag_index >= h->nr_cmds)) {
5511 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
5512 return 1;
5513 }
5514 return 0;
5515 }
5517 static inline void finish_cmd(struct CommandList *c)
5519 unsigned long flags;
5520 int io_may_be_stalled = 0;
5521 struct ctlr_info *h = c->h;
5523 spin_lock_irqsave(&h->lock, flags);
5524 removeQ(c);
5526 /*
5527 * Check for possibly stalled i/o.
5529 * If a fifo_full condition is encountered, requests will back up
5530 * in h->reqQ. This queue is only emptied out by start_io which is
5531 * only called when a new i/o request comes in. If no i/o's are
5532 * forthcoming, the i/o's in h->reqQ can get stuck. So we call
5533 * start_io from here if we detect such a danger.
5535 * Normally, we shouldn't hit this case, but pounding on the
5536 * CCISS_PASSTHRU ioctl can provoke it. Only call start_io if
5537 * commands_outstanding is low. We want to avoid calling
5538 * start_io from in here as much as possible, and esp. don't
5539 * want to get in a cycle where we call start_io every time
5540 * through this path.
5541 */
5542 if (unlikely(h->fifo_recently_full) &&
5543 h->commands_outstanding < 5)
5544 io_may_be_stalled = 1;
5546 spin_unlock_irqrestore(&h->lock, flags);
5548 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
5549 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5550 || c->cmd_type == CMD_IOACCEL2))
5551 complete_scsi_command(c);
5552 else if (c->cmd_type == CMD_IOCTL_PEND)
5553 complete(c->waiting);
5554 if (unlikely(io_may_be_stalled))
5555 lock_and_start_io(h);
5556 }
5558 static inline u32 hpsa_tag_contains_index(u32 tag)
5560 return tag & DIRECT_LOOKUP_BIT;
5563 static inline u32 hpsa_tag_to_index(u32 tag)
5565 return tag >> DIRECT_LOOKUP_SHIFT;
5569 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
5571 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
5572 #define HPSA_SIMPLE_ERROR_BITS 0x03
5573 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
5574 return tag & ~HPSA_SIMPLE_ERROR_BITS;
5575 return tag & ~HPSA_PERF_ERROR_BITS;
5576 }
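/*
 * Illustration (not from the driver): a direct-lookup tag round trip.
 * On submit the command index is shifted up and the lookup bit set (see
 * hpsa_scsi_queue_command_lck()); completion recovers the index:
 *
 *	u32 tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
 *	...
 *	if (hpsa_tag_contains_index(tag))
 *		c = h->cmd_pool + hpsa_tag_to_index(tag);
 *
 * Bits below DIRECT_LOOKUP_SHIFT carry error status, which is why
 * hpsa_tag_discard_error_bits() masks them off first.
 */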
5578 /* process completion of an indexed ("direct lookup") command */
5579 static inline void process_indexed_cmd(struct ctlr_info *h,
5580 u32 raw_tag)
5582 u32 tag_index;
5583 struct CommandList *c;
5585 tag_index = hpsa_tag_to_index(raw_tag);
5586 if (!bad_tag(h, tag_index, raw_tag)) {
5587 c = h->cmd_pool + tag_index;
5588 finish_cmd(c);
5589 }
5590 }
5592 /* process completion of a non-indexed command */
5593 static inline void process_nonindexed_cmd(struct ctlr_info *h,
5594 u32 raw_tag)
5596 u32 tag;
5597 struct CommandList *c = NULL;
5598 unsigned long flags;
5600 tag = hpsa_tag_discard_error_bits(h, raw_tag);
5601 spin_lock_irqsave(&h->lock, flags);
5602 list_for_each_entry(c, &h->cmpQ, list) {
5603 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
5604 spin_unlock_irqrestore(&h->lock, flags);
5605 finish_cmd(c);
5606 return;
5607 }
5608 }
5609 spin_unlock_irqrestore(&h->lock, flags);
5610 bad_tag(h, h->nr_cmds + 1, raw_tag);
5611 }
5613 /* Some controllers, like p400, will give us one interrupt
5614 * after a soft reset, even if we turned interrupts off.
5615 * Only need to check for this in the hpsa_xxx_discard_completions
5616 * functions.
5617 */
5618 static int ignore_bogus_interrupt(struct ctlr_info *h)
5620 if (likely(!reset_devices))
5621 return 0;
5623 if (likely(h->interrupts_enabled))
5624 return 0;
5626 dev_info(&h->pdev->dev,
5627 "Received interrupt while interrupts disabled (known firmware bug). Ignoring.\n");
5628 return 1;
5629 }
5632 /*
5633 * Convert &h->q[x] (passed to interrupt handlers) back to h.
5634 * Relies on (h->q[x] == x) being true for x such that
5635 * 0 <= x < MAX_REPLY_QUEUES.
5636 */
5637 static struct ctlr_info *queue_to_hba(u8 *queue)
5639 return container_of((queue - *queue), struct ctlr_info, q[0]);
5640 }
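/*
 * Illustration (not from the driver): h->q[] is set up so each slot
 * stores its own index, e.g.:
 *
 *	for (i = 0; i < MAX_REPLY_QUEUES; i++)
 *		h->q[i] = (u8) i;
 *
 * A handler given &h->q[x] subtracts the stored value to reach
 * &h->q[0], and container_of() then recovers the enclosing ctlr_info,
 * so queue_to_hba(&h->q[x]) == h for every valid x.
 */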
5642 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
5644 struct ctlr_info *h = queue_to_hba(queue);
5645 u8 q = *(u8 *) queue;
5646 u32 raw_tag;
5648 if (ignore_bogus_interrupt(h))
5649 return IRQ_NONE;
5651 if (interrupt_not_for_us(h))
5652 return IRQ_NONE;
5653 h->last_intr_timestamp = get_jiffies_64();
5654 while (interrupt_pending(h)) {
5655 raw_tag = get_next_completion(h, q);
5656 while (raw_tag != FIFO_EMPTY)
5657 raw_tag = next_command(h, q);
5658 }
5659 return IRQ_HANDLED;
5660 }
5662 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
5664 struct ctlr_info *h = queue_to_hba(queue);
5665 u32 raw_tag;
5666 u8 q = *(u8 *) queue;
5668 if (ignore_bogus_interrupt(h))
5669 return IRQ_NONE;
5671 h->last_intr_timestamp = get_jiffies_64();
5672 raw_tag = get_next_completion(h, q);
5673 while (raw_tag != FIFO_EMPTY)
5674 raw_tag = next_command(h, q);
5675 return IRQ_HANDLED;
5676 }
5678 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
5680 struct ctlr_info *h = queue_to_hba((u8 *) queue);
5681 u32 raw_tag;
5682 u8 q = *(u8 *) queue;
5684 if (interrupt_not_for_us(h))
5685 return IRQ_NONE;
5686 h->last_intr_timestamp = get_jiffies_64();
5687 while (interrupt_pending(h)) {
5688 raw_tag = get_next_completion(h, q);
5689 while (raw_tag != FIFO_EMPTY) {
5690 if (likely(hpsa_tag_contains_index(raw_tag)))
5691 process_indexed_cmd(h, raw_tag);
5692 else
5693 process_nonindexed_cmd(h, raw_tag);
5694 raw_tag = next_command(h, q);
5695 }
5696 }
5697 return IRQ_HANDLED;
5698 }
5700 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
5702 struct ctlr_info *h = queue_to_hba(queue);
5703 u32 raw_tag;
5704 u8 q = *(u8 *) queue;
5706 h->last_intr_timestamp = get_jiffies_64();
5707 raw_tag = get_next_completion(h, q);
5708 while (raw_tag != FIFO_EMPTY) {
5709 if (likely(hpsa_tag_contains_index(raw_tag)))
5710 process_indexed_cmd(h, raw_tag);
5711 else
5712 process_nonindexed_cmd(h, raw_tag);
5713 raw_tag = next_command(h, q);
5714 }
5715 return IRQ_HANDLED;
5716 }
5718 /* Send a message CDB to the firmware. Careful, this only works
5719 * in simple mode, not performant mode due to the tag lookup.
5720 * We only ever use this immediately after a controller reset.
5721 */
5722 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
5723 unsigned char type)
5725 struct Command {
5726 struct CommandListHeader CommandHeader;
5727 struct RequestBlock Request;
5728 struct ErrDescriptor ErrorDescriptor;
5729 };
5730 struct Command *cmd;
5731 static const size_t cmd_sz = sizeof(*cmd) +
5732 sizeof(cmd->ErrorDescriptor);
5733 dma_addr_t paddr64;
5734 uint32_t paddr32, tag;
5735 void __iomem *vaddr;
5736 int i, err;
5738 vaddr = pci_ioremap_bar(pdev, 0);
5739 if (vaddr == NULL)
5740 return -ENOMEM;
5742 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
5743 * CCISS commands, so they must be allocated from the lower 4GiB of
5744 * memory.
5745 */
5746 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5747 if (err) {
5748 iounmap(vaddr);
5749 return -ENOMEM;
5750 }
5752 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
5753 if (cmd == NULL) {
5754 iounmap(vaddr);
5755 return -ENOMEM;
5756 }
5758 /* This must fit, because of the 32-bit consistent DMA mask. Also,
5759 * although there's no guarantee, we assume that the address is at
5760 * least 4-byte aligned (most likely, it's page-aligned).
5761 */
5762 paddr32 = paddr64;
5764 cmd->CommandHeader.ReplyQueue = 0;
5765 cmd->CommandHeader.SGList = 0;
5766 cmd->CommandHeader.SGTotal = 0;
5767 cmd->CommandHeader.Tag.lower = paddr32;
5768 cmd->CommandHeader.Tag.upper = 0;
5769 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
5771 cmd->Request.CDBLen = 16;
5772 cmd->Request.Type.Type = TYPE_MSG;
5773 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
5774 cmd->Request.Type.Direction = XFER_NONE;
5775 cmd->Request.Timeout = 0; /* Don't time out */
5776 cmd->Request.CDB[0] = opcode;
5777 cmd->Request.CDB[1] = type;
5778 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
5779 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
5780 cmd->ErrorDescriptor.Addr.upper = 0;
5781 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
5783 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
5785 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
5786 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
5787 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
5788 break;
5789 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
5790 }
5792 iounmap(vaddr);
5794 /* we leak the DMA buffer here ... no choice since the controller could
5795 * still complete the command.
5796 */
5797 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
5798 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
5799 opcode, type);
5800 return -ETIMEDOUT;
5801 }
5803 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
5805 if (tag & HPSA_ERROR_BIT) {
5806 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
5807 opcode, type);
5808 return -EIO;
5809 }
5811 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
5812 opcode, type);
5813 return 0;
5814 }
5816 #define hpsa_noop(p) hpsa_message(p, 3, 0)
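/*
 * Usage note (illustrative): hpsa_noop(pdev) sends message opcode 3,
 * type 0 -- a no-op, as the name suggests -- through the simple-mode
 * post/reply registers; it is only useful right after a controller
 * reset, before performant mode is re-enabled.
 */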
5818 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
5819 void __iomem *vaddr, u32 use_doorbell)
5821 u16 pmcsr;
5822 int pos;
5824 if (use_doorbell) {
5825 /* For everything after the P600, the PCI power state method
5826 * of resetting the controller doesn't work, so we have this
5827 * other way using the doorbell register.
5829 dev_info(&pdev->dev, "using doorbell to reset controller\n");
5830 writel(use_doorbell, vaddr + SA5_DOORBELL);
5832 /* PMC hardware guys tell us we need a 10 second delay after
5833 * doorbell reset and before any attempt to talk to the board
5834 * at all to ensure that this actually works and doesn't fall
5835 * over in some weird corner cases.
5836 */
5837 msleep(10000);
5838 } else { /* Try to do it the PCI power state way */
5840 /* Quoting from the Open CISS Specification: "The Power
5841 * Management Control/Status Register (CSR) controls the power
5842 * state of the device. The normal operating state is D0,
5843 * CSR=00h. The software off state is D3, CSR=03h. To reset
5844 * the controller, place the interface device in D3 then to D0,
5845 * this causes a secondary PCI reset which will reset the
5846 * controller." */
5848 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
5851 "hpsa_reset_controller: "
5852 "PCI PM not supported\n");
5855 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
5856 /* enter the D3hot power management state */
5857 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
5858 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
5859 pmcsr |= PCI_D3hot;
5860 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
5862 msleep(500);
5864 /* enter the D0 power management state */
5865 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
5866 pmcsr |= PCI_D0;
5867 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
5869 /*
5870 * The P600 requires a small delay when changing states.
5871 * Otherwise we may think the board did not reset and we bail.
5872 * This is for kdump only and is particular to the P600.
5873 */
5874 msleep(500);
5875 }
5876 return 0;
5877 }
5879 static void init_driver_version(char *driver_version, int len)
5881 memset(driver_version, 0, len);
5882 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
5885 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
5887 char *driver_version;
5888 int i, size = sizeof(cfgtable->driver_version);
5890 driver_version = kmalloc(size, GFP_KERNEL);
5891 if (!driver_version)
5892 return -ENOMEM;
5894 init_driver_version(driver_version, size);
5895 for (i = 0; i < size; i++)
5896 writeb(driver_version[i], &cfgtable->driver_version[i]);
5897 kfree(driver_version);
5898 return 0;
5899 }
5901 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
5902 unsigned char *driver_ver)
5903 {
5904 int i;
5906 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
5907 driver_ver[i] = readb(&cfgtable->driver_version[i]);
5908 }
5910 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
5913 char *driver_ver, *old_driver_ver;
5914 int rc, size = sizeof(cfgtable->driver_version);
5916 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
5917 if (!old_driver_ver)
5918 return -ENOMEM;
5919 driver_ver = old_driver_ver + size;
5921 /* After a reset, the 32 bytes of "driver version" in the cfgtable
5922 * should have been changed, otherwise we know the reset failed.
5923 */
5924 init_driver_version(old_driver_ver, size);
5925 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
5926 rc = !memcmp(driver_ver, old_driver_ver, size);
5927 kfree(old_driver_ver);
5928 return rc;
5929 }
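/*
 * Illustration (not from the driver): the detection works because a
 * successful reset wipes the driver_version bytes in the config table:
 *
 *	write_driver_ver_to_cfgtable(cfgtable);	// stamp "hpsa 3.4.4-1"
 *	...hard reset...
 *	rc = controller_reset_failed(cfgtable);	// 1 => stamp survived,
 *						// so no reset happened
 */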
5930 /* This does a hard reset of the controller using PCI power management
5931 * states, or using the doorbell register.
5932 */
5933 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
5935 u64 cfg_offset;
5936 u32 cfg_base_addr;
5937 u64 cfg_base_addr_index;
5938 void __iomem *vaddr;
5939 unsigned long paddr;
5940 u32 misc_fw_support;
5941 int rc;
5942 struct CfgTable __iomem *cfgtable;
5943 u32 use_doorbell;
5944 u32 board_id;
5945 u16 command_register;
5947 /* For controllers as old as the P600, this is very nearly
5948 * the same thing as
5950 * pci_save_state(pci_dev);
5951 * pci_set_power_state(pci_dev, PCI_D3hot);
5952 * pci_set_power_state(pci_dev, PCI_D0);
5953 * pci_restore_state(pci_dev);
5955 * For controllers newer than the P600, the pci power state
5956 * method of resetting doesn't work so we have another way
5957 * using the doorbell register.
5958 */
5960 rc = hpsa_lookup_board_id(pdev, &board_id);
5961 if (rc < 0 || !ctlr_is_resettable(board_id)) {
5962 dev_warn(&pdev->dev, "Not resetting device.\n");
5966 /* if controller is soft- but not hard resettable... */
5967 if (!ctlr_is_hard_resettable(board_id))
5968 return -ENOTSUPP; /* try soft reset later. */
5970 /* Save the PCI command register */
5971 pci_read_config_word(pdev, 4, &command_register);
5972 pci_save_state(pdev);
5974 /* find the first memory BAR, so we can find the cfg table */
5975 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
5976 if (rc)
5977 return rc;
5978 vaddr = remap_pci_mem(paddr, 0x250);
5979 if (!vaddr)
5980 return -ENOMEM;
5982 /* find cfgtable in order to check if reset via doorbell is supported */
5983 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
5984 &cfg_base_addr_index, &cfg_offset);
5985 if (rc)
5986 goto unmap_vaddr;
5987 cfgtable = remap_pci_mem(pci_resource_start(pdev,
5988 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
5989 if (!cfgtable) {
5990 rc = -ENOMEM;
5991 goto unmap_vaddr;
5992 }
5993 rc = write_driver_ver_to_cfgtable(cfgtable);
5994 if (rc)
5995 goto unmap_cfgtable;
5997 /* If reset via doorbell register is supported, use that.
5998 * There are two such methods. Favor the newest method.
5999 */
6000 misc_fw_support = readl(&cfgtable->misc_fw_support);
6001 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6002 if (use_doorbell) {
6003 use_doorbell = DOORBELL_CTLR_RESET2;
6004 } else {
6005 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6006 if (use_doorbell) {
6007 dev_warn(&pdev->dev,
6008 "Soft reset not supported. Firmware update is required.\n");
6009 rc = -ENOTSUPP; /* try soft reset */
6010 goto unmap_cfgtable;
6011 }
6012 }
6014 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6015 if (rc)
6016 goto unmap_cfgtable;
6018 pci_restore_state(pdev);
6019 pci_write_config_word(pdev, 4, command_register);
6021 /* Some devices (notably the HP Smart Array 5i Controller)
6022 need a little pause here */
6023 msleep(HPSA_POST_RESET_PAUSE_MSECS);
6025 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6026 if (rc) {
6027 dev_warn(&pdev->dev,
6028 "failed waiting for board to become ready after hard reset\n");
6029 goto unmap_cfgtable;
6030 }
6033 rc = controller_reset_failed(vaddr);
6034 if (rc < 0)
6035 goto unmap_cfgtable;
6036 if (rc) {
6037 dev_warn(&pdev->dev,
6038 "Unable to successfully reset controller. Will try soft reset.\n");
6039 rc = -ENOTSUPP;
6040 } else {
6041 dev_info(&pdev->dev, "board ready after hard reset.\n");
6042 }
6044 unmap_cfgtable:
6045 iounmap(cfgtable);
6047 unmap_vaddr:
6048 iounmap(vaddr);
6049 return rc;
6050 }
6052 /*
6053 * We cannot read the structure directly; for portability we must use
6054 * the io functions.
6055 * This is for debug only.
6056 */
6057 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
6063 dev_info(dev, "Controller Configuration information\n");
6064 dev_info(dev, "------------------------------------\n");
6065 for (i = 0; i < 4; i++)
6066 temp_name[i] = readb(&(tb->Signature[i]));
6067 temp_name[4] = '\0';
6068 dev_info(dev, " Signature = %s\n", temp_name);
6069 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
6070 dev_info(dev, " Transport methods supported = 0x%x\n",
6071 readl(&(tb->TransportSupport)));
6072 dev_info(dev, " Transport methods active = 0x%x\n",
6073 readl(&(tb->TransportActive)));
6074 dev_info(dev, " Requested transport Method = 0x%x\n",
6075 readl(&(tb->HostWrite.TransportRequest)));
6076 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
6077 readl(&(tb->HostWrite.CoalIntDelay)));
6078 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
6079 readl(&(tb->HostWrite.CoalIntCount)));
6080 dev_info(dev, " Max outstanding commands = 0x%d\n",
6081 readl(&(tb->CmdsOutMax)));
6082 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6083 for (i = 0; i < 16; i++)
6084 temp_name[i] = readb(&(tb->ServerName[i]));
6085 temp_name[16] = '\0';
6086 dev_info(dev, " Server Name = %s\n", temp_name);
6087 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
6088 readl(&(tb->HeartBeat)));
6089 #endif /* HPSA_DEBUG */
6090 }
6092 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6094 int i, offset, mem_type, bar_type;
6096 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6097 return 0;
6098 offset = 0;
6099 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6100 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6101 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6102 offset += 4;
6103 else {
6104 mem_type = pci_resource_flags(pdev, i) &
6105 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6106 switch (mem_type) {
6107 case PCI_BASE_ADDRESS_MEM_TYPE_32:
6108 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6109 offset += 4; /* 32 bit */
6110 break;
6111 case PCI_BASE_ADDRESS_MEM_TYPE_64:
6112 offset += 8;
6113 break;
6114 default: /* reserved in PCI 2.2 */
6115 dev_warn(&pdev->dev,
6116 "base address is invalid\n");
6121 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6127 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
6128 * controllers that are capable. If not, we use IO-APIC mode.
6129 */
6131 static void hpsa_interrupt_mode(struct ctlr_info *h)
6133 #ifdef CONFIG_PCI_MSI
6134 int err, i;
6135 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6137 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6138 hpsa_msix_entries[i].vector = 0;
6139 hpsa_msix_entries[i].entry = i;
6142 /* Some boards advertise MSI but don't really support it */
6143 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6144 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
6145 goto default_int_mode;
6146 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
6147 dev_info(&h->pdev->dev, "MSIX\n");
6148 h->msix_vector = MAX_REPLY_QUEUES;
6149 if (h->msix_vector > num_online_cpus())
6150 h->msix_vector = num_online_cpus();
6151 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
6152 1, h->msix_vector);
6153 if (err < 0) {
6154 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
6155 h->msix_vector = 0;
6156 goto single_msi_mode;
6157 } else if (err < h->msix_vector) {
6158 dev_warn(&h->pdev->dev, "only %d MSI-X vectors available\n",
6159 err);
6160 }
6161 h->msix_vector = err;
6162 for (i = 0; i < h->msix_vector; i++)
6163 h->intr[i] = hpsa_msix_entries[i].vector;
6164 return;
6165 }
6166 single_msi_mode:
6167 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
6168 dev_info(&h->pdev->dev, "MSI\n");
6169 if (!pci_enable_msi(h->pdev))
6172 dev_warn(&h->pdev->dev, "MSI init failed\n");
6175 #endif /* CONFIG_PCI_MSI */
6176 /* if we get here we're going to use the default interrupt mode */
6177 h->intr[h->intr_mode] = h->pdev->irq;
6178 }
6180 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
6183 u32 subsystem_vendor_id, subsystem_device_id;
6185 subsystem_vendor_id = pdev->subsystem_vendor;
6186 subsystem_device_id = pdev->subsystem_device;
6187 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
6188 subsystem_vendor_id;
6190 for (i = 0; i < ARRAY_SIZE(products); i++)
6191 if (*board_id == products[i].board_id)
6194 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6195 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6197 dev_warn(&pdev->dev, "unrecognized board ID: "
6198 "0x%08x, ignoring.\n", *board_id);
6201 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
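/*
 * Worked example (illustrative, not part of the original driver):
 * the board_id packs the PCI subsystem device ID into the high 16
 * bits and the subsystem vendor ID into the low 16 bits, so for
 * subsystem vendor 0x103C and subsystem device 0x3225:
 *
 *	board_id = (0x3225 << 16) | 0x103C = 0x3225103C
 *
 * which is exactly the value the P600 prefetch quirk below compares
 * against.
 */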
6204 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6205 unsigned long *memory_bar)
6209 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
6210 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
6211 /* addressing mode bits already removed */
6212 *memory_bar = pci_resource_start(pdev, i);
6213 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
6217 dev_warn(&pdev->dev, "no memory BAR found\n");
6221 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6227 iterations = HPSA_BOARD_READY_ITERATIONS;
6229 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
6231 for (i = 0; i < iterations; i++) {
6232 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6233 if (wait_for_ready) {
6234 if (scratchpad == HPSA_FIRMWARE_READY)
6237 if (scratchpad != HPSA_FIRMWARE_READY)
6240 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6242 dev_warn(&pdev->dev, "board not ready, timed out.\n");
6246 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6247 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6250 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6251 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6252 *cfg_base_addr &= (u32) 0x0000ffff;
6253 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6254 if (*cfg_base_addr_index == -1) {
6255 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6261 static int hpsa_find_cfgtables(struct ctlr_info *h)
6265 u64 cfg_base_addr_index;
6269 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6270 &cfg_base_addr_index, &cfg_offset);
6273 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
6274 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
6277 rc = write_driver_ver_to_cfgtable(h->cfgtable);
6280 /* Find performant mode table. */
6281 trans_offset = readl(&h->cfgtable->TransMethodOffset);
6282 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6283 cfg_base_addr_index)+cfg_offset+trans_offset,
6284 sizeof(*h->transtable));
6290 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
6292 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
6294 /* Limit commands in memory limited kdump scenario. */
6295 if (reset_devices && h->max_commands > 32)
6296 h->max_commands = 32;
6298 if (h->max_commands < 16) {
6299 dev_warn(&h->pdev->dev, "Controller reports "
6300 "max supported commands of %d, an obvious lie. "
6301 "Using 16. Ensure that firmware is up to date.\n",
6303 h->max_commands = 16;
6307 /* Interrogate the hardware for some limits:
6308 * max commands, max SG elements without chaining, and with chaining,
6309 * SG chain block size, etc.
6311 static void hpsa_find_board_params(struct ctlr_info *h)
6313 hpsa_get_max_perf_mode_cmds(h);
6314 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
6315 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
6316 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
6318 * Limit in-command s/g elements to 32 to save dma'able memory.
6319 * However, the spec says if 0, use 31.
6321 h->max_cmd_sg_entries = 31;
6322 if (h->maxsgentries > 512) {
6323 h->max_cmd_sg_entries = 32;
6324 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
6325 h->maxsgentries--; /* save one for chain pointer */
6328 h->maxsgentries = 31; /* default to traditional values */
6331 /* Find out what task management functions are supported and cache */
6332 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
6333 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
6334 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
6335 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6336 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
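/*
 * Worked example (illustrative): if the controller reports 1024
 * scatter-gather elements, the block above yields
 *
 *	h->max_cmd_sg_entries = 32;              (embedded in the command)
 *	h->chainsize = 1024 - 32 = 992;          (entries in a chain block)
 *	h->maxsgentries = 1023;                  (one spent on the chain pointer)
 *
 * while a controller reporting 512 or fewer falls back to the
 * traditional 31 embedded entries with no chaining.
 */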
6339 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
6341 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
6342 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
6348 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
6352 driver_support = readl(&(h->cfgtable->driver_support));
6353 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
6355 driver_support |= ENABLE_SCSI_PREFETCH;
6357 driver_support |= ENABLE_UNIT_ATTN;
6358 writel(driver_support, &(h->cfgtable->driver_support));
6361 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
6362 * in a prefetch beyond physical memory.
6364 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
6368 if (h->board_id != 0x3225103C)
6370 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
6371 dma_prefetch |= 0x8000;
6372 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
6375 static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
6379 unsigned long flags;
6380 /* wait until the clear_event_notify bit 6 is cleared by the controller. */
6381 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
6382 spin_lock_irqsave(&h->lock, flags);
6383 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6384 spin_unlock_irqrestore(&h->lock, flags);
6385 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
6387 /* delay and try again */
6392 static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
6396 unsigned long flags;
6398 /* under certain very rare conditions, this can take a while.
6399 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
6400 * as we enter this code.)
6402 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
6403 spin_lock_irqsave(&h->lock, flags);
6404 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6405 spin_unlock_irqrestore(&h->lock, flags);
6406 if (!(doorbell_value & CFGTBL_ChangeReq))
6408 /* delay and try again */
6409 usleep_range(10000, 20000);
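/*
 * Doorbell handshake pattern used throughout this file (sketch):
 *
 *	writel(<new value>, &h->cfgtable->HostWrite.<field>);
 *	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 *	hpsa_wait_for_mode_change_ack(h);
 *
 * The controller acknowledges by clearing CFGTBL_ChangeReq in the
 * doorbell register; the loop above polls for that, sleeping 10-20 ms
 * per iteration for up to MAX_CONFIG_WAIT iterations.
 */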
6413 static int hpsa_enter_simple_mode(struct ctlr_info *h)
6417 trans_support = readl(&(h->cfgtable->TransportSupport));
6418 if (!(trans_support & SIMPLE_MODE))
6421 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
6423 /* Update the field, and then ring the doorbell */
6424 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
6425 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
6426 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6427 hpsa_wait_for_mode_change_ack(h);
6428 print_cfg_table(&h->pdev->dev, h->cfgtable);
6429 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
6431 h->transMethod = CFGTBL_Trans_Simple;
6434 dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
6438 static int hpsa_pci_init(struct ctlr_info *h)
6440 int prod_index, err;
6442 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
6445 h->product_name = products[prod_index].product_name;
6446 h->access = *(products[prod_index].access);
6448 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
6449 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
6451 err = pci_enable_device(h->pdev);
6453 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
6457 /* Enable bus mastering (pci_disable_device may disable this) */
6458 pci_set_master(h->pdev);
6460 err = pci_request_regions(h->pdev, HPSA);
6462 dev_err(&h->pdev->dev,
6463 "cannot obtain PCI resources, aborting\n");
6466 hpsa_interrupt_mode(h);
6467 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
6469 goto err_out_free_res;
6470 h->vaddr = remap_pci_mem(h->paddr, 0x250);
6473 goto err_out_free_res;
6475 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
6477 goto err_out_free_res;
6478 err = hpsa_find_cfgtables(h);
6480 goto err_out_free_res;
6481 hpsa_find_board_params(h);
6483 if (!hpsa_CISS_signature_present(h)) {
6485 goto err_out_free_res;
6487 hpsa_set_driver_support_bits(h);
6488 hpsa_p600_dma_prefetch_quirk(h);
6489 err = hpsa_enter_simple_mode(h);
6491 goto err_out_free_res;
6496 iounmap(h->transtable);
6498 iounmap(h->cfgtable);
6501 pci_disable_device(h->pdev);
6502 pci_release_regions(h->pdev);
6506 static void hpsa_hba_inquiry(struct ctlr_info *h)
6510 #define HBA_INQUIRY_BYTE_COUNT 64
6511 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
6512 if (!h->hba_inquiry_data)
6514 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
6515 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
6517 kfree(h->hba_inquiry_data);
6518 h->hba_inquiry_data = NULL;
6522 static int hpsa_init_reset_devices(struct pci_dev *pdev)
6529 /* The kdump kernel is loading; we don't know what state the
6530 * PCI interface is in. dev->enable_cnt is zero, so we call
6531 * enable+disable, wait a while, and switch it on.
6533 rc = pci_enable_device(pdev);
6535 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
6538 pci_disable_device(pdev);
6539 msleep(260); /* a randomly chosen number */
6540 rc = pci_enable_device(pdev);
6542 dev_warn(&pdev->dev, "failed to enable device.\n");
6545 pci_set_master(pdev);
6546 /* Reset the controller with a PCI power-cycle or via doorbell */
6547 rc = hpsa_kdump_hard_reset_controller(pdev);
6549 /* -ENOTSUPP here means we cannot reset the controller
6550 * but it's already (and still) up and running in
6551 * "performant mode". Or, it might be 640x, which can't reset
6552 * due to concerns about shared bbwc between the 6402/6404 pair.
6555 if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
6560 /* Now try to get the controller to respond to a no-op */
6561 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
6562 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
6563 if (hpsa_noop(pdev) == 0)
6566 dev_warn(&pdev->dev, "no-op failed%s\n",
6567 (i < 11 ? "; re-trying" : ""));
6572 pci_disable_device(pdev);
6576 static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
6578 h->cmd_pool_bits = kzalloc(
6579 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
6580 sizeof(unsigned long), GFP_KERNEL);
6581 h->cmd_pool = pci_alloc_consistent(h->pdev,
6582 h->nr_cmds * sizeof(*h->cmd_pool),
6583 &(h->cmd_pool_dhandle));
6584 h->errinfo_pool = pci_alloc_consistent(h->pdev,
6585 h->nr_cmds * sizeof(*h->errinfo_pool),
6586 &(h->errinfo_pool_dhandle));
6587 if ((h->cmd_pool_bits == NULL)
6588 || (h->cmd_pool == NULL)
6589 || (h->errinfo_pool == NULL)) {
6590 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
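/*
 * Worked example (illustrative): cmd_pool_bits keeps one bit per
 * command.  With h->nr_cmds = 1000 on a 64-bit kernel,
 *
 *	DIV_ROUND_UP(1000, BITS_PER_LONG) = DIV_ROUND_UP(1000, 64) = 16
 *
 * so sixteen unsigned longs (128 bytes) track which CommandList and
 * ErrorInfo slots are in use.
 */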
6596 static void hpsa_free_cmd_pool(struct ctlr_info *h)
6598 kfree(h->cmd_pool_bits);
6600 pci_free_consistent(h->pdev,
6601 h->nr_cmds * sizeof(struct CommandList),
6602 h->cmd_pool, h->cmd_pool_dhandle);
6603 if (h->ioaccel2_cmd_pool)
6604 pci_free_consistent(h->pdev,
6605 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
6606 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
6607 if (h->errinfo_pool)
6608 pci_free_consistent(h->pdev,
6609 h->nr_cmds * sizeof(struct ErrorInfo),
6611 h->errinfo_pool_dhandle);
6612 if (h->ioaccel_cmd_pool)
6613 pci_free_consistent(h->pdev,
6614 h->nr_cmds * sizeof(struct io_accel1_cmd),
6615 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
6618 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
6622 cpu = cpumask_first(cpu_online_mask);
6623 for (i = 0; i < h->msix_vector; i++) {
6624 rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
6625 cpu = cpumask_next(cpu, cpu_online_mask);
6629 static int hpsa_request_irq(struct ctlr_info *h,
6630 irqreturn_t (*msixhandler)(int, void *),
6631 irqreturn_t (*intxhandler)(int, void *))
6636 * initialize h->q[x] = x so that interrupt handlers know which
6639 for (i = 0; i < MAX_REPLY_QUEUES; i++)
6642 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
6643 /* If performant mode and MSI-X, use multiple reply queues */
6644 for (i = 0; i < h->msix_vector; i++)
6645 rc = request_irq(h->intr[i], msixhandler,
6648 hpsa_irq_affinity_hints(h);
6650 /* Use single reply pool */
6651 if (h->msix_vector > 0 || h->msi_vector) {
6652 rc = request_irq(h->intr[h->intr_mode],
6653 msixhandler, 0, h->devname,
6654 &h->q[h->intr_mode]);
6656 rc = request_irq(h->intr[h->intr_mode],
6657 intxhandler, IRQF_SHARED, h->devname,
6658 &h->q[h->intr_mode]);
6662 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
6663 h->intr[h->intr_mode], h->devname);
6669 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
6671 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
6672 HPSA_RESET_TYPE_CONTROLLER)) {
6673 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
6677 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
6678 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
6679 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
6683 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
6684 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
6685 dev_warn(&h->pdev->dev, "Board failed to become ready "
6686 "after soft reset.\n");
6693 static void free_irqs(struct ctlr_info *h)
6697 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
6698 /* Single reply queue, only one irq to free */
6700 irq_set_affinity_hint(h->intr[i], NULL);
6701 free_irq(h->intr[i], &h->q[i]);
6705 for (i = 0; i < h->msix_vector; i++) {
6706 irq_set_affinity_hint(h->intr[i], NULL);
6707 free_irq(h->intr[i], &h->q[i]);
6711 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
6714 #ifdef CONFIG_PCI_MSI
6715 if (h->msix_vector) {
6716 if (h->pdev->msix_enabled)
6717 pci_disable_msix(h->pdev);
6718 } else if (h->msi_vector) {
6719 if (h->pdev->msi_enabled)
6720 pci_disable_msi(h->pdev);
6722 #endif /* CONFIG_PCI_MSI */
6725 static void hpsa_free_reply_queues(struct ctlr_info *h)
6729 for (i = 0; i < h->nreply_queues; i++) {
6730 if (!h->reply_queue[i].head)
6732 pci_free_consistent(h->pdev, h->reply_queue_size,
6733 h->reply_queue[i].head, h->reply_queue[i].busaddr);
6734 h->reply_queue[i].head = NULL;
6735 h->reply_queue[i].busaddr = 0;
6739 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6741 hpsa_free_irqs_and_disable_msix(h);
6742 hpsa_free_sg_chain_blocks(h);
6743 hpsa_free_cmd_pool(h);
6744 kfree(h->ioaccel1_blockFetchTable);
6745 kfree(h->blockFetchTable);
6746 hpsa_free_reply_queues(h);
6750 iounmap(h->transtable);
6752 iounmap(h->cfgtable);
6753 pci_disable_device(h->pdev);
6754 pci_release_regions(h->pdev);
6758 /* Called when controller lockup detected. */
6759 static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
6761 struct CommandList *c = NULL;
6763 assert_spin_locked(&h->lock);
6764 /* Mark all outstanding commands as failed and complete them. */
6765 while (!list_empty(list)) {
6766 c = list_entry(list->next, struct CommandList, list);
6767 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
6772 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
6776 cpu = cpumask_first(cpu_online_mask);
6777 for (i = 0; i < num_online_cpus(); i++) {
6778 u32 *lockup_detected;
6779 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
6780 *lockup_detected = value;
6781 cpu = cpumask_next(cpu, cpu_online_mask);
6783 wmb(); /* be sure the per-cpu variables are out to memory */
6786 static void controller_lockup_detected(struct ctlr_info *h)
6788 unsigned long flags;
6789 u32 lockup_detected;
6791 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6792 spin_lock_irqsave(&h->lock, flags);
6793 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
6794 if (!lockup_detected) {
6795 /* no heartbeat, but controller gave us a zero. */
6796 dev_warn(&h->pdev->dev,
6797 "lockup detected but scratchpad register is zero\n");
6798 lockup_detected = 0xffffffff;
6800 set_lockup_detected_for_all_cpus(h, lockup_detected);
6801 spin_unlock_irqrestore(&h->lock, flags);
6802 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
6804 pci_disable_device(h->pdev);
6805 spin_lock_irqsave(&h->lock, flags);
6806 fail_all_cmds_on_list(h, &h->cmpQ);
6807 fail_all_cmds_on_list(h, &h->reqQ);
6808 spin_unlock_irqrestore(&h->lock, flags);
6811 static void detect_controller_lockup(struct ctlr_info *h)
6815 unsigned long flags;
6817 now = get_jiffies_64();
6818 /* If we've received an interrupt recently, we're ok. */
6819 if (time_after64(h->last_intr_timestamp +
6820 (h->heartbeat_sample_interval), now))
6824 * If we've already checked the heartbeat recently, we're ok.
6825 * This could happen if someone sends us a signal. We
6826 * otherwise don't care about signals in this thread.
6828 if (time_after64(h->last_heartbeat_timestamp +
6829 (h->heartbeat_sample_interval), now))
6832 /* If heartbeat has not changed since we last looked, we're not ok. */
6833 spin_lock_irqsave(&h->lock, flags);
6834 heartbeat = readl(&h->cfgtable->HeartBeat);
6835 spin_unlock_irqrestore(&h->lock, flags);
6836 if (h->last_heartbeat == heartbeat) {
6837 controller_lockup_detected(h);
6842 h->last_heartbeat = heartbeat;
6843 h->last_heartbeat_timestamp = now;
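/*
 * Lockup-detection timeline (sketch): with a sample interval of T
 * jiffies, a lockup is declared only when all three hold:
 *
 *	1. no interrupt has been seen within the last T jiffies,
 *	2. the previous heartbeat check is at least T jiffies old, and
 *	3. cfgtable->HeartBeat has not advanced since that check.
 *
 * A healthy controller increments HeartBeat continuously, so step 3
 * fails and the timestamps are simply refreshed.
 */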
6846 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
6851 /* Clear the driver-requested rescan flag */
6852 h->drv_req_rescan = 0;
6854 /* Ask the controller to clear the events we're handling. */
6855 if ((h->transMethod & (CFGTBL_Trans_io_accel1
6856 | CFGTBL_Trans_io_accel2)) &&
6857 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
6858 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
6860 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
6861 event_type = "state change";
6862 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
6863 event_type = "configuration change";
6864 /* Stop sending new RAID offload reqs via the IO accelerator */
6865 scsi_block_requests(h->scsi_host);
6866 for (i = 0; i < h->ndevices; i++)
6867 h->dev[i]->offload_enabled = 0;
6868 hpsa_drain_accel_commands(h);
6869 /* Set 'accelerator path config change' bit */
6870 dev_warn(&h->pdev->dev,
6871 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
6872 h->events, event_type);
6873 writel(h->events, &(h->cfgtable->clear_event_notify));
6874 /* Set the "clear event notify field update" bit 6 */
6875 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6876 /* Wait until ctlr clears 'clear event notify field', bit 6 */
6877 hpsa_wait_for_clear_event_notify_ack(h);
6878 scsi_unblock_requests(h->scsi_host);
6880 /* Acknowledge controller notification events. */
6881 writel(h->events, &(h->cfgtable->clear_event_notify));
6882 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6883 hpsa_wait_for_clear_event_notify_ack(h);
6885 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6886 hpsa_wait_for_mode_change_ack(h);
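/*
 * Event-acknowledgement protocol used above (sketch):
 *
 *	1. write the handled event bits to cfgtable->clear_event_notify
 *	2. ring DOORBELL_CLEAR_EVENTS (bit 6) in SA5_DOORBELL
 *	3. poll until the controller clears bit 6 again
 *
 * For ioaccel path state/config changes the driver first blocks SCSI
 * requests, disables offload on every device, and drains accelerated
 * commands, so nothing is in flight on a path whose state is changing.
 */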
6892 /* Check a register on the controller to see if there are configuration
6893 * changes (added/changed/removed logical drives, etc.) which mean that
6894 * we should rescan the controller for devices.
6895 * Also check flag for driver-initiated rescan.
6897 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
6899 if (h->drv_req_rescan)
6902 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
6905 h->events = readl(&(h->cfgtable->event_notify));
6906 return h->events & RESCAN_REQUIRED_EVENT_BITS;
6910 * Check if any of the offline devices have become ready
6912 static int hpsa_offline_devices_ready(struct ctlr_info *h)
6914 unsigned long flags;
6915 struct offline_device_entry *d;
6916 struct list_head *this, *tmp;
6918 spin_lock_irqsave(&h->offline_device_lock, flags);
6919 list_for_each_safe(this, tmp, &h->offline_device_list) {
6920 d = list_entry(this, struct offline_device_entry,
6922 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6923 if (!hpsa_volume_offline(h, d->scsi3addr)) {
6924 spin_lock_irqsave(&h->offline_device_lock, flags);
6925 list_del(&d->offline_list);
6926 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6929 spin_lock_irqsave(&h->offline_device_lock, flags);
6931 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6936 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
6938 unsigned long flags;
6939 struct ctlr_info *h = container_of(to_delayed_work(work),
6940 struct ctlr_info, monitor_ctlr_work);
6941 detect_controller_lockup(h);
6942 if (lockup_detected(h))
6945 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
6946 scsi_host_get(h->scsi_host);
6947 h->drv_req_rescan = 0;
6948 hpsa_ack_ctlr_events(h);
6949 hpsa_scan_start(h->scsi_host);
6950 scsi_host_put(h->scsi_host);
6953 spin_lock_irqsave(&h->lock, flags);
6954 if (h->remove_in_progress) {
6955 spin_unlock_irqrestore(&h->lock, flags);
6958 schedule_delayed_work(&h->monitor_ctlr_work,
6959 h->heartbeat_sample_interval);
6960 spin_unlock_irqrestore(&h->lock, flags);
6963 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6966 struct ctlr_info *h;
6967 int try_soft_reset = 0;
6968 unsigned long flags;
6970 if (number_of_controllers == 0)
6971 printk(KERN_INFO DRIVER_NAME "\n");
6973 rc = hpsa_init_reset_devices(pdev);
6975 if (rc != -ENOTSUPP)
6977 /* If the reset fails in a particular way (it has no way to do
6978 * a proper hard reset, so returns -ENOTSUPP) we can try to do
6979 * a soft reset once we get the controller configured up to the
6980 * point that it can accept a command.
6986 reinit_after_soft_reset:
6988 /* Command structures must be aligned on a 32-byte boundary because
6989 * the 5 lower bits of the address are used by the hardware and by
6990 * the driver. See comments in hpsa.h for more info.
6992 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
6993 h = kzalloc(sizeof(*h), GFP_KERNEL);
6998 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
6999 INIT_LIST_HEAD(&h->cmpQ);
7000 INIT_LIST_HEAD(&h->reqQ);
7001 INIT_LIST_HEAD(&h->offline_device_list);
7002 spin_lock_init(&h->lock);
7003 spin_lock_init(&h->offline_device_lock);
7004 spin_lock_init(&h->scan_lock);
7005 spin_lock_init(&h->passthru_count_lock);
7007 /* Allocate and clear per-cpu variable lockup_detected */
7008 h->lockup_detected = alloc_percpu(u32);
7009 if (!h->lockup_detected) {
7013 set_lockup_detected_for_all_cpus(h, 0);
7015 rc = hpsa_pci_init(h);
7019 sprintf(h->devname, HPSA "%d", number_of_controllers);
7020 h->ctlr = number_of_controllers;
7021 number_of_controllers++;
7023 /* configure PCI DMA stuff */
7024 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7028 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7032 dev_err(&pdev->dev, "no suitable DMA available\n");
7037 /* make sure the board interrupts are off */
7038 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7040 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
7042 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
7043 h->devname, pdev->device,
7044 h->intr[h->intr_mode], dac ? "" : " not");
7045 if (hpsa_allocate_cmd_pool(h))
7047 if (hpsa_allocate_sg_chain_blocks(h))
7049 init_waitqueue_head(&h->scan_wait_queue);
7050 h->scan_finished = 1; /* no scan currently in progress */
7052 pci_set_drvdata(pdev, h);
7054 h->hba_mode_enabled = 0;
7055 h->scsi_host = NULL;
7056 spin_lock_init(&h->devlock);
7057 hpsa_put_ctlr_into_performant_mode(h);
7059 /* At this point, the controller is ready to take commands.
7060 * Now, if reset_devices and the hard reset didn't work, try
7061 * the soft reset and see if that works.
7063 if (try_soft_reset) {
7065 /* This is kind of gross. We may or may not get a completion
7066 * from the soft reset command, and if we do, then the value
7067 * from the fifo may or may not be valid. So, we wait 10 secs
7068 * after the reset, throwing away any completions we get during
7069 * that time. Unregister the interrupt handler and register
7070 * fake ones to scoop up any residual completions.
7072 spin_lock_irqsave(&h->lock, flags);
7073 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7074 spin_unlock_irqrestore(&h->lock, flags);
7076 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
7077 hpsa_intx_discard_completions);
7079 dev_warn(&h->pdev->dev, "Failed to request_irq after "
7084 rc = hpsa_kdump_soft_reset(h);
7086 /* Neither hard nor soft reset worked, we're hosed. */
7089 dev_info(&h->pdev->dev, "Board READY.\n");
7090 dev_info(&h->pdev->dev,
7091 "Waiting for stale completions to drain.\n");
7092 h->access.set_intr_mask(h, HPSA_INTR_ON);
7094 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7096 rc = controller_reset_failed(h->cfgtable);
7098 dev_info(&h->pdev->dev,
7099 "Soft reset appears to have failed.\n");
7101 /* since the controller's reset, we have to go back and re-init
7102 * everything. Easiest to just forget what we've done and do it
7105 hpsa_undo_allocations_after_kdump_soft_reset(h);
7108 /* don't go to clean4, we already unallocated */
7111 goto reinit_after_soft_reset;
7114 /* Enable Accelerated IO path at driver layer */
7115 h->acciopath_status = 1;
7117 h->drv_req_rescan = 0;
7119 /* Turn the interrupts on so we can service requests */
7120 h->access.set_intr_mask(h, HPSA_INTR_ON);
7122 hpsa_hba_inquiry(h);
7123 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
7125 /* Monitor the controller for firmware lockups */
7126 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
7127 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
7128 schedule_delayed_work(&h->monitor_ctlr_work,
7129 h->heartbeat_sample_interval);
7133 hpsa_free_sg_chain_blocks(h);
7134 hpsa_free_cmd_pool(h);
7138 if (h->lockup_detected)
7139 free_percpu(h->lockup_detected);
7144 static void hpsa_flush_cache(struct ctlr_info *h)
7147 struct CommandList *c;
7149 /* Don't bother trying to flush the cache if locked up */
7150 if (unlikely(lockup_detected(h)))
7152 flush_buf = kzalloc(4, GFP_KERNEL);
7156 c = cmd_special_alloc(h);
7158 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
7161 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
7162 RAID_CTLR_LUNID, TYPE_CMD)) {
7165 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
7166 if (c->err_info->CommandStatus != 0)
7168 dev_warn(&h->pdev->dev,
7169 "error flushing cache on controller\n");
7170 cmd_special_free(h, c);
7175 static void hpsa_shutdown(struct pci_dev *pdev)
7177 struct ctlr_info *h;
7179 h = pci_get_drvdata(pdev);
7180 /* Turn board interrupts off and send the flush cache command.
7181 * sendcmd will turn off interrupts and send the flush,
7182 * writing all data in the battery-backed cache to disk.
7184 hpsa_flush_cache(h);
7185 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7186 hpsa_free_irqs_and_disable_msix(h);
7189 static void hpsa_free_device_info(struct ctlr_info *h)
7193 for (i = 0; i < h->ndevices; i++)
7197 static void hpsa_remove_one(struct pci_dev *pdev)
7199 struct ctlr_info *h;
7200 unsigned long flags;
7202 if (pci_get_drvdata(pdev) == NULL) {
7203 dev_err(&pdev->dev, "unable to remove device\n");
7206 h = pci_get_drvdata(pdev);
7208 /* Get rid of any controller monitoring work items */
7209 spin_lock_irqsave(&h->lock, flags);
7210 h->remove_in_progress = 1;
7211 cancel_delayed_work(&h->monitor_ctlr_work);
7212 spin_unlock_irqrestore(&h->lock, flags);
7214 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
7215 hpsa_shutdown(pdev);
7217 iounmap(h->transtable);
7218 iounmap(h->cfgtable);
7219 hpsa_free_device_info(h);
7220 hpsa_free_sg_chain_blocks(h);
7221 pci_free_consistent(h->pdev,
7222 h->nr_cmds * sizeof(struct CommandList),
7223 h->cmd_pool, h->cmd_pool_dhandle);
7224 pci_free_consistent(h->pdev,
7225 h->nr_cmds * sizeof(struct ErrorInfo),
7226 h->errinfo_pool, h->errinfo_pool_dhandle);
7227 hpsa_free_reply_queues(h);
7228 kfree(h->cmd_pool_bits);
7229 kfree(h->blockFetchTable);
7230 kfree(h->ioaccel1_blockFetchTable);
7231 kfree(h->ioaccel2_blockFetchTable);
7232 kfree(h->hba_inquiry_data);
7233 pci_disable_device(pdev);
7234 pci_release_regions(pdev);
7235 free_percpu(h->lockup_detected);
7239 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
7240 __attribute__((unused)) pm_message_t state)
7245 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
7250 static struct pci_driver hpsa_pci_driver = {
7252 .probe = hpsa_init_one,
7253 .remove = hpsa_remove_one,
7254 .id_table = hpsa_pci_device_id, /* id_table */
7255 .shutdown = hpsa_shutdown,
7256 .suspend = hpsa_suspend,
7257 .resume = hpsa_resume,
7260 /* Fill in bucket_map[], given nsgs (the max number of
7261 * scatter gather elements supported) and bucket[],
7262 * which is an array of 8 integers. The bucket[] array
7263 * contains 8 different DMA transfer sizes (in 16
7264 * byte increments) which the controller uses to fetch
7265 * commands. This function fills in bucket_map[], which
7266 * maps a given number of scatter gather elements to one of
7267 * the 8 DMA transfer sizes. The point of it is to allow the
7268 * controller to only do as much DMA as needed to fetch the
7269 * command, with the DMA transfer size encoded in the lower
7270 * bits of the command address.
7272 static void calc_bucket_map(int bucket[], int num_buckets,
7273 int nsgs, int min_blocks, int *bucket_map)
7277 /* Note, bucket_map must have nsgs+1 entries. */
7278 for (i = 0; i <= nsgs; i++) {
7279 /* Compute size of a command with i SG entries */
7280 size = i + min_blocks;
7281 b = num_buckets; /* Assume the biggest bucket */
7282 /* Find the bucket that is just big enough */
7283 for (j = 0; j < num_buckets; j++) {
7284 if (bucket[j] >= size) {
7289 /* for a command with i SG entries, use bucket b. */
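/*
 * Worked example (illustrative): with bucket[] = {5, 6, 8, 10, 12, 20,
 * 28, SG_ENTRIES_IN_CMD + 4} and min_blocks = 4, a command carrying
 * three SG entries needs 3 + 4 = 7 sixteen-byte blocks; the first
 * bucket >= 7 is 8, so bucket_map[3] selects the 8-block fetch size.
 */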
7294 static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7297 unsigned long register_value;
7298 unsigned long transMethod = CFGTBL_Trans_Performant |
7299 (trans_support & CFGTBL_Trans_use_short_tags) |
7300 CFGTBL_Trans_enable_directed_msix |
7301 (trans_support & (CFGTBL_Trans_io_accel1 |
7302 CFGTBL_Trans_io_accel2));
7303 struct access_method access = SA5_performant_access;
7305 /* This is a bit complicated. There are 8 registers on
7306 * the controller which we write to, telling it the 8 different
7307 * sizes of commands there may be. It's a way of
7308 * reducing the DMA done to fetch each command. Encoded into
7309 * each command's tag are 3 bits which communicate to the controller
7310 * which of the eight sizes that command fits within. The size of
7311 * each command depends on how many scatter gather entries there are.
7312 * Each SG entry requires 16 bytes. The eight registers are programmed
7313 * with the number of 16-byte blocks a command of that size requires.
7314 * The smallest command possible requires 5 such 16 byte blocks.
7315 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
7316 * blocks. Note, this only extends to the SG entries contained
7317 * within the command block, and does not extend to chained blocks
7318 * of SG elements. bft[] contains the eight values we write to
7319 * the registers. They are not evenly distributed, but have more
7320 * sizes for small commands, and fewer sizes for larger commands.
7322 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
7323 #define MIN_IOACCEL2_BFT_ENTRY 5
7324 #define HPSA_IOACCEL2_HEADER_SZ 4
7325 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
7326 13, 14, 15, 16, 17, 18, 19,
7327 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
7328 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
7329 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
7330 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
7331 16 * MIN_IOACCEL2_BFT_ENTRY);
7332 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
7333 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
7334 /* 5 = 1 s/g entry or 4k
7335 * 6 = 2 s/g entry or 8k
7336 * 8 = 4 s/g entry or 16k
7337 * 10 = 6 s/g entry or 24k
7340 /* If the controller supports either ioaccel method then
7341 * we can also use the RAID stack submit path that does not
7342 * perform the superfluous readl() after each command submission.
7344 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
7345 access = SA5_performant_access_no_read;
7347 /* Controller spec: zero out this buffer. */
7348 for (i = 0; i < h->nreply_queues; i++)
7349 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
7351 bft[7] = SG_ENTRIES_IN_CMD + 4;
7352 calc_bucket_map(bft, ARRAY_SIZE(bft),
7353 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
7354 for (i = 0; i < 8; i++)
7355 writel(bft[i], &h->transtable->BlockFetch[i]);
7357 /* size of controller ring buffer */
7358 writel(h->max_commands, &h->transtable->RepQSize);
7359 writel(h->nreply_queues, &h->transtable->RepQCount);
7360 writel(0, &h->transtable->RepQCtrAddrLow32);
7361 writel(0, &h->transtable->RepQCtrAddrHigh32);
7363 for (i = 0; i < h->nreply_queues; i++) {
7364 writel(0, &h->transtable->RepQAddr[i].upper);
7365 writel(h->reply_queue[i].busaddr,
7366 &h->transtable->RepQAddr[i].lower);
7369 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7370 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
7372 * enable outbound interrupt coalescing in accelerator mode;
7374 if (trans_support & CFGTBL_Trans_io_accel1) {
7375 access = SA5_ioaccel_mode1_access;
7376 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7377 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7379 if (trans_support & CFGTBL_Trans_io_accel2) {
7380 access = SA5_ioaccel_mode2_access;
7381 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7382 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7385 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7386 hpsa_wait_for_mode_change_ack(h);
7387 register_value = readl(&(h->cfgtable->TransportActive));
7388 if (!(register_value & CFGTBL_Trans_Performant)) {
7389 dev_warn(&h->pdev->dev, "unable to get board into"
7390 " performant mode\n");
7393 /* Change the access methods to the performant access methods */
7395 h->transMethod = transMethod;
7397 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
7398 (trans_support & CFGTBL_Trans_io_accel2)))
7401 if (trans_support & CFGTBL_Trans_io_accel1) {
7402 /* Set up I/O accelerator mode */
7403 for (i = 0; i < h->nreply_queues; i++) {
7404 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
7405 h->reply_queue[i].current_entry =
7406 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
7408 bft[7] = h->ioaccel_maxsg + 8;
7409 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
7410 h->ioaccel1_blockFetchTable);
7412 /* initialize all reply queue entries to unused */
7413 for (i = 0; i < h->nreply_queues; i++)
7414 memset(h->reply_queue[i].head,
7415 (u8) IOACCEL_MODE1_REPLY_UNUSED,
7416 h->reply_queue_size);
7418 /* set all the constant fields in the accelerator command
7419 * frames once at init time to save CPU cycles later.
7421 for (i = 0; i < h->nr_cmds; i++) {
7422 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
7424 cp->function = IOACCEL1_FUNCTION_SCSIIO;
7425 cp->err_info = (u32) (h->errinfo_pool_dhandle +
7426 (i * sizeof(struct ErrorInfo)));
7427 cp->err_info_len = sizeof(struct ErrorInfo);
7428 cp->sgl_offset = IOACCEL1_SGLOFFSET;
7429 cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
7430 cp->timeout_sec = 0;
7432 cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
7435 cp->host_addr.lower =
7436 (u32) (h->ioaccel_cmd_pool_dhandle +
7437 (i * sizeof(struct io_accel1_cmd)));
7438 cp->host_addr.upper = 0;
7440 } else if (trans_support & CFGTBL_Trans_io_accel2) {
7441 u64 cfg_offset, cfg_base_addr_index;
7442 u32 bft2_offset, cfg_base_addr;
7445 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7446 &cfg_base_addr_index, &cfg_offset);
7447 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
7448 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
7449 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
7450 4, h->ioaccel2_blockFetchTable);
7451 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
7452 BUILD_BUG_ON(offsetof(struct CfgTable,
7453 io_accel_request_size_offset) != 0xb8);
7454 h->ioaccel2_bft2_regs =
7455 remap_pci_mem(pci_resource_start(h->pdev,
7456 cfg_base_addr_index) +
7457 cfg_offset + bft2_offset,
7459 sizeof(*h->ioaccel2_bft2_regs));
7460 for (i = 0; i < ARRAY_SIZE(bft2); i++)
7461 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
7463 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7464 hpsa_wait_for_mode_change_ack(h);
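/*
 * Sketch of the constant per-command setup above: each io_accel1_cmd
 * is pointed at its own slice of the shared error-info DMA buffer,
 *
 *	cp->err_info = errinfo_pool_dhandle + i * sizeof(struct ErrorInfo);
 *
 * and its tag encodes the command index (i << DIRECT_LOOKUP_SHIFT),
 * so the controller can return status and the driver can locate the
 * command without any lookup table at completion time.
 */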
7467 static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
7470 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7471 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
7472 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
7474 /* Command structures must be aligned on a 128-byte boundary
7475 * because the 7 lower bits of the address are used by the
7478 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
7479 IOACCEL1_COMMANDLIST_ALIGNMENT);
7480 h->ioaccel_cmd_pool =
7481 pci_alloc_consistent(h->pdev,
7482 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7483 &(h->ioaccel_cmd_pool_dhandle));
7485 h->ioaccel1_blockFetchTable =
7486 kmalloc(((h->ioaccel_maxsg + 1) *
7487 sizeof(u32)), GFP_KERNEL);
7489 if ((h->ioaccel_cmd_pool == NULL) ||
7490 (h->ioaccel1_blockFetchTable == NULL))
7493 memset(h->ioaccel_cmd_pool, 0,
7494 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
7498 if (h->ioaccel_cmd_pool)
7499 pci_free_consistent(h->pdev,
7500 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7501 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
7502 kfree(h->ioaccel1_blockFetchTable);
7506 static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7508 /* Allocate ioaccel2 mode command blocks and block fetch table */
7511 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7512 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
7513 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
7515 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
7516 IOACCEL2_COMMANDLIST_ALIGNMENT);
7517 h->ioaccel2_cmd_pool =
7518 pci_alloc_consistent(h->pdev,
7519 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7520 &(h->ioaccel2_cmd_pool_dhandle));
7522 h->ioaccel2_blockFetchTable =
7523 kmalloc(((h->ioaccel_maxsg + 1) *
7524 sizeof(u32)), GFP_KERNEL);
7526 if ((h->ioaccel2_cmd_pool == NULL) ||
7527 (h->ioaccel2_blockFetchTable == NULL))
7530 memset(h->ioaccel2_cmd_pool, 0,
7531 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
7535 if (h->ioaccel2_cmd_pool)
7536 pci_free_consistent(h->pdev,
7537 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7538 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
7539 kfree(h->ioaccel2_blockFetchTable);
7543 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7546 unsigned long transMethod = CFGTBL_Trans_Performant |
7547 CFGTBL_Trans_use_short_tags;
7550 if (hpsa_simple_mode)
7553 trans_support = readl(&(h->cfgtable->TransportSupport));
7554 if (!(trans_support & PERFORMANT_MODE))
7557 /* Check for I/O accelerator mode support */
7558 if (trans_support & CFGTBL_Trans_io_accel1) {
7559 transMethod |= CFGTBL_Trans_io_accel1 |
7560 CFGTBL_Trans_enable_directed_msix;
7561 if (hpsa_alloc_ioaccel_cmd_and_bft(h))
7564 if (trans_support & CFGTBL_Trans_io_accel2) {
7565 transMethod |= CFGTBL_Trans_io_accel2 |
7566 CFGTBL_Trans_enable_directed_msix;
7567 if (ioaccel2_alloc_cmds_and_bft(h))
7572 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
7573 hpsa_get_max_perf_mode_cmds(h);
7574 /* Performant mode ring buffer and supporting data structures */
7575 h->reply_queue_size = h->max_commands * sizeof(u64);
7577 for (i = 0; i < h->nreply_queues; i++) {
7578 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
7579 h->reply_queue_size,
7580 &(h->reply_queue[i].busaddr));
7581 if (!h->reply_queue[i].head)
7583 h->reply_queue[i].size = h->max_commands;
7584 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
7585 h->reply_queue[i].current_entry = 0;
7588 /* Need a block fetch table for performant mode */
7589 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
7590 sizeof(u32)), GFP_KERNEL);
7591 if (!h->blockFetchTable)
7594 hpsa_enter_performant_mode(h, trans_support);
7598 hpsa_free_reply_queues(h);
7599 kfree(h->blockFetchTable);
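/*
 * Worked example (illustrative): with h->max_commands = 1024, the
 * sizing above gives
 *
 *	h->reply_queue_size = 1024 * sizeof(u64) = 8192 bytes
 *
 * per reply queue, one DMA-coherent ring per MSI-X vector (or a
 * single ring when running without MSI-X).
 */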
7602 static int is_accelerated_cmd(struct CommandList *c)
7604 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
7607 static void hpsa_drain_accel_commands(struct ctlr_info *h)
7609 struct CommandList *c = NULL;
7610 unsigned long flags;
7613 do { /* wait for all outstanding commands to drain out */
7615 spin_lock_irqsave(&h->lock, flags);
7616 list_for_each_entry(c, &h->cmpQ, list)
7617 accel_cmds_out += is_accelerated_cmd(c);
7618 list_for_each_entry(c, &h->reqQ, list)
7619 accel_cmds_out += is_accelerated_cmd(c);
7620 spin_unlock_irqrestore(&h->lock, flags);
7621 if (accel_cmds_out <= 0)
7628 * This is it. Register the PCI driver information for the cards we control;
7629 * the OS will call our registered routines when it finds one of our cards.
7631 static int __init hpsa_init(void)
7633 return pci_register_driver(&hpsa_pci_driver);
7636 static void __exit hpsa_cleanup(void)
7638 pci_unregister_driver(&hpsa_pci_driver);
7641 static void __attribute__((unused)) verify_offsets(void)
7643 #define VERIFY_OFFSET(member, offset) \
7644 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
7646 VERIFY_OFFSET(structure_size, 0);
7647 VERIFY_OFFSET(volume_blk_size, 4);
7648 VERIFY_OFFSET(volume_blk_cnt, 8);
7649 VERIFY_OFFSET(phys_blk_shift, 16);
7650 VERIFY_OFFSET(parity_rotation_shift, 17);
7651 VERIFY_OFFSET(strip_size, 18);
7652 VERIFY_OFFSET(disk_starting_blk, 20);
7653 VERIFY_OFFSET(disk_blk_cnt, 28);
7654 VERIFY_OFFSET(data_disks_per_row, 36);
7655 VERIFY_OFFSET(metadata_disks_per_row, 38);
7656 VERIFY_OFFSET(row_cnt, 40);
7657 VERIFY_OFFSET(layout_map_count, 42);
7658 VERIFY_OFFSET(flags, 44);
7659 VERIFY_OFFSET(dekindex, 46);
7660 /* VERIFY_OFFSET(reserved, 48 */
7661 VERIFY_OFFSET(data, 64);
7663 #undef VERIFY_OFFSET
7665 #define VERIFY_OFFSET(member, offset) \
7666 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
7668 VERIFY_OFFSET(IU_type, 0);
7669 VERIFY_OFFSET(direction, 1);
7670 VERIFY_OFFSET(reply_queue, 2);
7671 /* VERIFY_OFFSET(reserved1, 3); */
7672 VERIFY_OFFSET(scsi_nexus, 4);
7673 VERIFY_OFFSET(Tag, 8);
7674 VERIFY_OFFSET(cdb, 16);
7675 VERIFY_OFFSET(cciss_lun, 32);
7676 VERIFY_OFFSET(data_len, 40);
7677 VERIFY_OFFSET(cmd_priority_task_attr, 44);
7678 VERIFY_OFFSET(sg_count, 45);
7679 /* VERIFY_OFFSET(reserved3 */
7680 VERIFY_OFFSET(err_ptr, 48);
7681 VERIFY_OFFSET(err_len, 56);
7682 /* VERIFY_OFFSET(reserved4 */
7683 VERIFY_OFFSET(sg, 64);
7685 #undef VERIFY_OFFSET
7687 #define VERIFY_OFFSET(member, offset) \
7688 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
7690 VERIFY_OFFSET(dev_handle, 0x00);
7691 VERIFY_OFFSET(reserved1, 0x02);
7692 VERIFY_OFFSET(function, 0x03);
7693 VERIFY_OFFSET(reserved2, 0x04);
7694 VERIFY_OFFSET(err_info, 0x0C);
7695 VERIFY_OFFSET(reserved3, 0x10);
7696 VERIFY_OFFSET(err_info_len, 0x12);
7697 VERIFY_OFFSET(reserved4, 0x13);
7698 VERIFY_OFFSET(sgl_offset, 0x14);
7699 VERIFY_OFFSET(reserved5, 0x15);
7700 VERIFY_OFFSET(transfer_len, 0x1C);
7701 VERIFY_OFFSET(reserved6, 0x20);
7702 VERIFY_OFFSET(io_flags, 0x24);
7703 VERIFY_OFFSET(reserved7, 0x26);
7704 VERIFY_OFFSET(LUN, 0x34);
7705 VERIFY_OFFSET(control, 0x3C);
7706 VERIFY_OFFSET(CDB, 0x40);
7707 VERIFY_OFFSET(reserved8, 0x50);
7708 VERIFY_OFFSET(host_context_flags, 0x60);
7709 VERIFY_OFFSET(timeout_sec, 0x62);
7710 VERIFY_OFFSET(ReplyQueue, 0x64);
7711 VERIFY_OFFSET(reserved9, 0x65);
7712 VERIFY_OFFSET(Tag, 0x68);
7713 VERIFY_OFFSET(host_addr, 0x70);
7714 VERIFY_OFFSET(CISS_LUN, 0x78);
7715 VERIFY_OFFSET(SG, 0x78 + 8);
7716 #undef VERIFY_OFFSET
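/*
 * Note: each VERIFY_OFFSET() expands to a BUILD_BUG_ON() on
 * offsetof(), so any drift between these structures and the
 * controller's expected wire layout breaks the build at compile time
 * instead of corrupting commands at runtime; moving a field in
 * struct io_accel1_cmd, for example, would trip the checks above.
 */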
7719 module_init(hpsa_init);
7720 module_exit(hpsa_cleanup);