/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};
MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
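/*
 * VPD_PAGE is OR'd into the page_code argument of fill_cmd() to request
 * the EVPD (vital product data) form of an inquiry; the low byte of
 * page_code carries the VPD page number itself.
 */
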
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);

259 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
261 unsigned long *priv = shost_priv(sdev->host);
262 return (struct ctlr_info *) *priv;
265 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
267 unsigned long *priv = shost_priv(sh);
268 return (struct ctlr_info *) *priv;
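/*
 * Log the unit attention condition carried in a command's sense data.
 * Returns 1 if the sense data did describe a unit attention, 0 otherwise.
 */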
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: LUN failure detected\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: report LUN data changed\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
		HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};

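/*
 * Performant-mode reply queues are rings of tags filled in by the
 * controller.  Bit 0 of each entry is a parity bit the controller flips
 * on every pass around the ring; an entry is new while its parity bit
 * matches rq->wraparound, which we toggle each time we wrap.
 */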
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vector))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

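/*
 * Route a command to the submission register appropriate for its type:
 * ioaccel1 commands go through the standard request port (tagged with a
 * distinct command type), ioaccel2 commands have their own inbound post
 * queue register, and everything else goes through the access-method
 * submit_command() hook.
 */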
static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
					struct CommandList *c)
{
	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

static inline void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			dev->raid_level > RAID_UNKNOWN ?
				"RAID-?" : raid_label[dev->raid_level],
			dev->offload_config ? '+' : '-',
			dev->offload_enabled ? '+' : '-',
			dev->expose_state);
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_state & HPSA_SCSI_ADD ? "added" : "masked");
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  if raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * it on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
	if (!new_entry->offload_enabled)
		h->dev[entry]->offload_enabled = 0;

	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
	new_entry->offload_enabled = 0;
	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;

	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}

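/* Byte-for-byte equality test of two 8-byte SCSI3 addresses. */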
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (dev1->queue_depth != dev2->queue_depth)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

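/*
 * Track a volume reported offline: add its address to
 * h->offline_device_list (unless it is already there) so it can be
 * rechecked later and brought online once it becomes ready.
 */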
static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j]->devtype != TYPE_DISK)
				continue;
			if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded. In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present. And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->offload_to_be_enabled = 0;
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}

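/*
 * Recompute the logical-to-physical drive mapping for every
 * ioaccel-capable logical volume whose offload state is about to change;
 * volumes already running with offload enabled are left untouched.
 */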
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i]->devtype != TYPE_DISK)
			continue;
		if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing
		 * and since it isn't changing, we do not need to
		 * update it.
		 */
		if (dev[i]->offload_enabled)
			continue;

		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found.
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);

	/* Now that h->dev[]->phys_disk[] is coherent, we can enable
	 * any logical drives that need it enabled.
	 */
	for (i = 0; i < h->ndevices; i++)
		h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;

	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		if (removed[i]->expose_state & HPSA_SCSI_ADD) {
			struct scsi_device *sdev =
				scsi_device_lookup(sh, removed[i]->bus,
					removed[i]->target, removed[i]->lun);
			if (sdev != NULL) {
				scsi_remove_device(sdev);
				scsi_device_put(sdev);
			} else {
				/*
				 * We don't expect to get here.
				 * future cmds to this device will get selection
				 * timeout as if the device was gone.
				 */
				hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
					"didn't find device for removal.");
			}
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (!(added[i]->expose_state & HPSA_SCSI_ADD))
			continue;
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		hpsa_show_dev_msg(KERN_WARNING, h, added[i],
			"addition failed, device not added.");
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
		added[i] = NULL;
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (likely(sd)) {
		atomic_set(&sd->ioaccel_cmds_out, 0);
		sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL;
	} else
		sdev->hostdata = NULL;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

/* configure scsi device based on internal per-device structure */
static int hpsa_slave_configure(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	int queue_depth;

	sd = sdev->hostdata;
	sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH);

	if (sd)
		queue_depth = sd->queue_depth != 0 ?
			sd->queue_depth : sdev->host->can_queue;
	else
		queue_depth = sdev->host->can_queue;

	scsi_change_queue_depth(sdev, queue_depth);

	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

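/* Free the per-command SG chain blocks and the array that tracks them. */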
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list) {
		dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
		return -ENOMEM;
	}
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i]) {
			dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
			goto clean;
		}
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

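/*
 * When a command needs more SG entries than fit in the command itself,
 * the tail of the list lives in a separate chain block.  Map that block
 * for DMA and point the last embedded SG entry at it.
 */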
static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;

	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
}

/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2)
{
	int data_len;
	int retry = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			dev_warn(&h->pdev->dev,
				"%s: task complete with check condition.\n",
				"HP SSD Smart Path");
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT) {
				memset(cmd->sense_buffer, 0,
					SCSI_SENSE_BUFFERSIZE);
				break;
			}
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			dev_warn(&h->pdev->dev,
				"%s: task complete with BUSY status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			dev_warn(&h->pdev->dev,
				"%s: task complete with reservation conflict.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			/* Make scsi midlayer do unlimited retries */
			cmd->result = DID_IMM_RETRY << 16;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			dev_warn(&h->pdev->dev,
				"%s: task complete with aborted status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		default:
			dev_warn(&h->pdev->dev,
				"%s: task complete with unrecognized status: 0x%02x\n",
				"HP SSD Smart Path", c2->error_data.status);
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		/* don't expect to get here. */
		dev_warn(&h->pdev->dev,
			"unexpected delivery or target failure, status = 0x%02x\n",
			c2->error_data.status);
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		dev_warn(&h->pdev->dev, "task management function rejected.\n");
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Unrecognized server response: 0x%02x\n",
			"HP SSD Smart Path",
			c2->error_data.serv_response);
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}

static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0)) {
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}

	/* Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
			dev->offload_enabled = 0;
		goto retry_cmd;
	}

	if (handle_ioaccel_mode2_error(h, c, cmd, c2))
		goto retry_cmd;

	cmd_free(h, c);
	cmd->scsi_done(cmd);
	return;

retry_cmd:
	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}

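/*
 * Main completion path for CISS commands: unmap DMA, fold the
 * controller's error information into the midlayer's result and sense
 * fields, and hand the scsi_cmnd back to the SCSI core.
 */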
1844 static void complete_scsi_command(struct CommandList *cp)
1846 struct scsi_cmnd *cmd;
1847 struct ctlr_info *h;
1848 struct ErrorInfo *ei;
1849 struct hpsa_scsi_dev_t *dev;
1851 unsigned char sense_key;
1852 unsigned char asc; /* additional sense code */
1853 unsigned char ascq; /* additional sense code qualifier */
1854 unsigned long sense_data_size;
1859 dev = cmd->device->hostdata;
1861 scsi_dma_unmap(cmd); /* undo the DMA mappings */
1862 if ((cp->cmd_type == CMD_SCSI) &&
1863 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
1864 hpsa_unmap_sg_chain_block(h, cp);
1866 cmd->result = (DID_OK << 16); /* host byte */
1867 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1869 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
1870 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
1873 * We check for lockup status here as it may be set for
1874 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
1875 * fail_all_oustanding_cmds()
1877 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
1878 /* DID_NO_CONNECT will prevent a retry */
1879 cmd->result = DID_NO_CONNECT << 16;
1881 cmd->scsi_done(cmd);
1885 if (cp->cmd_type == CMD_IOACCEL2)
1886 return process_ioaccel2_completion(h, cp, cmd, dev);
1888 cmd->result |= ei->ScsiStatus;
1890 scsi_set_resid(cmd, ei->ResidualCnt);
1891 if (ei->CommandStatus == 0) {
1892 if (cp->cmd_type == CMD_IOACCEL1)
1893 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
1895 cmd->scsi_done(cmd);
1899 /* copy the sense data */
1900 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1901 sense_data_size = SCSI_SENSE_BUFFERSIZE;
1903 sense_data_size = sizeof(ei->SenseInfo);
1904 if (ei->SenseLen < sense_data_size)
1905 sense_data_size = ei->SenseLen;
1907 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
1909 /* For I/O accelerator commands, copy over some fields to the normal
1910 * CISS header used below for error handling.
1912 if (cp->cmd_type == CMD_IOACCEL1) {
1913 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1914 cp->Header.SGList = scsi_sg_count(cmd);
1915 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
1916 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
1917 IOACCEL1_IOFLAGS_CDBLEN_MASK;
1918 cp->Header.tag = c->tag;
1919 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1920 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
1922 /* Any RAID offload error results in retry which will use
1923 * the normal I/O path so the controller can handle whatever's wrong. */
1926 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1927 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1928 dev->offload_enabled = 0;
1929 INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
1930 queue_work_on(raw_smp_processor_id(),
1931 h->resubmit_wq, &cp->work);
1936 /* an error has occurred */
1937 switch (ei->CommandStatus) {
1939 case CMD_TARGET_STATUS:
1940 if (ei->ScsiStatus) {
1942 sense_key = 0xf & ei->SenseInfo[2];
1943 /* Get additional sense code */
1944 asc = ei->SenseInfo[12];
1945 /* Get additional sense code qualifier */
1946 ascq = ei->SenseInfo[13];
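/* Illustrative decode of fixed-format sense data (example bytes, not
 * taken from a real device): SenseInfo[2] = 0x02 gives sense_key
 * NOT_READY, SenseInfo[12] = 0x04 is ASC "LUN not ready", and
 * SenseInfo[13] = 0x02 is ASCQ "initializing command required".
 */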
1948 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1949 if (sense_key == ABORTED_COMMAND) {
1950 cmd->result |= DID_SOFT_ERROR << 16;
1955 /* Problem was not a check condition
1956 * Pass it up to the upper layers...
1958 if (ei->ScsiStatus) {
1959 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1960 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1961 "Returning result: 0x%x\n",
1963 sense_key, asc, ascq,
1965 } else { /* scsi status is zero??? How??? */
1966 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1967 "Returning no connection.\n", cp),
1969 /* Ordinarily, this case should never happen,
1970 * but there is a bug in some released firmware
1971 * revisions that allows it to happen if, for
1972 * example, a 4100 backplane loses power and
1973 * the tape drive is in it. We assume that
1974 * it's a fatal error of some kind because we
1975 * can't show that it wasn't. We will make it
1976 * look like selection timeout since that is
1977 * the most common reason for this to occur,
1978 * and it's severe enough.
1981 cmd->result = DID_NO_CONNECT << 16;
1985 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1987 case CMD_DATA_OVERRUN:
1988 dev_warn(&h->pdev->dev,
1989 "CDB %16phN data overrun\n", cp->Request.CDB);
1992 /* print_bytes(cp, sizeof(*cp), 1, 0); */
1994 /* We get CMD_INVALID if you address a non-existent device
1995 * instead of a selection timeout (no response). You will
1996 * see this if you yank out a drive, then try to access it.
1997 * This is kind of a shame because it means that any other
1998 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1999 * missing target. */
2000 cmd->result = DID_NO_CONNECT << 16;
2003 case CMD_PROTOCOL_ERR:
2004 cmd->result = DID_ERROR << 16;
2005 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2008 case CMD_HARDWARE_ERR:
2009 cmd->result = DID_ERROR << 16;
2010 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2013 case CMD_CONNECTION_LOST:
2014 cmd->result = DID_ERROR << 16;
2015 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2019 cmd->result = DID_ABORT << 16;
2020 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2021 cp->Request.CDB, ei->ScsiStatus);
2023 case CMD_ABORT_FAILED:
2024 cmd->result = DID_ERROR << 16;
2025 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2028 case CMD_UNSOLICITED_ABORT:
2029 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2030 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2034 cmd->result = DID_TIME_OUT << 16;
2035 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2038 case CMD_UNABORTABLE:
2039 cmd->result = DID_ERROR << 16;
2040 dev_warn(&h->pdev->dev, "Command unabortable\n");
2042 case CMD_IOACCEL_DISABLED:
2043 /* This only handles the direct pass-through case since RAID
2044 * offload is handled above. Just attempt a retry.
2046 cmd->result = DID_SOFT_ERROR << 16;
2047 dev_warn(&h->pdev->dev,
2048 "cp %p had HP SSD Smart Path error\n", cp);
2051 cmd->result = DID_ERROR << 16;
2052 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2053 cp, ei->CommandStatus);
2056 cmd->scsi_done(cmd);
2059 static void hpsa_pci_unmap(struct pci_dev *pdev,
2060 struct CommandList *c, int sg_used, int data_direction)
2064 for (i = 0; i < sg_used; i++)
2065 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2066 le32_to_cpu(c->SG[i].Len),
2070 static int hpsa_map_one(struct pci_dev *pdev,
2071 struct CommandList *cp,
2078 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2079 cp->Header.SGList = 0;
2080 cp->Header.SGTotal = cpu_to_le16(0);
2084 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2085 if (dma_mapping_error(&pdev->dev, addr64)) {
2086 /* Prevent subsequent unmap of something never mapped */
2087 cp->Header.SGList = 0;
2088 cp->Header.SGTotal = cpu_to_le16(0);
2091 cp->SG[0].Addr = cpu_to_le64(addr64);
2092 cp->SG[0].Len = cpu_to_le32(buflen);
2093 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2094 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2095 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
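/* Worked example: mapping a single 512-byte buffer produces exactly
 * one descriptor -- SG[0].Addr = the DMA handle, SG[0].Len = 512,
 * SG[0].Ext = HPSA_SG_LAST -- with SGList = SGTotal = 1 so the
 * controller knows the list is complete and unchained.
 */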
2099 #define NO_TIMEOUT ((unsigned long) -1)
2100 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
2101 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2102 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2104 DECLARE_COMPLETION_ONSTACK(wait);
2107 __enqueue_cmd_and_start_io(h, c, reply_queue);
2108 if (timeout_msecs == NO_TIMEOUT) {
2109 /* TODO: get rid of this no-timeout thing */
2110 wait_for_completion_io(&wait);
2113 if (!wait_for_completion_io_timeout(&wait,
2114 msecs_to_jiffies(timeout_msecs))) {
2115 dev_warn(&h->pdev->dev, "Command timed out.\n");
2121 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2122 int reply_queue, unsigned long timeout_msecs)
2124 if (unlikely(lockup_detected(h))) {
2125 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2128 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2131 static u32 lockup_detected(struct ctlr_info *h)
2134 u32 rc, *lockup_detected;
2137 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2138 rc = *lockup_detected;
2143 #define MAX_DRIVER_CMD_RETRIES 25
2144 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2145 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2147 int backoff_time = 10, retry_count = 0;
2151 memset(c->err_info, 0, sizeof(*c->err_info));
2152 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2157 if (retry_count > 3) {
2158 msleep(backoff_time);
2159 if (backoff_time < 1000)
2162 } while ((check_for_unit_attention(h, c) ||
2163 check_for_busy(h, c)) &&
2164 retry_count <= MAX_DRIVER_CMD_RETRIES);
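/* Backoff sketch, assuming backoff_time doubles each pass while under
 * the 1000 ms cap checked above: retry 4 sleeps 10 ms, retry 5 sleeps
 * 20 ms, retry 6 sleeps 40 ms, and so on, so even a full run of
 * MAX_DRIVER_CMD_RETRIES completes well under a minute instead of
 * hammering a controller that keeps reporting busy.
 */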
2165 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2166 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2171 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2172 struct CommandList *c)
2174 const u8 *cdb = c->Request.CDB;
2175 const u8 *lun = c->Header.LUN.LunAddrBytes;
2177 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2178 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2179 txt, lun[0], lun[1], lun[2], lun[3],
2180 lun[4], lun[5], lun[6], lun[7],
2181 cdb[0], cdb[1], cdb[2], cdb[3],
2182 cdb[4], cdb[5], cdb[6], cdb[7],
2183 cdb[8], cdb[9], cdb[10], cdb[11],
2184 cdb[12], cdb[13], cdb[14], cdb[15]);
2187 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2188 struct CommandList *cp)
2190 const struct ErrorInfo *ei = cp->err_info;
2191 struct device *d = &cp->h->pdev->dev;
2192 const u8 *sd = ei->SenseInfo;
2194 switch (ei->CommandStatus) {
2195 case CMD_TARGET_STATUS:
2196 hpsa_print_cmd(h, "SCSI status", cp);
2197 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2198 dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
2199 sd[2] & 0x0f, sd[12], sd[13]);
2201 dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
2202 if (ei->ScsiStatus == 0)
2203 dev_warn(d, "SCSI status is abnormally zero. "
2204 "(probably indicates selection timeout "
2205 "reported incorrectly due to a known "
2206 "firmware bug, circa July, 2001.)\n");
2208 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2210 case CMD_DATA_OVERRUN:
2211 hpsa_print_cmd(h, "overrun condition", cp);
2214 /* controller unfortunately reports SCSI passthru's
2215 * to non-existent targets as invalid commands.
2217 hpsa_print_cmd(h, "invalid command", cp);
2218 dev_warn(d, "probably means device no longer present\n");
2221 case CMD_PROTOCOL_ERR:
2222 hpsa_print_cmd(h, "protocol error", cp);
2224 case CMD_HARDWARE_ERR:
2225 hpsa_print_cmd(h, "hardware error", cp);
2227 case CMD_CONNECTION_LOST:
2228 hpsa_print_cmd(h, "connection lost", cp);
2231 hpsa_print_cmd(h, "aborted", cp);
2233 case CMD_ABORT_FAILED:
2234 hpsa_print_cmd(h, "abort failed", cp);
2236 case CMD_UNSOLICITED_ABORT:
2237 hpsa_print_cmd(h, "unsolicited abort", cp);
2240 hpsa_print_cmd(h, "timed out", cp);
2242 case CMD_UNABORTABLE:
2243 hpsa_print_cmd(h, "unabortable", cp);
2245 case CMD_CTLR_LOCKUP:
2246 hpsa_print_cmd(h, "controller lockup detected", cp);
2249 hpsa_print_cmd(h, "unknown status", cp);
2250 dev_warn(d, "Unknown command status %x\n",
2255 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2256 u16 page, unsigned char *buf,
2257 unsigned char bufsize)
2260 struct CommandList *c;
2261 struct ErrorInfo *ei;
2266 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2270 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2271 page, scsi3addr, TYPE_CMD)) {
2275 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2276 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2280 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2281 hpsa_scsi_interpret_error(h, c);
2289 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2290 unsigned char *scsi3addr, unsigned char page,
2291 struct bmic_controller_parameters *buf, size_t bufsize)
2294 struct CommandList *c;
2295 struct ErrorInfo *ei;
2298 if (c == NULL) { /* trouble... */
2299 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2303 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2304 page, scsi3addr, TYPE_CMD)) {
2308 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2309 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2313 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2314 hpsa_scsi_interpret_error(h, c);
2322 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2323 u8 reset_type, int reply_queue)
2326 struct CommandList *c;
2327 struct ErrorInfo *ei;
2331 if (c == NULL) { /* trouble... */
2332 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2336 /* fill_cmd can't fail here, no data buffer to map. */
2337 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2338 scsi3addr, TYPE_MSG);
2339 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
2340 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2342 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2345 /* no unmap needed here because no data xfer. */
2348 if (ei->CommandStatus != 0) {
2349 hpsa_scsi_interpret_error(h, c);
2357 static void hpsa_get_raid_level(struct ctlr_info *h,
2358 unsigned char *scsi3addr, unsigned char *raid_level)
2363 *raid_level = RAID_UNKNOWN;
2364 buf = kzalloc(64, GFP_KERNEL);
2367 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2369 *raid_level = buf[8];
2370 if (*raid_level > RAID_UNKNOWN)
2371 *raid_level = RAID_UNKNOWN;
2376 #define HPSA_MAP_DEBUG
2377 #ifdef HPSA_MAP_DEBUG
2378 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2379 struct raid_map_data *map_buff)
2381 struct raid_map_disk_data *dd = &map_buff->data[0];
2383 u16 map_cnt, row_cnt, disks_per_row;
2388 /* Show details only if debugging has been activated. */
2389 if (h->raid_offload_debug < 2)
2392 dev_info(&h->pdev->dev, "structure_size = %u\n",
2393 le32_to_cpu(map_buff->structure_size));
2394 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2395 le32_to_cpu(map_buff->volume_blk_size));
2396 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2397 le64_to_cpu(map_buff->volume_blk_cnt));
2398 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2399 map_buff->phys_blk_shift);
2400 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2401 map_buff->parity_rotation_shift);
2402 dev_info(&h->pdev->dev, "strip_size = %u\n",
2403 le16_to_cpu(map_buff->strip_size));
2404 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2405 le64_to_cpu(map_buff->disk_starting_blk));
2406 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2407 le64_to_cpu(map_buff->disk_blk_cnt));
2408 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2409 le16_to_cpu(map_buff->data_disks_per_row));
2410 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2411 le16_to_cpu(map_buff->metadata_disks_per_row));
2412 dev_info(&h->pdev->dev, "row_cnt = %u\n",
2413 le16_to_cpu(map_buff->row_cnt));
2414 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2415 le16_to_cpu(map_buff->layout_map_count));
2416 dev_info(&h->pdev->dev, "flags = 0x%x\n",
2417 le16_to_cpu(map_buff->flags));
2418 dev_info(&h->pdev->dev, "encryption = %s\n",
2419 le16_to_cpu(map_buff->flags) &
2420 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
2421 dev_info(&h->pdev->dev, "dekindex = %u\n",
2422 le16_to_cpu(map_buff->dekindex));
2423 map_cnt = le16_to_cpu(map_buff->layout_map_count);
2424 for (map = 0; map < map_cnt; map++) {
2425 dev_info(&h->pdev->dev, "Map%u:\n", map);
2426 row_cnt = le16_to_cpu(map_buff->row_cnt);
2427 for (row = 0; row < row_cnt; row++) {
2428 dev_info(&h->pdev->dev, " Row%u:\n", row);
2430 le16_to_cpu(map_buff->data_disks_per_row);
2431 for (col = 0; col < disks_per_row; col++, dd++)
2432 dev_info(&h->pdev->dev,
2433 " D%02u: h=0x%04x xor=%u,%u\n",
2434 col, dd->ioaccel_handle,
2435 dd->xor_mult[0], dd->xor_mult[1]);
2437 le16_to_cpu(map_buff->metadata_disks_per_row);
2438 for (col = 0; col < disks_per_row; col++, dd++)
2439 dev_info(&h->pdev->dev,
2440 " M%02u: h=0x%04x xor=%u,%u\n",
2441 col, dd->ioaccel_handle,
2442 dd->xor_mult[0], dd->xor_mult[1]);
2447 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2448 __attribute__((unused)) int rc,
2449 __attribute__((unused)) struct raid_map_data *map_buff)
2454 static int hpsa_get_raid_map(struct ctlr_info *h,
2455 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2458 struct CommandList *c;
2459 struct ErrorInfo *ei;
2463 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2466 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2467 sizeof(this_device->raid_map), 0,
2468 scsi3addr, TYPE_CMD)) {
2469 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
2473 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2474 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2478 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2479 hpsa_scsi_interpret_error(h, c);
2485 /* @todo in the future, dynamically allocate RAID map memory */
2486 if (le32_to_cpu(this_device->raid_map.structure_size) >
2487 sizeof(this_device->raid_map)) {
2488 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2491 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2498 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
2499 unsigned char scsi3addr[], u16 bmic_device_index,
2500 struct bmic_identify_physical_device *buf, size_t bufsize)
2503 struct CommandList *c;
2504 struct ErrorInfo *ei;
2507 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
2508 0, RAID_CTLR_LUNID, TYPE_CMD);
2512 c->Request.CDB[2] = bmic_device_index & 0xff;
2513 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
2515 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
2518 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2519 hpsa_scsi_interpret_error(h, c);
2527 static int hpsa_vpd_page_supported(struct ctlr_info *h,
2528 unsigned char scsi3addr[], u8 page)
2533 unsigned char *buf, bufsize;
2535 buf = kzalloc(256, GFP_KERNEL);
2539 /* Get the size of the page list first */
2540 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2541 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2542 buf, HPSA_VPD_HEADER_SZ);
2544 goto exit_unsupported;
2546 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2547 bufsize = pages + HPSA_VPD_HEADER_SZ;
2551 /* Get the whole VPD page list */
2552 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2553 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2556 goto exit_unsupported;
2559 for (i = 1; i <= pages; i++)
2560 if (buf[3 + i] == page)
2561 goto exit_supported;
2570 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2571 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2577 this_device->offload_config = 0;
2578 this_device->offload_enabled = 0;
2579 this_device->offload_to_be_enabled = 0;
2581 buf = kzalloc(64, GFP_KERNEL);
2584 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2586 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2587 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
2591 #define IOACCEL_STATUS_BYTE 4
2592 #define OFFLOAD_CONFIGURED_BIT 0x01
2593 #define OFFLOAD_ENABLED_BIT 0x02
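/* Example decode: buf[IOACCEL_STATUS_BYTE] == 0x03 means both bits are
 * set, so the volume is configured for ioaccel and offload is enabled;
 * 0x01 means ioaccel is configured on the volume but not enabled.
 */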
2594 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2595 this_device->offload_config =
2596 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2597 if (this_device->offload_config) {
2598 this_device->offload_enabled =
2599 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2600 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2601 this_device->offload_enabled = 0;
2603 this_device->offload_to_be_enabled = this_device->offload_enabled;
2609 /* Get the device id from inquiry page 0x83 */
2610 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2611 unsigned char *device_id, int buflen)
2618 buf = kzalloc(64, GFP_KERNEL);
2621 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
2623 memcpy(device_id, &buf[8], buflen);
2628 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2629 void *buf, int bufsize,
2630 int extended_response)
2633 struct CommandList *c;
2634 unsigned char scsi3addr[8];
2635 struct ErrorInfo *ei;
2638 if (c == NULL) { /* trouble... */
2639 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2642 /* address the controller */
2643 memset(scsi3addr, 0, sizeof(scsi3addr));
2644 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2645 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2649 if (extended_response)
2650 c->Request.CDB[1] = extended_response;
2651 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2652 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2656 if (ei->CommandStatus != 0 &&
2657 ei->CommandStatus != CMD_DATA_UNDERRUN) {
2658 hpsa_scsi_interpret_error(h, c);
2661 struct ReportLUNdata *rld = buf;
2663 if (rld->extended_response_flag != extended_response) {
2664 dev_err(&h->pdev->dev,
2665 "report luns requested format %u, got %u\n",
2667 rld->extended_response_flag);
2676 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2677 struct ReportExtendedLUNdata *buf, int bufsize)
2679 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
2680 HPSA_REPORT_PHYS_EXTENDED);
2683 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2684 struct ReportLUNdata *buf, int bufsize)
2686 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2689 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2690 int bus, int target, int lun)
2693 device->target = target;
2697 /* Use VPD inquiry to get details of volume status */
2698 static int hpsa_get_volume_status(struct ctlr_info *h,
2699 unsigned char scsi3addr[])
2706 buf = kzalloc(64, GFP_KERNEL);
2708 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2710 /* Does controller have VPD for logical volume status? */
2711 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
2714 /* Get the size of the VPD return buffer */
2715 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2716 buf, HPSA_VPD_HEADER_SZ);
2721 /* Now get the whole VPD buffer */
2722 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2723 buf, size + HPSA_VPD_HEADER_SZ);
2726 status = buf[4]; /* status byte */
2732 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2735 /* Determine offline status of a volume. Return one of: 0 (not offline),
2738 * 0xff (offline for unknown reasons)
2739 * # (integer code indicating one of several NOT READY states
2740 * describing why a volume is to be kept offline)
2742 static int hpsa_volume_offline(struct ctlr_info *h,
2743 unsigned char scsi3addr[])
2745 struct CommandList *c;
2746 unsigned char *sense, sense_key, asc, ascq;
2750 #define ASC_LUN_NOT_READY 0x04
2751 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
2752 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2757 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2758 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
2763 sense = c->err_info->SenseInfo;
2764 sense_key = sense[2];
2767 cmd_status = c->err_info->CommandStatus;
2768 scsi_status = c->err_info->ScsiStatus;
2770 /* Is the volume 'not ready'? */
2771 if (cmd_status != CMD_TARGET_STATUS ||
2772 scsi_status != SAM_STAT_CHECK_CONDITION ||
2773 sense_key != NOT_READY ||
2774 asc != ASC_LUN_NOT_READY) {
2778 /* Determine the reason for not ready state */
2779 ldstat = hpsa_get_volume_status(h, scsi3addr);
2781 /* Keep volume offline in certain cases: */
2783 case HPSA_LV_UNDERGOING_ERASE:
2784 case HPSA_LV_UNDERGOING_RPI:
2785 case HPSA_LV_PENDING_RPI:
2786 case HPSA_LV_ENCRYPTED_NO_KEY:
2787 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
2788 case HPSA_LV_UNDERGOING_ENCRYPTION:
2789 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
2790 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
2792 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
2793 /* If VPD status page isn't available,
2794 * use ASC/ASCQ to determine state
2796 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
2797 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
2806 static int hpsa_update_device_info(struct ctlr_info *h,
2807 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
2808 unsigned char *is_OBDR_device)
2811 #define OBDR_SIG_OFFSET 43
2812 #define OBDR_TAPE_SIG "$DR-10"
2813 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
2814 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
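/* The arithmetic behind OBDR_TAPE_INQ_SIZE: the "$DR-10" signature is
 * 6 bytes (sizeof minus the trailing NUL) at inquiry offset 43, so
 * 43 + 6 = 49 bytes of inquiry data is the minimum needed to test for it.
 */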
2816 unsigned char *inq_buff;
2817 unsigned char *obdr_sig;
2819 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
2823 /* Do an inquiry to the device to see what it is. */
2824 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
2825 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
2826 /* Inquiry failed (msg printed already) */
2827 dev_err(&h->pdev->dev,
2828 "hpsa_update_device_info: inquiry failed\n");
2832 this_device->devtype = (inq_buff[0] & 0x1f);
2833 memcpy(this_device->scsi3addr, scsi3addr, 8);
2834 memcpy(this_device->vendor, &inq_buff[8],
2835 sizeof(this_device->vendor));
2836 memcpy(this_device->model, &inq_buff[16],
2837 sizeof(this_device->model));
2838 memset(this_device->device_id, 0,
2839 sizeof(this_device->device_id));
2840 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
2841 sizeof(this_device->device_id));
2843 if (this_device->devtype == TYPE_DISK &&
2844 is_logical_dev_addr_mode(scsi3addr)) {
2847 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
2848 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2849 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
2850 volume_offline = hpsa_volume_offline(h, scsi3addr);
2851 if (volume_offline < 0 || volume_offline > 0xff)
2852 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
2853 this_device->volume_offline = volume_offline & 0xff;
2855 this_device->raid_level = RAID_UNKNOWN;
2856 this_device->offload_config = 0;
2857 this_device->offload_enabled = 0;
2858 this_device->offload_to_be_enabled = 0;
2859 this_device->volume_offline = 0;
2860 this_device->queue_depth = h->nr_cmds;
2863 if (is_OBDR_device) {
2864 /* See if this is a One-Button-Disaster-Recovery device
2865 * by looking for "$DR-10" at offset 43 in inquiry data.
2867 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
2868 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
2869 strncmp(obdr_sig, OBDR_TAPE_SIG,
2870 OBDR_SIG_LEN) == 0);
2881 static unsigned char *ext_target_model[] = {
2891 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
2895 for (i = 0; ext_target_model[i]; i++)
2896 if (strncmp(device->model, ext_target_model[i],
2897 strlen(ext_target_model[i])) == 0)
2902 /* Helper function to assign bus, target, lun mapping of devices.
2903 * Puts non-external target logical volumes on bus 0, external target logical
2904 * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
2905 * Logical drive target and lun are assigned at this time, but
2906 * physical device lun and target assignment are deferred (assigned
2907 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
2909 static void figure_bus_target_lun(struct ctlr_info *h,
2910 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
2912 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
2914 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
2915 /* physical device, target and lun filled in later */
2916 if (is_hba_lunid(lunaddrbytes))
2917 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
2919 /* defer target, lun assignment for physical devices */
2920 hpsa_set_bus_target_lun(device, 2, -1, -1);
2923 /* It's a logical device */
2924 if (is_ext_target(h, device)) {
2925 /* External target way: put logicals on bus 1
2926 * and match the target/lun numbers the box
2927 * reports; other smart arrays: bus 0, target 0, match lunid
2929 hpsa_set_bus_target_lun(device,
2930 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
2933 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
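/* Example decode using the lunid extracted above: an external target
 * lunid of 0x00050003 maps to bus 1, target 5 ((lunid >> 16) & 0x3fff),
 * lun 3 (lunid & 0x00ff); a plain logical volume lunid of 0x00000002
 * maps to bus 0, target 0, lun 2.
 */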
2937 * If there is no lun 0 on a target, linux won't find any devices.
2938 * For the external targets (arrays), we have to manually detect the enclosure
2939 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
2940 * it for some reason. *tmpdevice is the target we're adding,
2941 * this_device is a pointer into the current element of currentsd[]
2942 * that we're building up in hpsa_update_scsi_devices(), below.
2943 * lunzerobits is a bitmap that tracks which targets already have a lun 0.
2945 * Returns 1 if an enclosure was added, 0 if not.
2947 static int add_ext_target_dev(struct ctlr_info *h,
2948 struct hpsa_scsi_dev_t *tmpdevice,
2949 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
2950 unsigned long lunzerobits[], int *n_ext_target_devs)
2952 unsigned char scsi3addr[8];
2954 if (test_bit(tmpdevice->target, lunzerobits))
2955 return 0; /* There is already a lun 0 on this target. */
2957 if (!is_logical_dev_addr_mode(lunaddrbytes))
2958 return 0; /* It's the logical targets that may lack lun 0. */
2960 if (!is_ext_target(h, tmpdevice))
2961 return 0; /* Only external target devices have this problem. */
2963 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
2966 memset(scsi3addr, 0, 8);
2967 scsi3addr[3] = tmpdevice->target;
2968 if (is_hba_lunid(scsi3addr))
2969 return 0; /* Don't add the RAID controller here. */
2971 if (is_scsi_rev_5(h))
2972 return 0; /* p1210m doesn't need to do this. */
2974 if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
2975 dev_warn(&h->pdev->dev, "Maximum number of external "
2976 "target devices exceeded. Check your hardware "
2981 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
2983 (*n_ext_target_devs)++;
2984 hpsa_set_bus_target_lun(this_device,
2985 tmpdevice->bus, tmpdevice->target, 0);
2986 set_bit(tmpdevice->target, lunzerobits);
2991 * Get address of physical disk used for an ioaccel2 mode command:
2992 * 1. Extract ioaccel2 handle from the command.
2993 * 2. Find a matching ioaccel2 handle from list of physical disks.
2995 * Returns 1 and sets scsi3addr to the address of the matching physical
2996 * disk, or 0 if no matching physical disk was found.
2998 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2999 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
3001 struct io_accel2_cmd *c2 =
3002 &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
3003 unsigned long flags;
3006 spin_lock_irqsave(&h->devlock, flags);
3007 for (i = 0; i < h->ndevices; i++)
3008 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
3009 memcpy(scsi3addr, h->dev[i]->scsi3addr,
3010 sizeof(h->dev[i]->scsi3addr));
3011 spin_unlock_irqrestore(&h->devlock, flags);
3014 spin_unlock_irqrestore(&h->devlock, flags);
3019 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
3020 * logdev. The number of luns in physdev and logdev are returned in
3021 * *nphysicals and *nlogicals, respectively.
3022 * Returns 0 on success, -1 otherwise.
3024 static int hpsa_gather_lun_info(struct ctlr_info *h,
3025 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
3026 struct ReportLUNdata *logdev, u32 *nlogicals)
3028 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3029 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3032 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
3033 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
3034 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
3035 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
3036 *nphysicals = HPSA_MAX_PHYS_LUN;
3038 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
3039 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
3042 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
3043 /* Reject Logicals in excess of our max capability. */
3044 if (*nlogicals > HPSA_MAX_LUN) {
3045 dev_warn(&h->pdev->dev,
3046 "maximum logical LUNs (%d) exceeded. "
3047 "%d LUNs ignored.\n", HPSA_MAX_LUN,
3048 *nlogicals - HPSA_MAX_LUN);
3049 *nlogicals = HPSA_MAX_LUN;
3051 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
3052 dev_warn(&h->pdev->dev,
3053 "maximum logical + physical LUNs (%d) exceeded. "
3054 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
3055 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
3056 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
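/* The divisors above come straight from the report formats: extended
 * physical LUN entries are 24 bytes each and plain logical LUN entries
 * are 8 bytes, so e.g. a physical LUNListLength of 72 means 3 LUNs.
 */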
3061 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
3062 int i, int nphysicals, int nlogicals,
3063 struct ReportExtendedLUNdata *physdev_list,
3064 struct ReportLUNdata *logdev_list)
3066 /* Helper function, figure out where the LUN ID info is coming from
3067 * given index i, lists of physical and logical devices, where in
3068 * the list the raid controller is supposed to appear (first or last)
3071 int logicals_start = nphysicals + (raid_ctlr_position == 0);
3072 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
3074 if (i == raid_ctlr_position)
3075 return RAID_CTLR_LUNID;
3077 if (i < logicals_start)
3078 return &physdev_list->LUN[i -
3079 (raid_ctlr_position == 0)].lunid[0];
3081 if (i < last_device)
3082 return &logdev_list->LUN[i - nphysicals -
3083 (raid_ctlr_position == 0)][0];
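/* Index walk for raid_ctlr_position == 0, nphysicals == 2,
 * nlogicals == 1: i == 0 returns RAID_CTLR_LUNID, i == 1 and 2 return
 * physdev_list->LUN[0] and LUN[1], and i == 3 returns
 * logdev_list->LUN[0].
 */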
3088 static int hpsa_hba_mode_enabled(struct ctlr_info *h)
3091 int hba_mode_enabled;
3092 struct bmic_controller_parameters *ctlr_params;
3093 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
3098 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
3099 sizeof(struct bmic_controller_parameters));
3106 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
3108 return hba_mode_enabled;
3111 /* get physical drive ioaccel handle and queue depth */
3112 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3113 struct hpsa_scsi_dev_t *dev,
3115 struct bmic_identify_physical_device *id_phys)
3118 struct ext_report_lun_entry *rle =
3119 (struct ext_report_lun_entry *) lunaddrbytes;
3121 dev->ioaccel_handle = rle->ioaccel_handle;
3122 memset(id_phys, 0, sizeof(*id_phys));
3123 rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
3124 GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
3127 /* Reserve space for FW operations */
3128 #define DRIVE_CMDS_RESERVED_FOR_FW 2
3129 #define DRIVE_QUEUE_DEPTH 7
3131 le16_to_cpu(id_phys->current_queue_depth_limit) -
3132 DRIVE_CMDS_RESERVED_FOR_FW;
3134 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
3135 atomic_set(&dev->ioaccel_cmds_out, 0);
3138 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3140 /* the idea here is we could get notified
3141 * that some devices have changed, so we do a report
3142 * physical luns and report logical luns cmd, and adjust
3143 * our list of devices accordingly.
3145 * The scsi3addr's of devices won't change so long as the
3146 * adapter is not reset. That means we can rescan and
3147 * tell which devices we already know about, vs. new
3148 * devices, vs. disappearing devices.
3150 struct ReportExtendedLUNdata *physdev_list = NULL;
3151 struct ReportLUNdata *logdev_list = NULL;
3152 struct bmic_identify_physical_device *id_phys = NULL;
3155 u32 ndev_allocated = 0;
3156 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3158 int i, n_ext_target_devs, ndevs_to_allocate;
3159 int raid_ctlr_position;
3160 int rescan_hba_mode;
3161 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3163 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3164 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3165 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
3166 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3167 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3169 if (!currentsd || !physdev_list || !logdev_list ||
3170 !tmpdevice || !id_phys) {
3171 dev_err(&h->pdev->dev, "out of memory\n");
3174 memset(lunzerobits, 0, sizeof(lunzerobits));
3176 rescan_hba_mode = hpsa_hba_mode_enabled(h);
3177 if (rescan_hba_mode < 0)
3180 if (!h->hba_mode_enabled && rescan_hba_mode)
3181 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3182 else if (h->hba_mode_enabled && !rescan_hba_mode)
3183 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3185 h->hba_mode_enabled = rescan_hba_mode;
3187 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
3188 logdev_list, &nlogicals))
3191 /* We might see up to the maximum number of logical and physical disks
3192 * plus external target devices, and a device for the local RAID controller. */
3195 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
3197 /* Allocate the per device structures */
3198 for (i = 0; i < ndevs_to_allocate; i++) {
3199 if (i >= HPSA_MAX_DEVICES) {
3200 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3201 " %d devices ignored.\n", HPSA_MAX_DEVICES,
3202 ndevs_to_allocate - HPSA_MAX_DEVICES);
3206 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3207 if (!currentsd[i]) {
3208 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3209 __FILE__, __LINE__);
3215 if (is_scsi_rev_5(h))
3216 raid_ctlr_position = 0;
3218 raid_ctlr_position = nphysicals + nlogicals;
3220 /* adjust our table of devices */
3221 n_ext_target_devs = 0;
3222 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
3223 u8 *lunaddrbytes, is_OBDR = 0;
3225 /* Figure out where the LUN ID info is coming from */
3226 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3227 i, nphysicals, nlogicals, physdev_list, logdev_list);
3229 /* skip masked non-disk devices */
3230 if (MASKED_DEVICE(lunaddrbytes))
3231 if (i < nphysicals + (raid_ctlr_position == 0) &&
3232 NON_DISK_PHYS_DEV(lunaddrbytes))
3235 /* Get device type, vendor, model, device id */
3236 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3238 continue; /* skip it if we can't talk to it. */
3239 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3240 this_device = currentsd[ncurrent];
3243 * For external target devices, we have to insert a LUN 0 which
3244 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3245 * is nonetheless an enclosure device there. We have to
3246 * present that otherwise linux won't find anything if
3247 * there is no lun 0.
3249 if (add_ext_target_dev(h, tmpdevice, this_device,
3250 lunaddrbytes, lunzerobits,
3251 &n_ext_target_devs)) {
3253 this_device = currentsd[ncurrent];
3256 *this_device = *tmpdevice;
3258 /* do not expose masked devices */
3259 if (MASKED_DEVICE(lunaddrbytes) &&
3260 i < nphysicals + (raid_ctlr_position == 0)) {
3261 if (h->hba_mode_enabled)
3262 dev_warn(&h->pdev->dev,
3263 "Masked physical device detected\n");
3264 this_device->expose_state = HPSA_DO_NOT_EXPOSE;
3266 this_device->expose_state =
3267 HPSA_SG_ATTACH | HPSA_ULD_ATTACH;
3270 switch (this_device->devtype) {
3272 /* We don't *really* support actual CD-ROM devices,
3273 * just "One Button Disaster Recovery" tape drive
3274 * which temporarily pretends to be a CD-ROM drive.
3275 * So we check that the device is really an OBDR tape
3276 * device by checking for "$DR-10" in bytes 43-48 of the inquiry data. */
3283 if (h->hba_mode_enabled) {
3284 /* never use raid mapper in HBA mode */
3285 this_device->offload_enabled = 0;
3288 } else if (h->acciopath_status) {
3289 if (i >= nphysicals) {
3299 if (h->transMethod & CFGTBL_Trans_io_accel1 ||
3300 h->transMethod & CFGTBL_Trans_io_accel2) {
3301 hpsa_get_ioaccel_drive_info(h, this_device,
3302 lunaddrbytes, id_phys);
3303 atomic_set(&this_device->ioaccel_cmds_out, 0);
3308 case TYPE_MEDIUM_CHANGER:
3311 case TYPE_ENCLOSURE:
3312 if (h->hba_mode_enabled)
3316 /* Only present the Smart Array HBA as a RAID controller.
3317 * If it's a RAID controller other than the HBA itself
3318 * (an external RAID controller, MSA500 or similar)
3321 if (!is_hba_lunid(lunaddrbytes))
3328 if (ncurrent >= HPSA_MAX_DEVICES)
3331 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3334 for (i = 0; i < ndev_allocated; i++)
3335 kfree(currentsd[i]);
3337 kfree(physdev_list);
3342 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
3343 struct scatterlist *sg)
3345 u64 addr64 = (u64) sg_dma_address(sg);
3346 unsigned int len = sg_dma_len(sg);
3348 desc->Addr = cpu_to_le64(addr64);
3349 desc->Len = cpu_to_le32(len);
3354 * hpsa_scatter_gather takes a struct scsi_cmnd (cmd), does the PCI DMA
3355 * mapping, and fills in the scatter-gather entries of the HPSA command (cp). */
3358 static int hpsa_scatter_gather(struct ctlr_info *h,
3359 struct CommandList *cp,
3360 struct scsi_cmnd *cmd)
3362 struct scatterlist *sg;
3363 int use_sg, i, sg_index, chained;
3364 struct SGDescriptor *curr_sg;
3366 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3368 use_sg = scsi_dma_map(cmd);
3373 goto sglist_finished;
3378 scsi_for_each_sg(cmd, sg, use_sg, i) {
3379 if (i == h->max_cmd_sg_entries - 1 &&
3380 use_sg > h->max_cmd_sg_entries) {
3382 curr_sg = h->cmd_sg_list[cp->cmdindex];
3385 hpsa_set_sg_descriptor(curr_sg, sg);
3389 /* Back the pointer up to the last entry and mark it as "last". */
3390 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3392 if (use_sg + chained > h->maxSG)
3393 h->maxSG = use_sg + chained;
3396 cp->Header.SGList = h->max_cmd_sg_entries;
3397 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
3398 if (hpsa_map_sg_chain_block(h, cp)) {
3399 scsi_dma_unmap(cmd);
3407 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
3408 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
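/* Chaining sketch, assuming max_cmd_sg_entries == 32: for use_sg == 40
 * the loop above writes 31 descriptors into the command, then redirects
 * curr_sg into cmd_sg_list[cp->cmdindex] for the remaining 9;
 * hpsa_map_sg_chain_block() is expected to point the command's last
 * slot at that chain block, and SGTotal becomes use_sg + 1.
 */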
3412 #define IO_ACCEL_INELIGIBLE (1)
3413 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3419 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
3426 if (*cdb_len == 6) {
3427 block = (((u32) cdb[2]) << 8) | cdb[3];
3430 BUG_ON(*cdb_len != 12);
3431 block = (((u32) cdb[2]) << 24) |
3432 (((u32) cdb[3]) << 16) |
3433 (((u32) cdb[4]) << 8) |
3436 (((u32) cdb[6]) << 24) |
3437 (((u32) cdb[7]) << 16) |
3438 (((u32) cdb[8]) << 8) |
3441 if (block_cnt > 0xffff)
3442 return IO_ACCEL_INELIGIBLE;
3444 cdb[0] = is_write ? WRITE_10 : READ_10;
3446 cdb[2] = (u8) (block >> 24);
3447 cdb[3] = (u8) (block >> 16);
3448 cdb[4] = (u8) (block >> 8);
3449 cdb[5] = (u8) (block);
3451 cdb[7] = (u8) (block_cnt >> 8);
3452 cdb[8] = (u8) (block_cnt);
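/* Worked example: READ(6) 08 00 12 34 08 00 (LBA 0x1234, 8 blocks)
 * is rewritten as READ(10) 28 00 00 00 12 34 00 00 08 00 -- the same
 * LBA and count, re-encoded in the 10-byte form the accelerator takes.
 */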
3460 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
3461 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3462 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3464 struct scsi_cmnd *cmd = c->scsi_cmd;
3465 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3467 unsigned int total_len = 0;
3468 struct scatterlist *sg;
3471 struct SGDescriptor *curr_sg;
3472 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3474 /* TODO: implement chaining support */
3475 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3476 atomic_dec(&phys_disk->ioaccel_cmds_out);
3477 return IO_ACCEL_INELIGIBLE;
3480 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3482 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3483 atomic_dec(&phys_disk->ioaccel_cmds_out);
3484 return IO_ACCEL_INELIGIBLE;
3487 c->cmd_type = CMD_IOACCEL1;
3489 /* Adjust the DMA address to point to the accelerated command buffer */
3490 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3491 (c->cmdindex * sizeof(*cp));
3492 BUG_ON(c->busaddr & 0x0000007F);
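/* Alignment sketch: busaddr = pool base + cmdindex * sizeof(*cp), so
 * the BUG_ON holds as long as the pool is 128-byte aligned and
 * sizeof(struct io_accel1_cmd) is a multiple of 128; the controller
 * evidently requires ioaccel1 command frames on 128-byte boundaries.
 */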
3494 use_sg = scsi_dma_map(cmd);
3496 atomic_dec(&phys_disk->ioaccel_cmds_out);
3502 scsi_for_each_sg(cmd, sg, use_sg, i) {
3503 addr64 = (u64) sg_dma_address(sg);
3504 len = sg_dma_len(sg);
3506 curr_sg->Addr = cpu_to_le64(addr64);
3507 curr_sg->Len = cpu_to_le32(len);
3508 curr_sg->Ext = cpu_to_le32(0);
3511 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3513 switch (cmd->sc_data_direction) {
3515 control |= IOACCEL1_CONTROL_DATA_OUT;
3517 case DMA_FROM_DEVICE:
3518 control |= IOACCEL1_CONTROL_DATA_IN;
3521 control |= IOACCEL1_CONTROL_NODATAXFER;
3524 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3525 cmd->sc_data_direction);
3530 control |= IOACCEL1_CONTROL_NODATAXFER;
3533 c->Header.SGList = use_sg;
3534 /* Fill out the command structure to submit */
3535 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
3536 cp->transfer_len = cpu_to_le32(total_len);
3537 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
3538 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
3539 cp->control = cpu_to_le32(control);
3540 memcpy(cp->CDB, cdb, cdb_len);
3541 memcpy(cp->CISS_LUN, scsi3addr, 8);
3542 /* Tag was already set at init time. */
3543 enqueue_cmd_and_start_io(h, c);
3548 * Queue a command directly to a device behind the controller using the
3549 * I/O accelerator path.
3551 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3552 struct CommandList *c)
3554 struct scsi_cmnd *cmd = c->scsi_cmd;
3555 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3559 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3560 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
3564 * Set encryption parameters for the ioaccel2 request
3566 static void set_encrypt_ioaccel2(struct ctlr_info *h,
3567 struct CommandList *c, struct io_accel2_cmd *cp)
3569 struct scsi_cmnd *cmd = c->scsi_cmd;
3570 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3571 struct raid_map_data *map = &dev->raid_map;
3574 /* Are we doing encryption on this device */
3575 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
3577 /* Set the data encryption key index. */
3578 cp->dekindex = map->dekindex;
3580 /* Set the encryption enable flag, encoded into direction field. */
3581 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3583 /* Set encryption tweak values based on logical block address
3584 * If block size is 512, tweak value is LBA.
3585 * For other block sizes, tweak value is (LBA * block size) / 512.
3587 switch (cmd->cmnd[0]) {
3588 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3591 first_block = get_unaligned_be16(&cmd->cmnd[2]);
3595 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3598 first_block = get_unaligned_be32(&cmd->cmnd[2]);
3602 first_block = get_unaligned_be64(&cmd->cmnd[2]);
3605 dev_err(&h->pdev->dev,
3606 "ERROR: %s: size (0x%x) not supported for encryption\n",
3607 __func__, cmd->cmnd[0]);
3612 if (le32_to_cpu(map->volume_blk_size) != 512)
3613 first_block = first_block *
3614 le32_to_cpu(map->volume_blk_size)/512;
3616 cp->tweak_lower = cpu_to_le32(first_block);
3617 cp->tweak_upper = cpu_to_le32(first_block >> 32);
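/* Worked example: for a 4096-byte-block volume and first_block == 100,
 * the tweak is 100 * 4096 / 512 = 800, giving tweak_lower = 800 and
 * tweak_upper = 0; for a 512-byte-block volume the LBA is used as-is.
 */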
3620 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3621 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3622 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3624 struct scsi_cmnd *cmd = c->scsi_cmd;
3625 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3626 struct ioaccel2_sg_element *curr_sg;
3628 struct scatterlist *sg;
3633 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3634 atomic_dec(&phys_disk->ioaccel_cmds_out);
3635 return IO_ACCEL_INELIGIBLE;
3638 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3639 atomic_dec(&phys_disk->ioaccel_cmds_out);
3640 return IO_ACCEL_INELIGIBLE;
3643 c->cmd_type = CMD_IOACCEL2;
3644 /* Adjust the DMA address to point to the accelerated command buffer */
3645 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3646 (c->cmdindex * sizeof(*cp));
3647 BUG_ON(c->busaddr & 0x0000007F);
3649 memset(cp, 0, sizeof(*cp));
3650 cp->IU_type = IOACCEL2_IU_TYPE;
3652 use_sg = scsi_dma_map(cmd);
3654 atomic_dec(&phys_disk->ioaccel_cmds_out);
3659 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
3661 scsi_for_each_sg(cmd, sg, use_sg, i) {
3662 addr64 = (u64) sg_dma_address(sg);
3663 len = sg_dma_len(sg);
3665 curr_sg->address = cpu_to_le64(addr64);
3666 curr_sg->length = cpu_to_le32(len);
3667 curr_sg->reserved[0] = 0;
3668 curr_sg->reserved[1] = 0;
3669 curr_sg->reserved[2] = 0;
3670 curr_sg->chain_indicator = 0;
3674 switch (cmd->sc_data_direction) {
3676 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3677 cp->direction |= IOACCEL2_DIR_DATA_OUT;
3679 case DMA_FROM_DEVICE:
3680 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3681 cp->direction |= IOACCEL2_DIR_DATA_IN;
3684 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3685 cp->direction |= IOACCEL2_DIR_NO_DATA;
3688 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3689 cmd->sc_data_direction);
3694 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3695 cp->direction |= IOACCEL2_DIR_NO_DATA;
3698 /* Set encryption parameters, if necessary */
3699 set_encrypt_ioaccel2(h, c, cp);
3701 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
3702 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
3703 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
3705 /* fill in sg elements */
3706 cp->sg_count = (u8) use_sg;
3708 cp->data_len = cpu_to_le32(total_len);
3709 cp->err_ptr = cpu_to_le64(c->busaddr +
3710 offsetof(struct io_accel2_cmd, error_data));
3711 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
3713 enqueue_cmd_and_start_io(h, c);
3718 * Queue a command to the correct I/O accelerator path.
3720 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
3721 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3722 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3724 /* Try to honor the device's queue depth */
3725 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
3726 phys_disk->queue_depth) {
3727 atomic_dec(&phys_disk->ioaccel_cmds_out);
3728 return IO_ACCEL_INELIGIBLE;
3730 if (h->transMethod & CFGTBL_Trans_io_accel1)
3731 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
3732 cdb, cdb_len, scsi3addr,
3735 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
3736 cdb, cdb_len, scsi3addr,
3740 static void raid_map_helper(struct raid_map_data *map,
3741 int offload_to_mirror, u32 *map_index, u32 *current_group)
3743 if (offload_to_mirror == 0) {
3744 /* use physical disk in the first mirrored group. */
3745 *map_index %= le16_to_cpu(map->data_disks_per_row);
3749 /* determine mirror group that *map_index indicates */
3750 *current_group = *map_index /
3751 le16_to_cpu(map->data_disks_per_row);
3752 if (offload_to_mirror == *current_group)
3754 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
3755 /* select map index from next group */
3756 *map_index += le16_to_cpu(map->data_disks_per_row);
3759 /* select map index from first group */
3760 *map_index %= le16_to_cpu(map->data_disks_per_row);
3763 } while (offload_to_mirror != *current_group);
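/* Walk-through with data_disks_per_row == 2, layout_map_count == 3,
 * *map_index == 1 and offload_to_mirror == 2: the loop steps through
 * groups 0 -> 1 -> 2, adding data_disks_per_row each time, so the
 * final *map_index is 5 -- the same member, taken from the third
 * mirror group.
 */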
3767 * Attempt to perform offload RAID mapping for a logical volume I/O.
3769 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3770 struct CommandList *c)
3772 struct scsi_cmnd *cmd = c->scsi_cmd;
3773 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3774 struct raid_map_data *map = &dev->raid_map;
3775 struct raid_map_disk_data *dd = &map->data[0];
3778 u64 first_block, last_block;
3781 u64 first_row, last_row;
3782 u32 first_row_offset, last_row_offset;
3783 u32 first_column, last_column;
3784 u64 r0_first_row, r0_last_row;
3785 u32 r5or6_blocks_per_row;
3786 u64 r5or6_first_row, r5or6_last_row;
3787 u32 r5or6_first_row_offset, r5or6_last_row_offset;
3788 u32 r5or6_first_column, r5or6_last_column;
3789 u32 total_disks_per_row;
3791 u32 first_group, last_group, current_group;
3799 #if BITS_PER_LONG == 32
3802 int offload_to_mirror;
3804 /* check for valid opcode, get LBA and block count */
3805 switch (cmd->cmnd[0]) {
3810 (((u64) cmd->cmnd[2]) << 8) |
3812 block_cnt = cmd->cmnd[4];
3820 (((u64) cmd->cmnd[2]) << 24) |
3821 (((u64) cmd->cmnd[3]) << 16) |
3822 (((u64) cmd->cmnd[4]) << 8) |
3825 (((u32) cmd->cmnd[7]) << 8) |
3832 (((u64) cmd->cmnd[2]) << 24) |
3833 (((u64) cmd->cmnd[3]) << 16) |
3834 (((u64) cmd->cmnd[4]) << 8) |
3837 (((u32) cmd->cmnd[6]) << 24) |
3838 (((u32) cmd->cmnd[7]) << 16) |
3839 (((u32) cmd->cmnd[8]) << 8) |
3846 (((u64) cmd->cmnd[2]) << 56) |
3847 (((u64) cmd->cmnd[3]) << 48) |
3848 (((u64) cmd->cmnd[4]) << 40) |
3849 (((u64) cmd->cmnd[5]) << 32) |
3850 (((u64) cmd->cmnd[6]) << 24) |
3851 (((u64) cmd->cmnd[7]) << 16) |
3852 (((u64) cmd->cmnd[8]) << 8) |
3855 (((u32) cmd->cmnd[10]) << 24) |
3856 (((u32) cmd->cmnd[11]) << 16) |
3857 (((u32) cmd->cmnd[12]) << 8) |
3861 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
3863 last_block = first_block + block_cnt - 1;
3865 /* check for write to non-RAID-0 */
3866 if (is_write && dev->raid_level != 0)
3867 return IO_ACCEL_INELIGIBLE;
3869 /* check for invalid block or wraparound */
3870 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
3871 last_block < first_block)
3872 return IO_ACCEL_INELIGIBLE;
3874 /* calculate stripe information for the request */
3875 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
3876 le16_to_cpu(map->strip_size);
3877 strip_size = le16_to_cpu(map->strip_size);
3878 #if BITS_PER_LONG == 32
3879 tmpdiv = first_block;
3880 (void) do_div(tmpdiv, blocks_per_row);
3882 tmpdiv = last_block;
3883 (void) do_div(tmpdiv, blocks_per_row);
3885 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3886 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3887 tmpdiv = first_row_offset;
3888 (void) do_div(tmpdiv, strip_size);
3889 first_column = tmpdiv;
3890 tmpdiv = last_row_offset;
3891 (void) do_div(tmpdiv, strip_size);
3892 last_column = tmpdiv;
3894 first_row = first_block / blocks_per_row;
3895 last_row = last_block / blocks_per_row;
3896 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3897 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3898 first_column = first_row_offset / strip_size;
3899 last_column = last_row_offset / strip_size;
3902 /* if this isn't a single row/column then give to the controller */
3903 if ((first_row != last_row) || (first_column != last_column))
3904 return IO_ACCEL_INELIGIBLE;
3906 /* proceeding with driver mapping */
3907 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
3908 le16_to_cpu(map->metadata_disks_per_row);
3909 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3910 le16_to_cpu(map->row_cnt);
3911 map_index = (map_row * total_disks_per_row) + first_column;
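/* Geometry example: with data_disks_per_row == 3 and strip_size == 128,
 * blocks_per_row == 384; a request at first_block == 500 falls in
 * first_row == 1 with first_row_offset == 116 and first_column == 0,
 * so (assuming parity_rotation_shift == 0 and row_cnt > 1) map_row == 1
 * and map_index == total_disks_per_row + 0.
 */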
3913 switch (dev->raid_level) {
3915 break; /* nothing special to do */
3917 /* Handles load balance across RAID 1 members.
3918 * (2-drive R1 and R10 with even # of drives.)
3919 * Appropriate for SSDs, not optimal for HDDs
3921 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
3922 if (dev->offload_to_mirror)
3923 map_index += le16_to_cpu(map->data_disks_per_row);
3924 dev->offload_to_mirror = !dev->offload_to_mirror;
3927 /* Handles N-way mirrors (R1-ADM)
3928 * and R10 with # of drives divisible by 3.
3930 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
3932 offload_to_mirror = dev->offload_to_mirror;
3933 raid_map_helper(map, offload_to_mirror,
3934 &map_index, ¤t_group);
3935 /* set mirror group to use next time */
3937 (offload_to_mirror >=
3938 le16_to_cpu(map->layout_map_count) - 1)
3939 ? 0 : offload_to_mirror + 1;
3940 dev->offload_to_mirror = offload_to_mirror;
3941 /* Avoid direct use of dev->offload_to_mirror within this
3942 * function since multiple threads might simultaneously
3943 * increment it beyond the range of map->layout_map_count - 1.
3948 if (le16_to_cpu(map->layout_map_count) <= 1)
3951 /* Verify first and last block are in same RAID group */
3952 r5or6_blocks_per_row =
3953 le16_to_cpu(map->strip_size) *
3954 le16_to_cpu(map->data_disks_per_row);
3955 BUG_ON(r5or6_blocks_per_row == 0);
3956 stripesize = r5or6_blocks_per_row *
3957 le16_to_cpu(map->layout_map_count);
3958 #if BITS_PER_LONG == 32
3959 tmpdiv = first_block;
3960 first_group = do_div(tmpdiv, stripesize);
3961 tmpdiv = first_group;
3962 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3963 first_group = tmpdiv;
3964 tmpdiv = last_block;
3965 last_group = do_div(tmpdiv, stripesize);
3966 tmpdiv = last_group;
3967 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3968 last_group = tmpdiv;
3970 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
3971 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
3973 if (first_group != last_group)
3974 return IO_ACCEL_INELIGIBLE;
3976 /* Verify request is in a single row of RAID 5/6 */
3977 #if BITS_PER_LONG == 32
3978 tmpdiv = first_block;
3979 (void) do_div(tmpdiv, stripesize);
3980 first_row = r5or6_first_row = r0_first_row = tmpdiv;
3981 tmpdiv = last_block;
3982 (void) do_div(tmpdiv, stripesize);
3983 r5or6_last_row = r0_last_row = tmpdiv;
3985 first_row = r5or6_first_row = r0_first_row =
3986 first_block / stripesize;
3987 r5or6_last_row = r0_last_row = last_block / stripesize;
3989 if (r5or6_first_row != r5or6_last_row)
3990 return IO_ACCEL_INELIGIBLE;
3993 /* Verify request is in a single column */
3994 #if BITS_PER_LONG == 32
3995 tmpdiv = first_block;
3996 first_row_offset = do_div(tmpdiv, stripesize);
3997 tmpdiv = first_row_offset;
3998 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
3999 r5or6_first_row_offset = first_row_offset;
4000 tmpdiv = last_block;
4001 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
4002 tmpdiv = r5or6_last_row_offset;
4003 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
4004 tmpdiv = r5or6_first_row_offset;
4005 (void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
4006 first_column = r5or6_first_column = tmpdiv;
4007 tmpdiv = r5or6_last_row_offset;
4008 (void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
4009 r5or6_last_column = tmpdiv;
4011 first_row_offset = r5or6_first_row_offset =
4012 (u32)((first_block % stripesize) %
4013 r5or6_blocks_per_row);
4015 r5or6_last_row_offset =
4016 (u32)((last_block % stripesize) %
4017 r5or6_blocks_per_row);
4019 first_column = r5or6_first_column =
4020 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
4022 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
4024 if (r5or6_first_column != r5or6_last_column)
4025 return IO_ACCEL_INELIGIBLE;
4027 /* Request is eligible */
4028 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4029 le16_to_cpu(map->row_cnt);
4031 map_index = (first_group *
4032 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
4033 (map_row * total_disks_per_row) + first_column;
4036 return IO_ACCEL_INELIGIBLE;
4039 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
4040 return IO_ACCEL_INELIGIBLE;
4042 c->phys_disk = dev->phys_disk[map_index];
4044 disk_handle = dd[map_index].ioaccel_handle;
4045 disk_block = le64_to_cpu(map->disk_starting_blk) +
4046 first_row * le16_to_cpu(map->strip_size) +
4047 (first_row_offset - first_column *
4048 le16_to_cpu(map->strip_size));
4049 disk_block_cnt = block_cnt;
4051 /* handle differing logical/physical block sizes */
4052 if (map->phys_blk_shift) {
4053 disk_block <<= map->phys_blk_shift;
4054 disk_block_cnt <<= map->phys_blk_shift;
4055 }
4056 BUG_ON(disk_block_cnt > 0xffff);
4058 /* build the new CDB for the physical disk I/O */
4059 if (disk_block > 0xffffffff) {
4060 cdb[0] = is_write ? WRITE_16 : READ_16;
4061 cdb[1] = 0;
4062 cdb[2] = (u8) (disk_block >> 56);
4063 cdb[3] = (u8) (disk_block >> 48);
4064 cdb[4] = (u8) (disk_block >> 40);
4065 cdb[5] = (u8) (disk_block >> 32);
4066 cdb[6] = (u8) (disk_block >> 24);
4067 cdb[7] = (u8) (disk_block >> 16);
4068 cdb[8] = (u8) (disk_block >> 8);
4069 cdb[9] = (u8) (disk_block);
4070 cdb[10] = (u8) (disk_block_cnt >> 24);
4071 cdb[11] = (u8) (disk_block_cnt >> 16);
4072 cdb[12] = (u8) (disk_block_cnt >> 8);
4073 cdb[13] = (u8) (disk_block_cnt);
4074 cdb[14] = 0;
4075 cdb[15] = 0;
4076 cdb_len = 16;
4077 } else {
4078 cdb[0] = is_write ? WRITE_10 : READ_10;
4079 cdb[1] = 0;
4080 cdb[2] = (u8) (disk_block >> 24);
4081 cdb[3] = (u8) (disk_block >> 16);
4082 cdb[4] = (u8) (disk_block >> 8);
4083 cdb[5] = (u8) (disk_block);
4084 cdb[6] = 0;
4085 cdb[7] = (u8) (disk_block_cnt >> 8);
4086 cdb[8] = (u8) (disk_block_cnt);
4087 cdb[9] = 0;
4088 cdb_len = 10;
4089 }
4090 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
4091 dev->scsi3addr,
4092 dev->phys_disk[map_index]);
4093 }
4095 /*
4096 * Submit commands down the "normal" RAID stack path
4097 * All callers to hpsa_ciss_submit must check lockup_detected
4098 * beforehand, and optionally again after calling cmd_alloc.
4099 */
4100 static int hpsa_ciss_submit(struct ctlr_info *h,
4101 struct CommandList *c, struct scsi_cmnd *cmd,
4102 unsigned char scsi3addr[])
4103 {
4104 cmd->host_scribble = (unsigned char *) c;
4105 c->cmd_type = CMD_SCSI;
4106 c->scsi_cmd = cmd;
4107 c->Header.ReplyQueue = 0; /* unused in simple mode */
4108 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
4109 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
4111 /* Fill in the request block... */
4113 c->Request.Timeout = 0;
4114 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4115 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4116 c->Request.CDBLen = cmd->cmd_len;
4117 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4118 switch (cmd->sc_data_direction) {
4119 case DMA_TO_DEVICE:
4120 c->Request.type_attr_dir =
4121 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
4122 break;
4123 case DMA_FROM_DEVICE:
4124 c->Request.type_attr_dir =
4125 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
4126 break;
4127 case DMA_NONE:
4128 c->Request.type_attr_dir =
4129 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
4130 break;
4131 case DMA_BIDIRECTIONAL:
4132 /* This can happen if a buggy application does a scsi passthru
4133 * and sets both inlen and outlen to non-zero. ( see
4134 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
4135 */
4137 c->Request.type_attr_dir =
4138 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
4139 /* This is technically wrong, and hpsa controllers should
4140 * reject it with CMD_INVALID, which is the most correct
4141 * response, but non-fibre backends appear to let it
4142 * slide by, and give the same results as if this field
4143 * were set correctly. Either way is acceptable for
4144 * our purposes here.
4145 */
4146 break;
4149 default:
4150 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4151 cmd->sc_data_direction);
4152 BUG();
4153 break;
4154 }
4156 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
4157 cmd_free(h, c);
4158 return SCSI_MLQUEUE_HOST_BUSY;
4159 }
4160 enqueue_cmd_and_start_io(h, c);
4161 /* the cmd'll come back via intr handler in complete_scsi_command() */
4162 return 0;
4163 }
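/* Deferred-work handler: when an I/O-accelerated command must be
 * retried down the standard RAID path, the completion code queues it
 * here so the resubmission via hpsa_ciss_submit() happens outside of
 * interrupt context.
 */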
4165 static void hpsa_command_resubmit_worker(struct work_struct *work)
4166 {
4167 struct scsi_cmnd *cmd;
4168 struct hpsa_scsi_dev_t *dev;
4169 struct CommandList *c =
4170 container_of(work, struct CommandList, work);
4172 cmd = c->scsi_cmd;
4173 dev = cmd->device->hostdata;
4174 if (!dev) {
4175 cmd->result = DID_NO_CONNECT << 16;
4176 cmd->scsi_done(cmd);
4177 return;
4178 }
4179 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
4181 * If we get here, it means dma mapping failed. Try
4182 * again via scsi mid layer, which will then get
4183 * SCSI_MLQUEUE_HOST_BUSY.
4185 cmd->result = DID_IMM_RETRY << 16;
4186 cmd->scsi_done(cmd);
4187 }
4188 }
4190 /* Running in struct Scsi_Host->host_lock less mode */
4191 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
4192 {
4193 struct ctlr_info *h;
4194 struct hpsa_scsi_dev_t *dev;
4195 unsigned char scsi3addr[8];
4196 struct CommandList *c;
4197 int rc = 0;
4199 /* Get the ptr to our adapter structure out of cmd->host. */
4200 h = sdev_to_hba(cmd->device);
4201 dev = cmd->device->hostdata;
4202 if (!dev) {
4203 cmd->result = DID_NO_CONNECT << 16;
4204 cmd->scsi_done(cmd);
4205 return 0;
4206 }
4207 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
4209 if (unlikely(lockup_detected(h))) {
4210 cmd->result = DID_NO_CONNECT << 16;
4211 cmd->scsi_done(cmd);
4212 return 0;
4213 }
4214 c = cmd_alloc(h);
4215 if (c == NULL) { /* trouble... */
4216 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
4217 return SCSI_MLQUEUE_HOST_BUSY;
4219 if (unlikely(lockup_detected(h))) {
4220 cmd->result = DID_NO_CONNECT << 16;
4221 cmd_free(h, c);
4222 cmd->scsi_done(cmd);
4223 return 0;
4224 }
4227 * Call alternate submit routine for I/O accelerated commands.
4228 * Retries always go down the normal I/O path.
4230 if (likely(cmd->retries == 0 &&
4231 cmd->request->cmd_type == REQ_TYPE_FS &&
4232 h->acciopath_status)) {
4234 cmd->host_scribble = (unsigned char *) c;
4235 c->cmd_type = CMD_SCSI;
4236 c->scsi_cmd = cmd;
4238 if (dev->offload_enabled) {
4239 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4240 if (rc == 0)
4241 return 0; /* Sent on ioaccel path */
4242 if (rc < 0) { /* scsi_dma_map failed. */
4243 cmd_free(h, c);
4244 return SCSI_MLQUEUE_HOST_BUSY;
4245 }
4246 } else if (dev->ioaccel_handle) {
4247 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4248 if (rc == 0)
4249 return 0; /* Sent on direct map path */
4250 if (rc < 0) { /* scsi_dma_map failed. */
4251 cmd_free(h, c);
4252 return SCSI_MLQUEUE_HOST_BUSY;
4253 }
4254 }
4255 }
4256 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
4257 }
4259 static void hpsa_scan_complete(struct ctlr_info *h)
4261 unsigned long flags;
4263 spin_lock_irqsave(&h->scan_lock, flags);
4264 h->scan_finished = 1;
4265 wake_up_all(&h->scan_wait_queue);
4266 spin_unlock_irqrestore(&h->scan_lock, flags);
4269 static void hpsa_scan_start(struct Scsi_Host *sh)
4271 struct ctlr_info *h = shost_to_hba(sh);
4272 unsigned long flags;
4275 * Don't let rescans be initiated on a controller known to be locked
4276 * up. If the controller locks up *during* a rescan, that thread is
4277 * probably hosed, but at least we can prevent new rescan threads from
4278 * piling up on a locked up controller.
4280 if (unlikely(lockup_detected(h)))
4281 return hpsa_scan_complete(h);
4283 /* wait until any scan already in progress is finished. */
4284 while (1) {
4285 spin_lock_irqsave(&h->scan_lock, flags);
4286 if (h->scan_finished)
4287 break;
4288 spin_unlock_irqrestore(&h->scan_lock, flags);
4289 wait_event(h->scan_wait_queue, h->scan_finished);
4290 /* Note: We don't need to worry about a race between this
4291 * thread and driver unload because the midlayer will
4292 * have incremented the reference count, so unload won't
4293 * happen if we're in here.
4294 */
4295 }
4296 h->scan_finished = 0; /* mark scan as in progress */
4297 spin_unlock_irqrestore(&h->scan_lock, flags);
4299 if (unlikely(lockup_detected(h)))
4300 return hpsa_scan_complete(h);
4302 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4304 hpsa_scan_complete(h);
4305 }
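/* Clamp a requested queue depth to what the logical drive reported;
 * this is used as the host's change_queue_depth method.
 */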
4307 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
4308 {
4309 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
4311 if (!logical_drive)
4312 return -ENODEV;
4314 if (qdepth < 1)
4315 qdepth = 1;
4316 else if (qdepth > logical_drive->queue_depth)
4317 qdepth = logical_drive->queue_depth;
4319 return scsi_change_queue_depth(sdev, qdepth);
4320 }
4322 static int hpsa_scan_finished(struct Scsi_Host *sh,
4323 unsigned long elapsed_time)
4325 struct ctlr_info *h = shost_to_hba(sh);
4326 unsigned long flags;
4327 int finished;
4329 spin_lock_irqsave(&h->scan_lock, flags);
4330 finished = h->scan_finished;
4331 spin_unlock_irqrestore(&h->scan_lock, flags);
4332 return finished;
4333 }
4335 static void hpsa_unregister_scsi(struct ctlr_info *h)
4337 /* we are being forcibly unloaded, and may not refuse. */
4338 scsi_remove_host(h->scsi_host);
4339 scsi_host_put(h->scsi_host);
4340 h->scsi_host = NULL;
4343 static int hpsa_register_scsi(struct ctlr_info *h)
4345 struct Scsi_Host *sh;
4348 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4355 sh->max_channel = 3;
4356 sh->max_cmd_len = MAX_COMMAND_SIZE;
4357 sh->max_lun = HPSA_MAX_LUN;
4358 sh->max_id = HPSA_MAX_LUN;
4359 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
4360 sh->cmd_per_lun = sh->can_queue;
4361 sh->sg_tablesize = h->maxsgentries;
4363 sh->hostdata[0] = (unsigned long) h;
4364 sh->irq = h->intr[h->intr_mode];
4365 sh->unique_id = sh->irq;
4366 error = scsi_add_host(sh, &h->pdev->dev);
4367 if (error)
4368 goto fail_host_put;
4369 scsi_scan_host(sh);
4370 return 0;
4372 fail_host_put:
4373 dev_err(&h->pdev->dev, "%s: scsi_add_host"
4374 " failed for controller %d\n", __func__, h->ctlr);
4375 scsi_host_put(sh);
4376 return error;
4377 fail:
4378 dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
4379 " failed for controller %d\n", __func__, h->ctlr);
4380 return -ENOMEM;
4381 }
4383 static int wait_for_device_to_become_ready(struct ctlr_info *h,
4384 unsigned char lunaddr[])
4385 {
4386 int rc;
4387 int count = 0;
4388 int waittime = 1; /* seconds */
4389 struct CommandList *c;
4393 dev_warn(&h->pdev->dev, "out of memory in "
4394 "wait_for_device_to_become_ready.\n");
4398 /* Send test unit ready until device ready, or give up. */
4399 while (count < HPSA_TUR_RETRY_LIMIT) {
4401 /* Wait for a bit. do this first, because if we send
4402 * the TUR right away, the reset will just abort it.
4403 */
4404 msleep(1000 * waittime);
4405 count++;
4406 rc = 0; /* Device ready. */
4408 /* Increase wait time with each try, up to a point. */
4409 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4410 waittime = waittime * 2;
4412 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4413 (void) fill_cmd(c, TEST_UNIT_READY, h,
4414 NULL, 0, 0, lunaddr, TYPE_CMD);
4415 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
4419 /* no unmap needed here because no data xfer. */
4421 if (c->err_info->CommandStatus == CMD_SUCCESS)
4422 break;
4424 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4425 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4426 (c->err_info->SenseInfo[2] == NO_SENSE ||
4427 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4428 break;
4430 dev_warn(&h->pdev->dev, "waiting %d secs "
4431 "for device to become ready.\n", waittime);
4432 rc = 1; /* device not ready. */
4433 }
4435 if (rc)
4436 dev_warn(&h->pdev->dev, "giving up on device.\n");
4437 else
4438 dev_warn(&h->pdev->dev, "device is ready.\n");
4440 cmd_free(h, c);
4441 return rc;
4442 }
4444 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
4445 * complaining. Doing a host- or bus-reset can't do anything good here.
4446 */
4447 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4448 {
4449 int rc;
4450 struct ctlr_info *h;
4451 struct hpsa_scsi_dev_t *dev;
4453 /* find the controller to which the command to be aborted was sent */
4454 h = sdev_to_hba(scsicmd->device);
4455 if (h == NULL) /* paranoia */
4456 return FAILED;
4458 if (lockup_detected(h))
4459 return FAILED;
4461 dev = scsicmd->device->hostdata;
4462 if (!dev) {
4463 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
4464 "device lookup failed.\n");
4465 return FAILED;
4466 }
4468 /* if controller locked up, we can guarantee command won't complete */
4469 if (lockup_detected(h)) {
4470 dev_warn(&h->pdev->dev,
4471 "scsi %d:%d:%d:%d RESET FAILED, lockup detected\n",
4472 h->scsi_host->host_no, dev->bus, dev->target,
4473 dev->lun);
4474 return FAILED;
4475 }
4477 /* this reset request might be the result of a lockup; check */
4478 if (detect_controller_lockup(h)) {
4479 dev_warn(&h->pdev->dev,
4480 "scsi %d:%d:%d:%d RESET FAILED, new lockup detected\n",
4481 h->scsi_host->host_no, dev->bus, dev->target,
4482 dev->lun);
4483 return FAILED;
4484 }
4486 hpsa_show_dev_msg(KERN_WARNING, h, dev, "resetting");
4488 /* send a reset to the SCSI LUN which the command was sent to */
4489 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
4490 DEFAULT_REPLY_QUEUE);
4491 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
4492 return SUCCESS;
4494 dev_warn(&h->pdev->dev,
4495 "scsi %d:%d:%d:%d reset failed\n",
4496 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4497 return FAILED;
4498 }
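/* Byte-swap each 4-byte half of the 8-byte abort tag. Some controller
 * firmware interprets the tag with the opposite 32-bit byte order; see
 * hpsa_send_abort_both_ways() below, which tries both encodings.
 */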
4500 static void swizzle_abort_tag(u8 *tag)
4501 {
4502 u8 original_tag[8];
4504 memcpy(original_tag, tag, 8);
4505 tag[0] = original_tag[3];
4506 tag[1] = original_tag[2];
4507 tag[2] = original_tag[1];
4508 tag[3] = original_tag[0];
4509 tag[4] = original_tag[7];
4510 tag[5] = original_tag[6];
4511 tag[6] = original_tag[5];
4512 tag[7] = original_tag[4];
4513 }
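/* Extract a command's 64-bit tag as a (lower, upper) 32-bit pair,
 * reaching into the ioaccel1/ioaccel2 command pools when the command
 * was issued on an accelerated path.
 */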
4515 static void hpsa_get_tag(struct ctlr_info *h,
4516 struct CommandList *c, __le32 *taglower, __le32 *tagupper)
4517 {
4518 u64 tag;
4519 if (c->cmd_type == CMD_IOACCEL1) {
4520 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4521 &h->ioaccel_cmd_pool[c->cmdindex];
4522 tag = le64_to_cpu(cm1->tag);
4523 *tagupper = cpu_to_le32(tag >> 32);
4524 *taglower = cpu_to_le32(tag);
4525 return;
4526 }
4527 if (c->cmd_type == CMD_IOACCEL2) {
4528 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
4529 &h->ioaccel2_cmd_pool[c->cmdindex];
4530 /* upper tag not used in ioaccel2 mode */
4531 memset(tagupper, 0, sizeof(*tagupper));
4532 *taglower = cm2->Tag;
4533 return;
4534 }
4535 tag = le64_to_cpu(c->Header.tag);
4536 *tagupper = cpu_to_le32(tag >> 32);
4537 *taglower = cpu_to_le32(tag);
4538 }
4540 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4541 struct CommandList *abort, int swizzle, int reply_queue)
4542 {
4543 int rc = IO_OK;
4544 struct CommandList *c;
4545 struct ErrorInfo *ei;
4546 __le32 tagupper, taglower;
4548 c = cmd_alloc(h);
4549 if (c == NULL) { /* trouble... */
4550 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
4551 return -ENOMEM;
4552 }
4554 /* fill_cmd can't fail here, no buffer to map */
4555 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
4556 0, 0, scsi3addr, TYPE_MSG);
4557 if (swizzle)
4558 swizzle_abort_tag(&c->Request.CDB[4]);
4559 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
4560 hpsa_get_tag(h, abort, &taglower, &tagupper);
4561 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
4562 __func__, tagupper, taglower);
4563 /* no unmap needed here because no data xfer. */
4565 ei = c->err_info;
4566 switch (ei->CommandStatus) {
4567 case CMD_SUCCESS:
4568 break;
4569 case CMD_UNABORTABLE: /* Very common, don't make noise. */
4570 rc = -1;
4571 break;
4572 default:
4573 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
4574 __func__, tagupper, taglower);
4575 hpsa_scsi_interpret_error(h, c);
4576 rc = -1;
4577 break;
4578 }
4579 cmd_free(h, c);
4580 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
4581 __func__, tagupper, taglower);
4582 return rc;
4583 }
4585 /* ioaccel2 path firmware cannot handle abort task requests.
4586 * Change abort requests to physical target reset, and send to the
4587 * address of the physical disk used for the ioaccel 2 command.
4588 * Return 0 on success (IO_OK)
4589 */
4592 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4593 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
4594 {
4595 int rc = IO_OK;
4596 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
4597 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
4598 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
4599 unsigned char *psa = &phys_scsi3addr[0];
4601 /* Get a pointer to the hpsa logical device. */
4602 scmd = abort->scsi_cmd;
4603 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
4604 if (dev == NULL) {
4605 dev_warn(&h->pdev->dev,
4606 "Cannot abort: no device pointer for command.\n");
4607 return -1; /* not abortable */
4610 if (h->raid_offload_debug > 0)
4611 dev_info(&h->pdev->dev,
4612 "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4613 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
4614 "Reset as abort",
4615 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4616 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4618 if (!dev->offload_enabled) {
4619 dev_warn(&h->pdev->dev,
4620 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
4621 return -1; /* not abortable */
4624 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
4625 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
4626 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
4627 return -1; /* not abortable */
4630 /* send the reset */
4631 if (h->raid_offload_debug > 0)
4632 dev_info(&h->pdev->dev,
4633 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4634 psa[0], psa[1], psa[2], psa[3],
4635 psa[4], psa[5], psa[6], psa[7]);
4636 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
4637 if (rc != 0) {
4638 dev_warn(&h->pdev->dev,
4639 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4640 psa[0], psa[1], psa[2], psa[3],
4641 psa[4], psa[5], psa[6], psa[7]);
4642 return rc; /* failed to reset */
4645 /* wait for device to recover */
4646 if (wait_for_device_to_become_ready(h, psa) != 0) {
4647 dev_warn(&h->pdev->dev,
4648 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4649 psa[0], psa[1], psa[2], psa[3],
4650 psa[4], psa[5], psa[6], psa[7]);
4651 return -1; /* failed to recover */
4654 /* device recovered */
4655 dev_info(&h->pdev->dev,
4656 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4657 psa[0], psa[1], psa[2], psa[3],
4658 psa[4], psa[5], psa[6], psa[7]);
4660 return rc; /* success */
4663 /* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
4664 * tell which kind we're dealing with, so we send the abort both ways. There
4665 * shouldn't be any collisions between swizzled and unswizzled tags due to the
4666 * way we construct our tags but we check anyway in case the assumptions which
4667 * make this true someday become false.
4668 */
4669 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
4670 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
4672 /* ioaccel mode 2 commands should be aborted via the
4673 * accelerated path, since RAID path is unaware of these commands,
4674 * but underlying firmware can't handle abort TMF.
4675 * Change abort to physical device reset.
4677 if (abort->cmd_type == CMD_IOACCEL2)
4678 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
4679 abort, reply_queue);
4681 return hpsa_send_abort(h, scsi3addr, abort, 0, reply_queue) &&
4682 hpsa_send_abort(h, scsi3addr, abort, 1, reply_queue);
4685 /* Find out which reply queue a command was meant to return on */
4686 static int hpsa_extract_reply_queue(struct ctlr_info *h,
4687 struct CommandList *c)
4689 if (c->cmd_type == CMD_IOACCEL2)
4690 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
4691 return c->Header.ReplyQueue;
4694 /* Send an abort for the specified command.
4695 * If the device and controller support it,
4696 * send a task abort request.
4697 */
4698 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4699 {
4700 int i;
4701 int rc;
4702 struct ctlr_info *h;
4703 struct hpsa_scsi_dev_t *dev;
4704 struct CommandList *abort; /* pointer to command to be aborted */
4705 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
4706 char msg[256]; /* For debug messaging. */
4707 int ml = 0;
4708 __le32 tagupper, taglower;
4709 int refcount, reply_queue;
4714 /* Find the controller of the command to be aborted */
4715 h = sdev_to_hba(sc->device);
4716 if (WARN(h == NULL,
4717 "ABORT REQUEST FAILED, Controller lookup failed.\n"))
4718 return FAILED;
4720 /* Find the device of the command to be aborted */
4721 dev = sc->device->hostdata;
4723 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
4728 /* If controller locked up, we can guarantee command won't complete */
4729 if (lockup_detected(h)) {
4730 hpsa_show_dev_msg(KERN_WARNING, h, dev,
4731 "ABORT FAILED, lockup detected");
4735 /* This is a good time to check if controller lockup has occurred */
4736 if (detect_controller_lockup(h)) {
4737 hpsa_show_dev_msg(KERN_WARNING, h, dev,
4738 "ABORT FAILED, new lockup detected");
4742 /* Check that controller supports some kind of task abort */
4743 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
4744 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
4745 return FAILED;
4747 memset(msg, 0, sizeof(msg));
4748 ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s",
4749 h->scsi_host->host_no, sc->device->channel,
4750 sc->device->id, sc->device->lun,
4751 "Aborting command");
4753 /* Get SCSI command to be aborted */
4754 abort = (struct CommandList *) sc->host_scribble;
4755 if (abort == NULL) {
4756 /* This can happen if the command already completed. */
4757 return SUCCESS;
4758 }
4759 refcount = atomic_inc_return(&abort->refcount);
4760 if (refcount == 1) { /* Command is done already. */
4761 cmd_free(h, abort);
4762 return SUCCESS;
4763 }
4764 hpsa_get_tag(h, abort, &taglower, &tagupper);
4765 reply_queue = hpsa_extract_reply_queue(h, abort);
4766 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
4767 as = abort->scsi_cmd;
4768 if (as != NULL)
4769 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
4770 as->cmnd[0], as->serial_number);
4771 dev_dbg(&h->pdev->dev, "%s\n", msg);
4772 hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
4774 * Command is in flight, or possibly already completed
4775 * by the firmware (but not to the scsi mid layer) but we can't
4776 * distinguish which. Send the abort down.
4778 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
4779 if (rc != 0) {
4780 hpsa_show_dev_msg(KERN_WARNING, h, dev,
4781 "FAILED to abort command");
4782 cmd_free(h, abort);
4783 return FAILED;
4784 }
4785 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
4787 /* If the abort(s) above completed and actually aborted the
4788 * command, then the command to be aborted should already be
4789 * completed. If not, wait around a bit more to see if they
4790 * manage to complete normally.
4791 */
4792 #define ABORT_COMPLETE_WAIT_SECS 30
4793 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
4794 refcount = atomic_read(&abort->refcount);
4795 if (refcount < 2) {
4796 cmd_free(h, abort);
4797 return SUCCESS;
4798 } else {
4799 msleep(100);
4800 }
4801 }
4802 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
4803 msg, ABORT_COMPLETE_WAIT_SECS);
4804 cmd_free(h, abort);
4805 return FAILED;
4806 }
4808 /*
4809 * For operations that cannot sleep, a command block is allocated at init,
4810 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
4811 * which ones are free or in use. Lock must be held when calling this.
4812 * cmd_free() is the complement.
4813 */
4815 static struct CommandList *cmd_alloc(struct ctlr_info *h)
4817 struct CommandList *c;
4819 union u64bit temp64;
4820 dma_addr_t cmd_dma_handle, err_dma_handle;
4822 unsigned long offset;
4825 * There is some *extremely* small but non-zero chance that
4826 * multiple threads could get in here, and one thread could
4827 * be scanning through the list of bits looking for a free
4828 * one, but the free ones are always behind him, and other
4829 * threads sneak in behind him and eat them before he can
4830 * get to them, so that while there is always a free one, a
4831 * very unlucky thread might be starved anyway, never able to
4832 * beat the other threads. In reality, this happens so
4833 * infrequently as to be indistinguishable from never.
4836 offset = h->last_allocation; /* benignly racy */
4838 i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset);
4839 if (unlikely(i == h->nr_cmds)) {
4843 c = h->cmd_pool + i;
4844 refcount = atomic_inc_return(&c->refcount);
4845 if (unlikely(refcount > 1)) {
4846 cmd_free(h, c); /* already in use */
4847 offset = (i + 1) % h->nr_cmds;
4850 set_bit(i & (BITS_PER_LONG - 1),
4851 h->cmd_pool_bits + (i / BITS_PER_LONG));
4852 break; /* it's ours now. */
4854 h->last_allocation = i; /* benignly racy */
4856 /* Zero out all of commandlist except the last field, refcount */
4857 memset(c, 0, offsetof(struct CommandList, refcount));
4858 c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT));
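/* Note: the pool index lives in the tag above DIRECT_LOOKUP_SHIFT;
 * process_indexed_cmd() shifts it back out on completion, and the low
 * bits are left free for the controller's per-command error flags
 * (see hpsa_tag_discard_error_bits()).
 */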
4859 cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c);
4860 c->err_info = h->errinfo_pool + i;
4861 memset(c->err_info, 0, sizeof(*c->err_info));
4862 err_dma_handle = h->errinfo_pool_dhandle
4863 + i * sizeof(*c->err_info);
4867 c->busaddr = (u32) cmd_dma_handle;
4868 temp64.val = (u64) err_dma_handle;
4869 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
4870 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
4872 c->h = h;
4873 return c;
4874 }
4876 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
4878 if (atomic_dec_and_test(&c->refcount)) {
4881 i = c - h->cmd_pool;
4882 clear_bit(i & (BITS_PER_LONG - 1),
4883 h->cmd_pool_bits + (i / BITS_PER_LONG));
4887 #ifdef CONFIG_COMPAT
4889 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
4892 IOCTL32_Command_struct __user *arg32 =
4893 (IOCTL32_Command_struct __user *) arg;
4894 IOCTL_Command_struct arg64;
4895 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
4899 memset(&arg64, 0, sizeof(arg64));
4901 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4902 sizeof(arg64.LUN_info));
4903 err |= copy_from_user(&arg64.Request, &arg32->Request,
4904 sizeof(arg64.Request));
4905 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4906 sizeof(arg64.error_info));
4907 err |= get_user(arg64.buf_size, &arg32->buf_size);
4908 err |= get_user(cp, &arg32->buf);
4909 arg64.buf = compat_ptr(cp);
4910 err |= copy_to_user(p, &arg64, sizeof(arg64));
4915 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
4918 err |= copy_in_user(&arg32->error_info, &p->error_info,
4919 sizeof(arg32->error_info));
4925 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
4926 int cmd, void __user *arg)
4928 BIG_IOCTL32_Command_struct __user *arg32 =
4929 (BIG_IOCTL32_Command_struct __user *) arg;
4930 BIG_IOCTL_Command_struct arg64;
4931 BIG_IOCTL_Command_struct __user *p =
4932 compat_alloc_user_space(sizeof(arg64));
4936 memset(&arg64, 0, sizeof(arg64));
4938 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4939 sizeof(arg64.LUN_info));
4940 err |= copy_from_user(&arg64.Request, &arg32->Request,
4941 sizeof(arg64.Request));
4942 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4943 sizeof(arg64.error_info));
4944 err |= get_user(arg64.buf_size, &arg32->buf_size);
4945 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
4946 err |= get_user(cp, &arg32->buf);
4947 arg64.buf = compat_ptr(cp);
4948 err |= copy_to_user(p, &arg64, sizeof(arg64));
4953 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
4956 err |= copy_in_user(&arg32->error_info, &p->error_info,
4957 sizeof(arg32->error_info));
4963 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
4966 case CCISS_GETPCIINFO:
4967 case CCISS_GETINTINFO:
4968 case CCISS_SETINTINFO:
4969 case CCISS_GETNODENAME:
4970 case CCISS_SETNODENAME:
4971 case CCISS_GETHEARTBEAT:
4972 case CCISS_GETBUSTYPES:
4973 case CCISS_GETFIRMVER:
4974 case CCISS_GETDRIVVER:
4975 case CCISS_REVALIDVOLS:
4976 case CCISS_DEREGDISK:
4977 case CCISS_REGNEWDISK:
4979 case CCISS_RESCANDISK:
4980 case CCISS_GETLUNINFO:
4981 return hpsa_ioctl(dev, cmd, arg);
4983 case CCISS_PASSTHRU32:
4984 return hpsa_ioctl32_passthru(dev, cmd, arg);
4985 case CCISS_BIG_PASSTHRU32:
4986 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
4989 return -ENOIOCTLCMD;
4994 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
4996 struct hpsa_pci_info pciinfo;
5000 pciinfo.domain = pci_domain_nr(h->pdev->bus);
5001 pciinfo.bus = h->pdev->bus->number;
5002 pciinfo.dev_fn = h->pdev->devfn;
5003 pciinfo.board_id = h->board_id;
5004 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
5005 return -EFAULT;
5006 return 0;
5007 }
5009 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
5011 DriverVer_type DriverVer;
5012 unsigned char vmaj, vmin, vsubmin;
5015 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
5016 &vmaj, &vmin, &vsubmin);
5018 dev_info(&h->pdev->dev, "driver version string '%s' "
5019 "unrecognized.", HPSA_DRIVER_VERSION);
5024 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
5027 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
5028 return -EFAULT;
5029 return 0;
5030 }
5032 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5034 IOCTL_Command_struct iocommand;
5035 struct CommandList *c;
5042 if (!capable(CAP_SYS_RAWIO))
5044 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
5046 if ((iocommand.buf_size < 1) &&
5047 (iocommand.Request.Type.Direction != XFER_NONE)) {
5050 if (iocommand.buf_size > 0) {
5051 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
5054 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5055 /* Copy the data into the buffer we created */
5056 if (copy_from_user(buff, iocommand.buf,
5057 iocommand.buf_size)) {
5062 memset(buff, 0, iocommand.buf_size);
5070 /* Fill in the command type */
5071 c->cmd_type = CMD_IOCTL_PEND;
5072 /* Fill in Command Header */
5073 c->Header.ReplyQueue = 0; /* unused in simple mode */
5074 if (iocommand.buf_size > 0) { /* buffer to fill */
5075 c->Header.SGList = 1;
5076 c->Header.SGTotal = cpu_to_le16(1);
5077 } else { /* no buffers to fill */
5078 c->Header.SGList = 0;
5079 c->Header.SGTotal = cpu_to_le16(0);
5081 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
5083 /* Fill in Request block */
5084 memcpy(&c->Request, &iocommand.Request,
5085 sizeof(c->Request));
5087 /* Fill in the scatter gather information */
5088 if (iocommand.buf_size > 0) {
5089 temp64 = pci_map_single(h->pdev, buff,
5090 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5091 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
5092 c->SG[0].Addr = cpu_to_le64(0);
5093 c->SG[0].Len = cpu_to_le32(0);
5097 c->SG[0].Addr = cpu_to_le64(temp64);
5098 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
5099 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
5101 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
5102 if (iocommand.buf_size > 0)
5103 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
5104 check_ioctl_unit_attention(h, c);
5110 /* Copy the error information out */
5111 memcpy(&iocommand.error_info, c->err_info,
5112 sizeof(iocommand.error_info));
5113 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
5117 if ((iocommand.Request.Type.Direction & XFER_READ) &&
5118 iocommand.buf_size > 0) {
5119 /* Copy the data out of the buffer we created */
5120 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
5132 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5134 BIG_IOCTL_Command_struct *ioc;
5135 struct CommandList *c;
5136 unsigned char **buff = NULL;
5137 int *buff_size = NULL;
5143 BYTE __user *data_ptr;
5147 if (!capable(CAP_SYS_RAWIO))
5149 ioc = (BIG_IOCTL_Command_struct *)
5150 kmalloc(sizeof(*ioc), GFP_KERNEL);
5155 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
5159 if ((ioc->buf_size < 1) &&
5160 (ioc->Request.Type.Direction != XFER_NONE)) {
5164 /* Check kmalloc limits using all SGs */
5165 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
5169 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
5173 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
5178 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
5183 left = ioc->buf_size;
5184 data_ptr = ioc->buf;
5186 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
5187 buff_size[sg_used] = sz;
5188 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
5189 if (buff[sg_used] == NULL) {
5193 if (ioc->Request.Type.Direction & XFER_WRITE) {
5194 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
5199 memset(buff[sg_used], 0, sz);
5209 c->cmd_type = CMD_IOCTL_PEND;
5210 c->Header.ReplyQueue = 0;
5211 c->Header.SGList = (u8) sg_used;
5212 c->Header.SGTotal = cpu_to_le16(sg_used);
5213 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
5214 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5215 if (ioc->buf_size > 0) {
5217 for (i = 0; i < sg_used; i++) {
5218 temp64 = pci_map_single(h->pdev, buff[i],
5219 buff_size[i], PCI_DMA_BIDIRECTIONAL);
5220 if (dma_mapping_error(&h->pdev->dev,
5221 (dma_addr_t) temp64)) {
5222 c->SG[i].Addr = cpu_to_le64(0);
5223 c->SG[i].Len = cpu_to_le32(0);
5224 hpsa_pci_unmap(h->pdev, c, i,
5225 PCI_DMA_BIDIRECTIONAL);
5229 c->SG[i].Addr = cpu_to_le64(temp64);
5230 c->SG[i].Len = cpu_to_le32(buff_size[i]);
5231 c->SG[i].Ext = cpu_to_le32(0);
5233 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
5235 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
5237 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
5238 check_ioctl_unit_attention(h, c);
5244 /* Copy the error information out */
5245 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5246 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
5250 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
5253 /* Copy the data out of the buffer we created */
5254 BYTE __user *ptr = ioc->buf;
5255 for (i = 0; i < sg_used; i++) {
5256 if (copy_to_user(ptr, buff[i], buff_size[i])) {
5260 ptr += buff_size[i];
5270 for (i = 0; i < sg_used; i++)
5279 static void check_ioctl_unit_attention(struct ctlr_info *h,
5280 struct CommandList *c)
5282 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5283 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
5284 (void) check_for_unit_attention(h, c);
5285 }
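/* Main ioctl dispatcher. The two passthru ioctls are throttled via the
 * passthru_cmds_avail counter below so that user space cannot consume
 * every command slot; the remaining CCISS ioctls are answered inline
 * or simply trigger a rescan.
 */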
5290 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5292 struct ctlr_info *h;
5293 void __user *argp = (void __user *)arg;
5296 h = sdev_to_hba(dev);
5299 case CCISS_DEREGDISK:
5300 case CCISS_REGNEWDISK:
5302 hpsa_scan_start(h->scsi_host);
5304 case CCISS_GETPCIINFO:
5305 return hpsa_getpciinfo_ioctl(h, argp);
5306 case CCISS_GETDRIVVER:
5307 return hpsa_getdrivver_ioctl(h, argp);
5308 case CCISS_PASSTHRU:
5309 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
5311 rc = hpsa_passthru_ioctl(h, argp);
5312 atomic_inc(&h->passthru_cmds_avail);
5314 case CCISS_BIG_PASSTHRU:
5315 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
5317 rc = hpsa_big_passthru_ioctl(h, argp);
5318 atomic_inc(&h->passthru_cmds_avail);
5325 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5328 struct CommandList *c;
5333 /* fill_cmd can't fail here, no data buffer to map */
5334 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
5335 RAID_CTLR_LUNID, TYPE_MSG);
5336 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
5338 enqueue_cmd_and_start_io(h, c);
5339 /* Don't wait for completion, the reset won't complete. Don't free
5340 * the command either. This is the last command we will send before
5341 * re-initializing everything, so it doesn't matter and won't leak.
5342 */
5343 return 0;
5344 }
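/* fill_cmd() builds the request block and CDB for internally generated
 * commands: TYPE_CMD entries are data-bearing CISS/BMIC commands whose
 * buffer is DMA-mapped at the end, while TYPE_MSG entries are
 * task-management messages (device reset, task abort) with no data.
 */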
5346 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5347 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
5348 int cmd_type)
5349 {
5350 int pci_dir = XFER_NONE;
5351 struct CommandList *a; /* for commands to be aborted */
5353 c->cmd_type = CMD_IOCTL_PEND;
5354 c->Header.ReplyQueue = 0;
5355 if (buff != NULL && size > 0) {
5356 c->Header.SGList = 1;
5357 c->Header.SGTotal = cpu_to_le16(1);
5359 c->Header.SGList = 0;
5360 c->Header.SGTotal = cpu_to_le16(0);
5362 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
5364 if (cmd_type == TYPE_CMD) {
5365 switch (cmd) {
5366 case HPSA_INQUIRY:
5367 /* are we trying to read a vital product page */
5368 if (page_code & VPD_PAGE) {
5369 c->Request.CDB[1] = 0x01;
5370 c->Request.CDB[2] = (page_code & 0xff);
5372 c->Request.CDBLen = 6;
5373 c->Request.type_attr_dir =
5374 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5375 c->Request.Timeout = 0;
5376 c->Request.CDB[0] = HPSA_INQUIRY;
5377 c->Request.CDB[4] = size & 0xFF;
5379 case HPSA_REPORT_LOG:
5380 case HPSA_REPORT_PHYS:
5381 /* Talking to the controller, so it's a physical command:
5382 mode = 00, target = 0. Nothing to write.
5383 */
5384 c->Request.CDBLen = 12;
5385 c->Request.type_attr_dir =
5386 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5387 c->Request.Timeout = 0;
5388 c->Request.CDB[0] = cmd;
5389 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5390 c->Request.CDB[7] = (size >> 16) & 0xFF;
5391 c->Request.CDB[8] = (size >> 8) & 0xFF;
5392 c->Request.CDB[9] = size & 0xFF;
5394 case HPSA_CACHE_FLUSH:
5395 c->Request.CDBLen = 12;
5396 c->Request.type_attr_dir =
5397 TYPE_ATTR_DIR(cmd_type,
5398 ATTR_SIMPLE, XFER_WRITE);
5399 c->Request.Timeout = 0;
5400 c->Request.CDB[0] = BMIC_WRITE;
5401 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
5402 c->Request.CDB[7] = (size >> 8) & 0xFF;
5403 c->Request.CDB[8] = size & 0xFF;
5405 case TEST_UNIT_READY:
5406 c->Request.CDBLen = 6;
5407 c->Request.type_attr_dir =
5408 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5409 c->Request.Timeout = 0;
5411 case HPSA_GET_RAID_MAP:
5412 c->Request.CDBLen = 12;
5413 c->Request.type_attr_dir =
5414 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5415 c->Request.Timeout = 0;
5416 c->Request.CDB[0] = HPSA_CISS_READ;
5417 c->Request.CDB[1] = cmd;
5418 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5419 c->Request.CDB[7] = (size >> 16) & 0xFF;
5420 c->Request.CDB[8] = (size >> 8) & 0xFF;
5421 c->Request.CDB[9] = size & 0xFF;
5423 case BMIC_SENSE_CONTROLLER_PARAMETERS:
5424 c->Request.CDBLen = 10;
5425 c->Request.type_attr_dir =
5426 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5427 c->Request.Timeout = 0;
5428 c->Request.CDB[0] = BMIC_READ;
5429 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
5430 c->Request.CDB[7] = (size >> 16) & 0xFF;
5431 c->Request.CDB[8] = (size >> 8) & 0xFF;
5433 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
5434 c->Request.CDBLen = 10;
5435 c->Request.type_attr_dir =
5436 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5437 c->Request.Timeout = 0;
5438 c->Request.CDB[0] = BMIC_READ;
5439 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
5440 c->Request.CDB[7] = (size >> 16) & 0xFF;
5441 c->Request.CDB[8] = (size >> 8) & 0xFF;
5442 break;
5443 default:
5444 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
5445 BUG();
5446 }
5448 } else if (cmd_type == TYPE_MSG) {
5449 switch (cmd) {
5451 case HPSA_DEVICE_RESET_MSG:
5452 c->Request.CDBLen = 16;
5453 c->Request.type_attr_dir =
5454 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5455 c->Request.Timeout = 0; /* Don't time out */
5456 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
5457 c->Request.CDB[0] = cmd;
5458 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
5459 /* If bytes 4-7 are zero, it means reset the */
5460 /* LunID device */
5461 c->Request.CDB[4] = 0x00;
5462 c->Request.CDB[5] = 0x00;
5463 c->Request.CDB[6] = 0x00;
5464 c->Request.CDB[7] = 0x00;
5466 case HPSA_ABORT_MSG:
5467 a = buff; /* point to command to be aborted */
5468 dev_dbg(&h->pdev->dev,
5469 "Abort Tag:0x%016llx request Tag:0x%016llx",
5470 a->Header.tag, c->Header.tag);
5471 c->Request.CDBLen = 16;
5472 c->Request.type_attr_dir =
5473 TYPE_ATTR_DIR(cmd_type,
5474 ATTR_SIMPLE, XFER_WRITE);
5475 c->Request.Timeout = 0; /* Don't time out */
5476 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
5477 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
5478 c->Request.CDB[2] = 0x00; /* reserved */
5479 c->Request.CDB[3] = 0x00; /* reserved */
5480 /* Tag to abort goes in CDB[4]-CDB[11] */
5481 memcpy(&c->Request.CDB[4], &a->Header.tag,
5482 sizeof(a->Header.tag));
5483 c->Request.CDB[12] = 0x00; /* reserved */
5484 c->Request.CDB[13] = 0x00; /* reserved */
5485 c->Request.CDB[14] = 0x00; /* reserved */
5486 c->Request.CDB[15] = 0x00; /* reserved */
5489 dev_warn(&h->pdev->dev, "unknown message type %d\n",
5490 cmd);
5491 BUG();
5492 }
5493 } else {
5494 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
5495 BUG();
5496 }
5498 switch (GET_DIR(c->Request.type_attr_dir)) {
5499 case XFER_READ:
5500 pci_dir = PCI_DMA_FROMDEVICE;
5501 break;
5502 case XFER_WRITE:
5503 pci_dir = PCI_DMA_TODEVICE;
5504 break;
5505 case XFER_NONE:
5506 pci_dir = PCI_DMA_NONE;
5507 break;
5508 default:
5509 pci_dir = PCI_DMA_BIDIRECTIONAL;
5510 }
5511 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
5512 return -1;
5513 return 0;
5514 }
5517 * Map (physical) PCI mem into (virtual) kernel space
5519 static void __iomem *remap_pci_mem(ulong base, ulong size)
5521 ulong page_base = ((ulong) base) & PAGE_MASK;
5522 ulong page_offs = ((ulong) base) - page_base;
5523 void __iomem *page_remapped = ioremap_nocache(page_base,
5526 return page_remapped ? (page_remapped + page_offs) : NULL;
5529 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
5531 return h->access.command_completed(h, q);
5534 static inline bool interrupt_pending(struct ctlr_info *h)
5536 return h->access.intr_pending(h);
5539 static inline long interrupt_not_for_us(struct ctlr_info *h)
5541 return (h->access.intr_pending(h) == 0) ||
5542 (h->interrupts_enabled == 0);
5545 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
5546 u32 raw_tag)
5547 {
5548 if (unlikely(tag_index >= h->nr_cmds)) {
5549 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
5550 return 1;
5551 }
5552 return 0;
5553 }
5555 static inline void finish_cmd(struct CommandList *c)
5557 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
5558 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5559 || c->cmd_type == CMD_IOACCEL2))
5560 complete_scsi_command(c);
5561 else if (c->cmd_type == CMD_IOCTL_PEND)
5562 complete(c->waiting);
5563 }
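/* The controller reports status in the low-order bits of a completed
 * tag: two bits in simple mode, DIRECT_LOOKUP_SHIFT bits in performant
 * mode. Mask them off so the rest of the tag can index the command.
 */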
5566 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
5568 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
5569 #define HPSA_SIMPLE_ERROR_BITS 0x03
5570 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
5571 return tag & ~HPSA_SIMPLE_ERROR_BITS;
5572 return tag & ~HPSA_PERF_ERROR_BITS;
5575 /* process completion of an indexed ("direct lookup") command */
5576 static inline void process_indexed_cmd(struct ctlr_info *h,
5577 u32 raw_tag)
5578 {
5579 u32 tag_index;
5580 struct CommandList *c;
5582 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
5583 if (!bad_tag(h, tag_index, raw_tag)) {
5584 c = h->cmd_pool + tag_index;
5585 finish_cmd(c);
5586 }
5587 }
5589 /* Some controllers, like p400, will give us one interrupt
5590 * after a soft reset, even if we turned interrupts off.
5591 * Only need to check for this in the hpsa_xxx_discard_completions
5594 static int ignore_bogus_interrupt(struct ctlr_info *h)
5596 if (likely(!reset_devices))
5597 return 0;
5599 if (likely(h->interrupts_enabled))
5600 return 0;
5602 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
5603 "(known firmware bug.) Ignoring.\n");
5604 return 1;
5605 }
5609 * Convert &h->q[x] (passed to interrupt handlers) back to h.
5610 * Relies on (h-q[x] == x) being true for x such that
5611 * 0 <= x < MAX_REPLY_QUEUES.
5613 static struct ctlr_info *queue_to_hba(u8 *queue)
5615 return container_of((queue - *queue), struct ctlr_info, q[0]);
5618 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
5620 struct ctlr_info *h = queue_to_hba(queue);
5621 u8 q = *(u8 *) queue;
5624 if (ignore_bogus_interrupt(h))
5625 return IRQ_NONE;
5627 if (interrupt_not_for_us(h))
5628 return IRQ_NONE;
5629 h->last_intr_timestamp = get_jiffies_64();
5630 while (interrupt_pending(h)) {
5631 raw_tag = get_next_completion(h, q);
5632 while (raw_tag != FIFO_EMPTY)
5633 raw_tag = next_command(h, q);
5634 }
5635 return IRQ_HANDLED;
5636 }
5638 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
5640 struct ctlr_info *h = queue_to_hba(queue);
5642 u8 q = *(u8 *) queue;
5644 if (ignore_bogus_interrupt(h))
5645 return IRQ_NONE;
5647 h->last_intr_timestamp = get_jiffies_64();
5648 raw_tag = get_next_completion(h, q);
5649 while (raw_tag != FIFO_EMPTY)
5650 raw_tag = next_command(h, q);
5651 return IRQ_HANDLED;
5652 }
5654 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
5656 struct ctlr_info *h = queue_to_hba((u8 *) queue);
5658 u8 q = *(u8 *) queue;
5660 if (interrupt_not_for_us(h))
5661 return IRQ_NONE;
5662 h->last_intr_timestamp = get_jiffies_64();
5663 while (interrupt_pending(h)) {
5664 raw_tag = get_next_completion(h, q);
5665 while (raw_tag != FIFO_EMPTY) {
5666 process_indexed_cmd(h, raw_tag);
5667 raw_tag = next_command(h, q);
5668 }
5669 }
5670 return IRQ_HANDLED;
5671 }
5673 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
5675 struct ctlr_info *h = queue_to_hba(queue);
5677 u8 q = *(u8 *) queue;
5679 h->last_intr_timestamp = get_jiffies_64();
5680 raw_tag = get_next_completion(h, q);
5681 while (raw_tag != FIFO_EMPTY) {
5682 process_indexed_cmd(h, raw_tag);
5683 raw_tag = next_command(h, q);
5684 }
5685 return IRQ_HANDLED;
5686 }
5688 /* Send a message CDB to the firmware. Careful, this only works
5689 * in simple mode, not performant mode due to the tag lookup.
5690 * We only ever use this immediately after a controller reset.
5691 */
5692 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
5693 unsigned char type)
5694 {
5695 struct Command {
5696 struct CommandListHeader CommandHeader;
5697 struct RequestBlock Request;
5698 struct ErrDescriptor ErrorDescriptor;
5699 };
5700 struct Command *cmd;
5701 static const size_t cmd_sz = sizeof(*cmd) +
5702 sizeof(cmd->ErrorDescriptor);
5706 void __iomem *vaddr;
5709 vaddr = pci_ioremap_bar(pdev, 0);
5713 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
5714 * CCISS commands, so they must be allocated from the lower 4GiB of
5715 * memory.
5716 */
5717 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5723 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
5729 /* This must fit, because of the 32-bit consistent DMA mask. Also,
5730 * although there's no guarantee, we assume that the address is at
5731 * least 4-byte aligned (most likely, it's page-aligned).
5732 */
5733 paddr32 = cpu_to_le32(paddr64);
5735 cmd->CommandHeader.ReplyQueue = 0;
5736 cmd->CommandHeader.SGList = 0;
5737 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
5738 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
5739 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
5741 cmd->Request.CDBLen = 16;
5742 cmd->Request.type_attr_dir =
5743 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
5744 cmd->Request.Timeout = 0; /* Don't time out */
5745 cmd->Request.CDB[0] = opcode;
5746 cmd->Request.CDB[1] = type;
5747 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
5748 cmd->ErrorDescriptor.Addr =
5749 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
5750 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
5752 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
5754 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
5755 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
5756 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
5758 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
5763 /* we leak the DMA buffer here ... no choice since the controller could
5764 * still complete the command.
5765 */
5766 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
5767 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
5772 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
5774 if (tag & HPSA_ERROR_BIT) {
5775 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
5776 opcode, type);
5777 return -EIO;
5778 }
5780 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
5781 opcode, type);
5782 return 0;
5783 }
5785 #define hpsa_noop(p) hpsa_message(p, 3, 0)
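/* hpsa_noop() sends opcode 3 / type 0, presumably a harmless no-op
 * message useful for exercising the message path after a reset.
 */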
5787 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
5788 void __iomem *vaddr, u32 use_doorbell)
5792 /* For everything after the P600, the PCI power state method
5793 * of resetting the controller doesn't work, so we have this
5794 * other way using the doorbell register.
5796 dev_info(&pdev->dev, "using doorbell to reset controller\n");
5797 writel(use_doorbell, vaddr + SA5_DOORBELL);
5799 /* PMC hardware guys tell us we need a 10 second delay after
5800 * doorbell reset and before any attempt to talk to the board
5801 * at all to ensure that this actually works and doesn't fall
5802 * over in some weird corner cases.
5803 */
5804 msleep(10000);
5805 } else { /* Try to do it the PCI power state way */
5807 /* Quoting from the Open CISS Specification: "The Power
5808 * Management Control/Status Register (CSR) controls the power
5809 * state of the device. The normal operating state is D0,
5810 * CSR=00h. The software off state is D3, CSR=03h. To reset
5811 * the controller, place the interface device in D3 then to D0,
5812 * this causes a secondary PCI reset which will reset the
5813 * controller." */
5817 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
5819 /* enter the D3hot power management state */
5820 rc = pci_set_power_state(pdev, PCI_D3hot);
5826 /* enter the D0 power management state */
5827 rc = pci_set_power_state(pdev, PCI_D0);
5832 * The P600 requires a small delay when changing states.
5833 * Otherwise we may think the board did not reset and we bail.
5834 * This is for kdump only and is particular to the P600.
5835 */
5841 static void init_driver_version(char *driver_version, int len)
5843 memset(driver_version, 0, len);
5844 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
5845 }
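/* The driver's version string is written into the config table before a
 * reset; controller_reset_failed() below then checks whether those
 * bytes still match, since a successful reset reinitializes the table.
 */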
5847 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
5849 char *driver_version;
5850 int i, size = sizeof(cfgtable->driver_version);
5852 driver_version = kmalloc(size, GFP_KERNEL);
5853 if (!driver_version)
5856 init_driver_version(driver_version, size);
5857 for (i = 0; i < size; i++)
5858 writeb(driver_version[i], &cfgtable->driver_version[i]);
5859 kfree(driver_version);
5863 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
5864 unsigned char *driver_ver)
5868 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
5869 driver_ver[i] = readb(&cfgtable->driver_version[i]);
5872 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
5875 char *driver_ver, *old_driver_ver;
5876 int rc, size = sizeof(cfgtable->driver_version);
5878 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
5879 if (!old_driver_ver)
5881 driver_ver = old_driver_ver + size;
5883 /* After a reset, the 32 bytes of "driver version" in the cfgtable
5884 * should have been changed, otherwise we know the reset failed.
5885 */
5886 init_driver_version(old_driver_ver, size);
5887 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
5888 rc = !memcmp(driver_ver, old_driver_ver, size);
5889 kfree(old_driver_ver);
5892 /* This does a hard reset of the controller using PCI power management
5893 * states or the doorbell register.
5894 */
5895 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
5899 u64 cfg_base_addr_index;
5900 void __iomem *vaddr;
5901 unsigned long paddr;
5902 u32 misc_fw_support;
5904 struct CfgTable __iomem *cfgtable;
5906 u16 command_register;
5908 /* For controllers as old as the P600, this is very nearly
5909 * the same thing as
5910 *
5911 * pci_save_state(pci_dev);
5912 * pci_set_power_state(pci_dev, PCI_D3hot);
5913 * pci_set_power_state(pci_dev, PCI_D0);
5914 * pci_restore_state(pci_dev);
5916 * For controllers newer than the P600, the pci power state
5917 * method of resetting doesn't work so we have another way
5918 * using the doorbell register.
5921 if (!ctlr_is_resettable(board_id)) {
5922 dev_warn(&pdev->dev, "Controller not resettable\n");
5926 /* if controller is soft- but not hard resettable... */
5927 if (!ctlr_is_hard_resettable(board_id))
5928 return -ENOTSUPP; /* try soft reset later. */
5930 /* Save the PCI command register */
5931 pci_read_config_word(pdev, 4, &command_register);
5932 pci_save_state(pdev);
5934 /* find the first memory BAR, so we can find the cfg table */
5935 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
5938 vaddr = remap_pci_mem(paddr, 0x250);
5942 /* find cfgtable in order to check if reset via doorbell is supported */
5943 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
5944 &cfg_base_addr_index, &cfg_offset);
5947 cfgtable = remap_pci_mem(pci_resource_start(pdev,
5948 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
5953 rc = write_driver_ver_to_cfgtable(cfgtable);
5955 goto unmap_cfgtable;
5957 /* If reset via doorbell register is supported, use that.
5958 * There are two such methods. Favor the newest method.
5959 */
5960 misc_fw_support = readl(&cfgtable->misc_fw_support);
5961 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
5963 use_doorbell = DOORBELL_CTLR_RESET2;
5965 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
5967 dev_warn(&pdev->dev,
5968 "Soft reset not supported. Firmware update is required.\n");
5969 rc = -ENOTSUPP; /* try soft reset */
5970 goto unmap_cfgtable;
5974 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
5976 goto unmap_cfgtable;
5978 pci_restore_state(pdev);
5979 pci_write_config_word(pdev, 4, command_register);
5981 /* Some devices (notably the HP Smart Array 5i Controller)
5982 need a little pause here */
5983 msleep(HPSA_POST_RESET_PAUSE_MSECS);
5985 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
5987 dev_warn(&pdev->dev,
5988 "Failed waiting for board to become ready after hard reset\n");
5989 goto unmap_cfgtable;
5992 rc = controller_reset_failed(vaddr);
5994 goto unmap_cfgtable;
5996 dev_warn(&pdev->dev, "Unable to successfully reset "
5997 "controller. Will try soft reset.\n");
6000 dev_info(&pdev->dev, "board ready after hard reset.\n");
6011 /*
6012 * We cannot read the structure directly, for portability we must use
6013 * the io functions.
6014 * This is for debug only.
6015 */
6016 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
6017 {
6018 #ifdef HPSA_DEBUG
6019 int i;
6020 char temp_name[17];
6022 dev_info(dev, "Controller Configuration information\n");
6023 dev_info(dev, "------------------------------------\n");
6024 for (i = 0; i < 4; i++)
6025 temp_name[i] = readb(&(tb->Signature[i]));
6026 temp_name[4] = '\0';
6027 dev_info(dev, " Signature = %s\n", temp_name);
6028 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
6029 dev_info(dev, " Transport methods supported = 0x%x\n",
6030 readl(&(tb->TransportSupport)));
6031 dev_info(dev, " Transport methods active = 0x%x\n",
6032 readl(&(tb->TransportActive)));
6033 dev_info(dev, " Requested transport Method = 0x%x\n",
6034 readl(&(tb->HostWrite.TransportRequest)));
6035 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
6036 readl(&(tb->HostWrite.CoalIntDelay)));
6037 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
6038 readl(&(tb->HostWrite.CoalIntCount)));
6039 dev_info(dev, " Max outstanding commands = %d\n",
6040 readl(&(tb->CmdsOutMax)));
6041 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6042 for (i = 0; i < 16; i++)
6043 temp_name[i] = readb(&(tb->ServerName[i]));
6044 temp_name[16] = '\0';
6045 dev_info(dev, " Server Name = %s\n", temp_name);
6046 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
6047 readl(&(tb->HeartBeat)));
6048 #endif /* HPSA_DEBUG */
6049 }
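/* Translate a config-space BAR offset (relative to PCI_BASE_ADDRESS_0)
 * into a pci resource index: each I/O or 32-bit memory BAR occupies 4
 * bytes of config space, each 64-bit memory BAR occupies 8.
 */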
6051 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6053 int i, offset, mem_type, bar_type;
6055 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6058 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6059 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6060 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6061 offset += 4;
6062 else {
6063 mem_type = pci_resource_flags(pdev, i) &
6064 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6066 case PCI_BASE_ADDRESS_MEM_TYPE_32:
6067 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6068 offset += 4; /* 32 bit */
6069 break;
6070 case PCI_BASE_ADDRESS_MEM_TYPE_64:
6071 offset += 8;
6072 break;
6073 default: /* reserved in PCI 2.2 */
6074 dev_warn(&pdev->dev,
6075 "base address is invalid\n");
6080 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6086 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
6087 * controllers that are capable. If not, we use legacy INTx mode.
6088 */
6090 static void hpsa_interrupt_mode(struct ctlr_info *h)
6092 #ifdef CONFIG_PCI_MSI
6094 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6096 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6097 hpsa_msix_entries[i].vector = 0;
6098 hpsa_msix_entries[i].entry = i;
6101 /* Some boards advertise MSI but don't really support it */
6102 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6103 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
6104 goto default_int_mode;
6105 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
6106 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
6107 h->msix_vector = MAX_REPLY_QUEUES;
6108 if (h->msix_vector > num_online_cpus())
6109 h->msix_vector = num_online_cpus();
6110 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
6113 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
6115 goto single_msi_mode;
6116 } else if (err < h->msix_vector) {
6117 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
6118 "available\n", err);
6120 h->msix_vector = err;
6121 for (i = 0; i < h->msix_vector; i++)
6122 h->intr[i] = hpsa_msix_entries[i].vector;
6126 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
6127 dev_info(&h->pdev->dev, "MSI capable controller\n");
6128 if (!pci_enable_msi(h->pdev))
6131 dev_warn(&h->pdev->dev, "MSI init failed\n");
6134 #endif /* CONFIG_PCI_MSI */
6135 /* if we get here we're going to use the default interrupt mode */
6136 h->intr[h->intr_mode] = h->pdev->irq;
6137 }
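/* Compose a 32-bit board id from the PCI subsystem IDs (device id in
 * the high 16 bits, vendor id in the low 16) and look it up in the
 * products[] table; unrecognized boards are rejected unless
 * hpsa_allow_any permits falling back to the generic last entry.
 */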
6139 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
6142 u32 subsystem_vendor_id, subsystem_device_id;
6144 subsystem_vendor_id = pdev->subsystem_vendor;
6145 subsystem_device_id = pdev->subsystem_device;
6146 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
6147 subsystem_vendor_id;
6149 for (i = 0; i < ARRAY_SIZE(products); i++)
6150 if (*board_id == products[i].board_id)
6153 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6154 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6155 !hpsa_allow_any) {
6156 dev_warn(&pdev->dev, "unrecognized board ID: "
6157 "0x%08x, ignoring.\n", *board_id);
6158 return -ENODEV;
6159 }
6160 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
6163 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6164 unsigned long *memory_bar)
6168 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
6169 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
6170 /* addressing mode bits already removed */
6171 *memory_bar = pci_resource_start(pdev, i);
6172 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
6173 *memory_bar);
6174 return 0;
6175 }
6176 dev_warn(&pdev->dev, "no memory BAR found\n");
6177 return -ENODEV;
6178 }
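/* Poll the controller's scratchpad register until the firmware reports
 * the requested state (ready or not ready), sleeping between reads and
 * giving up after a bounded number of iterations.
 */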
6180 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6186 iterations = HPSA_BOARD_READY_ITERATIONS;
6188 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
6190 for (i = 0; i < iterations; i++) {
6191 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6192 if (wait_for_ready) {
6193 if (scratchpad == HPSA_FIRMWARE_READY)
6196 if (scratchpad != HPSA_FIRMWARE_READY)
6199 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6201 dev_warn(&pdev->dev, "board not ready, timed out.\n");
6205 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6206 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6209 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6210 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6211 *cfg_base_addr &= (u32) 0x0000ffff;
6212 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6213 if (*cfg_base_addr_index == -1) {
6214 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6220 static int hpsa_find_cfgtables(struct ctlr_info *h)
6224 u64 cfg_base_addr_index;
6228 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6229 &cfg_base_addr_index, &cfg_offset);
6232 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
6233 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
6235 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
6238 rc = write_driver_ver_to_cfgtable(h->cfgtable);
6241 /* Find performant mode table. */
6242 trans_offset = readl(&h->cfgtable->TransMethodOffset);
6243 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6244 cfg_base_addr_index)+cfg_offset+trans_offset,
6245 sizeof(*h->transtable));
6251 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
6253 #define MIN_MAX_COMMANDS 16
6254 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
6256 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
6258 /* Limit commands in memory limited kdump scenario. */
6259 if (reset_devices && h->max_commands > 32)
6260 h->max_commands = 32;
6262 if (h->max_commands < MIN_MAX_COMMANDS) {
6263 dev_warn(&h->pdev->dev,
6264 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
6267 h->max_commands = MIN_MAX_COMMANDS;
6271 /* If the controller reports that the total max sg entries is greater than 512,
6272 * then we know that chained SG blocks work. (Original smart arrays did not
6273 * support chained SG blocks and would return zero for max sg entries.)
6275 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
6277 return h->maxsgentries > 512;
6280 /* Interrogate the hardware for some limits:
6281 * max commands, max SG elements without chaining, and with chaining,
6282 * SG chain block size, etc.
6284 static void hpsa_find_board_params(struct ctlr_info *h)
6286 hpsa_get_max_perf_mode_cmds(h);
6287 h->nr_cmds = h->max_commands;
6288 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
6289 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
6290 if (hpsa_supports_chained_sg_blocks(h)) {
		/* Limit in-command s/g elements to 32 to save DMA'able memory. */
6292 h->max_cmd_sg_entries = 32;
6293 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
6294 h->maxsgentries--; /* save one for chain pointer */
6297 * Original smart arrays supported at most 31 s/g entries
6298 * embedded inline in the command (trying to use more
6299 * would lock up the controller)
6301 h->max_cmd_sg_entries = 31;
6302 h->maxsgentries = 31; /* default to traditional values */
6306 /* Find out what task management functions are supported and cache */
6307 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
6308 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
6309 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
6310 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6311 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
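/*
 * Worked example for the limits above (controller values assumed): if
 * MaxScatterGatherElements reads back as 1024, chaining is supported,
 * so max_cmd_sg_entries becomes 32, chainsize becomes 1024 - 32 = 992,
 * and maxsgentries drops to 1023 after reserving one slot for the chain
 * pointer.  A controller reporting 0 (or anything <= 512) falls back to
 * the traditional 31 embedded entries with no chaining.
 */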
6314 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
6316 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
6317 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
6323 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
6327 driver_support = readl(&(h->cfgtable->driver_support));
6328 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
6330 driver_support |= ENABLE_SCSI_PREFETCH;
6332 driver_support |= ENABLE_UNIT_ATTN;
6333 writel(driver_support, &(h->cfgtable->driver_support));
6336 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
6337 * in a prefetch beyond physical memory.
6339 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
6343 if (h->board_id != 0x3225103C)
6345 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
6346 dma_prefetch |= 0x8000;
6347 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
6350 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
6354 unsigned long flags;
6355 /* wait until the clear_event_notify bit 6 is cleared by controller. */
6356 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
6357 spin_lock_irqsave(&h->lock, flags);
6358 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6359 spin_unlock_irqrestore(&h->lock, flags);
6360 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
6362 /* delay and try again */
6363 msleep(CLEAR_EVENT_WAIT_INTERVAL);
6370 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
6374 unsigned long flags;
	/* Under certain very rare conditions, this can take a while
	 * (e.g. hot-replacing a failed 144GB drive in a RAID 5 set right
	 * as we enter this code).
	 */
6380 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
6381 if (h->remove_in_progress)
6383 spin_lock_irqsave(&h->lock, flags);
6384 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6385 spin_unlock_irqrestore(&h->lock, flags);
6386 if (!(doorbell_value & CFGTBL_ChangeReq))
6388 /* delay and try again */
6389 msleep(MODE_CHANGE_WAIT_INTERVAL);
6396 /* return -ENODEV or other reason on error, 0 on success */
6397 static int hpsa_enter_simple_mode(struct ctlr_info *h)
6401 trans_support = readl(&(h->cfgtable->TransportSupport));
6402 if (!(trans_support & SIMPLE_MODE))
6405 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
6407 /* Update the field, and then ring the doorbell */
6408 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
6409 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
6410 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6411 if (hpsa_wait_for_mode_change_ack(h))
6413 print_cfg_table(&h->pdev->dev, h->cfgtable);
6414 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
6416 h->transMethod = CFGTBL_Trans_Simple;
6419 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
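/*
 * To recap the doorbell handshake used above (and again when entering
 * performant mode):
 * 1) write the requested method to HostWrite.TransportRequest,
 * 2) ring the doorbell with CFGTBL_ChangeReq,
 * 3) poll until the controller clears the ChangeReq bit
 *    (hpsa_wait_for_mode_change_ack), then
 * 4) read TransportActive back to confirm the switch took effect.
 */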
6423 static int hpsa_pci_init(struct ctlr_info *h)
6425 int prod_index, err;
6427 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
6430 h->product_name = products[prod_index].product_name;
6431 h->access = *(products[prod_index].access);
6433 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
6434 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
6436 err = pci_enable_device(h->pdev);
6438 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
6442 err = pci_request_regions(h->pdev, HPSA);
6444 dev_err(&h->pdev->dev,
6445 "cannot obtain PCI resources, aborting\n");
6449 pci_set_master(h->pdev);
6451 hpsa_interrupt_mode(h);
6452 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
6454 goto err_out_free_res;
6455 h->vaddr = remap_pci_mem(h->paddr, 0x250);
6458 goto err_out_free_res;
6460 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
6462 goto err_out_free_res;
6463 err = hpsa_find_cfgtables(h);
6465 goto err_out_free_res;
6466 hpsa_find_board_params(h);
6468 if (!hpsa_CISS_signature_present(h)) {
6470 goto err_out_free_res;
6472 hpsa_set_driver_support_bits(h);
6473 hpsa_p600_dma_prefetch_quirk(h);
6474 err = hpsa_enter_simple_mode(h);
6476 goto err_out_free_res;
6481 iounmap(h->transtable);
6483 iounmap(h->cfgtable);
6486 pci_disable_device(h->pdev);
6487 pci_release_regions(h->pdev);
6491 static void hpsa_hba_inquiry(struct ctlr_info *h)
6495 #define HBA_INQUIRY_BYTE_COUNT 64
6496 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
6497 if (!h->hba_inquiry_data)
6499 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
6500 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
6502 kfree(h->hba_inquiry_data);
6503 h->hba_inquiry_data = NULL;
6507 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
6510 void __iomem *vaddr;
	/* The kdump kernel is loading; we don't know what state the
	 * PCI interface is in.  dev->enable_cnt is zero, so we call
	 * enable+disable, wait a while, and switch it back on.
	 */
6519 rc = pci_enable_device(pdev);
6521 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
6524 pci_disable_device(pdev);
6525 msleep(260); /* a randomly chosen number */
6526 rc = pci_enable_device(pdev);
6528 dev_warn(&pdev->dev, "failed to enable device.\n");
6532 pci_set_master(pdev);
6534 vaddr = pci_ioremap_bar(pdev, 0);
6535 if (vaddr == NULL) {
6539 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
6542 /* Reset the controller with a PCI power-cycle or via doorbell */
6543 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
	/* -ENOTSUPP here means we cannot reset the controller,
	 * but it's already (and still) up and running in
	 * "performant mode".  Or it might be a 640x, which can't reset
	 * due to concerns about the shared BBWC between the 6402/6404 pair.
	 */
6553 /* Now try to get the controller to respond to a no-op */
6554 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
6555 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
6556 if (hpsa_noop(pdev) == 0)
6559 dev_warn(&pdev->dev, "no-op failed%s\n",
			(i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
6565 pci_disable_device(pdev);
6569 static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
6571 h->cmd_pool_bits = kzalloc(
6572 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
6573 sizeof(unsigned long), GFP_KERNEL);
6574 h->cmd_pool = pci_alloc_consistent(h->pdev,
6575 h->nr_cmds * sizeof(*h->cmd_pool),
6576 &(h->cmd_pool_dhandle));
6577 h->errinfo_pool = pci_alloc_consistent(h->pdev,
6578 h->nr_cmds * sizeof(*h->errinfo_pool),
6579 &(h->errinfo_pool_dhandle));
6580 if ((h->cmd_pool_bits == NULL)
6581 || (h->cmd_pool == NULL)
6582 || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
6588 hpsa_free_cmd_pool(h);
6592 static void hpsa_free_cmd_pool(struct ctlr_info *h)
6594 kfree(h->cmd_pool_bits);
6596 pci_free_consistent(h->pdev,
6597 h->nr_cmds * sizeof(struct CommandList),
6598 h->cmd_pool, h->cmd_pool_dhandle);
6599 if (h->ioaccel2_cmd_pool)
6600 pci_free_consistent(h->pdev,
6601 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
6602 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
6603 if (h->errinfo_pool)
6604 pci_free_consistent(h->pdev,
6605 h->nr_cmds * sizeof(struct ErrorInfo),
6607 h->errinfo_pool_dhandle);
6608 if (h->ioaccel_cmd_pool)
6609 pci_free_consistent(h->pdev,
6610 h->nr_cmds * sizeof(struct io_accel1_cmd),
6611 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
6614 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
6618 cpu = cpumask_first(cpu_online_mask);
6619 for (i = 0; i < h->msix_vector; i++) {
6620 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
6621 cpu = cpumask_next(cpu, cpu_online_mask);
6625 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
6626 static void hpsa_free_irqs(struct ctlr_info *h)
6630 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
6631 /* Single reply queue, only one irq to free */
		i = h->intr_mode;
		irq_set_affinity_hint(h->intr[i], NULL);
6634 free_irq(h->intr[i], &h->q[i]);
6638 for (i = 0; i < h->msix_vector; i++) {
6639 irq_set_affinity_hint(h->intr[i], NULL);
6640 free_irq(h->intr[i], &h->q[i]);
6642 for (; i < MAX_REPLY_QUEUES; i++)
6646 /* returns 0 on success; cleans up and returns -Enn on error */
6647 static int hpsa_request_irqs(struct ctlr_info *h,
6648 irqreturn_t (*msixhandler)(int, void *),
6649 irqreturn_t (*intxhandler)(int, void *))
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * reply queue to service.
	 */
6657 for (i = 0; i < MAX_REPLY_QUEUES; i++)
6660 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
6661 /* If performant mode and MSI-X, use multiple reply queues */
6662 for (i = 0; i < h->msix_vector; i++) {
6663 rc = request_irq(h->intr[i], msixhandler,
6669 dev_err(&h->pdev->dev,
6670 "failed to get irq %d for %s\n",
6671 h->intr[i], h->devname);
6672 for (j = 0; j < i; j++) {
6673 free_irq(h->intr[j], &h->q[j]);
6676 for (; j < MAX_REPLY_QUEUES; j++)
6681 hpsa_irq_affinity_hints(h);
6683 /* Use single reply pool */
6684 if (h->msix_vector > 0 || h->msi_vector) {
6685 rc = request_irq(h->intr[h->intr_mode],
6686 msixhandler, 0, h->devname,
6687 &h->q[h->intr_mode]);
6689 rc = request_irq(h->intr[h->intr_mode],
6690 intxhandler, IRQF_SHARED, h->devname,
6691 &h->q[h->intr_mode]);
6695 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
6696 h->intr[h->intr_mode], h->devname);
6702 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
6704 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
6705 HPSA_RESET_TYPE_CONTROLLER)) {
6706 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
6710 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
6711 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
6712 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
6716 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
6717 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
		dev_warn(&h->pdev->dev,
			"Board failed to become ready after soft reset.\n");
6726 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
6729 #ifdef CONFIG_PCI_MSI
6730 if (h->msix_vector) {
6731 if (h->pdev->msix_enabled)
6732 pci_disable_msix(h->pdev);
6733 } else if (h->msi_vector) {
6734 if (h->pdev->msi_enabled)
6735 pci_disable_msi(h->pdev);
6737 #endif /* CONFIG_PCI_MSI */
6740 static void hpsa_free_reply_queues(struct ctlr_info *h)
6744 for (i = 0; i < h->nreply_queues; i++) {
6745 if (!h->reply_queue[i].head)
6747 pci_free_consistent(h->pdev, h->reply_queue_size,
6748 h->reply_queue[i].head, h->reply_queue[i].busaddr);
6749 h->reply_queue[i].head = NULL;
6750 h->reply_queue[i].busaddr = 0;
6754 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6756 hpsa_free_irqs_and_disable_msix(h);
6757 hpsa_free_sg_chain_blocks(h);
6758 hpsa_free_cmd_pool(h);
6759 kfree(h->ioaccel1_blockFetchTable);
6760 kfree(h->blockFetchTable);
6761 hpsa_free_reply_queues(h);
6765 iounmap(h->transtable);
6767 iounmap(h->cfgtable);
6768 pci_disable_device(h->pdev);
6769 pci_release_regions(h->pdev);
6773 /* Called when controller lockup detected. */
6774 static void fail_all_outstanding_cmds(struct ctlr_info *h)
6777 struct CommandList *c;
6780 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
6781 for (i = 0; i < h->nr_cmds; i++) {
6782 c = h->cmd_pool + i;
6783 refcount = atomic_inc_return(&c->refcount);
6785 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
6791 dev_warn(&h->pdev->dev,
6792 "failed %d commands in fail_all\n", failcount);
6795 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
6799 for_each_online_cpu(cpu) {
6800 u32 *lockup_detected;
6801 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
6802 *lockup_detected = value;
6804 wmb(); /* be sure the per-cpu variables are out to memory */
6807 static void controller_lockup_detected(struct ctlr_info *h)
6809 unsigned long flags;
6810 u32 lockup_detected;
6812 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6813 spin_lock_irqsave(&h->lock, flags);
6814 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
6815 if (!lockup_detected) {
6816 /* no heartbeat, but controller gave us a zero. */
6817 dev_warn(&h->pdev->dev,
6818 "lockup detected after %d but scratchpad register is zero\n",
6819 h->heartbeat_sample_interval / HZ);
6820 lockup_detected = 0xffffffff;
6822 set_lockup_detected_for_all_cpus(h, lockup_detected);
6823 spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev,
		"Controller lockup detected: 0x%08x after %d seconds\n",
		lockup_detected, h->heartbeat_sample_interval / HZ);
6826 pci_disable_device(h->pdev);
6827 fail_all_outstanding_cmds(h);
6830 static int detect_controller_lockup(struct ctlr_info *h)
6834 unsigned long flags;
6836 now = get_jiffies_64();
6837 /* If we've received an interrupt recently, we're ok. */
6838 if (time_after64(h->last_intr_timestamp +
6839 (h->heartbeat_sample_interval), now))
6843 * If we've already checked the heartbeat recently, we're ok.
6844 * This could happen if someone sends us a signal. We
6845 * otherwise don't care about signals in this thread.
6847 if (time_after64(h->last_heartbeat_timestamp +
6848 (h->heartbeat_sample_interval), now))
6851 /* If heartbeat has not changed since we last looked, we're not ok. */
6852 spin_lock_irqsave(&h->lock, flags);
6853 heartbeat = readl(&h->cfgtable->HeartBeat);
6854 spin_unlock_irqrestore(&h->lock, flags);
6855 if (h->last_heartbeat == heartbeat) {
6856 controller_lockup_detected(h);
6861 h->last_heartbeat = heartbeat;
6862 h->last_heartbeat_timestamp = now;
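/*
 * In short, the lockup heuristic above works like this: controller
 * firmware is expected to keep advancing the HeartBeat counter in the
 * config table.  If a full heartbeat_sample_interval elapses with no
 * interrupt and no movement in the counter, the controller is declared
 * locked up and every outstanding command is failed.
 */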
6866 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
6871 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
6874 /* Ask the controller to clear the events we're handling. */
6875 if ((h->transMethod & (CFGTBL_Trans_io_accel1
6876 | CFGTBL_Trans_io_accel2)) &&
6877 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
6878 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
6880 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
6881 event_type = "state change";
6882 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
6883 event_type = "configuration change";
6884 /* Stop sending new RAID offload reqs via the IO accelerator */
6885 scsi_block_requests(h->scsi_host);
6886 for (i = 0; i < h->ndevices; i++)
6887 h->dev[i]->offload_enabled = 0;
6888 hpsa_drain_accel_commands(h);
6889 /* Set 'accelerator path config change' bit */
6890 dev_warn(&h->pdev->dev,
6891 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
6892 h->events, event_type);
6893 writel(h->events, &(h->cfgtable->clear_event_notify));
6894 /* Set the "clear event notify field update" bit 6 */
6895 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6896 /* Wait until ctlr clears 'clear event notify field', bit 6 */
6897 hpsa_wait_for_clear_event_notify_ack(h);
6898 scsi_unblock_requests(h->scsi_host);
6900 /* Acknowledge controller notification events. */
6901 writel(h->events, &(h->cfgtable->clear_event_notify));
6902 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6903 hpsa_wait_for_clear_event_notify_ack(h);
6905 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6906 hpsa_wait_for_mode_change_ack(h);
6912 /* Check a register on the controller to see if there are configuration
6913 * changes (added/changed/removed logical drives, etc.) which mean that
6914 * we should rescan the controller for devices.
6915 * Also check flag for driver-initiated rescan.
6917 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
6919 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
6922 h->events = readl(&(h->cfgtable->event_notify));
6923 return h->events & RESCAN_REQUIRED_EVENT_BITS;
6927 * Check if any of the offline devices have become ready
6929 static int hpsa_offline_devices_ready(struct ctlr_info *h)
6931 unsigned long flags;
6932 struct offline_device_entry *d;
6933 struct list_head *this, *tmp;
6935 spin_lock_irqsave(&h->offline_device_lock, flags);
6936 list_for_each_safe(this, tmp, &h->offline_device_list) {
6937 d = list_entry(this, struct offline_device_entry,
6939 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6940 if (!hpsa_volume_offline(h, d->scsi3addr)) {
6941 spin_lock_irqsave(&h->offline_device_lock, flags);
6942 list_del(&d->offline_list);
6943 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6946 spin_lock_irqsave(&h->offline_device_lock, flags);
6948 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6952 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
6954 unsigned long flags;
6955 struct ctlr_info *h = container_of(to_delayed_work(work),
6956 struct ctlr_info, rescan_ctlr_work);
6959 if (h->remove_in_progress)
6962 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
6963 scsi_host_get(h->scsi_host);
6964 hpsa_ack_ctlr_events(h);
6965 hpsa_scan_start(h->scsi_host);
6966 scsi_host_put(h->scsi_host);
6968 spin_lock_irqsave(&h->lock, flags);
6969 if (!h->remove_in_progress)
6970 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
6971 h->heartbeat_sample_interval);
6972 spin_unlock_irqrestore(&h->lock, flags);
6975 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
6977 unsigned long flags;
6978 struct ctlr_info *h = container_of(to_delayed_work(work),
6979 struct ctlr_info, monitor_ctlr_work);
6981 detect_controller_lockup(h);
6982 if (lockup_detected(h))
6985 spin_lock_irqsave(&h->lock, flags);
6986 if (!h->remove_in_progress)
6987 schedule_delayed_work(&h->monitor_ctlr_work,
6988 h->heartbeat_sample_interval);
6989 spin_unlock_irqrestore(&h->lock, flags);
6992 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
6995 struct workqueue_struct *wq = NULL;
6997 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
6999 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
7004 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7007 struct ctlr_info *h;
7008 int try_soft_reset = 0;
7009 unsigned long flags;
7012 if (number_of_controllers == 0)
7013 printk(KERN_INFO DRIVER_NAME "\n");
7015 rc = hpsa_lookup_board_id(pdev, &board_id);
7017 dev_warn(&pdev->dev, "Board ID not found\n");
7021 rc = hpsa_init_reset_devices(pdev, board_id);
7023 if (rc != -ENOTSUPP)
7025 /* If the reset fails in a particular way (it has no way to do
7026 * a proper hard reset, so returns -ENOTSUPP) we can try to do
7027 * a soft reset once we get the controller configured up to the
7028 * point that it can accept a command.
7034 reinit_after_soft_reset:
7036 /* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
7040 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
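	/*
	 * Sketch of why this matters (illustrative; see hpsa.h for the
	 * authoritative layout): with 32-byte alignment the low 5 bits
	 * of a command's bus address are always zero, so tag/fetch-size
	 * state (e.g. the 3-bit block fetch size described in
	 * hpsa_enter_performant_mode()) can be packed into those bits
	 * and masked off to recover the real address.
	 */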
7041 h = kzalloc(sizeof(*h), GFP_KERNEL);
7046 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
7047 INIT_LIST_HEAD(&h->offline_device_list);
7048 spin_lock_init(&h->lock);
7049 spin_lock_init(&h->offline_device_lock);
7050 spin_lock_init(&h->scan_lock);
7051 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
7053 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
7054 if (!h->rescan_ctlr_wq) {
7059 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
7060 if (!h->resubmit_wq) {
7065 /* Allocate and clear per-cpu variable lockup_detected */
7066 h->lockup_detected = alloc_percpu(u32);
7067 if (!h->lockup_detected) {
7071 set_lockup_detected_for_all_cpus(h, 0);
7073 rc = hpsa_pci_init(h);
7077 sprintf(h->devname, HPSA "%d", number_of_controllers);
7078 h->ctlr = number_of_controllers;
7079 number_of_controllers++;
7081 /* configure PCI DMA stuff */
7082 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7086 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7090 dev_err(&pdev->dev, "no suitable DMA available\n");
7095 /* make sure the board interrupts are off */
7096 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7098 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
7100 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
7101 h->devname, pdev->device,
7102 h->intr[h->intr_mode], dac ? "" : " not");
7103 rc = hpsa_allocate_cmd_pool(h);
7105 goto clean2_and_free_irqs;
7106 if (hpsa_allocate_sg_chain_blocks(h))
7108 init_waitqueue_head(&h->scan_wait_queue);
7109 h->scan_finished = 1; /* no scan currently in progress */
7111 pci_set_drvdata(pdev, h);
7113 h->hba_mode_enabled = 0;
7114 h->scsi_host = NULL;
7115 spin_lock_init(&h->devlock);
7116 hpsa_put_ctlr_into_performant_mode(h);
7118 /* At this point, the controller is ready to take commands.
7119 * Now, if reset_devices and the hard reset didn't work, try
7120 * the soft reset and see if that works.
7122 if (try_soft_reset) {
7124 /* This is kind of gross. We may or may not get a completion
7125 * from the soft reset command, and if we do, then the value
7126 * from the fifo may or may not be valid. So, we wait 10 secs
	 * after the reset, throwing away any completions we get during
7128 * that time. Unregister the interrupt handler and register
7129 * fake ones to scoop up any residual completions.
7131 spin_lock_irqsave(&h->lock, flags);
7132 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7133 spin_unlock_irqrestore(&h->lock, flags);
7135 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
7136 hpsa_intx_discard_completions);
7138 dev_warn(&h->pdev->dev,
7139 "Failed to request_irq after soft reset.\n");
7143 rc = hpsa_kdump_soft_reset(h);
7145 /* Neither hard nor soft reset worked, we're hosed. */
7148 dev_info(&h->pdev->dev, "Board READY.\n");
7149 dev_info(&h->pdev->dev,
7150 "Waiting for stale completions to drain.\n");
7151 h->access.set_intr_mask(h, HPSA_INTR_ON);
7153 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7155 rc = controller_reset_failed(h->cfgtable);
7157 dev_info(&h->pdev->dev,
7158 "Soft reset appears to have failed.\n");
7160 /* since the controller's reset, we have to go back and re-init
	 * everything.  Easiest to just forget what we've done and do it
	 * all over again.
	 */
7164 hpsa_undo_allocations_after_kdump_soft_reset(h);
7167 /* don't go to clean4, we already unallocated */
7170 goto reinit_after_soft_reset;
7173 /* Enable Accelerated IO path at driver layer */
7174 h->acciopath_status = 1;
7177 /* Turn the interrupts on so we can service requests */
7178 h->access.set_intr_mask(h, HPSA_INTR_ON);
7180 hpsa_hba_inquiry(h);
7181 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
7183 /* Monitor the controller for firmware lockups */
7184 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
7185 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
7186 schedule_delayed_work(&h->monitor_ctlr_work,
7187 h->heartbeat_sample_interval);
7188 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
7189 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7190 h->heartbeat_sample_interval);
7194 hpsa_free_sg_chain_blocks(h);
7195 hpsa_free_cmd_pool(h);
7196 clean2_and_free_irqs:
7201 destroy_workqueue(h->resubmit_wq);
7202 if (h->rescan_ctlr_wq)
7203 destroy_workqueue(h->rescan_ctlr_wq);
7204 if (h->lockup_detected)
7205 free_percpu(h->lockup_detected);
7210 static void hpsa_flush_cache(struct ctlr_info *h)
7213 struct CommandList *c;
7216 /* Don't bother trying to flush the cache if locked up */
7217 /* FIXME not necessary if do_simple_cmd does the check */
7218 if (unlikely(lockup_detected(h)))
7220 flush_buf = kzalloc(4, GFP_KERNEL);
7226 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
7229 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
7230 RAID_CTLR_LUNID, TYPE_CMD)) {
7233 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
7234 PCI_DMA_TODEVICE, NO_TIMEOUT);
7237 if (c->err_info->CommandStatus != 0)
7239 dev_warn(&h->pdev->dev,
7240 "error flushing cache on controller\n");
7246 static void hpsa_shutdown(struct pci_dev *pdev)
7248 struct ctlr_info *h;
7250 h = pci_get_drvdata(pdev);
	/* Flush the cache first, so that all data in the battery-backed
	 * cache is written out to disk, then turn the board interrupts off.
	 */
7255 hpsa_flush_cache(h);
7256 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7257 hpsa_free_irqs_and_disable_msix(h);
7260 static void hpsa_free_device_info(struct ctlr_info *h)
7264 for (i = 0; i < h->ndevices; i++)
7268 static void hpsa_remove_one(struct pci_dev *pdev)
7270 struct ctlr_info *h;
7271 unsigned long flags;
7273 if (pci_get_drvdata(pdev) == NULL) {
7274 dev_err(&pdev->dev, "unable to remove device\n");
7277 h = pci_get_drvdata(pdev);
7279 /* Get rid of any controller monitoring work items */
7280 spin_lock_irqsave(&h->lock, flags);
7281 h->remove_in_progress = 1;
7282 spin_unlock_irqrestore(&h->lock, flags);
7283 cancel_delayed_work_sync(&h->monitor_ctlr_work);
7284 cancel_delayed_work_sync(&h->rescan_ctlr_work);
7285 destroy_workqueue(h->rescan_ctlr_wq);
7286 destroy_workqueue(h->resubmit_wq);
7287 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
7288 hpsa_shutdown(pdev);
7290 iounmap(h->transtable);
7291 iounmap(h->cfgtable);
7292 hpsa_free_device_info(h);
7293 hpsa_free_sg_chain_blocks(h);
7294 pci_free_consistent(h->pdev,
7295 h->nr_cmds * sizeof(struct CommandList),
7296 h->cmd_pool, h->cmd_pool_dhandle);
7297 pci_free_consistent(h->pdev,
7298 h->nr_cmds * sizeof(struct ErrorInfo),
7299 h->errinfo_pool, h->errinfo_pool_dhandle);
7300 hpsa_free_reply_queues(h);
7301 kfree(h->cmd_pool_bits);
7302 kfree(h->blockFetchTable);
7303 kfree(h->ioaccel1_blockFetchTable);
7304 kfree(h->ioaccel2_blockFetchTable);
7305 kfree(h->hba_inquiry_data);
7306 pci_disable_device(pdev);
7307 pci_release_regions(pdev);
7308 free_percpu(h->lockup_detected);
7312 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
7313 __attribute__((unused)) pm_message_t state)
7318 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
7323 static struct pci_driver hpsa_pci_driver = {
7325 .probe = hpsa_init_one,
7326 .remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,
7328 .shutdown = hpsa_shutdown,
7329 .suspend = hpsa_suspend,
7330 .resume = hpsa_resume,
7333 /* Fill in bucket_map[], given nsgs (the max number of
7334 * scatter gather elements supported) and bucket[],
 * which is an array of num_buckets integers.  The bucket[] array
 * contains the available DMA transfer sizes (in 16
7337 * byte increments) which the controller uses to fetch
7338 * commands. This function fills in bucket_map[], which
7339 * maps a given number of scatter gather elements to one of
7340 * the 8 DMA transfer sizes. The point of it is to allow the
7341 * controller to only do as much DMA as needed to fetch the
7342 * command, with the DMA transfer size encoded in the lower
7343 * bits of the command address.
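 *
 * Worked example using the bft[] table below: with min_blocks = 4, a
 * command carrying 3 SG entries needs 3 + 4 = 7 sixteen-byte blocks;
 * the smallest bucket that fits is the one holding 8, so 3 SG entries
 * map to an 8-block (128-byte) fetch.
 */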
7345 static void calc_bucket_map(int bucket[], int num_buckets,
7346 int nsgs, int min_blocks, u32 *bucket_map)
7350 /* Note, bucket_map must have nsgs+1 entries. */
7351 for (i = 0; i <= nsgs; i++) {
7352 /* Compute size of a command with i SG entries */
7353 size = i + min_blocks;
7354 b = num_buckets; /* Assume the biggest bucket */
7355 /* Find the bucket that is just big enough */
7356 for (j = 0; j < num_buckets; j++) {
7357 if (bucket[j] >= size) {
7362 /* for a command with i SG entries, use bucket b. */
7367 /* return -ENODEV or other reason on error, 0 on success */
7368 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7371 unsigned long register_value;
7372 unsigned long transMethod = CFGTBL_Trans_Performant |
7373 (trans_support & CFGTBL_Trans_use_short_tags) |
7374 CFGTBL_Trans_enable_directed_msix |
7375 (trans_support & (CFGTBL_Trans_io_accel1 |
7376 CFGTBL_Trans_io_accel2));
7377 struct access_method access = SA5_performant_access;
7379 /* This is a bit complicated. There are 8 registers on
7380 * the controller which we write to to tell it 8 different
7381 * sizes of commands which there may be. It's a way of
7382 * reducing the DMA done to fetch each command. Encoded into
7383 * each command's tag are 3 bits which communicate to the controller
7384 * which of the eight sizes that command fits within. The size of
7385 * each command depends on how many scatter gather entries there are.
7386 * Each SG entry requires 16 bytes. The eight registers are programmed
7387 * with the number of 16-byte blocks a command of that size requires.
 * The smallest command possible requires 5 such 16-byte blocks;
 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
7390 * blocks. Note, this only extends to the SG entries contained
7391 * within the command block, and does not extend to chained blocks
7392 * of SG elements. bft[] contains the eight values we write to
7393 * the registers. They are not evenly distributed, but have more
7394 * sizes for small commands, and fewer sizes for larger commands.
7396 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
7397 #define MIN_IOACCEL2_BFT_ENTRY 5
7398 #define HPSA_IOACCEL2_HEADER_SZ 4
7399 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
7400 13, 14, 15, 16, 17, 18, 19,
7401 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
7402 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
7403 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
7404 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
7405 16 * MIN_IOACCEL2_BFT_ENTRY);
7406 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
7407 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
7408 /* 5 = 1 s/g entry or 4k
7409 * 6 = 2 s/g entry or 8k
7410 * 8 = 4 s/g entry or 16k
7411 * 10 = 6 s/g entry or 24k
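	 *
	 * (In general, a command embedding n SG entries occupies n + 4 of
	 * these sixteen-byte blocks, since min_blocks is 4 for this table,
	 * so each bft[] value is an SG count plus 4, up to
	 * SG_ENTRIES_IN_CMD + 4 for the largest case.)
	 */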
7414 /* If the controller supports either ioaccel method then
7415 * we can also use the RAID stack submit path that does not
7416 * perform the superfluous readl() after each command submission.
7418 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
7419 access = SA5_performant_access_no_read;
7421 /* Controller spec: zero out this buffer. */
7422 for (i = 0; i < h->nreply_queues; i++)
7423 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
7425 bft[7] = SG_ENTRIES_IN_CMD + 4;
7426 calc_bucket_map(bft, ARRAY_SIZE(bft),
7427 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
7428 for (i = 0; i < 8; i++)
7429 writel(bft[i], &h->transtable->BlockFetch[i]);
7431 /* size of controller ring buffer */
7432 writel(h->max_commands, &h->transtable->RepQSize);
7433 writel(h->nreply_queues, &h->transtable->RepQCount);
7434 writel(0, &h->transtable->RepQCtrAddrLow32);
7435 writel(0, &h->transtable->RepQCtrAddrHigh32);
7437 for (i = 0; i < h->nreply_queues; i++) {
7438 writel(0, &h->transtable->RepQAddr[i].upper);
7439 writel(h->reply_queue[i].busaddr,
7440 &h->transtable->RepQAddr[i].lower);
7443 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7444 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/* enable outbound interrupt coalescing in accelerator mode */
7448 if (trans_support & CFGTBL_Trans_io_accel1) {
7449 access = SA5_ioaccel_mode1_access;
7450 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7451 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7453 if (trans_support & CFGTBL_Trans_io_accel2) {
7454 access = SA5_ioaccel_mode2_access;
7455 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7456 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7459 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7460 if (hpsa_wait_for_mode_change_ack(h)) {
7461 dev_err(&h->pdev->dev,
7462 "performant mode problem - doorbell timeout\n");
7465 register_value = readl(&(h->cfgtable->TransportActive));
7466 if (!(register_value & CFGTBL_Trans_Performant)) {
7467 dev_err(&h->pdev->dev,
7468 "performant mode problem - transport not active\n");
7471 /* Change the access methods to the performant access methods */
7473 h->transMethod = transMethod;
7475 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
7476 (trans_support & CFGTBL_Trans_io_accel2)))
7479 if (trans_support & CFGTBL_Trans_io_accel1) {
7480 /* Set up I/O accelerator mode */
7481 for (i = 0; i < h->nreply_queues; i++) {
7482 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
7483 h->reply_queue[i].current_entry =
7484 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
7486 bft[7] = h->ioaccel_maxsg + 8;
7487 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
7488 h->ioaccel1_blockFetchTable);
7490 /* initialize all reply queue entries to unused */
7491 for (i = 0; i < h->nreply_queues; i++)
7492 memset(h->reply_queue[i].head,
7493 (u8) IOACCEL_MODE1_REPLY_UNUSED,
7494 h->reply_queue_size);
7496 /* set all the constant fields in the accelerator command
7497 * frames once at init time to save CPU cycles later.
7499 for (i = 0; i < h->nr_cmds; i++) {
7500 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
7502 cp->function = IOACCEL1_FUNCTION_SCSIIO;
7503 cp->err_info = (u32) (h->errinfo_pool_dhandle +
7504 (i * sizeof(struct ErrorInfo)));
7505 cp->err_info_len = sizeof(struct ErrorInfo);
7506 cp->sgl_offset = IOACCEL1_SGLOFFSET;
7507 cp->host_context_flags =
7508 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
7509 cp->timeout_sec = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
7517 } else if (trans_support & CFGTBL_Trans_io_accel2) {
7518 u64 cfg_offset, cfg_base_addr_index;
7519 u32 bft2_offset, cfg_base_addr;
7522 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7523 &cfg_base_addr_index, &cfg_offset);
7524 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
7525 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
7526 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
7527 4, h->ioaccel2_blockFetchTable);
7528 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
7529 BUILD_BUG_ON(offsetof(struct CfgTable,
7530 io_accel_request_size_offset) != 0xb8);
7531 h->ioaccel2_bft2_regs =
7532 remap_pci_mem(pci_resource_start(h->pdev,
7533 cfg_base_addr_index) +
7534 cfg_offset + bft2_offset,
7536 sizeof(*h->ioaccel2_bft2_regs));
7537 for (i = 0; i < ARRAY_SIZE(bft2); i++)
7538 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
7540 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7541 if (hpsa_wait_for_mode_change_ack(h)) {
7542 dev_err(&h->pdev->dev,
7543 "performant mode problem - enabling ioaccel mode\n");
7549 static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7553 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
7554 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
7556 /* Command structures must be aligned on a 128-byte boundary
7557 * because the 7 lower bits of the address are used by the
7560 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
7561 IOACCEL1_COMMANDLIST_ALIGNMENT);
7562 h->ioaccel_cmd_pool =
7563 pci_alloc_consistent(h->pdev,
7564 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7565 &(h->ioaccel_cmd_pool_dhandle));
7567 h->ioaccel1_blockFetchTable =
7568 kmalloc(((h->ioaccel_maxsg + 1) *
7569 sizeof(u32)), GFP_KERNEL);
7571 if ((h->ioaccel_cmd_pool == NULL) ||
7572 (h->ioaccel1_blockFetchTable == NULL))
7575 memset(h->ioaccel_cmd_pool, 0,
7576 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
7580 if (h->ioaccel_cmd_pool)
7581 pci_free_consistent(h->pdev,
7582 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7583 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
7584 kfree(h->ioaccel1_blockFetchTable);
7588 static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7590 /* Allocate ioaccel2 mode command blocks and block fetch table */
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7594 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
7595 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
7597 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
7598 IOACCEL2_COMMANDLIST_ALIGNMENT);
7599 h->ioaccel2_cmd_pool =
7600 pci_alloc_consistent(h->pdev,
7601 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7602 &(h->ioaccel2_cmd_pool_dhandle));
7604 h->ioaccel2_blockFetchTable =
7605 kmalloc(((h->ioaccel_maxsg + 1) *
7606 sizeof(u32)), GFP_KERNEL);
7608 if ((h->ioaccel2_cmd_pool == NULL) ||
7609 (h->ioaccel2_blockFetchTable == NULL))
7612 memset(h->ioaccel2_cmd_pool, 0,
7613 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
7617 if (h->ioaccel2_cmd_pool)
7618 pci_free_consistent(h->pdev,
7619 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7620 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
7621 kfree(h->ioaccel2_blockFetchTable);
7625 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7628 unsigned long transMethod = CFGTBL_Trans_Performant |
7629 CFGTBL_Trans_use_short_tags;
7632 if (hpsa_simple_mode)
7635 trans_support = readl(&(h->cfgtable->TransportSupport));
7636 if (!(trans_support & PERFORMANT_MODE))
7639 /* Check for I/O accelerator mode support */
7640 if (trans_support & CFGTBL_Trans_io_accel1) {
7641 transMethod |= CFGTBL_Trans_io_accel1 |
7642 CFGTBL_Trans_enable_directed_msix;
7643 if (hpsa_alloc_ioaccel_cmd_and_bft(h))
7646 if (trans_support & CFGTBL_Trans_io_accel2) {
7647 transMethod |= CFGTBL_Trans_io_accel2 |
7648 CFGTBL_Trans_enable_directed_msix;
7649 if (ioaccel2_alloc_cmds_and_bft(h))
7654 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
7655 hpsa_get_max_perf_mode_cmds(h);
7656 /* Performant mode ring buffer and supporting data structures */
7657 h->reply_queue_size = h->max_commands * sizeof(u64);
7659 for (i = 0; i < h->nreply_queues; i++) {
7660 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
7661 h->reply_queue_size,
7662 &(h->reply_queue[i].busaddr));
7663 if (!h->reply_queue[i].head)
7665 h->reply_queue[i].size = h->max_commands;
7666 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
7667 h->reply_queue[i].current_entry = 0;
7670 /* Need a block fetch table for performant mode */
7671 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
7672 sizeof(u32)), GFP_KERNEL);
7673 if (!h->blockFetchTable)
7676 hpsa_enter_performant_mode(h, trans_support);
7680 hpsa_free_reply_queues(h);
7681 kfree(h->blockFetchTable);
7684 static int is_accelerated_cmd(struct CommandList *c)
7686 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
7689 static void hpsa_drain_accel_commands(struct ctlr_info *h)
7691 struct CommandList *c = NULL;
7692 int i, accel_cmds_out;
7695 do { /* wait for all outstanding ioaccel commands to drain out */
7697 for (i = 0; i < h->nr_cmds; i++) {
7698 c = h->cmd_pool + i;
7699 refcount = atomic_inc_return(&c->refcount);
7700 if (refcount > 1) /* Command is allocated */
7701 accel_cmds_out += is_accelerated_cmd(c);
7704 if (accel_cmds_out <= 0)
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
7714 static int __init hpsa_init(void)
7716 return pci_register_driver(&hpsa_pci_driver);
7719 static void __exit hpsa_cleanup(void)
7721 pci_unregister_driver(&hpsa_pci_driver);
7724 static void __attribute__((unused)) verify_offsets(void)
7726 #define VERIFY_OFFSET(member, offset) \
7727 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
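	/* For example: if a stray struct edit shifted volume_blk_size away
	 * from byte offset 4, the corresponding BUILD_BUG_ON below would
	 * break the build rather than let a mislaid raid_map_data layout
	 * reach the controller.
	 */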
7729 VERIFY_OFFSET(structure_size, 0);
7730 VERIFY_OFFSET(volume_blk_size, 4);
7731 VERIFY_OFFSET(volume_blk_cnt, 8);
7732 VERIFY_OFFSET(phys_blk_shift, 16);
7733 VERIFY_OFFSET(parity_rotation_shift, 17);
7734 VERIFY_OFFSET(strip_size, 18);
7735 VERIFY_OFFSET(disk_starting_blk, 20);
7736 VERIFY_OFFSET(disk_blk_cnt, 28);
7737 VERIFY_OFFSET(data_disks_per_row, 36);
7738 VERIFY_OFFSET(metadata_disks_per_row, 38);
7739 VERIFY_OFFSET(row_cnt, 40);
7740 VERIFY_OFFSET(layout_map_count, 42);
7741 VERIFY_OFFSET(flags, 44);
7742 VERIFY_OFFSET(dekindex, 46);
7743 /* VERIFY_OFFSET(reserved, 48 */
7744 VERIFY_OFFSET(data, 64);
7746 #undef VERIFY_OFFSET
7748 #define VERIFY_OFFSET(member, offset) \
7749 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
7751 VERIFY_OFFSET(IU_type, 0);
7752 VERIFY_OFFSET(direction, 1);
7753 VERIFY_OFFSET(reply_queue, 2);
7754 /* VERIFY_OFFSET(reserved1, 3); */
7755 VERIFY_OFFSET(scsi_nexus, 4);
7756 VERIFY_OFFSET(Tag, 8);
7757 VERIFY_OFFSET(cdb, 16);
7758 VERIFY_OFFSET(cciss_lun, 32);
7759 VERIFY_OFFSET(data_len, 40);
7760 VERIFY_OFFSET(cmd_priority_task_attr, 44);
7761 VERIFY_OFFSET(sg_count, 45);
7762 /* VERIFY_OFFSET(reserved3 */
7763 VERIFY_OFFSET(err_ptr, 48);
7764 VERIFY_OFFSET(err_len, 56);
7765 /* VERIFY_OFFSET(reserved4 */
7766 VERIFY_OFFSET(sg, 64);
7768 #undef VERIFY_OFFSET
7770 #define VERIFY_OFFSET(member, offset) \
7771 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
7773 VERIFY_OFFSET(dev_handle, 0x00);
7774 VERIFY_OFFSET(reserved1, 0x02);
7775 VERIFY_OFFSET(function, 0x03);
7776 VERIFY_OFFSET(reserved2, 0x04);
7777 VERIFY_OFFSET(err_info, 0x0C);
7778 VERIFY_OFFSET(reserved3, 0x10);
7779 VERIFY_OFFSET(err_info_len, 0x12);
7780 VERIFY_OFFSET(reserved4, 0x13);
7781 VERIFY_OFFSET(sgl_offset, 0x14);
7782 VERIFY_OFFSET(reserved5, 0x15);
7783 VERIFY_OFFSET(transfer_len, 0x1C);
7784 VERIFY_OFFSET(reserved6, 0x20);
7785 VERIFY_OFFSET(io_flags, 0x24);
7786 VERIFY_OFFSET(reserved7, 0x26);
7787 VERIFY_OFFSET(LUN, 0x34);
7788 VERIFY_OFFSET(control, 0x3C);
7789 VERIFY_OFFSET(CDB, 0x40);
7790 VERIFY_OFFSET(reserved8, 0x50);
7791 VERIFY_OFFSET(host_context_flags, 0x60);
7792 VERIFY_OFFSET(timeout_sec, 0x62);
7793 VERIFY_OFFSET(ReplyQueue, 0x64);
7794 VERIFY_OFFSET(reserved9, 0x65);
7795 VERIFY_OFFSET(tag, 0x68);
7796 VERIFY_OFFSET(host_addr, 0x70);
7797 VERIFY_OFFSET(CISS_LUN, 0x78);
7798 VERIFY_OFFSET(SG, 0x78 + 8);
7799 #undef VERIFY_OFFSET
7802 module_init(hpsa_init);
7803 module_exit(hpsa_cleanup);