 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000
/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324a103C, "Smart Array P712m", &SA5_access},
	{0x324b103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array", &SA5_access},
	{0x1921103C, "Smart Array", &SA5_access},
	{0x1922103C, "Smart Array", &SA5_access},
	{0x1923103C, "Smart Array", &SA5_access},
	{0x1924103C, "Smart Array", &SA5_access},
	{0x1925103C, "Smart Array", &SA5_access},
	{0x1926103C, "Smart Array", &SA5_access},
	{0x1928103C, "Smart Array", &SA5_access},
	{0x334d103C, "Smart Array P822se", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
static int number_of_controllers;
static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
static spinlock_t lockup_detector_lock;
static struct task_struct *hpsa_lockup_detector;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
	switch (c->err_info->SenseInfo[12]) {
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
			"detected, action required\n", h->ctlr);
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
			"changed, action required\n", h->ctlr);
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
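/*
 * check_for_busy: note commands that came back from the controller with
 * SAM_STAT_BUSY or SAM_STAT_TASK_SET_FULL target status so that callers
 * (e.g. the simple-command retry loop) can back off and retry them.
 */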
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
	dev_warn(&h->pdev->dev, HPSA "device busy");
static ssize_t host_store_rescan(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;
	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards. These are two pci devices in one slot
	 * which share a battery backed cache module. One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it. If we reset the one controlling the cache, the other will
	 * likely not be happy. Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
static int ctlr_is_hard_resettable(u32 board_id)
	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
static int ctlr_is_soft_resettable(u32 board_id)
	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
static int ctlr_is_resettable(u32 board_id)
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
	return (scsi3addr[3] & 0xC0) == 0x40;
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
	unsigned char rlevel;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
		spin_unlock_irqrestore(&h->lock, flags);
	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf)
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned char lunid[8];
	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
		spin_unlock_irqrestore(&h->lock, flags);
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned char sn[16];
	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
		spin_unlock_irqrestore(&h->lock, flags);
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
		"%02X%02X%02X%02X%02X%02X%02X%02X"
		"%02X%02X%02X%02X%02X%02X%02X%02X\n",
		sn[0], sn[1], sn[2], sn[3],
		sn[4], sn[5], sn[6], sn[7],
		sn[8], sn[9], sn[10], sn[11],
		sn[12], sn[13], sn[14], sn[15]);
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
static struct scsi_host_template hpsa_driver_template = {
	.module = THIS_MODULE,
	.queuecommand = hpsa_scsi_queue_command,
	.scan_start = hpsa_scan_start,
	.scan_finished = hpsa_scan_finished,
	.change_queue_depth = hpsa_change_queue_depth,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_abort_handler = hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.slave_alloc = hpsa_slave_alloc,
	.slave_destroy = hpsa_slave_destroy,
	.compat_ioctl = hpsa_compat_ioctl,
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
	list_add_tail(&c->list, list);
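/*
 * next_command: pull the next completed command tag off reply queue q in
 * performant mode, or fall back to the access method's command_completed
 * handler when the controller is running in simple mode.
 */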
static inline u32 next_command(struct ctlr_info *h, u8 q)
	struct reply_pool *rq = &h->reply_queue[q];
	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);
	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector))
			c->Header.ReplyQueue =
				smp_processor_id() % h->nreply_queues;
static int is_firmware_flash_cmd(u8 *cdb)
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should. So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
	struct CommandList *c)
	if (!is_firmware_flash_cmd(c->Request.CDB))
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
	struct CommandList *c)
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
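/*
 * enqueue_cmd_and_start_io: tag the command for performant mode when
 * enabled, relax the lockup detector if firmware is being flashed, then
 * queue the command to the controller under h->lock.
 */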
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
	set_performant_mode(h, c);
	dial_down_lockup_detection_during_fw_flash(h, c);
	spin_lock_irqsave(&h->lock, flags);
	spin_unlock_irqrestore(&h->lock, flags);
static inline void removeQ(struct CommandList *c)
	if (WARN_ON(list_empty(&c->list)))
	list_del_init(&c->list);
static inline int is_hba_lunid(unsigned char scsi3addr[])
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
static inline int is_scsi_rev_5(struct ctlr_info *h)
	if (!h->hba_inquiry_data)
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *device,
	struct hpsa_scsi_dev_t *added[], int *nadded)
	/* assumes h->devlock is held */
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;
	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
	/* If this device is a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	memcpy(addr1, device->scsi3addr, 8);
	for (i = 0; i < n; i++) {
		memcpy(addr2, sd->scsi3addr, 8);
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
	added[*nadded] = device;
	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	/* if (hostno != -1) */
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
		scsi_device_type(device->devtype), hostno,
		device->bus, device->target, device->lun);
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
	/* assumes h->devlock is held */
	struct hpsa_scsi_dev_t *sd;
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
	spin_unlock_irqrestore(&h->lock, flags);
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
	/* we compare everything except lun and target as these
	 * are not yet assigned. Compare parts likely
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
	if (dev1->devtype != dev2->devtype)
	if (dev1->bus != dev2->bus)
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	if (dev1->raid_level != dev2->raid_level)
/* Find needle in haystack. If exact match found, return DEVICE_SAME,
 * and return needle location in *index. If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
			return DEVICE_CHANGED;
	return DEVICE_NOT_FOUND;
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data. This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;
	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
	spin_lock_irqsave(&h->devlock, flags);
	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	while (i < h->ndevices) {
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			hpsa_scsi_remove_entry(h, hostno, i,
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
			h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
	spin_unlock_irqrestore(&h->devlock, flags);
	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	if (hostno == -1 || !changes)
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		fixup_botched_add(h, added[i]);
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
	struct hpsa_scsi_dev_t *sd;
	for (i = 0; i < h->ndevices; i++) {
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
static void hpsa_slave_destroy(struct scsi_device *sdev)
	/* nothing to do. */
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
	if (!h->cmd_sg_list)
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
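/*
 * hpsa_allocate_sg_chain_blocks: allocate one chain block of h->chainsize
 * SG descriptors per command so that requests with more scatter-gather
 * entries than h->max_cmd_sg_entries can spill over into a chained block.
 */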
static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
	if (h->chainsize <= 0)
	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
	if (!h->cmd_sg_list)
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
			h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
	hpsa_free_sg_chain_blocks(h);
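/*
 * hpsa_map_sg_chain_block: the last SG descriptor embedded in the command
 * becomes a chain pointer (Ext = HPSA_SG_CHAIN) holding the DMA address and
 * length of the spill-over descriptors in the command's chain block.
 */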
static void hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
	struct SGDescriptor *chain_sg, *chain_block;
	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
	struct SGDescriptor *chain_sg;
	union u64bit temp64;
	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
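/*
 * complete_scsi_command: undo DMA mappings, translate the controller's
 * CommandStatus and any SCSI sense data into a midlayer result, and hand
 * the command back via scsi_done.
 */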
static void complete_scsi_command(struct CommandList *cp)
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	unsigned char sense_key;
	unsigned char asc;	/* additional sense code */
	unsigned char ascq;	/* additional sense code qualifier */
	unsigned long sense_data_size;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
		hpsa_unmap_sg_chain_block(h, cp);
	cmd->result = (DID_OK << 16); /* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
	cmd->result |= ei->ScsiStatus;
	/* copy the sense data whether we need to or not. */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;
	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
	scsi_set_resid(cmd, ei->ResidualCnt);
	if (ei->CommandStatus == 0) {
		cmd->scsi_done(cmd);
	/* an error has occurred */
	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (check_for_unit_attention(h, cp)) {
				cmd->result = DID_SOFT_ERROR << 16;
			if (sense_key == ILLEGAL_REQUEST) {
				 * SCSI REPORT_LUNS is commonly unsupported on
				 * Smart Array. Suppress noisy complaint.
				if (cp->Request.CDB[0] == REPORT_LUNS)
				/* If ASC/ASCQ indicate Logical Unit
				 * Not Supported condition,
				if ((asc == 0x25) && (ascq == 0x0)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition\n", cp);
			if (sense_key == NOT_READY) {
				/* If Sense is Not Ready, Logical Unit
				 * Not ready, Manual Intervention
				if ((asc == 0x04) && (ascq == 0x03)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition: unit "
						"not ready, manual "
						"intervention required\n", cp);
			if (sense_key == ABORTED_COMMAND) {
				/* Aborted command is retryable */
				dev_warn(&h->pdev->dev, "cp %p "
					"has check condition: aborted command: "
					"ASC: 0x%x, ASCQ: 0x%x\n",
				cmd->result = DID_SOFT_ERROR << 16;
			/* Must be some other type of check condition */
			dev_dbg(&h->pdev->dev, "cp %p has check condition: "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x, "
				"cmd=[%02x %02x %02x %02x %02x "
				"%02x %02x %02x %02x %02x %02x "
				"%02x %02x %02x %02x %02x]\n",
				cp, sense_key, asc, ascq,
				cmd->cmnd[0], cmd->cmnd[1],
				cmd->cmnd[2], cmd->cmnd[3],
				cmd->cmnd[4], cmd->cmnd[5],
				cmd->cmnd[6], cmd->cmnd[7],
				cmd->cmnd[8], cmd->cmnd[9],
				cmd->cmnd[10], cmd->cmnd[11],
				cmd->cmnd[12], cmd->cmnd[13],
				cmd->cmnd[14], cmd->cmnd[15]);
		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				sense_key, asc, ascq,
		} else { /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp),
			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it. We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't. We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			cmd->result = DID_NO_CONNECT << 16;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response). You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	case CMD_PROTOCOL_ERR:
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error \n", cp);
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
			cp, ei->ScsiStatus);
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
			cp, ei->CommandStatus);
	cmd->scsi_done(cmd);
static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
	union u64bit addr64;
	for (i = 0; i < sg_used; i++) {
		addr64.val32.lower = c->SG[i].Addr.lower;
		addr64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
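/*
 * hpsa_map_one: map a single buffer (or none) for DMA, store its bus
 * address in the command's first SG descriptor and set SGList/SGTotal.
 */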
static void hpsa_map_one(struct pci_dev *pdev,
	struct CommandList *cp,
	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = 0;
	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
	cp->SG[0].Addr.lower =
		(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Addr.upper =
		(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Len = buflen;
	cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
	DECLARE_COMPLETION_ONSTACK(wait);
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
	struct CommandList *c)
	unsigned long flags;
	/* If controller lockup detected, fake a hardware error. */
	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_scsi_do_simple_cmd_core(h, c);
#define MAX_DRIVER_CMD_RETRIES 25
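/*
 * hpsa_scsi_do_simple_cmd_with_retry: issue a command synchronously and keep
 * retrying (with a capped, growing msleep backoff after the first few tries)
 * while it completes with unit attention or busy status, up to
 * MAX_DRIVER_CMD_RETRIES attempts; the data buffer is unmapped at the end.
 */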
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
	int backoff_time = 10, retry_count = 0;
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
	} while ((check_for_unit_attention(h, c) ||
		check_for_busy(h, c)) &&
		retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
static void hpsa_scsi_interpret_error(struct CommandList *cp)
	struct ErrorInfo *ei;
	struct device *d = &cp->h->pdev->dev;
	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		dev_warn(d, "cmd %p has completed with errors\n", cp);
		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero. "
				"(probably indicates selection timeout "
				"reported incorrectly due to a known "
				"firmware bug, circa July, 2001.)\n");
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		dev_info(d, "UNDERRUN\n");
	case CMD_DATA_OVERRUN:
		dev_warn(d, "cp %p has completed with data overrun\n", cp);
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		dev_warn(d, "cp %p is reported invalid (probably means "
			"target device no longer present)\n", cp);
		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
	case CMD_PROTOCOL_ERR:
		dev_warn(d, "cp %p has protocol error \n", cp);
	case CMD_HARDWARE_ERR:
		/* cmd->result = DID_ERROR << 16; */
		dev_warn(d, "cp %p had hardware error\n", cp);
	case CMD_CONNECTION_LOST:
		dev_warn(d, "cp %p had connection lost\n", cp);
		dev_warn(d, "cp %p was aborted\n", cp);
	case CMD_ABORT_FAILED:
		dev_warn(d, "cp %p reports abort failed\n", cp);
	case CMD_UNSOLICITED_ABORT:
		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
		dev_warn(d, "cp %p timed out\n", cp);
	case CMD_UNABORTABLE:
		dev_warn(d, "Command unabortable\n");
		dev_warn(d, "cp %p returned unknown status %x\n", cp,
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char page, unsigned char *buf,
	unsigned char bufsize)
	struct CommandList *c;
	struct ErrorInfo *ei;
	c = cmd_special_alloc(h);
	if (c == NULL) { /* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
	cmd_special_free(h, c);
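/*
 * hpsa_send_reset: send a device reset message to the given SCSI-3 address.
 * No data is transferred, so no DMA unmap is needed afterwards.
 */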
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
	struct CommandList *c;
	struct ErrorInfo *ei;
	c = cmd_special_alloc(h);
	if (c == NULL) { /* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(c);
	cmd_special_free(h, c);
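/*
 * hpsa_get_raid_level: read vendor-specific inquiry page 0xC1 and take the
 * RAID level from byte 8, defaulting to RAID_UNKNOWN on any failure.
 */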
static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
	buf = kzalloc(64, GFP_KERNEL);
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
		memcpy(device_id, &buf[8], buflen);
static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
	struct ReportLUNdata *buf, int bufsize,
	int extended_response)
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;
	c = cmd_special_alloc(h);
	if (c == NULL) { /* trouble... */
		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD);
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	if (ei->CommandStatus != 0 &&
		ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
	cmd_special_free(h, c);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportLUNdata *buf,
	int bufsize, int extended_response)
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
	struct ReportLUNdata *buf, int bufsize)
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
	device->target = target;
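/*
 * hpsa_update_device_info: issue a standard inquiry to fill in device type,
 * vendor, model and device id, fetch the RAID level for logical disks, and
 * optionally detect OBDR tape devices via the "$DR-10" signature at offset 43.
 */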
static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
	unsigned char *inq_buff;
	unsigned char *obdr_sig;
	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));
	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr))
		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		this_device->raid_level = RAID_UNKNOWN;
	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
			strncmp(obdr_sig, OBDR_TAPE_SIG,
				OBDR_SIG_LEN) == 0);
static unsigned char *ext_target_model[] = {
static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
	for (i = 0; ext_target_model[i]; i++)
		if (strncmp(device->model, ext_target_model[i],
			strlen(ext_target_model[i])) == 0)
/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-external target logical volumes on bus 0, external target logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device, 2, -1, -1);
	/* It's a logical device */
	if (is_ext_target(h, device)) {
		/* external target way, put logicals on bus 1
		 * and match target/lun numbers box
		 * reports, other smart array, bus 0, target 0, match lunid
		hpsa_set_bus_target_lun(device,
			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the external targets (arrays), we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason. *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * Returns 1 if an enclosure was added, 0 if not.
static int add_ext_target_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	unsigned long lunzerobits[], int *n_ext_target_devs)
	unsigned char scsi3addr[8];
	if (test_bit(tmpdevice->target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */
	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */
	if (!is_ext_target(h, tmpdevice))
		return 0; /* Only external target devices have this problem. */
	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
	memset(scsi3addr, 0, 8);
	scsi3addr[3] = tmpdevice->target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */
	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */
	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
		dev_warn(&h->pdev->dev, "Maximum number of external "
			"target devices exceeded. Check your hardware "
	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
	(*n_ext_target_devs)++;
	hpsa_set_bus_target_lun(this_device,
		tmpdevice->bus, tmpdevice->target, 0);
	set_bit(tmpdevice->target, lunzerobits);
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
 * logdev. The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
			" %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
	int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;
	if (i < logicals_start)
		return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
1908 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1910 /* the idea here is we could get notified
1911 * that some devices have changed, so we do a report
1912 * physical luns and report logical luns cmd, and adjust
1913 * our list of devices accordingly.
1915 * The scsi3addr's of devices won't change so long as the
1916 * adapter is not reset. That means we can rescan and
1917 * tell which devices we already know about, vs. new
1918 * devices, vs. disappearing devices.
1920 struct ReportLUNdata *physdev_list = NULL;
1921 struct ReportLUNdata *logdev_list = NULL;
1924 u32 ndev_allocated = 0;
1925 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1927 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1928 int i, n_ext_target_devs, ndevs_to_allocate;
1929 int raid_ctlr_position;
1930 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
1932 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
1933 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1934 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1935 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1937 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
1938 dev_err(&h->pdev->dev, "out of memory\n");
1941 memset(lunzerobits, 0, sizeof(lunzerobits));
1943 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1944 logdev_list, &nlogicals))
1947 /* We might see up to the maximum number of logical and physical disks
1948 * plus external target devices, and a device for the local RAID
1951 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
1953 /* Allocate the per device structures */
1954 for (i = 0; i < ndevs_to_allocate; i++) {
1955 if (i >= HPSA_MAX_DEVICES) {
1956 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
1957 " %d devices ignored.\n", HPSA_MAX_DEVICES,
1958 ndevs_to_allocate - HPSA_MAX_DEVICES);
1962 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
1963 if (!currentsd[i]) {
1964 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
1965 __FILE__, __LINE__);
1971 if (unlikely(is_scsi_rev_5(h)))
1972 raid_ctlr_position = 0;
1974 raid_ctlr_position = nphysicals + nlogicals;
1976 /* adjust our table of devices */
1977 n_ext_target_devs = 0;
1978 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1979 u8 *lunaddrbytes, is_OBDR = 0;
1981 /* Figure out where the LUN ID info is coming from */
1982 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
1983 i, nphysicals, nlogicals, physdev_list, logdev_list);
1984 /* skip masked physical devices. */
1985 if (lunaddrbytes[3] & 0xC0 &&
1986 i < nphysicals + (raid_ctlr_position == 0))
1989 /* Get device type, vendor, model, device id */
1990 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
1992 continue; /* skip it if we can't talk to it. */
1993 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
1994 this_device = currentsd[ncurrent];
1997 * For external target devices, we have to insert a LUN 0 which
1998 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
1999 * is nonetheless an enclosure device there. We have to
2000 * present it, otherwise Linux won't find anything if
2001 * there is no LUN 0.
2003 if (add_ext_target_dev(h, tmpdevice, this_device,
2004 lunaddrbytes, lunzerobits,
2005 &n_ext_target_devs)) {
2007 this_device = currentsd[ncurrent];
2010 *this_device = *tmpdevice;
2012 switch (this_device->devtype) {
2014 /* We don't *really* support actual CD-ROM devices,
2015 * just "One Button Disaster Recovery" tape drive
2016 * which temporarily pretends to be a CD-ROM drive.
2017 * So we check that the device is really an OBDR tape
2018 * device by checking for "$DR-10" in bytes 43-48 of the inquiry data.
2030 case TYPE_MEDIUM_CHANGER:
2034 /* Only present the Smartarray HBA as a RAID controller.
2035 * If it's a RAID controller other than the HBA itself
2036 * (an external RAID controller, MSA500 or similar), don't present it.
2039 if (!is_hba_lunid(lunaddrbytes))
2046 if (ncurrent >= HPSA_MAX_DEVICES)
2049 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
2052 for (i = 0; i < ndev_allocated; i++)
2053 kfree(currentsd[i]);
2055 kfree(physdev_list);
2059 /* hpsa_scatter_gather takes a struct scsi_cmnd (cmd), and does the PCI
2060 * DMA mapping and fills in the scatter-gather entries of the hpsa command, cp.
2063 static int hpsa_scatter_gather(struct ctlr_info *h,
2064 struct CommandList *cp,
2065 struct scsi_cmnd *cmd)
2068 struct scatterlist *sg;
2070 int use_sg, i, sg_index, chained;
2071 struct SGDescriptor *curr_sg;
2073 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
2075 use_sg = scsi_dma_map(cmd);
2080 goto sglist_finished;
2085 scsi_for_each_sg(cmd, sg, use_sg, i) {
2086 if (i == h->max_cmd_sg_entries - 1 &&
2087 use_sg > h->max_cmd_sg_entries) {
2089 curr_sg = h->cmd_sg_list[cp->cmdindex];
2092 addr64 = (u64) sg_dma_address(sg);
2093 len = sg_dma_len(sg);
2094 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
2095 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
2097 curr_sg->Ext = 0; /* we are not chaining */
2101 if (use_sg + chained > h->maxSG)
2102 h->maxSG = use_sg + chained;
2105 cp->Header.SGList = h->max_cmd_sg_entries;
2106 cp->Header.SGTotal = (u16) (use_sg + 1);
2107 hpsa_map_sg_chain_block(h, cp);
2113 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
2114 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
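/* Sketch of the chaining arithmetic above, assuming (for illustration)
 * max_cmd_sg_entries == 32: a request with use_sg == 40 places entries
 * 0..30 inline in the command, entries 31..39 in the per-command chain
 * block h->cmd_sg_list[cp->cmdindex], and reports SGList = 32 and
 * SGTotal = 41 (the extra descriptor is the chain pointer itself).
 * Requests with use_sg <= 32 skip the chain block entirely.
 */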
2119 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
2120 void (*done)(struct scsi_cmnd *))
2122 struct ctlr_info *h;
2123 struct hpsa_scsi_dev_t *dev;
2124 unsigned char scsi3addr[8];
2125 struct CommandList *c;
2126 unsigned long flags;
2128 /* Get the ptr to our adapter structure out of cmd->host. */
2129 h = sdev_to_hba(cmd->device);
2130 dev = cmd->device->hostdata;
2132 cmd->result = DID_NO_CONNECT << 16;
2136 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
2138 spin_lock_irqsave(&h->lock, flags);
2139 if (unlikely(h->lockup_detected)) {
2140 spin_unlock_irqrestore(&h->lock, flags);
2141 cmd->result = DID_ERROR << 16;
2145 spin_unlock_irqrestore(&h->lock, flags);
2147 if (c == NULL) { /* trouble... */
2148 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2149 return SCSI_MLQUEUE_HOST_BUSY;
2152 /* Fill in the command list header */
2154 cmd->scsi_done = done; /* save this for use by completion code */
2156 /* save c in case we have to abort it */
2157 cmd->host_scribble = (unsigned char *) c;
2159 c->cmd_type = CMD_SCSI;
2161 c->Header.ReplyQueue = 0; /* unused in simple mode */
2162 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
2163 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
2164 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
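/* Note: the tag built here is just the command-pool index shifted up,
 * with DIRECT_LOOKUP_BIT marking it as an indexed tag.  On completion,
 * hpsa_tag_contains_index() tests that bit and hpsa_tag_to_index()
 * shifts the index back out, so the ISR can find the CommandList
 * without searching the completion queue.
 */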
2166 /* Fill in the request block... */
2168 c->Request.Timeout = 0;
2169 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
2170 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
2171 c->Request.CDBLen = cmd->cmd_len;
2172 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
2173 c->Request.Type.Type = TYPE_CMD;
2174 c->Request.Type.Attribute = ATTR_SIMPLE;
2175 switch (cmd->sc_data_direction) {
2177 c->Request.Type.Direction = XFER_WRITE;
2179 case DMA_FROM_DEVICE:
2180 c->Request.Type.Direction = XFER_READ;
2183 c->Request.Type.Direction = XFER_NONE;
2185 case DMA_BIDIRECTIONAL:
2186 /* This can happen if a buggy application does a scsi passthru
2187 * and sets both inlen and outlen to non-zero. ( see
2188 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
2191 c->Request.Type.Direction = XFER_RSVD;
2192 /* This is technically wrong, and hpsa controllers should
2193 * reject it with CMD_INVALID, which is the most correct
2194 * response, but non-fibre backends appear to let it
2195 * slide by, and give the same results as if this field
2196 * were set correctly. Either way is acceptable for
2197 * our purposes here.
2203 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
2204 cmd->sc_data_direction);
2209 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
2211 return SCSI_MLQUEUE_HOST_BUSY;
2213 enqueue_cmd_and_start_io(h, c);
2214 /* the cmd'll come back via intr handler in complete_scsi_command() */
2218 static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
2220 static void hpsa_scan_start(struct Scsi_Host *sh)
2222 struct ctlr_info *h = shost_to_hba(sh);
2223 unsigned long flags;
2225 /* wait until any scan already in progress is finished. */
2227 spin_lock_irqsave(&h->scan_lock, flags);
2228 if (h->scan_finished)
2230 spin_unlock_irqrestore(&h->scan_lock, flags);
2231 wait_event(h->scan_wait_queue, h->scan_finished);
2232 /* Note: We don't need to worry about a race between this
2233 * thread and driver unload because the midlayer will
2234 * have incremented the reference count, so unload won't
2235 * happen if we're in here.
2238 h->scan_finished = 0; /* mark scan as in progress */
2239 spin_unlock_irqrestore(&h->scan_lock, flags);
2241 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2243 spin_lock_irqsave(&h->scan_lock, flags);
2244 h->scan_finished = 1; /* mark scan as finished. */
2245 wake_up_all(&h->scan_wait_queue);
2246 spin_unlock_irqrestore(&h->scan_lock, flags);
2249 static int hpsa_scan_finished(struct Scsi_Host *sh,
2250 unsigned long elapsed_time)
2252 struct ctlr_info *h = shost_to_hba(sh);
2253 unsigned long flags;
2256 spin_lock_irqsave(&h->scan_lock, flags);
2257 finished = h->scan_finished;
2258 spin_unlock_irqrestore(&h->scan_lock, flags);
2262 static int hpsa_change_queue_depth(struct scsi_device *sdev,
2263 int qdepth, int reason)
2265 struct ctlr_info *h = sdev_to_hba(sdev);
2267 if (reason != SCSI_QDEPTH_DEFAULT)
2273 if (qdepth > h->nr_cmds)
2274 qdepth = h->nr_cmds;
2275 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2276 return sdev->queue_depth;
2279 static void hpsa_unregister_scsi(struct ctlr_info *h)
2281 /* we are being forcibly unloaded, and may not refuse. */
2282 scsi_remove_host(h->scsi_host);
2283 scsi_host_put(h->scsi_host);
2284 h->scsi_host = NULL;
2287 static int hpsa_register_scsi(struct ctlr_info *h)
2289 struct Scsi_Host *sh;
2292 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
2299 sh->max_channel = 3;
2300 sh->max_cmd_len = MAX_COMMAND_SIZE;
2301 sh->max_lun = HPSA_MAX_LUN;
2302 sh->max_id = HPSA_MAX_LUN;
2303 sh->can_queue = h->nr_cmds;
2304 sh->cmd_per_lun = h->nr_cmds;
2305 sh->sg_tablesize = h->maxsgentries;
2307 sh->hostdata[0] = (unsigned long) h;
2308 sh->irq = h->intr[h->intr_mode];
2309 sh->unique_id = sh->irq;
2310 error = scsi_add_host(sh, &h->pdev->dev);
2317 dev_err(&h->pdev->dev, "%s: scsi_add_host"
2318 " failed for controller %d\n", __func__, h->ctlr);
2322 dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
2323 " failed for controller %d\n", __func__, h->ctlr);
2327 static int wait_for_device_to_become_ready(struct ctlr_info *h,
2328 unsigned char lunaddr[])
2332 int waittime = 1; /* seconds */
2333 struct CommandList *c;
2335 c = cmd_special_alloc(h);
2337 dev_warn(&h->pdev->dev, "out of memory in "
2338 "wait_for_device_to_become_ready.\n");
2342 /* Send test unit ready until device ready, or give up. */
2343 while (count < HPSA_TUR_RETRY_LIMIT) {
2345 /* Wait for a bit. Do this first, because if we send
2346 * the TUR right away, the reset will just abort it.
2348 msleep(1000 * waittime);
2351 /* Increase wait time with each try, up to a point. */
2352 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
2353 waittime = waittime * 2;
2355 /* Send the Test Unit Ready */
2356 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
2357 hpsa_scsi_do_simple_cmd_core(h, c);
2358 /* no unmap needed here because no data xfer. */
2360 if (c->err_info->CommandStatus == CMD_SUCCESS)
2363 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2364 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
2365 (c->err_info->SenseInfo[2] == NO_SENSE ||
2366 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
2369 dev_warn(&h->pdev->dev, "waiting %d secs "
2370 "for device to become ready.\n", waittime);
2371 rc = 1; /* device not ready. */
2375 dev_warn(&h->pdev->dev, "giving up on device.\n");
2377 dev_warn(&h->pdev->dev, "device is ready.\n");
2379 cmd_special_free(h, c);
2383 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
2384 * complaining. Doing a host- or bus-reset can't do anything good here.
2386 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2389 struct ctlr_info *h;
2390 struct hpsa_scsi_dev_t *dev;
2392 /* find the controller to which the command to be aborted was sent */
2393 h = sdev_to_hba(scsicmd->device);
2394 if (h == NULL) /* paranoia */
2396 dev = scsicmd->device->hostdata;
2398 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2399 "device lookup failed.\n");
2402 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2403 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2404 /* send a reset to the SCSI LUN which the command was sent to */
2405 rc = hpsa_send_reset(h, dev->scsi3addr);
2406 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2409 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2413 static void swizzle_abort_tag(u8 *tag)
2417 memcpy(original_tag, tag, 8);
2418 tag[0] = original_tag[3];
2419 tag[1] = original_tag[2];
2420 tag[2] = original_tag[1];
2421 tag[3] = original_tag[0];
2422 tag[4] = original_tag[7];
2423 tag[5] = original_tag[6];
2424 tag[6] = original_tag[5];
2425 tag[7] = original_tag[4];
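/* In other words, each 32-bit half of the 8-byte tag is byte-reversed:
 * a tag of 01 02 03 04 05 06 07 08 becomes 04 03 02 01 08 07 06 05.
 * This converts between the two byte orders that different Smart Arrays
 * expect for the abort tag (see the comment above
 * hpsa_send_abort_both_ways() below).
 */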
2428 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
2429 struct CommandList *abort, int swizzle)
2432 struct CommandList *c;
2433 struct ErrorInfo *ei;
2435 c = cmd_special_alloc(h);
2436 if (c == NULL) { /* trouble... */
2437 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2441 fill_cmd(c, HPSA_ABORT_MSG, h, abort, 0, 0, scsi3addr, TYPE_MSG);
2443 swizzle_abort_tag(&c->Request.CDB[4]);
2444 hpsa_scsi_do_simple_cmd_core(h, c);
2445 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
2446 __func__, abort->Header.Tag.upper, abort->Header.Tag.lower);
2447 /* no unmap needed here because no data xfer. */
2450 switch (ei->CommandStatus) {
2453 case CMD_UNABORTABLE: /* Very common, don't make noise. */
2457 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
2458 __func__, abort->Header.Tag.upper,
2459 abort->Header.Tag.lower);
2460 hpsa_scsi_interpret_error(c);
2464 cmd_special_free(h, c);
2465 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
2466 abort->Header.Tag.upper, abort->Header.Tag.lower);
2471 * hpsa_find_cmd_in_queue
2473 * Used to determine whether a command (find) is still present
2474 * in queue_head. Optionally excludes the last element of queue_head.
2476 * This is used to avoid unnecessary aborts. Commands in h->reqQ have
2477 * not yet been submitted, and so can be aborted by the driver without
2478 * sending an abort to the hardware.
2480 * Returns pointer to command if found in queue, NULL otherwise.
2482 static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
2483 struct scsi_cmnd *find, struct list_head *queue_head)
2485 unsigned long flags;
2486 struct CommandList *c = NULL; /* ptr into cmpQ */
2490 spin_lock_irqsave(&h->lock, flags);
2491 list_for_each_entry(c, queue_head, list) {
2492 if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
2494 if (c->scsi_cmd == find) {
2495 spin_unlock_irqrestore(&h->lock, flags);
2499 spin_unlock_irqrestore(&h->lock, flags);
2503 static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
2504 u8 *tag, struct list_head *queue_head)
2506 unsigned long flags;
2507 struct CommandList *c;
2509 spin_lock_irqsave(&h->lock, flags);
2510 list_for_each_entry(c, queue_head, list) {
2511 if (memcmp(&c->Header.Tag, tag, 8) != 0)
2513 spin_unlock_irqrestore(&h->lock, flags);
2516 spin_unlock_irqrestore(&h->lock, flags);
2520 /* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
2521 * tell which kind we're dealing with, so we send the abort both ways. There
2522 * shouldn't be any collisions between swizzled and unswizzled tags due to the
2523 * way we construct our tags but we check anyway in case the assumptions which
2524 * make this true someday become false.
2526 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
2527 unsigned char *scsi3addr, struct CommandList *abort)
2530 struct CommandList *c;
2531 int rc = 0, rc2 = 0;
2533 /* we do not expect to find the swizzled tag in our queue, but
2534 * check anyway just to be sure the assumptions which make this
2535 * the case haven't become wrong.
2537 memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
2538 swizzle_abort_tag(swizzled_tag);
2539 c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
2541 dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
2542 return hpsa_send_abort(h, scsi3addr, abort, 0);
2544 rc = hpsa_send_abort(h, scsi3addr, abort, 0);
2546 /* if the command is still in our queue, we can't conclude that it was
2547 * aborted (it might have just completed normally) but in any case
2548 * we don't need to try to abort it another way.
2550 c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
2552 rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
2556 /* Send an abort for the specified command.
2557 * If the device and controller support it,
2558 * send a task abort request.
2560 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
2564 struct ctlr_info *h;
2565 struct hpsa_scsi_dev_t *dev;
2566 struct CommandList *abort; /* pointer to command to be aborted */
2567 struct CommandList *found;
2568 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
2569 char msg[256]; /* For debug messaging. */
2572 /* Find the controller of the command to be aborted */
2573 h = sdev_to_hba(sc->device);
2575 "ABORT REQUEST FAILED, Controller lookup failed.\n"))
2578 /* Check that controller supports some kind of task abort */
2579 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
2580 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
2583 memset(msg, 0, sizeof(msg));
2584 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ",
2585 h->scsi_host->host_no, sc->device->channel,
2586 sc->device->id, sc->device->lun);
2588 /* Find the device of the command to be aborted */
2589 dev = sc->device->hostdata;
2591 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
2596 /* Get SCSI command to be aborted */
2597 abort = (struct CommandList *) sc->host_scribble;
2598 if (abort == NULL) {
2599 dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
2604 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ",
2605 abort->Header.Tag.upper, abort->Header.Tag.lower);
2606 as = (struct scsi_cmnd *) abort->scsi_cmd;
2608 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
2609 as->cmnd[0], as->serial_number);
2610 dev_dbg(&h->pdev->dev, "%s\n", msg);
2611 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
2612 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2614 /* Search reqQ to see if the command is queued but not submitted;
2615 * if so, complete it with aborted status and remove it from the reqQ.
2618 found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
2620 found->err_info->CommandStatus = CMD_ABORTED;
2622 dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
2627 /* not in reqQ, if also not in cmpQ, must have already completed */
2628 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
2630 dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
2636 * Command is in flight, or possibly already completed
2637 * by the firmware (but not to the scsi mid layer) but we can't
2638 * distinguish which. Send the abort down.
2640 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
2642 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
2643 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
2644 h->scsi_host->host_no,
2645 dev->bus, dev->target, dev->lun);
2648 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
2650 /* If the abort(s) above completed and actually aborted the
2651 * command, then the command to be aborted should already be
2652 * completed. If not, wait around a bit more to see if they
2653 * manage to complete normally.
2655 #define ABORT_COMPLETE_WAIT_SECS 30
2656 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
2657 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
2662 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
2663 msg, ABORT_COMPLETE_WAIT_SECS);
2669 * For operations that cannot sleep, a command block is allocated at init,
2670 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
2671 * which ones are free or in use. Lock must be held when calling this.
2672 * cmd_free() is the complement.
2674 static struct CommandList *cmd_alloc(struct ctlr_info *h)
2676 struct CommandList *c;
2678 union u64bit temp64;
2679 dma_addr_t cmd_dma_handle, err_dma_handle;
2680 unsigned long flags;
2682 spin_lock_irqsave(&h->lock, flags);
2684 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2685 if (i == h->nr_cmds) {
2686 spin_unlock_irqrestore(&h->lock, flags);
2689 } while (test_and_set_bit
2690 (i & (BITS_PER_LONG - 1),
2691 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
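/* The allocator above is just a bitmap over the preallocated pool:
 * bit i of cmd_pool_bits marks slot i of cmd_pool as in use.  Under
 * h->lock, find_first_zero_bit() picks a candidate slot and
 * test_and_set_bit() claims it; the do/while retries if the chosen
 * bit turns out to already be set.
 */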
2693 spin_unlock_irqrestore(&h->lock, flags);
2695 c = h->cmd_pool + i;
2696 memset(c, 0, sizeof(*c));
2697 cmd_dma_handle = h->cmd_pool_dhandle
2699 c->err_info = h->errinfo_pool + i;
2700 memset(c->err_info, 0, sizeof(*c->err_info));
2701 err_dma_handle = h->errinfo_pool_dhandle
2702 + i * sizeof(*c->err_info);
2706 INIT_LIST_HEAD(&c->list);
2707 c->busaddr = (u32) cmd_dma_handle;
2708 temp64.val = (u64) err_dma_handle;
2709 c->ErrDesc.Addr.lower = temp64.val32.lower;
2710 c->ErrDesc.Addr.upper = temp64.val32.upper;
2711 c->ErrDesc.Len = sizeof(*c->err_info);
2717 /* For operations that can wait for kmalloc to possibly sleep,
2718 * this routine can be called. Lock need not be held to call
2719 * cmd_special_alloc. cmd_special_free() is the complement.
2721 static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2723 struct CommandList *c;
2724 union u64bit temp64;
2725 dma_addr_t cmd_dma_handle, err_dma_handle;
2727 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2730 memset(c, 0, sizeof(*c));
2734 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2737 if (c->err_info == NULL) {
2738 pci_free_consistent(h->pdev,
2739 sizeof(*c), c, cmd_dma_handle);
2742 memset(c->err_info, 0, sizeof(*c->err_info));
2744 INIT_LIST_HEAD(&c->list);
2745 c->busaddr = (u32) cmd_dma_handle;
2746 temp64.val = (u64) err_dma_handle;
2747 c->ErrDesc.Addr.lower = temp64.val32.lower;
2748 c->ErrDesc.Addr.upper = temp64.val32.upper;
2749 c->ErrDesc.Len = sizeof(*c->err_info);
2755 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2758 unsigned long flags;
2760 i = c - h->cmd_pool;
2761 spin_lock_irqsave(&h->lock, flags);
2762 clear_bit(i & (BITS_PER_LONG - 1),
2763 h->cmd_pool_bits + (i / BITS_PER_LONG));
2765 spin_unlock_irqrestore(&h->lock, flags);
2768 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2770 union u64bit temp64;
2772 temp64.val32.lower = c->ErrDesc.Addr.lower;
2773 temp64.val32.upper = c->ErrDesc.Addr.upper;
2774 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2775 c->err_info, (dma_addr_t) temp64.val);
2776 pci_free_consistent(h->pdev, sizeof(*c),
2777 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
2780 #ifdef CONFIG_COMPAT
2782 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2784 IOCTL32_Command_struct __user *arg32 =
2785 (IOCTL32_Command_struct __user *) arg;
2786 IOCTL_Command_struct arg64;
2787 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2791 memset(&arg64, 0, sizeof(arg64));
2793 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2794 sizeof(arg64.LUN_info));
2795 err |= copy_from_user(&arg64.Request, &arg32->Request,
2796 sizeof(arg64.Request));
2797 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2798 sizeof(arg64.error_info));
2799 err |= get_user(arg64.buf_size, &arg32->buf_size);
2800 err |= get_user(cp, &arg32->buf);
2801 arg64.buf = compat_ptr(cp);
2802 err |= copy_to_user(p, &arg64, sizeof(arg64));
2807 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2810 err |= copy_in_user(&arg32->error_info, &p->error_info,
2811 sizeof(arg32->error_info));
2817 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2820 BIG_IOCTL32_Command_struct __user *arg32 =
2821 (BIG_IOCTL32_Command_struct __user *) arg;
2822 BIG_IOCTL_Command_struct arg64;
2823 BIG_IOCTL_Command_struct __user *p =
2824 compat_alloc_user_space(sizeof(arg64));
2828 memset(&arg64, 0, sizeof(arg64));
2830 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2831 sizeof(arg64.LUN_info));
2832 err |= copy_from_user(&arg64.Request, &arg32->Request,
2833 sizeof(arg64.Request));
2834 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2835 sizeof(arg64.error_info));
2836 err |= get_user(arg64.buf_size, &arg32->buf_size);
2837 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2838 err |= get_user(cp, &arg32->buf);
2839 arg64.buf = compat_ptr(cp);
2840 err |= copy_to_user(p, &arg64, sizeof(arg64));
2845 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2848 err |= copy_in_user(&arg32->error_info, &p->error_info,
2849 sizeof(arg32->error_info));
2855 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2858 case CCISS_GETPCIINFO:
2859 case CCISS_GETINTINFO:
2860 case CCISS_SETINTINFO:
2861 case CCISS_GETNODENAME:
2862 case CCISS_SETNODENAME:
2863 case CCISS_GETHEARTBEAT:
2864 case CCISS_GETBUSTYPES:
2865 case CCISS_GETFIRMVER:
2866 case CCISS_GETDRIVVER:
2867 case CCISS_REVALIDVOLS:
2868 case CCISS_DEREGDISK:
2869 case CCISS_REGNEWDISK:
2871 case CCISS_RESCANDISK:
2872 case CCISS_GETLUNINFO:
2873 return hpsa_ioctl(dev, cmd, arg);
2875 case CCISS_PASSTHRU32:
2876 return hpsa_ioctl32_passthru(dev, cmd, arg);
2877 case CCISS_BIG_PASSTHRU32:
2878 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2881 return -ENOIOCTLCMD;
2886 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2888 struct hpsa_pci_info pciinfo;
2892 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2893 pciinfo.bus = h->pdev->bus->number;
2894 pciinfo.dev_fn = h->pdev->devfn;
2895 pciinfo.board_id = h->board_id;
2896 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2901 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2903 DriverVer_type DriverVer;
2904 unsigned char vmaj, vmin, vsubmin;
2907 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2908 &vmaj, &vmin, &vsubmin);
2910 dev_info(&h->pdev->dev, "driver version string '%s' "
2911 "unrecognized.", HPSA_DRIVER_VERSION);
2916 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
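/* e.g. (hypothetical value) a driver version string of "2.0.2" parses
 * to vmaj = 2, vmin = 0, vsubmin = 2 and packs to DriverVer = 0x020002.
 */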
2919 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2924 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2926 IOCTL_Command_struct iocommand;
2927 struct CommandList *c;
2929 union u64bit temp64;
2933 if (!capable(CAP_SYS_RAWIO))
2935 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2937 if ((iocommand.buf_size < 1) &&
2938 (iocommand.Request.Type.Direction != XFER_NONE)) {
2941 if (iocommand.buf_size > 0) {
2942 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2945 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2946 /* Copy the data into the buffer we created */
2947 if (copy_from_user(buff, iocommand.buf,
2948 iocommand.buf_size)) {
2953 memset(buff, 0, iocommand.buf_size);
2956 c = cmd_special_alloc(h);
2961 /* Fill in the command type */
2962 c->cmd_type = CMD_IOCTL_PEND;
2963 /* Fill in Command Header */
2964 c->Header.ReplyQueue = 0; /* unused in simple mode */
2965 if (iocommand.buf_size > 0) { /* buffer to fill */
2966 c->Header.SGList = 1;
2967 c->Header.SGTotal = 1;
2968 } else { /* no buffers to fill */
2969 c->Header.SGList = 0;
2970 c->Header.SGTotal = 0;
2972 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2973 /* use the kernel address of the cmd block for tag */
2974 c->Header.Tag.lower = c->busaddr;
2976 /* Fill in Request block */
2977 memcpy(&c->Request, &iocommand.Request,
2978 sizeof(c->Request));
2980 /* Fill in the scatter gather information */
2981 if (iocommand.buf_size > 0) {
2982 temp64.val = pci_map_single(h->pdev, buff,
2983 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2984 c->SG[0].Addr.lower = temp64.val32.lower;
2985 c->SG[0].Addr.upper = temp64.val32.upper;
2986 c->SG[0].Len = iocommand.buf_size;
2987 c->SG[0].Ext = 0; /* we are not chaining*/
2989 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
2990 if (iocommand.buf_size > 0)
2991 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2992 check_ioctl_unit_attention(h, c);
2994 /* Copy the error information out */
2995 memcpy(&iocommand.error_info, c->err_info,
2996 sizeof(iocommand.error_info));
2997 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2999 cmd_special_free(h, c);
3002 if (iocommand.Request.Type.Direction == XFER_READ &&
3003 iocommand.buf_size > 0) {
3004 /* Copy the data out of the buffer we created */
3005 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
3007 cmd_special_free(h, c);
3012 cmd_special_free(h, c);
3016 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
3018 BIG_IOCTL_Command_struct *ioc;
3019 struct CommandList *c;
3020 unsigned char **buff = NULL;
3021 int *buff_size = NULL;
3022 union u64bit temp64;
3028 BYTE __user *data_ptr;
3032 if (!capable(CAP_SYS_RAWIO))
3034 ioc = (BIG_IOCTL_Command_struct *)
3035 kmalloc(sizeof(*ioc), GFP_KERNEL);
3040 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
3044 if ((ioc->buf_size < 1) &&
3045 (ioc->Request.Type.Direction != XFER_NONE)) {
3049 /* Check kmalloc limits using all SGs */
3050 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
3054 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
3058 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
3063 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
3068 left = ioc->buf_size;
3069 data_ptr = ioc->buf;
3071 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
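/* i.e. the user buffer is consumed in chunks of at most malloc_size
 * bytes, one kernel buffer per SG entry.  For example (illustrative
 * numbers), a 10000-byte buffer with malloc_size = 4096 is split into
 * chunks of 4096, 4096 and 1808 bytes across three SG entries.
 */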
3072 buff_size[sg_used] = sz;
3073 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
3074 if (buff[sg_used] == NULL) {
3078 if (ioc->Request.Type.Direction == XFER_WRITE) {
3079 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
3084 memset(buff[sg_used], 0, sz);
3089 c = cmd_special_alloc(h);
3094 c->cmd_type = CMD_IOCTL_PEND;
3095 c->Header.ReplyQueue = 0;
3096 c->Header.SGList = c->Header.SGTotal = sg_used;
3097 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
3098 c->Header.Tag.lower = c->busaddr;
3099 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
3100 if (ioc->buf_size > 0) {
3102 for (i = 0; i < sg_used; i++) {
3103 temp64.val = pci_map_single(h->pdev, buff[i],
3104 buff_size[i], PCI_DMA_BIDIRECTIONAL);
3105 c->SG[i].Addr.lower = temp64.val32.lower;
3106 c->SG[i].Addr.upper = temp64.val32.upper;
3107 c->SG[i].Len = buff_size[i];
3108 /* we are not chaining */
3112 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
3114 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
3115 check_ioctl_unit_attention(h, c);
3116 /* Copy the error information out */
3117 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
3118 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
3119 cmd_special_free(h, c);
3123 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
3124 /* Copy the data out of the buffer we created */
3125 BYTE __user *ptr = ioc->buf;
3126 for (i = 0; i < sg_used; i++) {
3127 if (copy_to_user(ptr, buff[i], buff_size[i])) {
3128 cmd_special_free(h, c);
3132 ptr += buff_size[i];
3135 cmd_special_free(h, c);
3139 for (i = 0; i < sg_used; i++)
3148 static void check_ioctl_unit_attention(struct ctlr_info *h,
3149 struct CommandList *c)
3151 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
3152 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
3153 (void) check_for_unit_attention(h, c);
3158 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
3160 struct ctlr_info *h;
3161 void __user *argp = (void __user *)arg;
3163 h = sdev_to_hba(dev);
3166 case CCISS_DEREGDISK:
3167 case CCISS_REGNEWDISK:
3169 hpsa_scan_start(h->scsi_host);
3171 case CCISS_GETPCIINFO:
3172 return hpsa_getpciinfo_ioctl(h, argp);
3173 case CCISS_GETDRIVVER:
3174 return hpsa_getdrivver_ioctl(h, argp);
3175 case CCISS_PASSTHRU:
3176 return hpsa_passthru_ioctl(h, argp);
3177 case CCISS_BIG_PASSTHRU:
3178 return hpsa_big_passthru_ioctl(h, argp);
3184 static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
3185 unsigned char *scsi3addr, u8 reset_type)
3187 struct CommandList *c;
3192 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
3193 RAID_CTLR_LUNID, TYPE_MSG);
3194 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
3196 enqueue_cmd_and_start_io(h, c);
3197 /* Don't wait for completion, the reset won't complete. Don't free
3198 * the command either. This is the last command we will send before
3199 * re-initializing everything, so it doesn't matter and won't leak.
3204 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
3205 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
3208 int pci_dir = XFER_NONE;
3209 struct CommandList *a; /* for commands to be aborted */
3211 c->cmd_type = CMD_IOCTL_PEND;
3212 c->Header.ReplyQueue = 0;
3213 if (buff != NULL && size > 0) {
3214 c->Header.SGList = 1;
3215 c->Header.SGTotal = 1;
3217 c->Header.SGList = 0;
3218 c->Header.SGTotal = 0;
3220 c->Header.Tag.lower = c->busaddr;
3221 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
3223 c->Request.Type.Type = cmd_type;
3224 if (cmd_type == TYPE_CMD) {
3227 /* are we trying to read a vital product page */
3228 if (page_code != 0) {
3229 c->Request.CDB[1] = 0x01;
3230 c->Request.CDB[2] = page_code;
3232 c->Request.CDBLen = 6;
3233 c->Request.Type.Attribute = ATTR_SIMPLE;
3234 c->Request.Type.Direction = XFER_READ;
3235 c->Request.Timeout = 0;
3236 c->Request.CDB[0] = HPSA_INQUIRY;
3237 c->Request.CDB[4] = size & 0xFF;
3239 case HPSA_REPORT_LOG:
3240 case HPSA_REPORT_PHYS:
3241 /* Talking to the controller, so it's a physical command:
3242 mode = 00, target = 0. Nothing to write.
3244 c->Request.CDBLen = 12;
3245 c->Request.Type.Attribute = ATTR_SIMPLE;
3246 c->Request.Type.Direction = XFER_READ;
3247 c->Request.Timeout = 0;
3248 c->Request.CDB[0] = cmd;
3249 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
3250 c->Request.CDB[7] = (size >> 16) & 0xFF;
3251 c->Request.CDB[8] = (size >> 8) & 0xFF;
3252 c->Request.CDB[9] = size & 0xFF;
3254 case HPSA_CACHE_FLUSH:
3255 c->Request.CDBLen = 12;
3256 c->Request.Type.Attribute = ATTR_SIMPLE;
3257 c->Request.Type.Direction = XFER_WRITE;
3258 c->Request.Timeout = 0;
3259 c->Request.CDB[0] = BMIC_WRITE;
3260 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
3261 c->Request.CDB[7] = (size >> 8) & 0xFF;
3262 c->Request.CDB[8] = size & 0xFF;
3264 case TEST_UNIT_READY:
3265 c->Request.CDBLen = 6;
3266 c->Request.Type.Attribute = ATTR_SIMPLE;
3267 c->Request.Type.Direction = XFER_NONE;
3268 c->Request.Timeout = 0;
3271 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
3275 } else if (cmd_type == TYPE_MSG) {
3278 case HPSA_DEVICE_RESET_MSG:
3279 c->Request.CDBLen = 16;
3280 c->Request.Type.Type = TYPE_MSG; /* It is a MSG not a CMD */
3281 c->Request.Type.Attribute = ATTR_SIMPLE;
3282 c->Request.Type.Direction = XFER_NONE;
3283 c->Request.Timeout = 0; /* Don't time out */
3284 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
3285 c->Request.CDB[0] = cmd;
3286 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
3287 /* If bytes 4-7 are zero, it means reset the LunID device */
3289 c->Request.CDB[4] = 0x00;
3290 c->Request.CDB[5] = 0x00;
3291 c->Request.CDB[6] = 0x00;
3292 c->Request.CDB[7] = 0x00;
3294 case HPSA_ABORT_MSG:
3295 a = buff; /* point to command to be aborted */
3296 dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
3297 a->Header.Tag.upper, a->Header.Tag.lower,
3298 c->Header.Tag.upper, c->Header.Tag.lower);
3299 c->Request.CDBLen = 16;
3300 c->Request.Type.Type = TYPE_MSG;
3301 c->Request.Type.Attribute = ATTR_SIMPLE;
3302 c->Request.Type.Direction = XFER_WRITE;
3303 c->Request.Timeout = 0; /* Don't time out */
3304 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
3305 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
3306 c->Request.CDB[2] = 0x00; /* reserved */
3307 c->Request.CDB[3] = 0x00; /* reserved */
3308 /* Tag to abort goes in CDB[4]-CDB[11] */
3309 c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
3310 c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
3311 c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
3312 c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
3313 c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
3314 c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
3315 c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
3316 c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
3317 c->Request.CDB[12] = 0x00; /* reserved */
3318 c->Request.CDB[13] = 0x00; /* reserved */
3319 c->Request.CDB[14] = 0x00; /* reserved */
3320 c->Request.CDB[15] = 0x00; /* reserved */
3323 dev_warn(&h->pdev->dev, "unknown message type %d\n",
3328 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
3332 switch (c->Request.Type.Direction) {
3334 pci_dir = PCI_DMA_FROMDEVICE;
3337 pci_dir = PCI_DMA_TODEVICE;
3340 pci_dir = PCI_DMA_NONE;
3343 pci_dir = PCI_DMA_BIDIRECTIONAL;
3346 hpsa_map_one(h->pdev, c, buff, size, pci_dir);
3352 * Map (physical) PCI mem into (virtual) kernel space
3354 static void __iomem *remap_pci_mem(ulong base, ulong size)
3356 ulong page_base = ((ulong) base) & PAGE_MASK;
3357 ulong page_offs = ((ulong) base) - page_base;
3358 void __iomem *page_remapped = ioremap_nocache(page_base,
3361 return page_remapped ? (page_remapped + page_offs) : NULL;
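/* The base is rounded down to a page boundary before mapping and the
 * dropped offset is added back to the returned virtual address, so
 * callers can pass an arbitrary (not necessarily page-aligned) BAR
 * offset.  For example, with 4 KiB pages a base of 0xfe001250 maps the
 * page at 0xfe001000 and returns that mapping plus 0x250.
 */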
3364 /* Takes cmds off the submission queue and sends them to the hardware,
3365 * then puts them on the queue of cmds waiting for completion.
3367 static void start_io(struct ctlr_info *h)
3369 struct CommandList *c;
3370 unsigned long flags;
3372 spin_lock_irqsave(&h->lock, flags);
3373 while (!list_empty(&h->reqQ)) {
3374 c = list_entry(h->reqQ.next, struct CommandList, list);
3375 /* can't do anything if fifo is full */
3376 if ((h->access.fifo_full(h))) {
3377 dev_warn(&h->pdev->dev, "fifo full\n");
3381 /* Get the first entry from the Request Q */
3385 /* Put job onto the completed Q */
3388 /* Must increment commands_outstanding before unlocking
3389 * and submitting, to avoid a race when checking for the fifo-full condition.
3392 h->commands_outstanding++;
3393 if (h->commands_outstanding > h->max_outstanding)
3394 h->max_outstanding = h->commands_outstanding;
3396 /* Tell the controller to execute the command */
3397 spin_unlock_irqrestore(&h->lock, flags);
3398 h->access.submit_command(h, c);
3399 spin_lock_irqsave(&h->lock, flags);
3401 spin_unlock_irqrestore(&h->lock, flags);
3404 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
3406 return h->access.command_completed(h, q);
3409 static inline bool interrupt_pending(struct ctlr_info *h)
3411 return h->access.intr_pending(h);
3414 static inline long interrupt_not_for_us(struct ctlr_info *h)
3416 return (h->access.intr_pending(h) == 0) ||
3417 (h->interrupts_enabled == 0);
3420 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
3423 if (unlikely(tag_index >= h->nr_cmds)) {
3424 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
3430 static inline void finish_cmd(struct CommandList *c)
3432 unsigned long flags;
3434 spin_lock_irqsave(&c->h->lock, flags);
3436 spin_unlock_irqrestore(&c->h->lock, flags);
3437 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
3438 if (likely(c->cmd_type == CMD_SCSI))
3439 complete_scsi_command(c);
3440 else if (c->cmd_type == CMD_IOCTL_PEND)
3441 complete(c->waiting);
3444 static inline u32 hpsa_tag_contains_index(u32 tag)
3446 return tag & DIRECT_LOOKUP_BIT;
3449 static inline u32 hpsa_tag_to_index(u32 tag)
3451 return tag >> DIRECT_LOOKUP_SHIFT;
3455 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
3457 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
3458 #define HPSA_SIMPLE_ERROR_BITS 0x03
3459 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
3460 return tag & ~HPSA_SIMPLE_ERROR_BITS;
3461 return tag & ~HPSA_PERF_ERROR_BITS;
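/* In performant mode the low DIRECT_LOOKUP_SHIFT bits of a returned tag
 * carry status/error flags; in simple mode only the low two bits (0x03)
 * do.  Either way they are masked off here so the remaining bits can be
 * matched against a command's busaddr (see process_nonindexed_cmd()).
 */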
3464 /* process completion of an indexed ("direct lookup") command */
3465 static inline void process_indexed_cmd(struct ctlr_info *h,
3469 struct CommandList *c;
3471 tag_index = hpsa_tag_to_index(raw_tag);
3472 if (!bad_tag(h, tag_index, raw_tag)) {
3473 c = h->cmd_pool + tag_index;
3478 /* process completion of a non-indexed command */
3479 static inline void process_nonindexed_cmd(struct ctlr_info *h,
3483 struct CommandList *c = NULL;
3484 unsigned long flags;
3486 tag = hpsa_tag_discard_error_bits(h, raw_tag);
3487 spin_lock_irqsave(&h->lock, flags);
3488 list_for_each_entry(c, &h->cmpQ, list) {
3489 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
3490 spin_unlock_irqrestore(&h->lock, flags);
3495 spin_unlock_irqrestore(&h->lock, flags);
3496 bad_tag(h, h->nr_cmds + 1, raw_tag);
3499 /* Some controllers, like p400, will give us one interrupt
3500 * after a soft reset, even if we turned interrupts off.
3501 * Only need to check for this in the hpsa_xxx_discard_completions functions.
3504 static int ignore_bogus_interrupt(struct ctlr_info *h)
3506 if (likely(!reset_devices))
3509 if (likely(h->interrupts_enabled))
3512 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
3513 "(known firmware bug.) Ignoring.\n");
3519 * Convert &h->q[x] (passed to interrupt handlers) back to h.
3520 * Relies on (h->q[x] == x) being true for x such that
3521 * 0 <= x < MAX_REPLY_QUEUES.
3523 static struct ctlr_info *queue_to_hba(u8 *queue)
3525 return container_of((queue - *queue), struct ctlr_info, q[0]);
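/* Each h->q[x] holds its own index x, so for a pointer "queue" into
 * that array, (queue - *queue) backs up to &h->q[0] and container_of()
 * recovers the enclosing ctlr_info.  The interrupt handlers below get
 * both h and the reply-queue number from this single cookie.
 */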
3528 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
3530 struct ctlr_info *h = queue_to_hba(queue);
3531 u8 q = *(u8 *) queue;
3534 if (ignore_bogus_interrupt(h))
3537 if (interrupt_not_for_us(h))
3539 h->last_intr_timestamp = get_jiffies_64();
3540 while (interrupt_pending(h)) {
3541 raw_tag = get_next_completion(h, q);
3542 while (raw_tag != FIFO_EMPTY)
3543 raw_tag = next_command(h, q);
3548 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
3550 struct ctlr_info *h = queue_to_hba(queue);
3552 u8 q = *(u8 *) queue;
3554 if (ignore_bogus_interrupt(h))
3557 h->last_intr_timestamp = get_jiffies_64();
3558 raw_tag = get_next_completion(h, q);
3559 while (raw_tag != FIFO_EMPTY)
3560 raw_tag = next_command(h, q);
3564 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
3566 struct ctlr_info *h = queue_to_hba((u8 *) queue);
3568 u8 q = *(u8 *) queue;
3570 if (interrupt_not_for_us(h))
3572 h->last_intr_timestamp = get_jiffies_64();
3573 while (interrupt_pending(h)) {
3574 raw_tag = get_next_completion(h, q);
3575 while (raw_tag != FIFO_EMPTY) {
3576 if (likely(hpsa_tag_contains_index(raw_tag)))
3577 process_indexed_cmd(h, raw_tag);
3579 process_nonindexed_cmd(h, raw_tag);
3580 raw_tag = next_command(h, q);
3586 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
3588 struct ctlr_info *h = queue_to_hba(queue);
3590 u8 q = *(u8 *) queue;
3592 h->last_intr_timestamp = get_jiffies_64();
3593 raw_tag = get_next_completion(h, q);
3594 while (raw_tag != FIFO_EMPTY) {
3595 if (likely(hpsa_tag_contains_index(raw_tag)))
3596 process_indexed_cmd(h, raw_tag);
3598 process_nonindexed_cmd(h, raw_tag);
3599 raw_tag = next_command(h, q);
3604 /* Send a message CDB to the firmware. Careful, this only works
3605 * in simple mode, not performant mode due to the tag lookup.
3606 * We only ever use this immediately after a controller reset.
3608 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
3612 struct CommandListHeader CommandHeader;
3613 struct RequestBlock Request;
3614 struct ErrDescriptor ErrorDescriptor;
3616 struct Command *cmd;
3617 static const size_t cmd_sz = sizeof(*cmd) +
3618 sizeof(cmd->ErrorDescriptor);
3620 uint32_t paddr32, tag;
3621 void __iomem *vaddr;
3624 vaddr = pci_ioremap_bar(pdev, 0);
3628 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
3629 * CCISS commands, so they must be allocated from the lower 4GiB of memory.
3632 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3638 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
3644 /* This must fit, because of the 32-bit consistent DMA mask. Also,
3645 * although there's no guarantee, we assume that the address is at
3646 * least 4-byte aligned (most likely, it's page-aligned).
3650 cmd->CommandHeader.ReplyQueue = 0;
3651 cmd->CommandHeader.SGList = 0;
3652 cmd->CommandHeader.SGTotal = 0;
3653 cmd->CommandHeader.Tag.lower = paddr32;
3654 cmd->CommandHeader.Tag.upper = 0;
3655 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
3657 cmd->Request.CDBLen = 16;
3658 cmd->Request.Type.Type = TYPE_MSG;
3659 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
3660 cmd->Request.Type.Direction = XFER_NONE;
3661 cmd->Request.Timeout = 0; /* Don't time out */
3662 cmd->Request.CDB[0] = opcode;
3663 cmd->Request.CDB[1] = type;
3664 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
3665 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
3666 cmd->ErrorDescriptor.Addr.upper = 0;
3667 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
3669 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
3671 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
3672 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
3673 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
3675 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
3680 /* we leak the DMA buffer here ... no choice since the controller could
3681 * still complete the command.
3683 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
3684 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
3689 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
3691 if (tag & HPSA_ERROR_BIT) {
3692 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
3697 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
3702 #define hpsa_noop(p) hpsa_message(p, 3, 0)
3704 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3705 void * __iomem vaddr, u32 use_doorbell)
3711 /* For everything after the P600, the PCI power state method
3712 * of resetting the controller doesn't work, so we have this
3713 * other way using the doorbell register.
3715 dev_info(&pdev->dev, "using doorbell to reset controller\n");
3716 writel(use_doorbell, vaddr + SA5_DOORBELL);
3717 } else { /* Try to do it the PCI power state way */
3719 /* Quoting from the Open CISS Specification: "The Power
3720 * Management Control/Status Register (CSR) controls the power
3721 * state of the device. The normal operating state is D0,
3722 * CSR=00h. The software off state is D3, CSR=03h. To reset
3723 * the controller, place the interface device in D3 then to D0,
3724 * this causes a secondary PCI reset which will reset the controller."
3727 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3730 "hpsa_reset_controller: "
3731 "PCI PM not supported\n");
3734 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
3735 /* enter the D3hot power management state */
3736 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3737 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3739 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3743 /* enter the D0 power management state */
3744 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3746 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3749 * The P600 requires a small delay when changing states.
3750 * Otherwise we may think the board did not reset and we bail.
3751 * This is for kdump only and is particular to the P600.
3758 static __devinit void init_driver_version(char *driver_version, int len)
3760 memset(driver_version, 0, len);
3761 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
3764 static __devinit int write_driver_ver_to_cfgtable(
3765 struct CfgTable __iomem *cfgtable)
3767 char *driver_version;
3768 int i, size = sizeof(cfgtable->driver_version);
3770 driver_version = kmalloc(size, GFP_KERNEL);
3771 if (!driver_version)
3774 init_driver_version(driver_version, size);
3775 for (i = 0; i < size; i++)
3776 writeb(driver_version[i], &cfgtable->driver_version[i]);
3777 kfree(driver_version);
3781 static __devinit void read_driver_ver_from_cfgtable(
3782 struct CfgTable __iomem *cfgtable, unsigned char *driver_ver)
3786 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
3787 driver_ver[i] = readb(&cfgtable->driver_version[i]);
3790 static __devinit int controller_reset_failed(
3791 struct CfgTable __iomem *cfgtable)
3794 char *driver_ver, *old_driver_ver;
3795 int rc, size = sizeof(cfgtable->driver_version);
3797 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
3798 if (!old_driver_ver)
3800 driver_ver = old_driver_ver + size;
3802 /* After a reset, the 32 bytes of "driver version" in the cfgtable
3803 * should have been changed, otherwise we know the reset failed.
3805 init_driver_version(old_driver_ver, size);
3806 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
3807 rc = !memcmp(driver_ver, old_driver_ver, size);
3808 kfree(old_driver_ver);
3811 /* This does a hard reset of the controller using PCI power management
3812 * states or the doorbell register.
3814 static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3818 u64 cfg_base_addr_index;
3819 void __iomem *vaddr;
3820 unsigned long paddr;
3821 u32 misc_fw_support;
3823 struct CfgTable __iomem *cfgtable;
3826 u16 command_register;
3828 /* For controllers as old as the P600, this is very nearly the same thing as:
3831 * pci_save_state(pci_dev);
3832 * pci_set_power_state(pci_dev, PCI_D3hot);
3833 * pci_set_power_state(pci_dev, PCI_D0);
3834 * pci_restore_state(pci_dev);
3836 * For controllers newer than the P600, the pci power state
3837 * method of resetting doesn't work so we have another way
3838 * using the doorbell register.
3841 rc = hpsa_lookup_board_id(pdev, &board_id);
3842 if (rc < 0 || !ctlr_is_resettable(board_id)) {
3843 dev_warn(&pdev->dev, "Not resetting device.\n");
3847 /* if controller is soft- but not hard resettable... */
3848 if (!ctlr_is_hard_resettable(board_id))
3849 return -ENOTSUPP; /* try soft reset later. */
3851 /* Save the PCI command register */
3852 pci_read_config_word(pdev, 4, &command_register);
3853 /* Turn the board off. This is so that later pci_restore_state()
3854 * won't turn the board on before the rest of config space is ready.
3856 pci_disable_device(pdev);
3857 pci_save_state(pdev);
3859 /* find the first memory BAR, so we can find the cfg table */
3860 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
3863 vaddr = remap_pci_mem(paddr, 0x250);
3867 /* find cfgtable in order to check if reset via doorbell is supported */
3868 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
3869 &cfg_base_addr_index, &cfg_offset);
3872 cfgtable = remap_pci_mem(pci_resource_start(pdev,
3873 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
3878 rc = write_driver_ver_to_cfgtable(cfgtable);
3882 /* If reset via doorbell register is supported, use that.
3883 * There are two such methods. Favor the newest method.
3885 misc_fw_support = readl(&cfgtable->misc_fw_support);
3886 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
3888 use_doorbell = DOORBELL_CTLR_RESET2;
3890 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3892 dev_warn(&pdev->dev, "Soft reset not supported. "
3893 "Firmware update is required.\n");
3894 rc = -ENOTSUPP; /* try soft reset */
3895 goto unmap_cfgtable;
3899 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3901 goto unmap_cfgtable;
3903 pci_restore_state(pdev);
3904 rc = pci_enable_device(pdev);
3906 dev_warn(&pdev->dev, "failed to enable device.\n");
3907 goto unmap_cfgtable;
3909 pci_write_config_word(pdev, 4, command_register);
3911 /* Some devices (notably the HP Smart Array 5i Controller)
3912 need a little pause here */
3913 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3915 /* Wait for board to become not ready, then ready. */
3916 dev_info(&pdev->dev, "Waiting for board to reset.\n");
3917 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
3919 dev_warn(&pdev->dev,
3920 "failed waiting for board to reset."
3921 " Will try soft reset.\n");
3922 rc = -ENOTSUPP; /* Not expected, but try soft reset later */
3923 goto unmap_cfgtable;
3925 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
3927 dev_warn(&pdev->dev,
3928 "failed waiting for board to become ready "
3929 "after hard reset\n");
3930 goto unmap_cfgtable;
3933 rc = controller_reset_failed(vaddr);
3935 goto unmap_cfgtable;
3937 dev_warn(&pdev->dev, "Unable to successfully reset "
3938 "controller. Will try soft reset.\n");
3941 dev_info(&pdev->dev, "board ready after hard reset.\n");
3953 * We cannot read the structure directly; for portability we must use the io functions.
3955 * This is for debug only.
3957 static void print_cfg_table(struct device *dev, struct CfgTable *tb)
3963 dev_info(dev, "Controller Configuration information\n");
3964 dev_info(dev, "------------------------------------\n");
3965 for (i = 0; i < 4; i++)
3966 temp_name[i] = readb(&(tb->Signature[i]));
3967 temp_name[4] = '\0';
3968 dev_info(dev, " Signature = %s\n", temp_name);
3969 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
3970 dev_info(dev, " Transport methods supported = 0x%x\n",
3971 readl(&(tb->TransportSupport)));
3972 dev_info(dev, " Transport methods active = 0x%x\n",
3973 readl(&(tb->TransportActive)));
3974 dev_info(dev, " Requested transport Method = 0x%x\n",
3975 readl(&(tb->HostWrite.TransportRequest)));
3976 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
3977 readl(&(tb->HostWrite.CoalIntDelay)));
3978 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
3979 readl(&(tb->HostWrite.CoalIntCount)));
3980 dev_info(dev, " Max outstanding commands = %d\n",
3981 readl(&(tb->CmdsOutMax)));
3982 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3983 for (i = 0; i < 16; i++)
3984 temp_name[i] = readb(&(tb->ServerName[i]));
3985 temp_name[16] = '\0';
3986 dev_info(dev, " Server Name = %s\n", temp_name);
3987 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3988 readl(&(tb->HeartBeat)));
3989 #endif /* HPSA_DEBUG */
3992 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3994 int i, offset, mem_type, bar_type;
3996 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3999 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
4000 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
4001 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
4004 mem_type = pci_resource_flags(pdev, i) &
4005 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
4007 case PCI_BASE_ADDRESS_MEM_TYPE_32:
4008 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
4009 offset += 4; /* 32 bit */
4011 case PCI_BASE_ADDRESS_MEM_TYPE_64:
4014 default: /* reserved in PCI 2.2 */
4015 dev_warn(&pdev->dev,
4016 "base address is invalid\n");
4021 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
4027 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
4028 * controllers that are capable. If not, we use IO-APIC mode.
4031 static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
4033 #ifdef CONFIG_PCI_MSI
4035 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
4037 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
4038 hpsa_msix_entries[i].vector = 0;
4039 hpsa_msix_entries[i].entry = i;
4042 /* Some boards advertise MSI but don't really support it */
4043 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
4044 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
4045 goto default_int_mode;
4046 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
4047 dev_info(&h->pdev->dev, "MSIX\n");
4048 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
4051 for (i = 0; i < MAX_REPLY_QUEUES; i++)
4052 h->intr[i] = hpsa_msix_entries[i].vector;
4057 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
4058 "available\n", err);
4059 goto default_int_mode;
4061 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
4063 goto default_int_mode;
4066 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
4067 dev_info(&h->pdev->dev, "MSI\n");
4068 if (!pci_enable_msi(h->pdev))
4071 dev_warn(&h->pdev->dev, "MSI init failed\n");
4074 #endif /* CONFIG_PCI_MSI */
4075 /* if we get here we're going to use the default interrupt mode */
4076 h->intr[h->intr_mode] = h->pdev->irq;
4079 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
4082 u32 subsystem_vendor_id, subsystem_device_id;
4084 subsystem_vendor_id = pdev->subsystem_vendor;
4085 subsystem_device_id = pdev->subsystem_device;
4086 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
4087 subsystem_vendor_id;
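/* The board_id is thus the PCI subsystem device id in the high 16 bits
 * and the subsystem vendor id in the low 16 bits; e.g. a board with
 * subsystem vendor 0x103C and subsystem device 0x3225 yields a board_id
 * of 0x3225103C (the P600 value checked below in
 * hpsa_p600_dma_prefetch_quirk()).
 */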
4089 for (i = 0; i < ARRAY_SIZE(products); i++)
4090 if (*board_id == products[i].board_id)
4093 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
4094 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
4096 dev_warn(&pdev->dev, "unrecognized board ID: "
4097 "0x%08x, ignoring.\n", *board_id);
4100 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
4103 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
4104 unsigned long *memory_bar)
4108 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
4109 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
4110 /* addressing mode bits already removed */
4111 *memory_bar = pci_resource_start(pdev, i);
4112 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
4116 dev_warn(&pdev->dev, "no memory BAR found\n");
4120 static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
4121 void __iomem *vaddr, int wait_for_ready)
4126 iterations = HPSA_BOARD_READY_ITERATIONS;
4128 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
4130 for (i = 0; i < iterations; i++) {
4131 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
4132 if (wait_for_ready) {
4133 if (scratchpad == HPSA_FIRMWARE_READY)
4136 if (scratchpad != HPSA_FIRMWARE_READY)
4139 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
4141 dev_warn(&pdev->dev, "board not ready, timed out.\n");
4145 static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
4146 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
4149 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
4150 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
4151 *cfg_base_addr &= (u32) 0x0000ffff;
4152 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
4153 if (*cfg_base_addr_index == -1) {
4154 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
4160 static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
4164 u64 cfg_base_addr_index;
4168 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
4169 &cfg_base_addr_index, &cfg_offset);
4172 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
4173 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
4176 rc = write_driver_ver_to_cfgtable(h->cfgtable);
4179 /* Find performant mode table. */
4180 trans_offset = readl(&h->cfgtable->TransMethodOffset);
4181 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
4182 cfg_base_addr_index)+cfg_offset+trans_offset,
4183 sizeof(*h->transtable));
4189 static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
4191 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
4193 /* Limit commands in memory limited kdump scenario. */
4194 if (reset_devices && h->max_commands > 32)
4195 h->max_commands = 32;
4197 if (h->max_commands < 16) {
4198 dev_warn(&h->pdev->dev, "Controller reports "
4199 "max supported commands of %d, an obvious lie. "
4200 "Using 16. Ensure that firmware is up to date.\n",
4202 h->max_commands = 16;
4206 /* Interrogate the hardware for some limits:
4207 * max commands, max SG elements without chaining, and with chaining,
4208 * SG chain block size, etc.
4210 static void __devinit hpsa_find_board_params(struct ctlr_info *h)
4212 hpsa_get_max_perf_mode_cmds(h);
4213 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
4214 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
4216  * Limit in-command s/g elements to 32 to save DMA-able memory.
4217  * However, the spec says if the controller reports 0, use 31.
4219 h->max_cmd_sg_entries = 31;
4220 if (h->maxsgentries > 512) {
4221 h->max_cmd_sg_entries = 32;
4222 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
4223 h->maxsgentries--; /* save one for chain pointer */
4225 h->maxsgentries = 31; /* default to traditional values */
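/*
 * Worked example of the arithmetic above: a controller reporting 1024
 * scatter-gather elements gets max_cmd_sg_entries = 32, a chain block of
 * 1024 - 32 + 1 = 993 entries, and maxsgentries trimmed to 1023 so one
 * in-command slot can hold the chain pointer.
 */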
4229 /* Find out what task management functions are supported and cache them */
4230 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
4233 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
4235 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
4236 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
4242 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
4243 static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
4248 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
4250 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
4254 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
4255 * in a prefetch beyond physical memory.
4257 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
4261 if (h->board_id != 0x3225103C)
4263 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
4264 dma_prefetch |= 0x8000;
4265 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
4268 static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
4272 unsigned long flags;
4274 /* Under certain very rare conditions, this can take a while.
4275 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
4276 * as we enter this code.)
4278 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
4279 spin_lock_irqsave(&h->lock, flags);
4280 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
4281 spin_unlock_irqrestore(&h->lock, flags);
4282 if (!(doorbell_value & CFGTBL_ChangeReq))
4284 /* delay and try again */
4285 usleep_range(10000, 20000);
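/*
 * The loop above re-reads the doorbell under h->lock, sleeping 10-20 ms
 * between reads, and stops polling once the controller clears
 * CFGTBL_ChangeReq or MAX_CONFIG_WAIT iterations have elapsed.
 */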
4289 static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
4293 trans_support = readl(&(h->cfgtable->TransportSupport));
4294 if (!(trans_support & SIMPLE_MODE))
4297 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
4298 /* Update the field, and then ring the doorbell */
4299 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
4300 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
4301 hpsa_wait_for_mode_change_ack(h);
4302 print_cfg_table(&h->pdev->dev, h->cfgtable);
4303 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
4304 dev_warn(&h->pdev->dev,
4305 "unable to get board into simple mode\n");
4308 h->transMethod = CFGTBL_Trans_Simple;
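/*
 * Summary of the handshake above: request CFGTBL_Trans_Simple via the
 * config table's HostWrite.TransportRequest field, ring the SA5 doorbell,
 * wait for the change to be acknowledged, and then confirm that
 * TransportActive really reports simple mode before recording it.
 */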
4312 static int __devinit hpsa_pci_init(struct ctlr_info *h)
4314 int prod_index, err;
4316 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
4319 h->product_name = products[prod_index].product_name;
4320 h->access = *(products[prod_index].access);
4322 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
4323 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
4325 err = pci_enable_device(h->pdev);
4327 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
4331 /* Enable bus mastering (pci_disable_device may disable this) */
4332 pci_set_master(h->pdev);
4334 err = pci_request_regions(h->pdev, HPSA);
4336 dev_err(&h->pdev->dev,
4337 "cannot obtain PCI resources, aborting\n");
4340 hpsa_interrupt_mode(h);
4341 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
4343 goto err_out_free_res;
4344 h->vaddr = remap_pci_mem(h->paddr, 0x250);
4347 goto err_out_free_res;
4349 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
4351 goto err_out_free_res;
4352 err = hpsa_find_cfgtables(h);
4354 goto err_out_free_res;
4355 hpsa_find_board_params(h);
4357 if (!hpsa_CISS_signature_present(h)) {
4359 goto err_out_free_res;
4361 hpsa_enable_scsi_prefetch(h);
4362 hpsa_p600_dma_prefetch_quirk(h);
4363 err = hpsa_enter_simple_mode(h);
4365 goto err_out_free_res;
4370 iounmap(h->transtable);
4372 iounmap(h->cfgtable);
4375 pci_disable_device(h->pdev);
4376 pci_release_regions(h->pdev);
4380 static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
4384 #define HBA_INQUIRY_BYTE_COUNT 64
4385 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
4386 if (!h->hba_inquiry_data)
4388 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
4389 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
4391 kfree(h->hba_inquiry_data);
4392 h->hba_inquiry_data = NULL;
4396 static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
4403 /* Reset the controller with a PCI power-cycle or via doorbell */
4404 rc = hpsa_kdump_hard_reset_controller(pdev);
4406 /* -ENOTSUPP here means we cannot reset the controller
4407 * but it's already (and still) up and running in
4408 * "performant mode". Or, it might be 640x, which can't reset
4409  * due to concerns about shared battery-backed write cache (bbwc) on a 6402/6404 pair.
4411 if (rc == -ENOTSUPP)
4412 return rc; /* just try to do the kdump anyhow. */
4416 /* Now try to get the controller to respond to a no-op */
4417 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
4418 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
4419 if (hpsa_noop(pdev) == 0)
4422 dev_warn(&pdev->dev, "no-op failed%s\n",
4423 		(i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
4428 static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
4430 h->cmd_pool_bits = kzalloc(
4431 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
4432 sizeof(unsigned long), GFP_KERNEL);
4433 h->cmd_pool = pci_alloc_consistent(h->pdev,
4434 h->nr_cmds * sizeof(*h->cmd_pool),
4435 &(h->cmd_pool_dhandle));
4436 h->errinfo_pool = pci_alloc_consistent(h->pdev,
4437 h->nr_cmds * sizeof(*h->errinfo_pool),
4438 &(h->errinfo_pool_dhandle));
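/*
 * cmd_pool_bits is a one-bit-per-command allocation bitmap rounded up to
 * whole unsigned longs, while cmd_pool and errinfo_pool are DMA-coherent
 * arrays with one CommandList and one ErrorInfo entry per command slot.
 */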
4439 if ((h->cmd_pool_bits == NULL)
4440 || (h->cmd_pool == NULL)
4441 || (h->errinfo_pool == NULL)) {
4442 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
4448 static void hpsa_free_cmd_pool(struct ctlr_info *h)
4450 kfree(h->cmd_pool_bits);
4452 pci_free_consistent(h->pdev,
4453 h->nr_cmds * sizeof(struct CommandList),
4454 h->cmd_pool, h->cmd_pool_dhandle);
4455 if (h->errinfo_pool)
4456 pci_free_consistent(h->pdev,
4457 h->nr_cmds * sizeof(struct ErrorInfo),
4459 h->errinfo_pool_dhandle);
4462 static int hpsa_request_irq(struct ctlr_info *h,
4463 irqreturn_t (*msixhandler)(int, void *),
4464 irqreturn_t (*intxhandler)(int, void *))
4469 * initialize h->q[x] = x so that interrupt handlers know which
4472 for (i = 0; i < MAX_REPLY_QUEUES; i++)
4475 if (h->intr_mode == PERF_MODE_INT && h->msix_vector) {
4476 /* If performant mode and MSI-X, use multiple reply queues */
4477 for (i = 0; i < MAX_REPLY_QUEUES; i++)
4478 rc = request_irq(h->intr[i], msixhandler,
4482 /* Use single reply pool */
4483 if (h->msix_vector || h->msi_vector) {
4484 rc = request_irq(h->intr[h->intr_mode],
4485 msixhandler, 0, h->devname,
4486 &h->q[h->intr_mode]);
4488 rc = request_irq(h->intr[h->intr_mode],
4489 intxhandler, IRQF_SHARED, h->devname,
4490 &h->q[h->intr_mode]);
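/*
 * To summarize the branches above: performant mode with MSI-X registers
 * msixhandler once per reply queue vector; a single MSI-X/MSI vector
 * registers msixhandler alone; otherwise the legacy line is requested
 * with IRQF_SHARED and routed to intxhandler.
 */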
4494 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
4495 h->intr[h->intr_mode], h->devname);
4501 static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
4503 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
4504 HPSA_RESET_TYPE_CONTROLLER)) {
4505 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
4509 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
4510 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
4511 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
4515 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
4516 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
4517 dev_warn(&h->pdev->dev, "Board failed to become ready "
4518 "after soft reset.\n");
4525 static void free_irqs(struct ctlr_info *h)
4529 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
4530 /* Single reply queue, only one irq to free */
4532 free_irq(h->intr[i], &h->q[i]);
4536 for (i = 0; i < MAX_REPLY_QUEUES; i++)
4537 free_irq(h->intr[i], &h->q[i]);
4540 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
4543 #ifdef CONFIG_PCI_MSI
4544 if (h->msix_vector) {
4545 if (h->pdev->msix_enabled)
4546 pci_disable_msix(h->pdev);
4547 } else if (h->msi_vector) {
4548 if (h->pdev->msi_enabled)
4549 pci_disable_msi(h->pdev);
4551 #endif /* CONFIG_PCI_MSI */
4554 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
4556 hpsa_free_irqs_and_disable_msix(h);
4557 hpsa_free_sg_chain_blocks(h);
4558 hpsa_free_cmd_pool(h);
4559 kfree(h->blockFetchTable);
4560 pci_free_consistent(h->pdev, h->reply_pool_size,
4561 h->reply_pool, h->reply_pool_dhandle);
4565 iounmap(h->transtable);
4567 iounmap(h->cfgtable);
4568 pci_release_regions(h->pdev);
4572 static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
4574 assert_spin_locked(&lockup_detector_lock);
4575 if (!hpsa_lockup_detector)
4577 if (h->lockup_detected)
4578 return; /* already stopped the lockup detector */
4579 list_del(&h->lockup_list);
4582 /* Called when controller lockup detected. */
4583 static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
4585 struct CommandList *c = NULL;
4587 assert_spin_locked(&h->lock);
4588 /* Mark all outstanding commands as failed and complete them. */
4589 while (!list_empty(list)) {
4590 c = list_entry(list->next, struct CommandList, list);
4591 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
4596 static void controller_lockup_detected(struct ctlr_info *h)
4598 unsigned long flags;
4600 assert_spin_locked(&lockup_detector_lock);
4601 remove_ctlr_from_lockup_detector_list(h);
4602 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4603 spin_lock_irqsave(&h->lock, flags);
4604 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
4605 spin_unlock_irqrestore(&h->lock, flags);
4606 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
4607 h->lockup_detected);
4608 pci_disable_device(h->pdev);
4609 spin_lock_irqsave(&h->lock, flags);
4610 fail_all_cmds_on_list(h, &h->cmpQ);
4611 fail_all_cmds_on_list(h, &h->reqQ);
4612 spin_unlock_irqrestore(&h->lock, flags);
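/*
 * On lockup the code above masks controller interrupts, latches the
 * scratchpad value as the lockup code, disables the PCI device, and then
 * fails every command still sitting on the completion and request queues.
 */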
4615 static void detect_controller_lockup(struct ctlr_info *h)
4619 unsigned long flags;
4621 assert_spin_locked(&lockup_detector_lock);
4622 now = get_jiffies_64();
4623 /* If we've received an interrupt recently, we're ok. */
4624 if (time_after64(h->last_intr_timestamp +
4625 (h->heartbeat_sample_interval), now))
4629 * If we've already checked the heartbeat recently, we're ok.
4630 * This could happen if someone sends us a signal. We
4631 * otherwise don't care about signals in this thread.
4633 if (time_after64(h->last_heartbeat_timestamp +
4634 (h->heartbeat_sample_interval), now))
4637 /* If heartbeat has not changed since we last looked, we're not ok. */
4638 spin_lock_irqsave(&h->lock, flags);
4639 heartbeat = readl(&h->cfgtable->HeartBeat);
4640 spin_unlock_irqrestore(&h->lock, flags);
4641 if (h->last_heartbeat == heartbeat) {
4642 controller_lockup_detected(h);
4647 h->last_heartbeat = heartbeat;
4648 h->last_heartbeat_timestamp = now;
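/*
 * Net effect of the checks above: a lockup is declared only when no
 * interrupt has arrived within heartbeat_sample_interval and the config
 * table HeartBeat counter has not advanced since the previous sample.
 */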
4651 static int detect_controller_lockup_thread(void *notused)
4653 struct ctlr_info *h;
4654 unsigned long flags;
4657 struct list_head *this, *tmp;
4659 schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
4660 if (kthread_should_stop())
4662 spin_lock_irqsave(&lockup_detector_lock, flags);
4663 list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
4664 h = list_entry(this, struct ctlr_info, lockup_list);
4665 detect_controller_lockup(h);
4667 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4672 static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
4674 unsigned long flags;
4676 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
4677 spin_lock_irqsave(&lockup_detector_lock, flags);
4678 list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
4679 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4682 static void start_controller_lockup_detector(struct ctlr_info *h)
4684 /* Start the lockup detector thread if not already started */
4685 if (!hpsa_lockup_detector) {
4686 spin_lock_init(&lockup_detector_lock);
4687 hpsa_lockup_detector =
4688 kthread_run(detect_controller_lockup_thread,
4691 if (!hpsa_lockup_detector) {
4692 dev_warn(&h->pdev->dev,
4693 "Could not start lockup detector thread\n");
4696 add_ctlr_to_lockup_detector_list(h);
4699 static void stop_controller_lockup_detector(struct ctlr_info *h)
4701 unsigned long flags;
4703 spin_lock_irqsave(&lockup_detector_lock, flags);
4704 remove_ctlr_from_lockup_detector_list(h);
4705 /* If the list of controllers to monitor is empty, stop the thread */
4706 if (list_empty(&hpsa_ctlr_list)) {
4707 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4708 kthread_stop(hpsa_lockup_detector);
4709 spin_lock_irqsave(&lockup_detector_lock, flags);
4710 hpsa_lockup_detector = NULL;
4712 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4715 static int __devinit hpsa_init_one(struct pci_dev *pdev,
4716 const struct pci_device_id *ent)
4719 struct ctlr_info *h;
4720 int try_soft_reset = 0;
4721 unsigned long flags;
4723 if (number_of_controllers == 0)
4724 printk(KERN_INFO DRIVER_NAME "\n");
4726 rc = hpsa_init_reset_devices(pdev);
4728 if (rc != -ENOTSUPP)
4730 /* If the reset fails in a particular way (it has no way to do
4731 * a proper hard reset, so returns -ENOTSUPP) we can try to do
4732 * a soft reset once we get the controller configured up to the
4733 * point that it can accept a command.
4739 reinit_after_soft_reset:
4741 /* Command structures must be aligned on a 32-byte boundary because
4742  * the 5 lower bits of the address are used by the hardware and by
4743 * the driver. See comments in hpsa.h for more info.
4745 #define COMMANDLIST_ALIGNMENT 32
4746 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
4747 h = kzalloc(sizeof(*h), GFP_KERNEL);
4752 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
4753 INIT_LIST_HEAD(&h->cmpQ);
4754 INIT_LIST_HEAD(&h->reqQ);
4755 spin_lock_init(&h->lock);
4756 spin_lock_init(&h->scan_lock);
4757 rc = hpsa_pci_init(h);
4761 sprintf(h->devname, HPSA "%d", number_of_controllers);
4762 h->ctlr = number_of_controllers;
4763 number_of_controllers++;
4765 /* configure PCI DMA stuff */
4766 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4770 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4774 dev_err(&pdev->dev, "no suitable DMA available\n");
4779 /* make sure the board interrupts are off */
4780 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4782 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
4784 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
4785 h->devname, pdev->device,
4786 h->intr[h->intr_mode], dac ? "" : " not");
4787 if (hpsa_allocate_cmd_pool(h))
4789 if (hpsa_allocate_sg_chain_blocks(h))
4791 init_waitqueue_head(&h->scan_wait_queue);
4792 h->scan_finished = 1; /* no scan currently in progress */
4794 pci_set_drvdata(pdev, h);
4796 h->scsi_host = NULL;
4797 spin_lock_init(&h->devlock);
4798 hpsa_put_ctlr_into_performant_mode(h);
4800 /* At this point, the controller is ready to take commands.
4801 * Now, if reset_devices and the hard reset didn't work, try
4802 * the soft reset and see if that works.
4804 if (try_soft_reset) {
4806 /* This is kind of gross. We may or may not get a completion
4807 * from the soft reset command, and if we do, then the value
4808 * from the fifo may or may not be valid. So, we wait 10 secs
4809 * after the reset throwing away any completions we get during
4810 * that time. Unregister the interrupt handler and register
4811 * fake ones to scoop up any residual completions.
4813 spin_lock_irqsave(&h->lock, flags);
4814 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4815 spin_unlock_irqrestore(&h->lock, flags);
4817 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
4818 hpsa_intx_discard_completions);
4820 dev_warn(&h->pdev->dev, "Failed to request_irq after "
4825 rc = hpsa_kdump_soft_reset(h);
4827 /* Neither hard nor soft reset worked, we're hosed. */
4830 dev_info(&h->pdev->dev, "Board READY.\n");
4831 dev_info(&h->pdev->dev,
4832 "Waiting for stale completions to drain.\n");
4833 h->access.set_intr_mask(h, HPSA_INTR_ON);
4835 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4837 rc = controller_reset_failed(h->cfgtable);
4839 dev_info(&h->pdev->dev,
4840 "Soft reset appears to have failed.\n");
4842 /* since the controller's reset, we have to go back and re-init
4843 * everything. Easiest to just forget what we've done and do it
4846 hpsa_undo_allocations_after_kdump_soft_reset(h);
4849 /* don't go to clean4, we already unallocated */
4852 goto reinit_after_soft_reset;
4855 /* Turn the interrupts on so we can service requests */
4856 h->access.set_intr_mask(h, HPSA_INTR_ON);
4858 hpsa_hba_inquiry(h);
4859 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
4860 start_controller_lockup_detector(h);
4864 hpsa_free_sg_chain_blocks(h);
4865 hpsa_free_cmd_pool(h);
4873 static void hpsa_flush_cache(struct ctlr_info *h)
4876 struct CommandList *c;
4878 flush_buf = kzalloc(4, GFP_KERNEL);
4882 c = cmd_special_alloc(h);
4884 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
4887 fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
4888 RAID_CTLR_LUNID, TYPE_CMD);
4889 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
4890 if (c->err_info->CommandStatus != 0)
4891 dev_warn(&h->pdev->dev,
4892 "error flushing cache on controller\n");
4893 cmd_special_free(h, c);
4898 static void hpsa_shutdown(struct pci_dev *pdev)
4900 struct ctlr_info *h;
4902 h = pci_get_drvdata(pdev);
4903 /* Flush the cache first so that all data in the battery-backed
4904  * write cache reaches disk, then mask board interrupts and release
4905  * the interrupt resources before the controller is powered off.
4907 hpsa_flush_cache(h);
4908 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4909 hpsa_free_irqs_and_disable_msix(h);
4912 static void __devexit hpsa_free_device_info(struct ctlr_info *h)
4916 for (i = 0; i < h->ndevices; i++)
4920 static void __devexit hpsa_remove_one(struct pci_dev *pdev)
4922 struct ctlr_info *h;
4924 if (pci_get_drvdata(pdev) == NULL) {
4925 dev_err(&pdev->dev, "unable to remove device\n");
4928 h = pci_get_drvdata(pdev);
4929 stop_controller_lockup_detector(h);
4930 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
4931 hpsa_shutdown(pdev);
4933 iounmap(h->transtable);
4934 iounmap(h->cfgtable);
4935 hpsa_free_device_info(h);
4936 hpsa_free_sg_chain_blocks(h);
4937 pci_free_consistent(h->pdev,
4938 h->nr_cmds * sizeof(struct CommandList),
4939 h->cmd_pool, h->cmd_pool_dhandle);
4940 pci_free_consistent(h->pdev,
4941 h->nr_cmds * sizeof(struct ErrorInfo),
4942 h->errinfo_pool, h->errinfo_pool_dhandle);
4943 pci_free_consistent(h->pdev, h->reply_pool_size,
4944 h->reply_pool, h->reply_pool_dhandle);
4945 kfree(h->cmd_pool_bits);
4946 kfree(h->blockFetchTable);
4947 kfree(h->hba_inquiry_data);
4948 pci_disable_device(pdev);
4949 pci_release_regions(pdev);
4950 pci_set_drvdata(pdev, NULL);
4954 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
4955 __attribute__((unused)) pm_message_t state)
4960 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
4965 static struct pci_driver hpsa_pci_driver = {
4967 .probe = hpsa_init_one,
4968 .remove = __devexit_p(hpsa_remove_one),
4969 .id_table = hpsa_pci_device_id, /* id_table */
4970 .shutdown = hpsa_shutdown,
4971 .suspend = hpsa_suspend,
4972 .resume = hpsa_resume,
4975 /* Fill in bucket_map[], given nsgs (the max number of
4976 * scatter gather elements supported) and bucket[],
4977 * which is an array of 8 integers. The bucket[] array
4978 * contains 8 different DMA transfer sizes (in 16
4979 * byte increments) which the controller uses to fetch
4980 * commands. This function fills in bucket_map[], which
4981 * maps a given number of scatter gather elements to one of
4982 * the 8 DMA transfer sizes. The point of it is to allow the
4983 * controller to only do as much DMA as needed to fetch the
4984 * command, with the DMA transfer size encoded in the lower
4985 * bits of the command address.
4987 static void calc_bucket_map(int bucket[], int num_buckets,
4988 int nsgs, int *bucket_map)
4992 /* even a command with 0 SGs requires 4 blocks */
4993 #define MINIMUM_TRANSFER_BLOCKS 4
4994 #define NUM_BUCKETS 8
4995 /* Note, bucket_map must have nsgs+1 entries. */
4996 for (i = 0; i <= nsgs; i++) {
4997 /* Compute size of a command with i SG entries */
4998 size = i + MINIMUM_TRANSFER_BLOCKS;
4999 b = num_buckets; /* Assume the biggest bucket */
5000 /* Find the bucket that is just big enough */
5001 for (j = 0; j < 8; j++) {
5002 if (bucket[j] >= size) {
5007 /* for a command with i SG entries, use bucket b. */
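/*
 * Illustrative example: with the bft[] sizes used below, a command
 * carrying 3 SG entries needs 3 + MINIMUM_TRANSFER_BLOCKS = 7 blocks, so
 * the first bucket big enough for it is bucket[2] = 8 and that is the
 * bucket recorded for it in bucket_map[].
 */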
5012 static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
5016 unsigned long register_value;
5018 /* This is a bit complicated. There are 8 registers on
5019  * the controller which we write to in order to tell it the 8
5020  * different command sizes it may see. It's a way of
5021 * reducing the DMA done to fetch each command. Encoded into
5022 * each command's tag are 3 bits which communicate to the controller
5023 * which of the eight sizes that command fits within. The size of
5024 * each command depends on how many scatter gather entries there are.
5025 * Each SG entry requires 16 bytes. The eight registers are programmed
5026 * with the number of 16-byte blocks a command of that size requires.
5027  * The smallest command possible requires 5 such 16-byte blocks;
5028  * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
5029 * blocks. Note, this only extends to the SG entries contained
5030 * within the command block, and does not extend to chained blocks
5031 * of SG elements. bft[] contains the eight values we write to
5032 * the registers. They are not evenly distributed, but have more
5033 * sizes for small commands, and fewer sizes for larger commands.
5035 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
5036 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
5037 /* 5 = 1 s/g entry or 4k
5038 * 6 = 2 s/g entry or 8k
5039 * 8 = 4 s/g entry or 16k
5040 * 10 = 6 s/g entry or 24k
5043 /* Controller spec: zero out this buffer. */
5044 memset(h->reply_pool, 0, h->reply_pool_size);
5046 bft[7] = SG_ENTRIES_IN_CMD + 4;
5047 calc_bucket_map(bft, ARRAY_SIZE(bft),
5048 SG_ENTRIES_IN_CMD, h->blockFetchTable);
5049 for (i = 0; i < 8; i++)
5050 writel(bft[i], &h->transtable->BlockFetch[i]);
5052 /* size of controller ring buffer */
5053 writel(h->max_commands, &h->transtable->RepQSize);
5054 writel(h->nreply_queues, &h->transtable->RepQCount);
5055 writel(0, &h->transtable->RepQCtrAddrLow32);
5056 writel(0, &h->transtable->RepQCtrAddrHigh32);
5058 for (i = 0; i < h->nreply_queues; i++) {
5059 writel(0, &h->transtable->RepQAddr[i].upper);
5060 writel(h->reply_pool_dhandle +
5061 (h->max_commands * sizeof(u64) * i),
5062 &h->transtable->RepQAddr[i].lower);
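/*
 * Reply pool layout implied by the loop above: queue i occupies
 * max_commands consecutive u64 slots starting at
 * reply_pool_dhandle + i * max_commands * sizeof(u64), and only the low
 * 32 bits of each queue address are programmed (the upper half is zero).
 */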
5065 writel(CFGTBL_Trans_Performant | use_short_tags |
5066 CFGTBL_Trans_enable_directed_msix,
5067 &(h->cfgtable->HostWrite.TransportRequest));
5068 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
5069 hpsa_wait_for_mode_change_ack(h);
5070 register_value = readl(&(h->cfgtable->TransportActive));
5071 if (!(register_value & CFGTBL_Trans_Performant)) {
5072 dev_warn(&h->pdev->dev, "unable to get board into"
5073 " performant mode\n");
5076 /* Change the access methods to the performant access methods */
5077 h->access = SA5_performant_access;
5078 h->transMethod = CFGTBL_Trans_Performant;
5081 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
5086 if (hpsa_simple_mode)
5089 trans_support = readl(&(h->cfgtable->TransportSupport));
5090 if (!(trans_support & PERFORMANT_MODE))
5093 h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1;
5094 hpsa_get_max_perf_mode_cmds(h);
5095 /* Performant mode ring buffer and supporting data structures */
5096 h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
5097 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
5098 &(h->reply_pool_dhandle));
5100 for (i = 0; i < h->nreply_queues; i++) {
5101 h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
5102 h->reply_queue[i].size = h->max_commands;
5103 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
5104 h->reply_queue[i].current_entry = 0;
5107 /* Need a block fetch table for performant mode */
5108 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
5109 sizeof(u32)), GFP_KERNEL);
5111 if ((h->reply_pool == NULL)
5112 || (h->blockFetchTable == NULL))
5115 hpsa_enter_performant_mode(h,
5116 trans_support & CFGTBL_Trans_use_short_tags);
5122 pci_free_consistent(h->pdev, h->reply_pool_size,
5123 h->reply_pool, h->reply_pool_dhandle);
5124 kfree(h->blockFetchTable);
5128  * This is it. Register the PCI driver information for the cards we control;
5129 * the OS will call our registered routines when it finds one of our cards.
5131 static int __init hpsa_init(void)
5133 return pci_register_driver(&hpsa_pci_driver);
5136 static void __exit hpsa_cleanup(void)
5138 pci_unregister_driver(&hpsa_pci_driver);
5141 module_init(hpsa_init);
5142 module_exit(hpsa_cleanup);