d8b9ba251fbd28730889750f344a0745d266f7f3
[firefly-linux-kernel-4.4.55.git] / drivers / scsi / pmcraid.c
1 /*
2  * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
3  *
4  * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
5  *             PMC-Sierra Inc
6  *
7  * Copyright (C) 2008, 2009 PMC Sierra Inc
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307,
22  * USA
23  *
24  */
25 #include <linux/fs.h>
26 #include <linux/init.h>
27 #include <linux/types.h>
28 #include <linux/errno.h>
29 #include <linux/kernel.h>
30 #include <linux/ioport.h>
31 #include <linux/delay.h>
32 #include <linux/pci.h>
33 #include <linux/wait.h>
34 #include <linux/spinlock.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
37 #include <linux/blkdev.h>
38 #include <linux/firmware.h>
39 #include <linux/module.h>
40 #include <linux/moduleparam.h>
41 #include <linux/hdreg.h>
42 #include <linux/io.h>
43 #include <linux/slab.h>
44 #include <asm/irq.h>
45 #include <asm/processor.h>
46 #include <linux/libata.h>
47 #include <linux/mutex.h>
48 #include <scsi/scsi.h>
49 #include <scsi/scsi_host.h>
50 #include <scsi/scsi_device.h>
51 #include <scsi/scsi_tcq.h>
52 #include <scsi/scsi_eh.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsicam.h>
55
56 #include "pmcraid.h"
57
58 /*
59  *   Module configuration parameters
60  */
61 static unsigned int pmcraid_debug_log;
62 static unsigned int pmcraid_disable_aen;
63 static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST;
64 static unsigned int pmcraid_enable_msix;
65
66 /*
67  * Data structures to support multiple adapters by the LLD.
68  * pmcraid_adapter_count - count of configured adapters
69  */
70 static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);
71
72 /*
73  * Supporting user-level control interface through IOCTL commands.
74  * pmcraid_major - major number to use
75  * pmcraid_minor - minor number(s) to use
76  */
77 static unsigned int pmcraid_major;
78 static struct class *pmcraid_class;
79 DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
80
81 /*
82  * Module parameters
83  */
84 MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
85 MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
86 MODULE_LICENSE("GPL");
87 MODULE_VERSION(PMCRAID_DRIVER_VERSION);
88
89 module_param_named(log_level, pmcraid_log_level, uint, (S_IRUGO | S_IWUSR));
90 MODULE_PARM_DESC(log_level,
91                  "Enables firmware error code logging, default :1 high-severity"
92                  " errors, 2: all errors including high-severity errors,"
93                  " 0: disables logging");
94
95 module_param_named(debug, pmcraid_debug_log, uint, (S_IRUGO | S_IWUSR));
96 MODULE_PARM_DESC(debug,
97                  "Enable driver verbose message logging. Set 1 to enable."
98                  "(default: 0)");
99
100 module_param_named(disable_aen, pmcraid_disable_aen, uint, (S_IRUGO | S_IWUSR));
101 MODULE_PARM_DESC(disable_aen,
102                  "Disable driver aen notifications to apps. Set 1 to disable."
103                  "(default: 0)");
104
105 /* chip specific constants for PMC MaxRAID controllers (same for
106  * 0x5220 and 0x8010
107  */
108 static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
109         {
110          .ioastatus = 0x0,
111          .ioarrin = 0x00040,
112          .mailbox = 0x7FC30,
113          .global_intr_mask = 0x00034,
114          .ioa_host_intr = 0x0009C,
115          .ioa_host_intr_clr = 0x000A0,
116          .ioa_host_msix_intr = 0x7FC40,
117          .ioa_host_mask = 0x7FC28,
118          .ioa_host_mask_clr = 0x7FC28,
119          .host_ioa_intr = 0x00020,
120          .host_ioa_intr_clr = 0x00020,
121          .transop_timeout = 300
122          }
123 };
124
125 /*
126  * PCI device ids supported by pmcraid driver
127  */
128 static struct pci_device_id pmcraid_pci_table[] = {
129         { PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
130           0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
131         },
132         {}
133 };
134
135 MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
136
137
138
139 /**
140  * pmcraid_slave_alloc - Prepare for commands to a device
141  * @scsi_dev: scsi device struct
142  *
143  * This function is called by mid-layer prior to sending any command to the new
144  * device. Stores resource entry details of the device in scsi_device struct.
145  * Queuecommand uses the resource handle and other details to fill up IOARCB
146  * while sending commands to the device.
147  *
148  * Return value:
149  *        0 on success / -ENXIO if device does not exist
150  */
151 static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
152 {
153         struct pmcraid_resource_entry *temp, *res = NULL;
154         struct pmcraid_instance *pinstance;
155         u8 target, bus, lun;
156         unsigned long lock_flags;
157         int rc = -ENXIO;
158         u16 fw_version;
159
160         pinstance = shost_priv(scsi_dev->host);
161
162         fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
163
164         /* Driver exposes VSET and GSCSI resources only; all other device types
165          * are not exposed. Resource list is synchronized using resource lock
166          * so any traversal or modifications to the list should be done inside
167          * this lock
168          */
169         spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
170         list_for_each_entry(temp, &pinstance->used_res_q, queue) {
171
172                 /* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
173                 if (RES_IS_VSET(temp->cfg_entry)) {
174                         if (fw_version <= PMCRAID_FW_VERSION_1)
175                                 target = temp->cfg_entry.unique_flags1;
176                         else
177                                 target = temp->cfg_entry.array_id & 0xFF;
178
179                         if (target > PMCRAID_MAX_VSET_TARGETS)
180                                 continue;
181                         bus = PMCRAID_VSET_BUS_ID;
182                         lun = 0;
183                 } else if (RES_IS_GSCSI(temp->cfg_entry)) {
184                         target = RES_TARGET(temp->cfg_entry.resource_address);
185                         bus = PMCRAID_PHYS_BUS_ID;
186                         lun = RES_LUN(temp->cfg_entry.resource_address);
187                 } else {
188                         continue;
189                 }
190
191                 if (bus == scsi_dev->channel &&
192                     target == scsi_dev->id &&
193                     lun == scsi_dev->lun) {
194                         res = temp;
195                         break;
196                 }
197         }
198
199         if (res) {
200                 res->scsi_dev = scsi_dev;
201                 scsi_dev->hostdata = res;
202                 res->change_detected = 0;
203                 atomic_set(&res->read_failures, 0);
204                 atomic_set(&res->write_failures, 0);
205                 rc = 0;
206         }
207         spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
208         return rc;
209 }
210
211 /**
212  * pmcraid_slave_configure - Configures a SCSI device
213  * @scsi_dev: scsi device struct
214  *
215  * This function is executed by SCSI mid layer just after a device is first
216  * scanned (i.e. it has responded to an INQUIRY). For VSET resources, the
217  * timeout value (default 30s) will be over-written to a higher value (60s)
218  * and max_sectors value will be over-written to 512. It also sets queue depth
219  * to host->cmd_per_lun value
220  *
221  * Return value:
222  *        0 on success
223  */
224 static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
225 {
226         struct pmcraid_resource_entry *res = scsi_dev->hostdata;
227
228         if (!res)
229                 return 0;
230
231         /* LLD exposes VSETs and Enclosure devices only */
232         if (RES_IS_GSCSI(res->cfg_entry) &&
233             scsi_dev->type != TYPE_ENCLOSURE)
234                 return -ENXIO;
235
236         pmcraid_info("configuring %x:%x:%x:%x\n",
237                      scsi_dev->host->unique_id,
238                      scsi_dev->channel,
239                      scsi_dev->id,
240                      (u8)scsi_dev->lun);
241
242         if (RES_IS_GSCSI(res->cfg_entry)) {
243                 scsi_dev->allow_restart = 1;
244         } else if (RES_IS_VSET(res->cfg_entry)) {
245                 scsi_dev->allow_restart = 1;
246                 blk_queue_rq_timeout(scsi_dev->request_queue,
247                                      PMCRAID_VSET_IO_TIMEOUT);
248                 blk_queue_max_hw_sectors(scsi_dev->request_queue,
249                                       PMCRAID_VSET_MAX_SECTORS);
250         }
251
252         /*
253          * We never want to report TCQ support for these types of devices.
254          */
255         if (!RES_IS_GSCSI(res->cfg_entry) && !RES_IS_VSET(res->cfg_entry))
256                 scsi_dev->tagged_supported = 0;
257
258         return 0;
259 }
260
261 /**
262  * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it
263  *
264  * @scsi_dev: scsi device struct
265  *
266  * This is called by mid-layer before removing a device. Pointer assignments
267  * done in pmcraid_slave_alloc will be reset to NULL here.
268  *
269  * Return value
270  *   none
271  */
272 static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
273 {
274         struct pmcraid_resource_entry *res;
275
276         res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
277
278         if (res)
279                 res->scsi_dev = NULL;
280
281         scsi_dev->hostdata = NULL;
282 }
283
284 /**
285  * pmcraid_change_queue_depth - Change the device's queue depth
286  * @scsi_dev: scsi device struct
287  * @depth: depth to set
288  * @reason: calling context
289  *
290  * Return value
291  *      actual depth set
292  */
293 static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth,
294                                       int reason)
295 {
296         if (reason != SCSI_QDEPTH_DEFAULT)
297                 return -EOPNOTSUPP;
298
299         if (depth > PMCRAID_MAX_CMD_PER_LUN)
300                 depth = PMCRAID_MAX_CMD_PER_LUN;
301
302         scsi_adjust_queue_depth(scsi_dev, depth);
303
304         return scsi_dev->queue_depth;
305 }
306
307 /**
308  * pmcraid_init_cmdblk - initializes a command block
309  *
310  * @cmd: pointer to struct pmcraid_cmd to be initialized
311  * @index: if >=0 first time initialization; otherwise reinitialization
312  *
313  * Return Value
314  *       None
315  */
316 void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
317 {
318         struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
319         dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;
320
321         if (index >= 0) {
322                 /* first time initialization (called from  probe) */
323                 u32 ioasa_offset =
324                         offsetof(struct pmcraid_control_block, ioasa);
325
326                 cmd->index = index;
327                 ioarcb->response_handle = cpu_to_le32(index << 2);
328                 ioarcb->ioarcb_bus_addr = cpu_to_le64(dma_addr);
329                 ioarcb->ioasa_bus_addr = cpu_to_le64(dma_addr + ioasa_offset);
330                 ioarcb->ioasa_len = cpu_to_le16(sizeof(struct pmcraid_ioasa));
331         } else {
332                 /* re-initialization of various lengths, called once command is
333                  * processed by IOA
334                  */
335                 memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN);
336                 ioarcb->hrrq_id = 0;
337                 ioarcb->request_flags0 = 0;
338                 ioarcb->request_flags1 = 0;
339                 ioarcb->cmd_timeout = 0;
340                 ioarcb->ioarcb_bus_addr &= (~0x1FULL);
341                 ioarcb->ioadl_bus_addr = 0;
342                 ioarcb->ioadl_length = 0;
343                 ioarcb->data_transfer_length = 0;
344                 ioarcb->add_cmd_param_length = 0;
345                 ioarcb->add_cmd_param_offset = 0;
346                 cmd->ioa_cb->ioasa.ioasc = 0;
347                 cmd->ioa_cb->ioasa.residual_data_length = 0;
348                 cmd->time_left = 0;
349         }
350
351         cmd->cmd_done = NULL;
352         cmd->scsi_cmd = NULL;
353         cmd->release = 0;
354         cmd->completion_req = 0;
355         cmd->sense_buffer = 0;
356         cmd->sense_buffer_dma = 0;
357         cmd->dma_handle = 0;
358         init_timer(&cmd->timer);
359 }
360
361 /**
362  * pmcraid_reinit_cmdblk - reinitialize a command block
363  *
364  * @cmd: pointer to struct pmcraid_cmd to be reinitialized
365  *
366  * Return Value
367  *       None
368  */
369 static void pmcraid_reinit_cmdblk(struct pmcraid_cmd *cmd)
370 {
371         pmcraid_init_cmdblk(cmd, -1);
372 }
373
374 /**
375  * pmcraid_get_free_cmd - get a free cmd block from command block pool
376  * @pinstance: adapter instance structure
377  *
378  * Return Value:
379  *      returns pointer to cmd block or NULL if no blocks are available
380  */
381 static struct pmcraid_cmd *pmcraid_get_free_cmd(
382         struct pmcraid_instance *pinstance
383 )
384 {
385         struct pmcraid_cmd *cmd = NULL;
386         unsigned long lock_flags;
387
388         /* free cmd block list is protected by free_pool_lock */
389         spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
390
391         if (!list_empty(&pinstance->free_cmd_pool)) {
392                 cmd = list_entry(pinstance->free_cmd_pool.next,
393                                  struct pmcraid_cmd, free_list);
394                 list_del(&cmd->free_list);
395         }
396         spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
397
398         /* Initialize the command block before giving it the caller */
399         if (cmd != NULL)
400                 pmcraid_reinit_cmdblk(cmd);
401         return cmd;
402 }
403
404 /**
405  * pmcraid_return_cmd - return a completed command block back into free pool
406  * @cmd: pointer to the command block
407  *
408  * Return Value:
409  *      nothing
410  */
411 void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
412 {
413         struct pmcraid_instance *pinstance = cmd->drv_inst;
414         unsigned long lock_flags;
415
416         spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
417         list_add_tail(&cmd->free_list, &pinstance->free_cmd_pool);
418         spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
419 }
420
421 /**
422  * pmcraid_read_interrupts -  reads IOA interrupts
423  *
424  * @pinstance: pointer to adapter instance structure
425  *
426  * Return value
427  *       interrupts read from IOA
428  */
429 static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance)
430 {
431         return (pinstance->interrupt_mode) ?
432                 ioread32(pinstance->int_regs.ioa_host_msix_interrupt_reg) :
433                 ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
434 }
435
436 /**
437  * pmcraid_disable_interrupts - Masks and clears all specified interrupts
438  *
439  * @pinstance: pointer to per adapter instance structure
440  * @intrs: interrupts to disable
441  *
442  * Return Value
443  *       None
444  */
445 static void pmcraid_disable_interrupts(
446         struct pmcraid_instance *pinstance,
447         u32 intrs
448 )
449 {
450         u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
451         u32 nmask = gmask | GLOBAL_INTERRUPT_MASK;
452
453         iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg);
454         iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
455         ioread32(pinstance->int_regs.global_interrupt_mask_reg);
456
457         if (!pinstance->interrupt_mode) {
458                 iowrite32(intrs,
459                         pinstance->int_regs.ioa_host_interrupt_mask_reg);
460                 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
461         }
462 }
463
464 /**
465  * pmcraid_enable_interrupts - Enables specified interrupts
466  *
467  * @pinstance: pointer to per adapter instance structure
468  * @intr: interrupts to enable
469  *
470  * Return Value
471  *       None
472  */
473 static void pmcraid_enable_interrupts(
474         struct pmcraid_instance *pinstance,
475         u32 intrs
476 )
477 {
478         u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
479         u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
480
481         iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
482
483         if (!pinstance->interrupt_mode) {
484                 iowrite32(~intrs,
485                          pinstance->int_regs.ioa_host_interrupt_mask_reg);
486                 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
487         }
488
489         pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n",
490                 ioread32(pinstance->int_regs.global_interrupt_mask_reg),
491                 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg));
492 }
493
494 /**
495  * pmcraid_clr_trans_op - clear trans to op interrupt
496  *
497  * @pinstance: pointer to per adapter instance structure
498  *
499  * Return Value
500  *       None
501  */
502 static void pmcraid_clr_trans_op(
503         struct pmcraid_instance *pinstance
504 )
505 {
506         unsigned long lock_flags;
507
508         if (!pinstance->interrupt_mode) {
509                 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
510                         pinstance->int_regs.ioa_host_interrupt_mask_reg);
511                 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
512                 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
513                         pinstance->int_regs.ioa_host_interrupt_clr_reg);
514                 ioread32(pinstance->int_regs.ioa_host_interrupt_clr_reg);
515         }
516
517         if (pinstance->reset_cmd != NULL) {
518                 del_timer(&pinstance->reset_cmd->timer);
519                 spin_lock_irqsave(
520                         pinstance->host->host_lock, lock_flags);
521                 pinstance->reset_cmd->cmd_done(pinstance->reset_cmd);
522                 spin_unlock_irqrestore(
523                         pinstance->host->host_lock, lock_flags);
524         }
525 }
526
527 /**
528  * pmcraid_reset_type - Determine the required reset type
529  * @pinstance: pointer to adapter instance structure
530  *
531  * IOA requires hard reset if any of the following conditions is true.
532  * 1. If HRRQ valid interrupt is not masked
533  * 2. IOA reset alert doorbell is set
534  * 3. If there are any error interrupts
535  */
536 static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
537 {
538         u32 mask;
539         u32 intrs;
540         u32 alerts;
541
542         mask = ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
543         intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
544         alerts = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
545
546         if ((mask & INTRS_HRRQ_VALID) == 0 ||
547             (alerts & DOORBELL_IOA_RESET_ALERT) ||
548             (intrs & PMCRAID_ERROR_INTERRUPTS)) {
549                 pmcraid_info("IOA requires hard reset\n");
550                 pinstance->ioa_hard_reset = 1;
551         }
552
553         /* If unit check is active, trigger the dump */
554         if (intrs & INTRS_IOA_UNIT_CHECK)
555                 pinstance->ioa_unit_check = 1;
556 }
557
558 /**
559  * pmcraid_bist_done - completion function for PCI BIST
560  * @cmd: pointer to reset command
561  * Return Value
562  *      none
563  */
564
565 static void pmcraid_ioa_reset(struct pmcraid_cmd *);
566
567 static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
568 {
569         struct pmcraid_instance *pinstance = cmd->drv_inst;
570         unsigned long lock_flags;
571         int rc;
572         u16 pci_reg;
573
574         rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
575
576         /* If PCI config space can't be accessed wait for another two secs */
577         if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) &&
578             cmd->time_left > 0) {
579                 pmcraid_info("BIST not complete, waiting another 2 secs\n");
580                 cmd->timer.expires = jiffies + cmd->time_left;
581                 cmd->time_left = 0;
582                 cmd->timer.data = (unsigned long)cmd;
583                 cmd->timer.function =
584                         (void (*)(unsigned long))pmcraid_bist_done;
585                 add_timer(&cmd->timer);
586         } else {
587                 cmd->time_left = 0;
588                 pmcraid_info("BIST is complete, proceeding with reset\n");
589                 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
590                 pmcraid_ioa_reset(cmd);
591                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
592         }
593 }
594
595 /**
596  * pmcraid_start_bist - starts BIST
597  * @cmd: pointer to reset cmd
598  * Return Value
599  *   none
600  */
601 static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
602 {
603         struct pmcraid_instance *pinstance = cmd->drv_inst;
604         u32 doorbells, intrs;
605
606         /* proceed with bist and wait for 2 seconds */
607         iowrite32(DOORBELL_IOA_START_BIST,
608                 pinstance->int_regs.host_ioa_interrupt_reg);
609         doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
610         intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
611         pmcraid_info("doorbells after start bist: %x intrs: %x\n",
612                       doorbells, intrs);
613
614         cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
615         cmd->timer.data = (unsigned long)cmd;
616         cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
617         cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done;
618         add_timer(&cmd->timer);
619 }
620
621 /**
622  * pmcraid_reset_alert_done - completion routine for reset_alert
623  * @cmd: pointer to command block used in reset sequence
624  * Return value
625  *  None
626  */
627 static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
628 {
629         struct pmcraid_instance *pinstance = cmd->drv_inst;
630         u32 status = ioread32(pinstance->ioa_status);
631         unsigned long lock_flags;
632
633         /* if the critical operation in progress bit is set or the wait times
634          * out, invoke reset engine to proceed with hard reset. If there is
635          * some more time to wait, restart the timer
636          */
637         if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) ||
638             cmd->time_left <= 0) {
639                 pmcraid_info("critical op is reset proceeding with reset\n");
640                 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
641                 pmcraid_ioa_reset(cmd);
642                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
643         } else {
644                 pmcraid_info("critical op is not yet reset waiting again\n");
645                 /* restart timer if some more time is available to wait */
646                 cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
647                 cmd->timer.data = (unsigned long)cmd;
648                 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
649                 cmd->timer.function =
650                         (void (*)(unsigned long))pmcraid_reset_alert_done;
651                 add_timer(&cmd->timer);
652         }
653 }
654
655 /**
656  * pmcraid_reset_alert - alerts IOA for a possible reset
657  * @cmd : command block to be used for reset sequence.
658  *
659  * Return Value
660  *      returns 0 if pci config-space is accessible and RESET_DOORBELL is
661  *      successfully written to IOA. Returns non-zero in case pci_config_space
662  *      is not accessible
663  */
664 static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32);
665 static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
666 {
667         struct pmcraid_instance *pinstance = cmd->drv_inst;
668         u32 doorbells;
669         int rc;
670         u16 pci_reg;
671
672         /* If we are able to access IOA PCI config space, alert IOA that we are
673          * going to reset it soon. This enables IOA to preserv persistent error
674          * data if any. In case memory space is not accessible, proceed with
675          * BIST or slot_reset
676          */
677         rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
678         if ((rc == PCIBIOS_SUCCESSFUL) && (pci_reg & PCI_COMMAND_MEMORY)) {
679
680                 /* wait for IOA permission i.e until CRITICAL_OPERATION bit is
681                  * reset IOA doesn't generate any interrupts when CRITICAL
682                  * OPERATION bit is reset. A timer is started to wait for this
683                  * bit to be reset.
684                  */
685                 cmd->time_left = PMCRAID_RESET_TIMEOUT;
686                 cmd->timer.data = (unsigned long)cmd;
687                 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
688                 cmd->timer.function =
689                         (void (*)(unsigned long))pmcraid_reset_alert_done;
690                 add_timer(&cmd->timer);
691
692                 iowrite32(DOORBELL_IOA_RESET_ALERT,
693                         pinstance->int_regs.host_ioa_interrupt_reg);
694                 doorbells =
695                         ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
696                 pmcraid_info("doorbells after reset alert: %x\n", doorbells);
697         } else {
698                 pmcraid_info("PCI config is not accessible starting BIST\n");
699                 pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
700                 pmcraid_start_bist(cmd);
701         }
702 }
703
704 /**
705  * pmcraid_timeout_handler -  Timeout handler for internally generated ops
706  *
707  * @cmd : pointer to command structure, that got timedout
708  *
709  * This function blocks host requests and initiates an adapter reset.
710  *
711  * Return value:
712  *   None
713  */
714 static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
715 {
716         struct pmcraid_instance *pinstance = cmd->drv_inst;
717         unsigned long lock_flags;
718
719         dev_info(&pinstance->pdev->dev,
720                 "Adapter being reset due to cmd(CDB[0] = %x) timeout\n",
721                 cmd->ioa_cb->ioarcb.cdb[0]);
722
723         /* Command timeouts result in hard reset sequence. The command that got
724          * timed out may be the one used as part of reset sequence. In this
725          * case restart reset sequence using the same command block even if
726          * reset is in progress. Otherwise fail this command and get a free
727          * command block to restart the reset sequence.
728          */
729         spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
730         if (!pinstance->ioa_reset_in_progress) {
731                 pinstance->ioa_reset_attempts = 0;
732                 cmd = pmcraid_get_free_cmd(pinstance);
733
734                 /* If we are out of command blocks, just return here itself.
735                  * Some other command's timeout handler can do the reset job
736                  */
737                 if (cmd == NULL) {
738                         spin_unlock_irqrestore(pinstance->host->host_lock,
739                                                lock_flags);
740                         pmcraid_err("no free cmnd block for timeout handler\n");
741                         return;
742                 }
743
744                 pinstance->reset_cmd = cmd;
745                 pinstance->ioa_reset_in_progress = 1;
746         } else {
747                 pmcraid_info("reset is already in progress\n");
748
749                 if (pinstance->reset_cmd != cmd) {
750                         /* This command should have been given to IOA, this
751                          * command will be completed by fail_outstanding_cmds
752                          * anyway
753                          */
754                         pmcraid_err("cmd is pending but reset in progress\n");
755                 }
756
757                 /* If this command was being used as part of the reset
758                  * sequence, set cmd_done pointer to pmcraid_ioa_reset. This
759                  * causes fail_outstanding_commands not to return the command
760                  * block back to free pool
761                  */
762                 if (cmd == pinstance->reset_cmd)
763                         cmd->cmd_done = pmcraid_ioa_reset;
764         }
765
766         /* Notify apps of important IOA bringup/bringdown sequences */
767         if (pinstance->scn.ioa_state != PMC_DEVICE_EVENT_RESET_START &&
768             pinstance->scn.ioa_state != PMC_DEVICE_EVENT_SHUTDOWN_START)
769                 pmcraid_notify_ioastate(pinstance,
770                                         PMC_DEVICE_EVENT_RESET_START);
771
772         pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
773         scsi_block_requests(pinstance->host);
774         pmcraid_reset_alert(cmd);
775         spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
776 }
777
778 /**
779  * pmcraid_internal_done - completion routine for internally generated cmds
780  *
781  * @cmd: command that got response from IOA
782  *
783  * Return Value:
784  *       none
785  */
786 static void pmcraid_internal_done(struct pmcraid_cmd *cmd)
787 {
788         pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
789                      cmd->ioa_cb->ioarcb.cdb[0],
790                      le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
791
792         /* Some of the internal commands are sent with callers blocking for the
793          * response. Same will be indicated as part of cmd->completion_req
794          * field. Response path needs to wake up any waiters waiting for cmd
795          * completion if this flag is set.
796          */
797         if (cmd->completion_req) {
798                 cmd->completion_req = 0;
799                 complete(&cmd->wait_for_completion);
800         }
801
802         /* most of the internal commands are completed by caller itself, so
803          * no need to return the command block back to free pool until we are
804          * required to do so (e.g once done with initialization).
805          */
806         if (cmd->release) {
807                 cmd->release = 0;
808                 pmcraid_return_cmd(cmd);
809         }
810 }
811
812 /**
813  * pmcraid_reinit_cfgtable_done - done function for cfg table reinitialization
814  *
815  * @cmd: command that got response from IOA
816  *
817  * This routine is called after driver re-reads configuration table due to a
818  * lost CCN. It returns the command block back to free pool and schedules
819  * worker thread to add/delete devices into the system.
820  *
821  * Return Value:
822  *       none
823  */
824 static void pmcraid_reinit_cfgtable_done(struct pmcraid_cmd *cmd)
825 {
826         pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
827                      cmd->ioa_cb->ioarcb.cdb[0],
828                      le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
829
830         if (cmd->release) {
831                 cmd->release = 0;
832                 pmcraid_return_cmd(cmd);
833         }
834         pmcraid_info("scheduling worker for config table reinitialization\n");
835         schedule_work(&cmd->drv_inst->worker_q);
836 }
837
838 /**
839  * pmcraid_erp_done - Process completion of SCSI error response from device
840  * @cmd: pmcraid_command
841  *
842  * This function copies the sense buffer into the scsi_cmd struct and completes
843  * scsi_cmd by calling scsi_done function.
844  *
845  * Return value:
846  *  none
847  */
848 static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
849 {
850         struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
851         struct pmcraid_instance *pinstance = cmd->drv_inst;
852         u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
853
854         if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) {
855                 scsi_cmd->result |= (DID_ERROR << 16);
856                 scmd_printk(KERN_INFO, scsi_cmd,
857                             "command CDB[0] = %x failed with IOASC: 0x%08X\n",
858                             cmd->ioa_cb->ioarcb.cdb[0], ioasc);
859         }
860
861         /* if we had allocated sense buffers for request sense, copy the sense
862          * release the buffers
863          */
864         if (cmd->sense_buffer != NULL) {
865                 memcpy(scsi_cmd->sense_buffer,
866                        cmd->sense_buffer,
867                        SCSI_SENSE_BUFFERSIZE);
868                 pci_free_consistent(pinstance->pdev,
869                                     SCSI_SENSE_BUFFERSIZE,
870                                     cmd->sense_buffer, cmd->sense_buffer_dma);
871                 cmd->sense_buffer = NULL;
872                 cmd->sense_buffer_dma = 0;
873         }
874
875         scsi_dma_unmap(scsi_cmd);
876         pmcraid_return_cmd(cmd);
877         scsi_cmd->scsi_done(scsi_cmd);
878 }
879
880 /**
881  * pmcraid_fire_command - sends an IOA command to adapter
882  *
883  * This function adds the given block into pending command list
884  * and returns without waiting
885  *
886  * @cmd : command to be sent to the device
887  *
888  * Return Value
889  *      None
890  */
891 static void _pmcraid_fire_command(struct pmcraid_cmd *cmd)
892 {
893         struct pmcraid_instance *pinstance = cmd->drv_inst;
894         unsigned long lock_flags;
895
896         /* Add this command block to pending cmd pool. We do this prior to
897          * writting IOARCB to ioarrin because IOA might complete the command
898          * by the time we are about to add it to the list. Response handler
899          * (isr/tasklet) looks for cmd block in the pending pending list.
900          */
901         spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
902         list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool);
903         spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
904         atomic_inc(&pinstance->outstanding_cmds);
905
906         /* driver writes lower 32-bit value of IOARCB address only */
907         mb();
908         iowrite32(le32_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr),
909                   pinstance->ioarrin);
910 }
911
912 /**
913  * pmcraid_send_cmd - fires a command to IOA
914  *
915  * This function also sets up timeout function, and command completion
916  * function
917  *
918  * @cmd: pointer to the command block to be fired to IOA
919  * @cmd_done: command completion function, called once IOA responds
920  * @timeout: timeout to wait for this command completion
921  * @timeout_func: timeout handler
922  *
923  * Return value
924  *   none
925  */
926 static void pmcraid_send_cmd(
927         struct pmcraid_cmd *cmd,
928         void (*cmd_done) (struct pmcraid_cmd *),
929         unsigned long timeout,
930         void (*timeout_func) (struct pmcraid_cmd *)
931 )
932 {
933         /* initialize done function */
934         cmd->cmd_done = cmd_done;
935
936         if (timeout_func) {
937                 /* setup timeout handler */
938                 cmd->timer.data = (unsigned long)cmd;
939                 cmd->timer.expires = jiffies + timeout;
940                 cmd->timer.function = (void (*)(unsigned long))timeout_func;
941                 add_timer(&cmd->timer);
942         }
943
944         /* fire the command to IOA */
945         _pmcraid_fire_command(cmd);
946 }
947
948 /**
949  * pmcraid_ioa_shutdown_done - completion function for IOA shutdown command
950  * @cmd: pointer to the command block used for sending IOA shutdown command
951  *
952  * Return value
953  *  None
954  */
955 static void pmcraid_ioa_shutdown_done(struct pmcraid_cmd *cmd)
956 {
957         struct pmcraid_instance *pinstance = cmd->drv_inst;
958         unsigned long lock_flags;
959
960         spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
961         pmcraid_ioa_reset(cmd);
962         spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
963 }
964
965 /**
966  * pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa
967  *
968  * @cmd: pointer to the command block used as part of reset sequence
969  *
970  * Return Value
971  *  None
972  */
973 static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
974 {
975         pmcraid_info("response for Cancel CCN CDB[0] = %x ioasc = %x\n",
976                      cmd->ioa_cb->ioarcb.cdb[0],
977                      le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
978
979         /* Note that commands sent during reset require next command to be sent
980          * to IOA. Hence reinit the done function as well as timeout function
981          */
982         pmcraid_reinit_cmdblk(cmd);
983         cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD;
984         cmd->ioa_cb->ioarcb.resource_handle =
985                 cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
986         cmd->ioa_cb->ioarcb.cdb[0] = PMCRAID_IOA_SHUTDOWN;
987         cmd->ioa_cb->ioarcb.cdb[1] = PMCRAID_SHUTDOWN_NORMAL;
988
989         /* fire shutdown command to hardware. */
990         pmcraid_info("firing normal shutdown command (%d) to IOA\n",
991                      le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle));
992
993         pmcraid_notify_ioastate(cmd->drv_inst, PMC_DEVICE_EVENT_SHUTDOWN_START);
994
995         pmcraid_send_cmd(cmd, pmcraid_ioa_shutdown_done,
996                          PMCRAID_SHUTDOWN_TIMEOUT,
997                          pmcraid_timeout_handler);
998 }
999
1000 /**
1001  * pmcraid_get_fwversion_done - completion function for get_fwversion
1002  *
1003  * @cmd: pointer to command block used to send INQUIRY command
1004  *
1005  * Return Value
1006  *      none
1007  */
1008 static void pmcraid_querycfg(struct pmcraid_cmd *);
1009
1010 static void pmcraid_get_fwversion_done(struct pmcraid_cmd *cmd)
1011 {
1012         struct pmcraid_instance *pinstance = cmd->drv_inst;
1013         u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
1014         unsigned long lock_flags;
1015
1016         /* configuration table entry size depends on firmware version. If fw
1017          * version is not known, it is not possible to interpret IOA config
1018          * table
1019          */
1020         if (ioasc) {
1021                 pmcraid_err("IOA Inquiry failed with %x\n", ioasc);
1022                 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
1023                 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1024                 pmcraid_reset_alert(cmd);
1025                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
1026         } else  {
1027                 pmcraid_querycfg(cmd);
1028         }
1029 }
1030
1031 /**
1032  * pmcraid_get_fwversion - reads firmware version information
1033  *
1034  * @cmd: pointer to command block used to send INQUIRY command
1035  *
1036  * Return Value
1037  *      none
1038  */
1039 static void pmcraid_get_fwversion(struct pmcraid_cmd *cmd)
1040 {
1041         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
1042         struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
1043         struct pmcraid_instance *pinstance = cmd->drv_inst;
1044         u16 data_size = sizeof(struct pmcraid_inquiry_data);
1045
1046         pmcraid_reinit_cmdblk(cmd);
1047         ioarcb->request_type = REQ_TYPE_SCSI;
1048         ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
1049         ioarcb->cdb[0] = INQUIRY;
1050         ioarcb->cdb[1] = 1;
1051         ioarcb->cdb[2] = 0xD0;
1052         ioarcb->cdb[3] = (data_size >> 8) & 0xFF;
1053         ioarcb->cdb[4] = data_size & 0xFF;
1054
1055         /* Since entire inquiry data it can be part of IOARCB itself
1056          */
1057         ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
1058                                         offsetof(struct pmcraid_ioarcb,
1059                                                 add_data.u.ioadl[0]));
1060         ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
1061         ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
1062
1063         ioarcb->request_flags0 |= NO_LINK_DESCS;
1064         ioarcb->data_transfer_length = cpu_to_le32(data_size);
1065         ioadl = &(ioarcb->add_data.u.ioadl[0]);
1066         ioadl->flags = IOADL_FLAGS_LAST_DESC;
1067         ioadl->address = cpu_to_le64(pinstance->inq_data_baddr);
1068         ioadl->data_len = cpu_to_le32(data_size);
1069
1070         pmcraid_send_cmd(cmd, pmcraid_get_fwversion_done,
1071                          PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
1072 }
1073
1074 /**
1075  * pmcraid_identify_hrrq - registers host rrq buffers with IOA
1076  * @cmd: pointer to command block to be used for identify hrrq
1077  *
1078  * Return Value
1079  *       none
1080  */
1081 static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
1082 {
1083         struct pmcraid_instance *pinstance = cmd->drv_inst;
1084         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
1085         int index = cmd->hrrq_index;
1086         __be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]);
1087         u32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD);
1088         void (*done_function)(struct pmcraid_cmd *);
1089
1090         pmcraid_reinit_cmdblk(cmd);
1091         cmd->hrrq_index = index + 1;
1092
1093         if (cmd->hrrq_index < pinstance->num_hrrq) {
1094                 done_function = pmcraid_identify_hrrq;
1095         } else {
1096                 cmd->hrrq_index = 0;
1097                 done_function = pmcraid_get_fwversion;
1098         }
1099
1100         /* Initialize ioarcb */
1101         ioarcb->request_type = REQ_TYPE_IOACMD;
1102         ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
1103
1104         /* initialize the hrrq number where IOA will respond to this command */
1105         ioarcb->hrrq_id = index;
1106         ioarcb->cdb[0] = PMCRAID_IDENTIFY_HRRQ;
1107         ioarcb->cdb[1] = index;
1108
1109         /* IOA expects 64-bit pci address to be written in B.E format
1110          * (i.e cdb[2]=MSByte..cdb[9]=LSB.
1111          */
1112         pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb:index => %llx:%llx:%x\n",
1113                      hrrq_addr, ioarcb->ioarcb_bus_addr, index);
1114
1115         memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr));
1116         memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size));
1117
1118         /* Subsequent commands require HRRQ identification to be successful.
1119          * Note that this gets called even during reset from SCSI mid-layer
1120          * or tasklet
1121          */
1122         pmcraid_send_cmd(cmd, done_function,
1123                          PMCRAID_INTERNAL_TIMEOUT,
1124                          pmcraid_timeout_handler);
1125 }
1126
1127 static void pmcraid_process_ccn(struct pmcraid_cmd *cmd);
1128 static void pmcraid_process_ldn(struct pmcraid_cmd *cmd);
1129
1130 /**
1131  * pmcraid_send_hcam_cmd - send an initialized command block(HCAM) to IOA
1132  *
1133  * @cmd: initialized command block pointer
1134  *
1135  * Return Value
1136  *   none
1137  */
1138 static void pmcraid_send_hcam_cmd(struct pmcraid_cmd *cmd)
1139 {
1140         if (cmd->ioa_cb->ioarcb.cdb[1] == PMCRAID_HCAM_CODE_CONFIG_CHANGE)
1141                 atomic_set(&(cmd->drv_inst->ccn.ignore), 0);
1142         else
1143                 atomic_set(&(cmd->drv_inst->ldn.ignore), 0);
1144
1145         pmcraid_send_cmd(cmd, cmd->cmd_done, 0, NULL);
1146 }
1147
1148 /**
1149  * pmcraid_init_hcam - send an initialized command block(HCAM) to IOA
1150  *
1151  * @pinstance: pointer to adapter instance structure
1152  * @type: HCAM type
1153  *
1154  * Return Value
1155  *   pointer to initialized pmcraid_cmd structure or NULL
1156  */
1157 static struct pmcraid_cmd *pmcraid_init_hcam
1158 (
1159         struct pmcraid_instance *pinstance,
1160         u8 type
1161 )
1162 {
1163         struct pmcraid_cmd *cmd;
1164         struct pmcraid_ioarcb *ioarcb;
1165         struct pmcraid_ioadl_desc *ioadl;
1166         struct pmcraid_hostrcb *hcam;
1167         void (*cmd_done) (struct pmcraid_cmd *);
1168         dma_addr_t dma;
1169         int rcb_size;
1170
1171         cmd = pmcraid_get_free_cmd(pinstance);
1172
1173         if (!cmd) {
1174                 pmcraid_err("no free command blocks for hcam\n");
1175                 return cmd;
1176         }
1177
1178         if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) {
1179                 rcb_size = sizeof(struct pmcraid_hcam_ccn_ext);
1180                 cmd_done = pmcraid_process_ccn;
1181                 dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE;
1182                 hcam = &pinstance->ccn;
1183         } else {
1184                 rcb_size = sizeof(struct pmcraid_hcam_ldn);
1185                 cmd_done = pmcraid_process_ldn;
1186                 dma = pinstance->ldn.baddr + PMCRAID_AEN_HDR_SIZE;
1187                 hcam = &pinstance->ldn;
1188         }
1189
1190         /* initialize command pointer used for HCAM registration */
1191         hcam->cmd = cmd;
1192
1193         ioarcb = &cmd->ioa_cb->ioarcb;
1194         ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
1195                                         offsetof(struct pmcraid_ioarcb,
1196                                                 add_data.u.ioadl[0]));
1197         ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
1198         ioadl = ioarcb->add_data.u.ioadl;
1199
1200         /* Initialize ioarcb */
1201         ioarcb->request_type = REQ_TYPE_HCAM;
1202         ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
1203         ioarcb->cdb[0] = PMCRAID_HOST_CONTROLLED_ASYNC;
1204         ioarcb->cdb[1] = type;
1205         ioarcb->cdb[7] = (rcb_size >> 8) & 0xFF;
1206         ioarcb->cdb[8] = (rcb_size) & 0xFF;
1207
1208         ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
1209
1210         ioadl[0].flags |= IOADL_FLAGS_READ_LAST;
1211         ioadl[0].data_len = cpu_to_le32(rcb_size);
1212         ioadl[0].address = cpu_to_le32(dma);
1213
1214         cmd->cmd_done = cmd_done;
1215         return cmd;
1216 }
1217
1218 /**
1219  * pmcraid_send_hcam - Send an HCAM to IOA
1220  * @pinstance: ioa config struct
1221  * @type: HCAM type
1222  *
1223  * This function will send a Host Controlled Async command to IOA.
1224  *
1225  * Return value:
1226  *      none
1227  */
1228 static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type)
1229 {
1230         struct pmcraid_cmd *cmd = pmcraid_init_hcam(pinstance, type);
1231         pmcraid_send_hcam_cmd(cmd);
1232 }
1233
1234
1235 /**
1236  * pmcraid_prepare_cancel_cmd - prepares a command block to abort another
1237  *
1238  * @cmd: pointer to cmd that is used as cancelling command
1239  * @cmd_to_cancel: pointer to the command that needs to be cancelled
1240  */
1241 static void pmcraid_prepare_cancel_cmd(
1242         struct pmcraid_cmd *cmd,
1243         struct pmcraid_cmd *cmd_to_cancel
1244 )
1245 {
1246         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
1247         __be64 ioarcb_addr = cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr;
1248
1249         /* Get the resource handle to where the command to be aborted has been
1250          * sent.
1251          */
1252         ioarcb->resource_handle = cmd_to_cancel->ioa_cb->ioarcb.resource_handle;
1253         ioarcb->request_type = REQ_TYPE_IOACMD;
1254         memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
1255         ioarcb->cdb[0] = PMCRAID_ABORT_CMD;
1256
1257         /* IOARCB address of the command to be cancelled is given in
1258          * cdb[2]..cdb[9] is Big-Endian format. Note that length bits in
1259          * IOARCB address are not masked.
1260          */
1261         ioarcb_addr = cpu_to_be64(ioarcb_addr);
1262         memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr));
1263 }
1264
1265 /**
1266  * pmcraid_cancel_hcam - sends ABORT task to abort a given HCAM
1267  *
1268  * @cmd: command to be used as cancelling command
1269  * @type: HCAM type
1270  * @cmd_done: op done function for the cancelling command
1271  */
1272 static void pmcraid_cancel_hcam(
1273         struct pmcraid_cmd *cmd,
1274         u8 type,
1275         void (*cmd_done) (struct pmcraid_cmd *)
1276 )
1277 {
1278         struct pmcraid_instance *pinstance;
1279         struct pmcraid_hostrcb  *hcam;
1280
1281         pinstance = cmd->drv_inst;
1282         hcam =  (type == PMCRAID_HCAM_CODE_LOG_DATA) ?
1283                 &pinstance->ldn : &pinstance->ccn;
1284
1285         /* prepare for cancelling previous hcam command. If the HCAM is
1286          * currently not pending with IOA, we would have hcam->cmd as non-null
1287          */
1288         if (hcam->cmd == NULL)
1289                 return;
1290
1291         pmcraid_prepare_cancel_cmd(cmd, hcam->cmd);
1292
1293         /* writing to IOARRIN must be protected by host_lock, as mid-layer
1294          * schedule queuecommand while we are doing this
1295          */
1296         pmcraid_send_cmd(cmd, cmd_done,
1297                          PMCRAID_INTERNAL_TIMEOUT,
1298                          pmcraid_timeout_handler);
1299 }
1300
1301 /**
1302  * pmcraid_cancel_ccn - cancel CCN HCAM already registered with IOA
1303  *
1304  * @cmd: command block to be used for cancelling the HCAM
1305  */
1306 static void pmcraid_cancel_ccn(struct pmcraid_cmd *cmd)
1307 {
1308         pmcraid_info("response for Cancel LDN CDB[0] = %x ioasc = %x\n",
1309                      cmd->ioa_cb->ioarcb.cdb[0],
1310                      le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
1311
1312         pmcraid_reinit_cmdblk(cmd);
1313
1314         pmcraid_cancel_hcam(cmd,
1315                             PMCRAID_HCAM_CODE_CONFIG_CHANGE,
1316                             pmcraid_ioa_shutdown);
1317 }
1318
1319 /**
1320  * pmcraid_cancel_ldn - cancel LDN HCAM already registered with IOA
1321  *
1322  * @cmd: command block to be used for cancelling the HCAM
1323  */
1324 static void pmcraid_cancel_ldn(struct pmcraid_cmd *cmd)
1325 {
1326         pmcraid_cancel_hcam(cmd,
1327                             PMCRAID_HCAM_CODE_LOG_DATA,
1328                             pmcraid_cancel_ccn);
1329 }
1330
1331 /**
1332  * pmcraid_expose_resource - check if the resource can be exposed to OS
1333  *
1334  * @fw_version: firmware version code
1335  * @cfgte: pointer to configuration table entry of the resource
1336  *
1337  * Return value:
1338  *      true if resource can be added to midlayer, false(0) otherwise
1339  */
1340 static int pmcraid_expose_resource(u16 fw_version,
1341                                    struct pmcraid_config_table_entry *cfgte)
1342 {
1343         int retval = 0;
1344
1345         if (cfgte->resource_type == RES_TYPE_VSET) {
1346                 if (fw_version <= PMCRAID_FW_VERSION_1)
1347                         retval = ((cfgte->unique_flags1 & 0x80) == 0);
1348                 else
1349                         retval = ((cfgte->unique_flags0 & 0x80) == 0 &&
1350                                   (cfgte->unique_flags1 & 0x80) == 0);
1351
1352         } else if (cfgte->resource_type == RES_TYPE_GSCSI)
1353                 retval = (RES_BUS(cfgte->resource_address) !=
1354                                 PMCRAID_VIRTUAL_ENCL_BUS_ID);
1355         return retval;
1356 }
1357
1358 /* attributes supported by pmcraid_event_family */
1359 enum {
1360         PMCRAID_AEN_ATTR_UNSPEC,
1361         PMCRAID_AEN_ATTR_EVENT,
1362         __PMCRAID_AEN_ATTR_MAX,
1363 };
1364 #define PMCRAID_AEN_ATTR_MAX (__PMCRAID_AEN_ATTR_MAX - 1)
1365
1366 /* commands supported by pmcraid_event_family */
1367 enum {
1368         PMCRAID_AEN_CMD_UNSPEC,
1369         PMCRAID_AEN_CMD_EVENT,
1370         __PMCRAID_AEN_CMD_MAX,
1371 };
1372 #define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
1373
1374 static struct genl_multicast_group pmcraid_mcgrps[] = {
1375         { .name = "events", /* not really used - see ID discussion below */ },
1376 };
1377
1378 static struct genl_family pmcraid_event_family = {
1379         /*
1380          * Due to prior multicast group abuse (the code having assumed that
1381          * the family ID can be used as a multicast group ID) we need to
1382          * statically allocate a family (and thus group) ID.
1383          */
1384         .id = GENL_ID_PMCRAID,
1385         .name = "pmcraid",
1386         .version = 1,
1387         .maxattr = PMCRAID_AEN_ATTR_MAX,
1388         .mcgrps = pmcraid_mcgrps,
1389         .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps),
1390 };
1391
1392 /**
1393  * pmcraid_netlink_init - registers pmcraid_event_family
1394  *
1395  * Return value:
1396  *      0 if the pmcraid_event_family is successfully registered
1397  *      with netlink generic, non-zero otherwise
1398  */
1399 static int pmcraid_netlink_init(void)
1400 {
1401         int result;
1402
1403         result = genl_register_family(&pmcraid_event_family);
1404
1405         if (result)
1406                 return result;
1407
1408         pmcraid_info("registered NETLINK GENERIC group: %d\n",
1409                      pmcraid_event_family.id);
1410
1411         return result;
1412 }
1413
1414 /**
1415  * pmcraid_netlink_release - unregisters pmcraid_event_family
1416  *
1417  * Return value:
1418  *      none
1419  */
1420 static void pmcraid_netlink_release(void)
1421 {
1422         genl_unregister_family(&pmcraid_event_family);
1423 }
1424
1425 /**
1426  * pmcraid_notify_aen - sends event msg to user space application
1427  * @pinstance: pointer to adapter instance structure
1428  * @type: HCAM type
1429  *
1430  * Return value:
1431  *      0 if success, error value in case of any failure.
1432  */
1433 static int pmcraid_notify_aen(
1434         struct pmcraid_instance *pinstance,
1435         struct pmcraid_aen_msg  *aen_msg,
1436         u32    data_size
1437 )
1438 {
1439         struct sk_buff *skb;
1440         void *msg_header;
1441         u32  total_size, nla_genl_hdr_total_size;
1442         int result;
1443
1444         aen_msg->hostno = (pinstance->host->unique_id << 16 |
1445                            MINOR(pinstance->cdev.dev));
1446         aen_msg->length = data_size;
1447
1448         data_size += sizeof(*aen_msg);
1449
1450         total_size = nla_total_size(data_size);
1451         /* Add GENL_HDR to total_size */
1452         nla_genl_hdr_total_size =
1453                 (total_size + (GENL_HDRLEN +
1454                 ((struct genl_family *)&pmcraid_event_family)->hdrsize)
1455                  + NLMSG_HDRLEN);
1456         skb = genlmsg_new(nla_genl_hdr_total_size, GFP_ATOMIC);
1457
1458
1459         if (!skb) {
1460                 pmcraid_err("Failed to allocate aen data SKB of size: %x\n",
1461                              total_size);
1462                 return -ENOMEM;
1463         }
1464
1465         /* add the genetlink message header */
1466         msg_header = genlmsg_put(skb, 0, 0,
1467                                  &pmcraid_event_family, 0,
1468                                  PMCRAID_AEN_CMD_EVENT);
1469         if (!msg_header) {
1470                 pmcraid_err("failed to copy command details\n");
1471                 nlmsg_free(skb);
1472                 return -ENOMEM;
1473         }
1474
1475         result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg);
1476
1477         if (result) {
1478                 pmcraid_err("failed to copy AEN attribute data\n");
1479                 nlmsg_free(skb);
1480                 return -EINVAL;
1481         }
1482
1483         /* send genetlink multicast message to notify applications */
1484         result = genlmsg_end(skb, msg_header);
1485
1486         if (result < 0) {
1487                 pmcraid_err("genlmsg_end failed\n");
1488                 nlmsg_free(skb);
1489                 return result;
1490         }
1491
1492         result = genlmsg_multicast(&pmcraid_event_family, skb,
1493                                    0, 0, GFP_ATOMIC);
1494
1495         /* If there are no listeners, genlmsg_multicast may return a non-zero
1496          * value; this is only logged, not treated as a fatal error.
1497          */
1498         if (result)
1499                 pmcraid_info("error (%x) sending aen event message\n", result);
1500         return result;
1501 }
1502
1503 /**
1504  * pmcraid_notify_ccn - notifies about CCN event msg to user space
1505  * @pinstance: pointer to adapter instance structure
1506  *
1507  * Return value:
1508  *      0 if success, error value in case of any failure
1509  */
1510 static int pmcraid_notify_ccn(struct pmcraid_instance *pinstance)
1511 {
1512         return pmcraid_notify_aen(pinstance,
1513                                 pinstance->ccn.msg,
1514                                 pinstance->ccn.hcam->data_len +
1515                                 sizeof(struct pmcraid_hcam_hdr));
1516 }
1517
1518 /**
1519  * pmcraid_notify_ldn - notifies about LDN event msg to user space
1520  * @pinstance: pointer to adapter instance structure
1521  *
1522  * Return value:
1523  *      0 if success, error value in case of any failure
1524  */
1525 static int pmcraid_notify_ldn(struct pmcraid_instance *pinstance)
1526 {
1527         return pmcraid_notify_aen(pinstance,
1528                                 pinstance->ldn.msg,
1529                                 pinstance->ldn.hcam->data_len +
1530                                 sizeof(struct pmcraid_hcam_hdr));
1531 }
1532
1533 /**
1534  * pmcraid_notify_ioastate - sends IOA state event msg to user space
1535  * @pinstance: pointer to adapter instance structure
1536  * @evt: controller state event to be sent
1537  *
1538  * Return value:
1539  *      none
1540  */
1541 static void pmcraid_notify_ioastate(struct pmcraid_instance *pinstance, u32 evt)
1542 {
1543         pinstance->scn.ioa_state = evt;
1544         pmcraid_notify_aen(pinstance,
1545                           &pinstance->scn.msg,
1546                           sizeof(u32));
1547 }
1548
1549 /**
1550  * pmcraid_handle_config_change - Handle a config change from the adapter
1551  * @pinstance: pointer to per adapter instance structure
1552  *
1553  * Return value:
1554  *  none
1555  */
1556
1557 static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1558 {
1559         struct pmcraid_config_table_entry *cfg_entry;
1560         struct pmcraid_hcam_ccn *ccn_hcam;
1561         struct pmcraid_cmd *cmd;
1562         struct pmcraid_cmd *cfgcmd;
1563         struct pmcraid_resource_entry *res = NULL;
1564         unsigned long lock_flags;
1565         unsigned long host_lock_flags;
1566         u32 new_entry = 1;
1567         u32 hidden_entry = 0;
1568         u16 fw_version;
1569         int rc;
1570
1571         ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
1572         cfg_entry = &ccn_hcam->cfg_entry;
1573         fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
1574
1575         pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x \
1576                  res: %x:%x:%x:%x\n",
1577                  pinstance->ccn.hcam->ilid,
1578                  pinstance->ccn.hcam->op_code,
1579                 ((pinstance->ccn.hcam->timestamp1) |
1580                 ((pinstance->ccn.hcam->timestamp2 & 0xffffffffLL) << 32)),
1581                  pinstance->ccn.hcam->notification_type,
1582                  pinstance->ccn.hcam->notification_lost,
1583                  pinstance->ccn.hcam->flags,
1584                  pinstance->host->unique_id,
1585                  RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID :
1586                  (RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID :
1587                         RES_BUS(cfg_entry->resource_address)),
1588                  RES_IS_VSET(*cfg_entry) ?
1589                         (fw_version <= PMCRAID_FW_VERSION_1 ?
1590                                 cfg_entry->unique_flags1 :
1591                                         cfg_entry->array_id & 0xFF) :
1592                         RES_TARGET(cfg_entry->resource_address),
1593                  RES_LUN(cfg_entry->resource_address));
1594
1595
1596         /* If this HCAM indicates a lost notification, read the config table */
1597         if (pinstance->ccn.hcam->notification_lost) {
1598                 cfgcmd = pmcraid_get_free_cmd(pinstance);
1599                 if (cfgcmd) {
1600                         pmcraid_info("lost CCN, reading config table\n");
1601                         pinstance->reinit_cfg_table = 1;
1602                         pmcraid_querycfg(cfgcmd);
1603                 } else {
1604                         pmcraid_err("lost CCN, no free cmd for querycfg\n");
1605                 }
1606                 goto out_notify_apps;
1607         }
1608
1609         /* If this resource is not going to be added to mid-layer, just notify
1610          * applications and return. If this notification is about hiding a VSET
1611          * resource, check if it was exposed already.
1612          */
1613         if (pinstance->ccn.hcam->notification_type ==
1614             NOTIFICATION_TYPE_ENTRY_CHANGED &&
1615             cfg_entry->resource_type == RES_TYPE_VSET) {
1616
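                /* bit 7 of the per-VSET flags marks the resource as hidden;
                 * which byte carries this flag depends on the firmware version
                 */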
1617                 if (fw_version <= PMCRAID_FW_VERSION_1)
1618                         hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
1619                 else
1620                         hidden_entry = (cfg_entry->unique_flags0 & 0x80) != 0;
1621
1622         } else if (!pmcraid_expose_resource(fw_version, cfg_entry)) {
1623                 goto out_notify_apps;
1624         }
1625
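        /* Check the already-exposed resources for a matching resource address
         * to decide whether this CCN describes a new entry or a change to an
         * existing one.
         */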
1626         spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
1627         list_for_each_entry(res, &pinstance->used_res_q, queue) {
1628                 rc = memcmp(&res->cfg_entry.resource_address,
1629                             &cfg_entry->resource_address,
1630                             sizeof(cfg_entry->resource_address));
1631                 if (!rc) {
1632                         new_entry = 0;
1633                         break;
1634                 }
1635         }
1636
1637         if (new_entry) {
1638
1639                 if (hidden_entry) {
1640                         spin_unlock_irqrestore(&pinstance->resource_lock,
1641                                                 lock_flags);
1642                         goto out_notify_apps;
1643                 }
1644
1645                 /* If there are more resources than the driver can manage,
1646                  * do not notify applications about this CCN. Just ignore
1647                  * the notification and re-register the same HCAM
1648                  */
1649                 if (list_empty(&pinstance->free_res_q)) {
1650                         spin_unlock_irqrestore(&pinstance->resource_lock,
1651                                                 lock_flags);
1652                         pmcraid_err("too many resources attached\n");
1653                         spin_lock_irqsave(pinstance->host->host_lock,
1654                                           host_lock_flags);
1655                         pmcraid_send_hcam(pinstance,
1656                                           PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1657                         spin_unlock_irqrestore(pinstance->host->host_lock,
1658                                                host_lock_flags);
1659                         return;
1660                 }
1661
1662                 res = list_entry(pinstance->free_res_q.next,
1663                                  struct pmcraid_resource_entry, queue);
1664
1665                 list_del(&res->queue);
1666                 res->scsi_dev = NULL;
1667                 res->reset_progress = 0;
1668                 list_add_tail(&res->queue, &pinstance->used_res_q);
1669         }
1670
1671         memcpy(&res->cfg_entry, cfg_entry, pinstance->config_table_entry_size);
1672
1673         if (pinstance->ccn.hcam->notification_type ==
1674             NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
1675                 if (res->scsi_dev) {
1676                         if (fw_version <= PMCRAID_FW_VERSION_1)
1677                                 res->cfg_entry.unique_flags1 &= 0x7F;
1678                         else
1679                                 res->cfg_entry.array_id &= 0xFF;
1680                         res->change_detected = RES_CHANGE_DEL;
1681                         res->cfg_entry.resource_handle =
1682                                 PMCRAID_INVALID_RES_HANDLE;
1683                         schedule_work(&pinstance->worker_q);
1684                 } else {
1685                         /* This may be one of the non-exposed resources */
1686                         list_move_tail(&res->queue, &pinstance->free_res_q);
1687                 }
1688         } else if (!res->scsi_dev) {
1689                 res->change_detected = RES_CHANGE_ADD;
1690                 schedule_work(&pinstance->worker_q);
1691         }
1692         spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
1693
1694 out_notify_apps:
1695
1696         /* Notify configuration changes to registered applications.*/
1697         if (!pmcraid_disable_aen)
1698                 pmcraid_notify_ccn(pinstance);
1699
1700         cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1701         if (cmd)
1702                 pmcraid_send_hcam_cmd(cmd);
1703 }
1704
1705 /**
1706  * pmcraid_get_error_info - return error table entry for an ioasc
1707  * @ioasc: ioasc code
1708  * Return Value
1709  *       pointer to the matching error table entry, NULL if none is found
1710  */
1711 static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc)
1712 {
1713         int i;
1714         for (i = 0; i < ARRAY_SIZE(pmcraid_ioasc_error_table); i++) {
1715                 if (pmcraid_ioasc_error_table[i].ioasc_code == ioasc)
1716                         return &pmcraid_ioasc_error_table[i];
1717         }
1718         return NULL;
1719 }
1720
1721 /**
1722  * pmcraid_ioasc_logger - log IOASC information based on user settings
1723  * @ioasc: ioasc code
1724  * @cmd: pointer to command that resulted in 'ioasc'
1725  */
1726 void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
1727 {
1728         struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc);
1729
1730         if (error_info == NULL ||
1731                 cmd->drv_inst->current_log_level < error_info->log_level)
1732                 return;
1733
1734         /* log the error string */
1735         pmcraid_err("cmd [%x] for resource %x failed with %x(%s)\n",
1736                 cmd->ioa_cb->ioarcb.cdb[0],
1737                 cmd->ioa_cb->ioarcb.resource_handle,
1738                 le32_to_cpu(ioasc), error_info->error_string);
1739 }
1740
1741 /**
1742  * pmcraid_handle_error_log - Handle an error log notification (LDN) from the IOA
1743  *
1744  * @pinstance: pointer to per adapter instance structure
1745  *
1746  * Return value:
1747  *  none
1748  */
1749 static void pmcraid_handle_error_log(struct pmcraid_instance *pinstance)
1750 {
1751         struct pmcraid_hcam_ldn *hcam_ldn;
1752         u32 ioasc;
1753
1754         hcam_ldn = (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
1755
1756         pmcraid_info
1757                 ("LDN(%x): %x type: %x lost: %x flags: %x overlay id: %x\n",
1758                  pinstance->ldn.hcam->ilid,
1759                  pinstance->ldn.hcam->op_code,
1760                  pinstance->ldn.hcam->notification_type,
1761                  pinstance->ldn.hcam->notification_lost,
1762                  pinstance->ldn.hcam->flags,
1763                  pinstance->ldn.hcam->overlay_id);
1764
1765         /* log only the errors, no need to log informational log entries */
1766         if (pinstance->ldn.hcam->notification_type !=
1767             NOTIFICATION_TYPE_ERROR_LOG)
1768                 return;
1769
1770         if (pinstance->ldn.hcam->notification_lost ==
1771             HOSTRCB_NOTIFICATIONS_LOST)
1772                 dev_info(&pinstance->pdev->dev, "Error notifications lost\n");
1773
1774         ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc);
1775
1776         if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
1777                 ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) {
1778                 dev_info(&pinstance->pdev->dev,
1779                         "UnitAttention due to IOA Bus Reset\n");
1780                 scsi_report_bus_reset(
1781                         pinstance->host,
1782                         RES_BUS(hcam_ldn->error_log.fd_ra));
1783         }
1784
1785         return;
1786 }
1787
1788 /**
1789  * pmcraid_process_ccn - Op done function for a CCN.
1790  * @cmd: pointer to command struct
1791  *
1792  * This function is the op done function for a configuration
1793  * change notification
1794  *
1795  * Return value:
1796  * none
1797  */
1798 static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
1799 {
1800         struct pmcraid_instance *pinstance = cmd->drv_inst;
1801         u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
1802         unsigned long lock_flags;
1803
1804         pinstance->ccn.cmd = NULL;
1805         pmcraid_return_cmd(cmd);
1806
1807         /* If driver initiated IOA reset happened while this hcam was pending
1808          * with IOA, or IOA bringdown sequence is in progress, no need to
1809          * re-register the hcam
1810          */
1811         if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
1812             atomic_read(&pinstance->ccn.ignore) == 1) {
1813                 return;
1814         } else if (ioasc) {
1815                 dev_info(&pinstance->pdev->dev,
1816                         "Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc);
1817                 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
1818                 pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1819                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
1820         } else {
1821                 pmcraid_handle_config_change(pinstance);
1822         }
1823 }
1824
1825 /**
1826  * pmcraid_process_ldn - op done function for an LDN
1827  * @cmd: pointer to command block
1828  *
1829  * Return value
1830  *   none
1831  */
1832 static void pmcraid_initiate_reset(struct pmcraid_instance *);
1833 static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
1834
1835 static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
1836 {
1837         struct pmcraid_instance *pinstance = cmd->drv_inst;
1838         struct pmcraid_hcam_ldn *ldn_hcam =
1839                         (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
1840         u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
1841         u32 fd_ioasc = le32_to_cpu(ldn_hcam->error_log.fd_ioasc);
1842         unsigned long lock_flags;
1843
1844         /* return the command block back to freepool */
1845         pinstance->ldn.cmd = NULL;
1846         pmcraid_return_cmd(cmd);
1847
1848         /* If driver initiated IOA reset happened while this hcam was pending
1849          * with IOA, no need to re-register the hcam as reset engine will do it
1850          * once reset sequence is complete
1851          */
1852         if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
1853             atomic_read(&pinstance->ccn.ignore) == 1) {
1854                 return;
1855         } else if (!ioasc) {
1856                 pmcraid_handle_error_log(pinstance);
1857                 if (fd_ioasc == PMCRAID_IOASC_NR_IOA_RESET_REQUIRED) {
1858                         spin_lock_irqsave(pinstance->host->host_lock,
1859                                           lock_flags);
1860                         pmcraid_initiate_reset(pinstance);
1861                         spin_unlock_irqrestore(pinstance->host->host_lock,
1862                                                lock_flags);
1863                         return;
1864                 }
1865                 if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) {
1866                         pinstance->timestamp_error = 1;
1867                         pmcraid_set_timestamp(cmd);
1868                 }
1869         } else {
1870                 dev_info(&pinstance->pdev->dev,
1871                         "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
1872         }
1873         /* send netlink message for HCAM notification if enabled */
1874         if (!pmcraid_disable_aen)
1875                 pmcraid_notify_ldn(pinstance);
1876
1877         cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
1878         if (cmd)
1879                 pmcraid_send_hcam_cmd(cmd);
1880 }
1881
1882 /**
1883  * pmcraid_register_hcams - register HCAMs for CCN and LDN
1884  *
1885  * @pinstance: pointer to per adapter instance structure
1886  *
1887  * Return Value
1888  *   none
1889  */
1890 static void pmcraid_register_hcams(struct pmcraid_instance *pinstance)
1891 {
1892         pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1893         pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
1894 }
1895
1896 /**
1897  * pmcraid_unregister_hcams - cancel HCAMs registered already
1898  * @cmd: pointer to command used as part of reset sequence
1899  */
1900 static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd)
1901 {
1902         struct pmcraid_instance *pinstance = cmd->drv_inst;
1903
1904         /* During IOA bringdown, an HCAM may still fire and the tasklet would
1905          * proceed with handling the hcam response even though it is not
1906          * necessary. To prevent this, set 'ignore' so that the bring-down
1907          * sequence doesn't re-send any more hcams
1908          */
1909         atomic_set(&pinstance->ccn.ignore, 1);
1910         atomic_set(&pinstance->ldn.ignore, 1);
1911
1912         /* If adapter reset was forced as part of runtime reset sequence,
1913          * start the reset sequence. Reset will be triggered even in case of
1914          * an IOA unit_check.
1915          */
1916         if ((pinstance->force_ioa_reset && !pinstance->ioa_bringdown) ||
1917              pinstance->ioa_unit_check) {
1918                 pinstance->force_ioa_reset = 0;
1919                 pinstance->ioa_unit_check = 0;
1920                 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1921                 pmcraid_reset_alert(cmd);
1922                 return;
1923         }
1924
1925         /* Driver tries to cancel HCAMs by sending ABORT TASK for each HCAM
1926          * one after the other. So CCN cancellation will be triggered by
1927          * pmcraid_cancel_ldn itself.
1928          */
1929         pmcraid_cancel_ldn(cmd);
1930 }
1931
1932 /**
1933  * pmcraid_reset_enable_ioa - re-enable IOA after a hard reset
1934  * @pinstance: pointer to adapter instance structure
1935  * Return Value
1936  *  1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0
1937  */
1938 static void pmcraid_reinit_buffers(struct pmcraid_instance *);
1939
1940 static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance)
1941 {
1942         u32 intrs;
1943
1944         pmcraid_reinit_buffers(pinstance);
1945         intrs = pmcraid_read_interrupts(pinstance);
1946
1947         pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
1948
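        /* If the sticky transition-to-operational bit is already set, mask and
         * clear that interrupt (legacy interrupt mode only) and return 1 so
         * the caller can skip the soft reset and proceed with HRRQ
         * identification directly.
         */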
1949         if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
1950                 if (!pinstance->interrupt_mode) {
1951                         iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
1952                                 pinstance->int_regs.
1953                                 ioa_host_interrupt_mask_reg);
1954                         iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
1955                                 pinstance->int_regs.ioa_host_interrupt_clr_reg);
1956                 }
1957                 return 1;
1958         } else {
1959                 return 0;
1960         }
1961 }
1962
1963 /**
1964  * pmcraid_soft_reset - performs a soft reset and makes IOA become ready
1965  * @cmd : pointer to reset command block
1966  *
1967  * Return Value
1968  *      none
1969  */
1970 static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
1971 {
1972         struct pmcraid_instance *pinstance = cmd->drv_inst;
1973         u32 int_reg;
1974         u32 doorbell;
1975
1976         /* There will be an interrupt when Transition to Operational bit is
1977          * set so tasklet would execute next reset task. The timeout handler
1978          * would re-initiate a reset
1979          */
1980         cmd->cmd_done = pmcraid_ioa_reset;
1981         cmd->timer.data = (unsigned long)cmd;
1982         cmd->timer.expires = jiffies +
1983                              msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
1984         cmd->timer.function = (void (*)(unsigned long))pmcraid_timeout_handler;
1985
1986         if (!timer_pending(&cmd->timer))
1987                 add_timer(&cmd->timer);
1988
1989         /* Enable destructive diagnostics on IOA if it is not yet in
1990          * operational state
1991          */
1992         doorbell = DOORBELL_RUNTIME_RESET |
1993                    DOORBELL_ENABLE_DESTRUCTIVE_DIAGS;
1994
1995         /* Since we do RESET_ALERT and Start BIST we have to again write
1996          * MSIX Doorbell to indicate the interrupt mode
1997          */
1998         if (pinstance->interrupt_mode) {
1999                 iowrite32(DOORBELL_INTR_MODE_MSIX,
2000                           pinstance->int_regs.host_ioa_interrupt_reg);
2001                 ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
2002         }
2003
2004         iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg);
2005         ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
2006         int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
2007
2008         pmcraid_info("Waiting for IOA to become operational %x:%x\n",
2009                      ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
2010                      int_reg);
2011 }
2012
2013 /**
2014  * pmcraid_get_dump - retrieves IOA dump in case of Unit Check interrupt
2015  *
2016  * @pinstance: pointer to adapter instance structure
2017  *
2018  * Return Value
2019  *      none
2020  */
2021 static void pmcraid_get_dump(struct pmcraid_instance *pinstance)
2022 {
2023         pmcraid_info("%s is not yet implemented\n", __func__);
2024 }
2025
2026 /**
2027  * pmcraid_fail_outstanding_cmds - Fails all outstanding ops.
2028  * @pinstance: pointer to adapter instance structure
2029  *
2030  * This function fails all outstanding ops. If they are submitted to IOA
2031  * already, it sends cancel all messages if IOA is still accepting IOARCBs,
2032  * otherwise just completes the commands and returns the cmd blocks to free
2033  * pool.
2034  *
2035  * Return value:
2036  *       none
2037  */
2038 static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
2039 {
2040         struct pmcraid_cmd *cmd, *temp;
2041         unsigned long lock_flags;
2042
2043         /* pending command list is protected by pending_pool_lock. Its
2044          * traversal must be done while holding this lock
2045          */
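        /* The lock is dropped around each command's completion below, since
         * the completion callbacks may themselves take other locks, and is
         * re-acquired before the traversal continues.
         */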
2046         spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
2047         list_for_each_entry_safe(cmd, temp, &pinstance->pending_cmd_pool,
2048                                  free_list) {
2049                 list_del(&cmd->free_list);
2050                 spin_unlock_irqrestore(&pinstance->pending_pool_lock,
2051                                         lock_flags);
2052                 cmd->ioa_cb->ioasa.ioasc =
2053                         cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET);
2054                 cmd->ioa_cb->ioasa.ilid =
2055                         cpu_to_be32(PMCRAID_DRIVER_ILID);
2056
2057                 /* In case the command timer is still running */
2058                 del_timer(&cmd->timer);
2059
2060                 /* If this is an IO command, complete it by invoking scsi_done
2061                  * function. If this is one of the internal commands other
2062                  * than pmcraid_ioa_reset and the HCAM commands, invoke cmd_done to
2063                  * complete it
2064                  */
2065                 if (cmd->scsi_cmd) {
2066
2067                         struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2068                         __le32 resp = cmd->ioa_cb->ioarcb.response_handle;
2069
2070                         scsi_cmd->result |= DID_ERROR << 16;
2071
2072                         scsi_dma_unmap(scsi_cmd);
2073                         pmcraid_return_cmd(cmd);
2074
2075                         pmcraid_info("failing(%d) CDB[0] = %x result: %x\n",
2076                                      le32_to_cpu(resp) >> 2,
2077                                      cmd->ioa_cb->ioarcb.cdb[0],
2078                                      scsi_cmd->result);
2079                         scsi_cmd->scsi_done(scsi_cmd);
2080                 } else if (cmd->cmd_done == pmcraid_internal_done ||
2081                            cmd->cmd_done == pmcraid_erp_done) {
2082                         cmd->cmd_done(cmd);
2083                 } else if (cmd->cmd_done != pmcraid_ioa_reset &&
2084                            cmd->cmd_done != pmcraid_ioa_shutdown_done) {
2085                         pmcraid_return_cmd(cmd);
2086                 }
2087
2088                 atomic_dec(&pinstance->outstanding_cmds);
2089                 spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
2090         }
2091
2092         spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
2093 }
2094
2095 /**
2096  * pmcraid_ioa_reset - Implementation of IOA reset logic
2097  *
2098  * @cmd: pointer to the cmd block to be used for entire reset process
2099  *
2100  * This function executes most of the steps required for IOA reset. This gets
2101  * called by user threads (modprobe/insmod/rmmod), timer, tasklet and midlayer's
2102  * 'eh_' thread. Access to variables used for controlling the reset sequence is
2103  * synchronized using host lock. Various functions called during reset process
2104  * would make use of a single command block, pointer to which is also stored in
2105  * adapter instance structure.
2106  *
2107  * Return Value
2108  *       None
2109  */
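 *
 * Typical transitions: a bring-up moves through IN_RESET_ALERT ->
 * IN_HARD_RESET -> IN_SOFT_RESET/IN_BRINGUP -> OPERATIONAL, while a
 * bring-down moves through IN_BRINGDOWN -> IN_RESET_ALERT -> IN_HARD_RESET
 * before the reset is marked complete.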
2110 static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
2111 {
2112         struct pmcraid_instance *pinstance = cmd->drv_inst;
2113         u8 reset_complete = 0;
2114
2115         pinstance->ioa_reset_in_progress = 1;
2116
2117         if (pinstance->reset_cmd != cmd) {
2118                 pmcraid_err("reset is called with different command block\n");
2119                 pinstance->reset_cmd = cmd;
2120         }
2121
2122         pmcraid_info("reset_engine: state = %d, command = %p\n",
2123                       pinstance->ioa_state, cmd);
2124
2125         switch (pinstance->ioa_state) {
2126
2127         case IOA_STATE_DEAD:
2128                 /* If IOA is offline, whatever the reset reason may be, just
2129                  * return. Callers might be waiting on the reset wait_q, so
2130                  * wake them up
2131                  */
2132                 pmcraid_err("IOA is offline no reset is possible\n");
2133                 reset_complete = 1;
2134                 break;
2135
2136         case IOA_STATE_IN_BRINGDOWN:
2137                 /* We enter here once the ioa shutdown command is processed by IOA.
2138                  * Alert IOA for a possible reset. If reset alert fails, IOA
2139                  * goes through hard-reset
2140                  */
2141                 pmcraid_disable_interrupts(pinstance, ~0);
2142                 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
2143                 pmcraid_reset_alert(cmd);
2144                 break;
2145
2146         case IOA_STATE_UNKNOWN:
2147                 /* We may be called during probe or resume. Some pre-processing
2148                  * is required prior to reset
2149                  */
2150                 scsi_block_requests(pinstance->host);
2151
2152                 /* If asked to reset while IOA was processing responses or
2153                  * there are any error responses then IOA may require
2154                  * hard-reset.
2155                  */
2156                 if (pinstance->ioa_hard_reset == 0) {
2157                         if (ioread32(pinstance->ioa_status) &
2158                             INTRS_TRANSITION_TO_OPERATIONAL) {
2159                                 pmcraid_info("sticky bit set, bring-up\n");
2160                                 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
2161                                 pmcraid_reinit_cmdblk(cmd);
2162                                 pmcraid_identify_hrrq(cmd);
2163                         } else {
2164                                 pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
2165                                 pmcraid_soft_reset(cmd);
2166                         }
2167                 } else {
2168                         /* Alert IOA of a possible reset and wait for critical
2169                          * operation in progress bit to reset
2170                          */
2171                         pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
2172                         pmcraid_reset_alert(cmd);
2173                 }
2174                 break;
2175
2176         case IOA_STATE_IN_RESET_ALERT:
2177                 /* If critical operation in progress bit is reset or wait gets
2178                  * timed out, reset proceeds with starting BIST on the IOA.
2179                  * The IN_HARD_RESET state counts reset attempts; once the count
2180                  * exceeds PMCRAID_RESET_ATTEMPTS, the IOA is marked dead
2181                  */
2182                 pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
2183                 pmcraid_start_bist(cmd);
2184                 break;
2185
2186         case IOA_STATE_IN_HARD_RESET:
2187                 pinstance->ioa_reset_attempts++;
2188
2189                 /* retry reset if we haven't reached maximum allowed limit */
2190                 if (pinstance->ioa_reset_attempts > PMCRAID_RESET_ATTEMPTS) {
2191                         pinstance->ioa_reset_attempts = 0;
2192                         pmcraid_err("IOA didn't respond marking it as dead\n");
2193                         pinstance->ioa_state = IOA_STATE_DEAD;
2194
2195                         if (pinstance->ioa_bringdown)
2196                                 pmcraid_notify_ioastate(pinstance,
2197                                         PMC_DEVICE_EVENT_SHUTDOWN_FAILED);
2198                         else
2199                                 pmcraid_notify_ioastate(pinstance,
2200                                                 PMC_DEVICE_EVENT_RESET_FAILED);
2201                         reset_complete = 1;
2202                         break;
2203                 }
2204
2205                 /* Once either bist or pci reset is done, restore PCI config
2206                  * space. If this fails, proceed with hard reset again
2207                  */
2208                 pci_restore_state(pinstance->pdev);
2209
2210                 /* fail all pending commands */
2211                 pmcraid_fail_outstanding_cmds(pinstance);
2212
2213                 /* check if unit check is active, if so extract dump */
2214                 if (pinstance->ioa_unit_check) {
2215                         pmcraid_info("unit check is active\n");
2216                         pinstance->ioa_unit_check = 0;
2217                         pmcraid_get_dump(pinstance);
2218                         pinstance->ioa_reset_attempts--;
2219                         pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
2220                         pmcraid_reset_alert(cmd);
2221                         break;
2222                 }
2223
2224                 /* if the reset reason is to bring down the ioa, we might be
2225                  * done with the reset; restore pci_config_space and complete
2226                  * the reset
2227                  */
2228                 if (pinstance->ioa_bringdown) {
2229                         pmcraid_info("bringing down the adapter\n");
2230                         pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2231                         pinstance->ioa_bringdown = 0;
2232                         pinstance->ioa_state = IOA_STATE_UNKNOWN;
2233                         pmcraid_notify_ioastate(pinstance,
2234                                         PMC_DEVICE_EVENT_SHUTDOWN_SUCCESS);
2235                         reset_complete = 1;
2236                 } else {
2237                         /* bring-up IOA, so proceed with soft reset
2238                          * Reinitialize hrrq_buffers and their indices, and also
2239                          * enable interrupts after a pci_restore_state
2240                          */
2241                         if (pmcraid_reset_enable_ioa(pinstance)) {
2242                                 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
2243                                 pmcraid_info("bringing up the adapter\n");
2244                                 pmcraid_reinit_cmdblk(cmd);
2245                                 pmcraid_identify_hrrq(cmd);
2246                         } else {
2247                                 pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
2248                                 pmcraid_soft_reset(cmd);
2249                         }
2250                 }
2251                 break;
2252
2253         case IOA_STATE_IN_SOFT_RESET:
2254                 /* TRANSITION TO OPERATIONAL is on so start initialization
2255                  * sequence
2256                  */
2257                 pmcraid_info("In softreset proceeding with bring-up\n");
2258                 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
2259
2260                 /* Initialization commands start with HRRQ identification. From
2261                  * now on tasklet completes most of the commands as IOA is up
2262                  * and intrs are enabled
2263                  */
2264                 pmcraid_identify_hrrq(cmd);
2265                 break;
2266
2267         case IOA_STATE_IN_BRINGUP:
2268                 /* we are done with bringing up of IOA, change the ioa_state to
2269                  * operational and wake up any waiters
2270                  */
2271                 pinstance->ioa_state = IOA_STATE_OPERATIONAL;
2272                 reset_complete = 1;
2273                 break;
2274
2275         case IOA_STATE_OPERATIONAL:
2276         default:
2277                 /* When IOA is operational and a reset is requested, check for
2278                  * the reset reason. If reset is to bring down IOA, unregister
2279                  * HCAMs and initiate shutdown; if adapter reset is forced then
2280                  * restart reset sequence again
2281                  */
2282                 if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE &&
2283                     pinstance->force_ioa_reset == 0) {
2284                         pmcraid_notify_ioastate(pinstance,
2285                                                 PMC_DEVICE_EVENT_RESET_SUCCESS);
2286                         reset_complete = 1;
2287                 } else {
2288                         if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE)
2289                                 pinstance->ioa_state = IOA_STATE_IN_BRINGDOWN;
2290                         pmcraid_reinit_cmdblk(cmd);
2291                         pmcraid_unregister_hcams(cmd);
2292                 }
2293                 break;
2294         }
2295
2296         /* reset will be completed if ioa_state is either DEAD or UNKNOWN or
2297          * OPERATIONAL. Reset all control variables used during reset, wake up
2298          * any waiting threads and let the SCSI mid-layer send commands. Note
2299          * that host_lock must be held before invoking scsi_report_bus_reset.
2300          */
2301         if (reset_complete) {
2302                 pinstance->ioa_reset_in_progress = 0;
2303                 pinstance->ioa_reset_attempts = 0;
2304                 pinstance->reset_cmd = NULL;
2305                 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2306                 pinstance->ioa_bringdown = 0;
2307                 pmcraid_return_cmd(cmd);
2308
2309                 /* If target state is to bring up the adapter, proceed with
2310                  * hcam registration and resource exposure to mid-layer.
2311                  */
2312                 if (pinstance->ioa_state == IOA_STATE_OPERATIONAL)
2313                         pmcraid_register_hcams(pinstance);
2314
2315                 wake_up_all(&pinstance->reset_wait_q);
2316         }
2317
2318         return;
2319 }
2320
2321 /**
2322  * pmcraid_initiate_reset - initiates reset sequence. This is called from
2323  * ISR/tasklet during error interrupts including IOA unit check. If reset
2324  * is already in progress, it just returns, otherwise initiates IOA reset
2325  * to bring IOA up to operational state.
2326  *
2327  * @pinstance: pointer to adapter instance structure
2328  *
2329  * Return value
2330  *       none
2331  */
2332 static void pmcraid_initiate_reset(struct pmcraid_instance *pinstance)
2333 {
2334         struct pmcraid_cmd *cmd;
2335
2336         /* If the reset is already in progress, just return, otherwise start
2337          * reset sequence and return
2338          */
2339         if (!pinstance->ioa_reset_in_progress) {
2340                 scsi_block_requests(pinstance->host);
2341                 cmd = pmcraid_get_free_cmd(pinstance);
2342
2343                 if (cmd == NULL) {
2344                         pmcraid_err("no cmnd blocks for initiate_reset\n");
2345                         return;
2346                 }
2347
2348                 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2349                 pinstance->reset_cmd = cmd;
2350                 pinstance->force_ioa_reset = 1;
2351                 pmcraid_notify_ioastate(pinstance,
2352                                         PMC_DEVICE_EVENT_RESET_START);
2353                 pmcraid_ioa_reset(cmd);
2354         }
2355 }
2356
2357 /**
2358  * pmcraid_reset_reload - utility routine for doing IOA reset either to bringup
2359  *                        or bringdown IOA
2360  * @pinstance: pointer to adapter instance structure
2361  * @shutdown_type: shutdown type to be used: NONE, NORMAL or ABBREV
2362  * @target_state: expected target state after reset
2363  *
2364  * Note: This command initiates reset and waits for its completion. Hence this
2365  * should not be called from isr/timer/tasklet functions (timeout handlers,
2366  * error response handlers and interrupt handlers).
2367  *
2368  * Return Value
2369  *       1 in case ioa_state is not target_state, 0 otherwise.
2370  */
2371 static int pmcraid_reset_reload(
2372         struct pmcraid_instance *pinstance,
2373         u8 shutdown_type,
2374         u8 target_state
2375 )
2376 {
2377         struct pmcraid_cmd *reset_cmd = NULL;
2378         unsigned long lock_flags;
2379         int reset = 1;
2380
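        /* If a reset is already in progress, wait for it to finish; if the IOA
         * is then dead or already in the target state, there is nothing more
         * to do. Otherwise block mid-layer requests, start a fresh reset with
         * the requested shutdown type and wait for the reset engine to finish.
         */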
2381         spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2382
2383         if (pinstance->ioa_reset_in_progress) {
2384                 pmcraid_info("reset_reload: reset is already in progress\n");
2385
2386                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2387
2388                 wait_event(pinstance->reset_wait_q,
2389                            !pinstance->ioa_reset_in_progress);
2390
2391                 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2392
2393                 if (pinstance->ioa_state == IOA_STATE_DEAD) {
2394                         spin_unlock_irqrestore(pinstance->host->host_lock,
2395                                                lock_flags);
2396                         pmcraid_info("reset_reload: IOA is dead\n");
2397                         return reset;
2398                 } else if (pinstance->ioa_state == target_state) {
2399                         reset = 0;
2400                 }
2401         }
2402
2403         if (reset) {
2404                 pmcraid_info("reset_reload: proceeding with reset\n");
2405                 scsi_block_requests(pinstance->host);
2406                 reset_cmd = pmcraid_get_free_cmd(pinstance);
2407
2408                 if (reset_cmd == NULL) {
2409                         pmcraid_err("no free cmnd for reset_reload\n");
2410                         spin_unlock_irqrestore(pinstance->host->host_lock,
2411                                                lock_flags);
2412                         return reset;
2413                 }
2414
2415                 if (shutdown_type == SHUTDOWN_NORMAL)
2416                         pinstance->ioa_bringdown = 1;
2417
2418                 pinstance->ioa_shutdown_type = shutdown_type;
2419                 pinstance->reset_cmd = reset_cmd;
2420                 pinstance->force_ioa_reset = reset;
2421                 pmcraid_info("reset_reload: initiating reset\n");
2422                 pmcraid_ioa_reset(reset_cmd);
2423                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2424                 pmcraid_info("reset_reload: waiting for reset to complete\n");
2425                 wait_event(pinstance->reset_wait_q,
2426                            !pinstance->ioa_reset_in_progress);
2427
2428                 pmcraid_info("reset_reload: reset is complete !!\n");
2429                 scsi_unblock_requests(pinstance->host);
2430                 if (pinstance->ioa_state == target_state)
2431                         reset = 0;
2432         }
2433
2434         return reset;
2435 }
2436
2437 /**
2438  * pmcraid_reset_bringdown - wrapper over pmcraid_reset_reload to bringdown IOA
2439  *
2440  * @pinstance: pointer to adapter instance structure
2441  *
2442  * Return Value
2443  *       whatever is returned from pmcraid_reset_reload
2444  */
2445 static int pmcraid_reset_bringdown(struct pmcraid_instance *pinstance)
2446 {
2447         return pmcraid_reset_reload(pinstance,
2448                                     SHUTDOWN_NORMAL,
2449                                     IOA_STATE_UNKNOWN);
2450 }
2451
2452 /**
2453  * pmcraid_reset_bringup - wrapper over pmcraid_reset_reload to bring up IOA
2454  *
2455  * @pinstance: pointer to adapter instance structure
2456  *
2457  * Return Value
2458  *       whatever is returned from pmcraid_reset_reload
2459  */
2460 static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance)
2461 {
2462         pmcraid_notify_ioastate(pinstance, PMC_DEVICE_EVENT_RESET_START);
2463
2464         return pmcraid_reset_reload(pinstance,
2465                                     SHUTDOWN_NONE,
2466                                     IOA_STATE_OPERATIONAL);
2467 }
2468
2469 /**
2470  * pmcraid_request_sense - Send request sense to a device
2471  * @cmd: pmcraid command struct
2472  *
2473  * This function sends a request sense to a device as a result of a check
2474  * condition. This method re-uses the same command block that failed earlier.
2475  */
2476 static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
2477 {
2478         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2479         struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
2480
2481         /* allocate DMAable memory for sense buffers */
2482         cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev,
2483                                                  SCSI_SENSE_BUFFERSIZE,
2484                                                  &cmd->sense_buffer_dma);
2485
2486         if (cmd->sense_buffer == NULL) {
2487                 pmcraid_err
2488                         ("couldn't allocate sense buffer for request sense\n");
2489                 pmcraid_erp_done(cmd);
2490                 return;
2491         }
2492
2493         /* re-use the command block */
2494         memset(&cmd->ioa_cb->ioasa, 0, sizeof(struct pmcraid_ioasa));
2495         memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
2496         ioarcb->request_flags0 = (SYNC_COMPLETE |
2497                                   NO_LINK_DESCS |
2498                                   INHIBIT_UL_CHECK);
2499         ioarcb->request_type = REQ_TYPE_SCSI;
2500         ioarcb->cdb[0] = REQUEST_SENSE;
2501         ioarcb->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2502
2503         ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
2504                                         offsetof(struct pmcraid_ioarcb,
2505                                                 add_data.u.ioadl[0]));
2506         ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
2507
2508         ioarcb->data_transfer_length = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
2509
2510         ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
2511         ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
2512         ioadl->flags = IOADL_FLAGS_LAST_DESC;
2513
2514         /* request sense might be called as part of error response processing
2515          * which runs in tasklet context. It is possible that mid-layer might
2516          * schedule queuecommand during this time, hence, writing to IOARRIN
2517          * must be protected by host_lock
2518          */
2519         pmcraid_send_cmd(cmd, pmcraid_erp_done,
2520                          PMCRAID_REQUEST_SENSE_TIMEOUT,
2521                          pmcraid_timeout_handler);
2522 }
2523
2524 /**
2525  * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
2526  * @cmd: command that failed
2527  * @sense: non-zero if sense data is already available; when zero, a
 *         request sense is issued after the cancel all completes
2528  *
2529  * This function sends a cancel all to a device to clear the queue.
2530  */
2531 static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
2532 {
2533         struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2534         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2535         struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
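        /* If sense data has already been captured, finish ERP right after the
         * cancel all completes; otherwise issue a request sense next.
         */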
2536         void (*cmd_done) (struct pmcraid_cmd *) = sense ? pmcraid_erp_done
2537                                                         : pmcraid_request_sense;
2538
2539         memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
2540         ioarcb->request_flags0 = SYNC_OVERRIDE;
2541         ioarcb->request_type = REQ_TYPE_IOACMD;
2542         ioarcb->cdb[0] = PMCRAID_CANCEL_ALL_REQUESTS;
2543
2544         if (RES_IS_GSCSI(res->cfg_entry))
2545                 ioarcb->cdb[1] = PMCRAID_SYNC_COMPLETE_AFTER_CANCEL;
2546
2547         ioarcb->ioadl_bus_addr = 0;
2548         ioarcb->ioadl_length = 0;
2549         ioarcb->data_transfer_length = 0;
2550         ioarcb->ioarcb_bus_addr &= (~0x1FULL);
2551
2552         /* writing to IOARRIN must be protected by host_lock, as the mid-layer
2553          * might schedule a queuecommand while we are doing this
2554          */
2555         pmcraid_send_cmd(cmd, cmd_done,
2556                          PMCRAID_REQUEST_SENSE_TIMEOUT,
2557                          pmcraid_timeout_handler);
2558 }
2559
2560 /**
2561  * pmcraid_frame_auto_sense - frame fixed format sense information
2562  *
2563  * @cmd: pointer to failing command block
2564  *
2565  * Return value
2566  *  none
2567  */
2568 static void pmcraid_frame_auto_sense(struct pmcraid_cmd *cmd)
2569 {
2570         u8 *sense_buf = cmd->scsi_cmd->sense_buffer;
2571         struct pmcraid_resource_entry *res = cmd->scsi_cmd->device->hostdata;
2572         struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
2573         u32 ioasc = le32_to_cpu(ioasa->ioasc);
2574         u32 failing_lba = 0;
2575
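        /* Descriptor-format sense (response code 0x72) is built when a VSET
         * media error reports a failing LBA that does not fit in 32 bits;
         * otherwise fixed-format sense (0x70) is built, with the failing LBA
         * placed in the information field (VALID bit set) for media errors.
         */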
2576         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
2577         cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
2578
2579         if (RES_IS_VSET(res->cfg_entry) &&
2580             ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC &&
2581             ioasa->u.vset.failing_lba_hi != 0) {
2582
2583                 sense_buf[0] = 0x72;
2584                 sense_buf[1] = PMCRAID_IOASC_SENSE_KEY(ioasc);
2585                 sense_buf[2] = PMCRAID_IOASC_SENSE_CODE(ioasc);
2586                 sense_buf[3] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
2587
2588                 sense_buf[7] = 12;
2589                 sense_buf[8] = 0;
2590                 sense_buf[9] = 0x0A;
2591                 sense_buf[10] = 0x80;
2592
2593                 failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_hi);
2594
2595                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
2596                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
2597                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
2598                 sense_buf[15] = failing_lba & 0x000000ff;
2599
2600                 failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_lo);
2601
2602                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
2603                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
2604                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
2605                 sense_buf[19] = failing_lba & 0x000000ff;
2606         } else {
2607                 sense_buf[0] = 0x70;
2608                 sense_buf[2] = PMCRAID_IOASC_SENSE_KEY(ioasc);
2609                 sense_buf[12] = PMCRAID_IOASC_SENSE_CODE(ioasc);
2610                 sense_buf[13] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
2611
2612                 if (ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC) {
2613                         if (RES_IS_VSET(res->cfg_entry))
2614                                 failing_lba =
2615                                         le32_to_cpu(ioasa->u.
2616                                                  vset.failing_lba_lo);
2617                         sense_buf[0] |= 0x80;
2618                         sense_buf[3] = (failing_lba >> 24) & 0xff;
2619                         sense_buf[4] = (failing_lba >> 16) & 0xff;
2620                         sense_buf[5] = (failing_lba >> 8) & 0xff;
2621                         sense_buf[6] = failing_lba & 0xff;
2622                 }
2623
2624                 sense_buf[7] = 6; /* additional length */
2625         }
2626 }
2627
2628 /**
2629  * pmcraid_error_handler - Error response handlers for a SCSI op
2630  * @cmd: pointer to pmcraid_cmd that has failed
2631  *
2632  * This function determines whether or not to initiate ERP on the affected
2633  * device. This is called from a tasklet, which doesn't hold any locks.
2634  *
2635  * Return value:
2636  *       0 if the caller can complete the request, otherwise 1, in which
2637  *       case the error handler itself completes the request and returns
2638  *       the command block back to the free-pool
2639  */
2640 static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
2641 {
2642         struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2643         struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
2644         struct pmcraid_instance *pinstance = cmd->drv_inst;
2645         struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
2646         u32 ioasc = le32_to_cpu(ioasa->ioasc);
2647         u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
2648         u32 sense_copied = 0;
2649
2650         if (!res) {
2651                 pmcraid_info("resource pointer is NULL\n");
2652                 return 0;
2653         }
2654
2655         /* If this was a SCSI read/write command keep count of errors */
2656         if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
2657                 atomic_inc(&res->read_failures);
2658         else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
2659                 atomic_inc(&res->write_failures);
2660
2661         if (!RES_IS_GSCSI(res->cfg_entry) &&
2662                 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
2663                 pmcraid_frame_auto_sense(cmd);
2664         }
2665
2666         /* Log IOASC/IOASA information based on user settings */
2667         pmcraid_ioasc_logger(ioasc, cmd);
2668
2669         switch (masked_ioasc) {
2670
2671         case PMCRAID_IOASC_AC_TERMINATED_BY_HOST:
2672                 scsi_cmd->result |= (DID_ABORT << 16);
2673                 break;
2674
2675         case PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE:
2676         case PMCRAID_IOASC_HW_CANNOT_COMMUNICATE:
2677                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
2678                 break;
2679
2680         case PMCRAID_IOASC_NR_SYNC_REQUIRED:
2681                 res->sync_reqd = 1;
2682                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
2683                 break;
2684
2685         case PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC:
2686                 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
2687                 break;
2688
2689         case PMCRAID_IOASC_UA_BUS_WAS_RESET:
2690         case PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER:
2691                 if (!res->reset_progress)
2692                         scsi_report_bus_reset(pinstance->host,
2693                                               scsi_cmd->device->channel);
2694                 scsi_cmd->result |= (DID_ERROR << 16);
2695                 break;
2696
2697         case PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR:
2698                 scsi_cmd->result |= PMCRAID_IOASC_SENSE_STATUS(ioasc);
2699                 res->sync_reqd = 1;
2700
2701                 /* if check_condition is not active return with error otherwise
2702                  * get/frame the sense buffer
2703                  */
2704                 if (PMCRAID_IOASC_SENSE_STATUS(ioasc) !=
2705                     SAM_STAT_CHECK_CONDITION &&
2706                     PMCRAID_IOASC_SENSE_STATUS(ioasc) != SAM_STAT_ACA_ACTIVE)
2707                         return 0;
2708
2709                 /* If we have auto sense data as part of IOASA pass it to
2710                  * mid-layer
2711                  */
2712                 if (ioasa->auto_sense_length != 0) {
2713                         short sense_len = ioasa->auto_sense_length;
2714                         int data_size = min_t(u16, le16_to_cpu(sense_len),
2715                                               SCSI_SENSE_BUFFERSIZE);
2716
2717                         memcpy(scsi_cmd->sense_buffer,
2718                                ioasa->sense_data,
2719                                data_size);
2720                         sense_copied = 1;
2721                 }
2722
2723                 if (RES_IS_GSCSI(res->cfg_entry))
2724                         pmcraid_cancel_all(cmd, sense_copied);
2725                 else if (sense_copied)
2726                         pmcraid_erp_done(cmd);
2727                 else
2728                         pmcraid_request_sense(cmd);
2729
2730                 return 1;
2731
2732         case PMCRAID_IOASC_NR_INIT_CMD_REQUIRED:
2733                 break;
2734
2735         default:
2736                 if (PMCRAID_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
2737                         scsi_cmd->result |= (DID_ERROR << 16);
2738                 break;
2739         }
2740         return 0;
2741 }
2742
2743 /**
2744  * pmcraid_reset_device - device reset handler functions
2745  *
2746  * @scsi_cmd: scsi command struct
2747  * @modifier: reset modifier indicating the reset sequence to be performed
2748  *
2749  * This function issues a device reset to the affected device.
2750  * A LUN reset will be sent to the device first. If that does
2751  * not work, a target reset will be sent.
2752  *
2753  * Return value:
2754  *      SUCCESS / FAILED
2755  */
2756 static int pmcraid_reset_device(
2757         struct scsi_cmnd *scsi_cmd,
2758         unsigned long timeout,
2759         u8 modifier
2760 )
2761 {
2762         struct pmcraid_cmd *cmd;
2763         struct pmcraid_instance *pinstance;
2764         struct pmcraid_resource_entry *res;
2765         struct pmcraid_ioarcb *ioarcb;
2766         unsigned long lock_flags;
2767         u32 ioasc;
2768
2769         pinstance =
2770                 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
2771         res = scsi_cmd->device->hostdata;
2772
2773         if (!res) {
2774                 sdev_printk(KERN_ERR, scsi_cmd->device,
2775                             "reset_device: NULL resource pointer\n");
2776                 return FAILED;
2777         }
2778
2779         /* If adapter is currently going through reset/reload, return failed.
2780          * This will force the mid-layer to call _eh_bus/host reset, which
2781          * will then go to sleep and wait for the reset to complete
2782          */
2783         spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2784         if (pinstance->ioa_reset_in_progress ||
2785             pinstance->ioa_state == IOA_STATE_DEAD) {
2786                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2787                 return FAILED;
2788         }
2789
2790         res->reset_progress = 1;
2791         pmcraid_info("Resetting %s resource with addr %x\n",
2792                      ((modifier & RESET_DEVICE_LUN) ? "LUN" :
2793                      ((modifier & RESET_DEVICE_TARGET) ? "TARGET" : "BUS")),
2794                      le32_to_cpu(res->cfg_entry.resource_address));
2795
2796         /* get a free cmd block */
2797         cmd = pmcraid_get_free_cmd(pinstance);
2798
2799         if (cmd == NULL) {
2800                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2801                 pmcraid_err("%s: no cmd blocks are available\n", __func__);
2802                 return FAILED;
2803         }
2804
2805         ioarcb = &cmd->ioa_cb->ioarcb;
2806         ioarcb->resource_handle = res->cfg_entry.resource_handle;
2807         ioarcb->request_type = REQ_TYPE_IOACMD;
2808         ioarcb->cdb[0] = PMCRAID_RESET_DEVICE;
2809
2810         /* Initialize reset modifier bits */
2811         if (modifier)
2812                 modifier = ENABLE_RESET_MODIFIER | modifier;
2813
2814         ioarcb->cdb[1] = modifier;
2815
2816         init_completion(&cmd->wait_for_completion);
2817         cmd->completion_req = 1;
2818
2819         pmcraid_info("cmd(CDB[0] = %x) for %x with index = %d\n",
2820                      cmd->ioa_cb->ioarcb.cdb[0],
2821                      le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle),
2822                      le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
2823
2824         pmcraid_send_cmd(cmd,
2825                          pmcraid_internal_done,
2826                          timeout,
2827                          pmcraid_timeout_handler);
2828
2829         spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2830
2831         /* RESET_DEVICE command completes after all pending IOARCBs are
2832          * completed. Once this command is completed, pmcraid_internal_done
2833          * will wake up the 'completion' queue.
2834          */
2835         wait_for_completion(&cmd->wait_for_completion);
2836
2837         /* complete the command here itself and return the command block
2838          * to free list
2839          */
2840         pmcraid_return_cmd(cmd);
2841         res->reset_progress = 0;
2842         ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
2843
2844         /* set the return value based on the returned ioasc */
2845         return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
2846 }
2847
2848 /**
2849  * _pmcraid_io_done - helper for pmcraid_io_done function
2850  *
2851  * @cmd: pointer to pmcraid command struct
2852  * @reslen: residual data length to be set in the ioasa
2853  * @ioasc: ioasc either returned by IOA or set by driver itself.
2854  *
2855  * This function is invoked by pmcraid_io_done to complete mid-layer
2856  * scsi ops.
2857  *
2858  * Return value:
2859  *        0 if caller is required to return it to free_pool. Returns 1 if
2860  *        caller need not worry about freeing command block as error handler
2861  *        will take care of that.
2862  */
2863
2864 static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc)
2865 {
2866         struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2867         int rc = 0;
2868
2869         scsi_set_resid(scsi_cmd, reslen);
2870
2871         pmcraid_info("response(%d) CDB[0] = %x ioasc:result: %x:%x\n",
2872                 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
2873                 cmd->ioa_cb->ioarcb.cdb[0],
2874                 ioasc, scsi_cmd->result);
2875
2876         if (PMCRAID_IOASC_SENSE_KEY(ioasc) != 0)
2877                 rc = pmcraid_error_handler(cmd);
2878
2879         if (rc == 0) {
2880                 scsi_dma_unmap(scsi_cmd);
2881                 scsi_cmd->scsi_done(scsi_cmd);
2882         }
2883
2884         return rc;
2885 }
2886
2887 /**
2888  * pmcraid_io_done - SCSI completion function
2889  *
2890  * @cmd: pointer to pmcraid command struct
2891  *
2892  * This function is invoked by the tasklet/mid-layer error handler to complete
2893  * the SCSI ops sent from the mid-layer.
2894  *
2895  * Return value
2896  *        none
2897  */
2898
2899 static void pmcraid_io_done(struct pmcraid_cmd *cmd)
2900 {
2901         u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
2902         u32 reslen = le32_to_cpu(cmd->ioa_cb->ioasa.residual_data_length);
2903
2904         if (_pmcraid_io_done(cmd, reslen, ioasc) == 0)
2905                 pmcraid_return_cmd(cmd);
2906 }
2907
2908 /**
2909  * pmcraid_abort_cmd - Aborts a single IOARCB already submitted to IOA
2910  *
2911  * @cmd: command block of the command to be aborted
2912  *
2913  * Return Value:
2914  *       returns pointer to command structure used as cancelling cmd
2915  */
2916 static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
2917 {
2918         struct pmcraid_cmd *cancel_cmd;
2919         struct pmcraid_instance *pinstance;
2920         struct pmcraid_resource_entry *res;
2921
2922         pinstance = (struct pmcraid_instance *)cmd->drv_inst;
2923         res = cmd->scsi_cmd->device->hostdata;
2924
2925         cancel_cmd = pmcraid_get_free_cmd(pinstance);
2926
2927         if (cancel_cmd == NULL) {
2928                 pmcraid_err("%s: no cmd blocks are available\n", __func__);
2929                 return NULL;
2930         }
2931
2932         pmcraid_prepare_cancel_cmd(cancel_cmd, cmd);
2933
2934         pmcraid_info("aborting command CDB[0]= %x with index = %d\n",
2935                 cmd->ioa_cb->ioarcb.cdb[0],
2936                 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
2937
2938         init_completion(&cancel_cmd->wait_for_completion);
2939         cancel_cmd->completion_req = 1;
2940
2941         pmcraid_info("command (%d) CDB[0] = %x for %x\n",
2942                 le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2,
2943                 cancel_cmd->ioa_cb->ioarcb.cdb[0],
2944                 le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle));
2945
2946         pmcraid_send_cmd(cancel_cmd,
2947                          pmcraid_internal_done,
2948                          PMCRAID_INTERNAL_TIMEOUT,
2949                          pmcraid_timeout_handler);
2950         return cancel_cmd;
2951 }
2952
2953 /**
2954  * pmcraid_abort_complete - Waits for ABORT TASK completion
2955  *
2956  * @cancel_cmd: command block used as the cancelling command
2957  *
2958  * Return Value:
2959  *       returns SUCCESS if ABORT TASK has good completion
2960  *       otherwise FAILED
2961  */
2962 static int pmcraid_abort_complete(struct pmcraid_cmd *cancel_cmd)
2963 {
2964         struct pmcraid_resource_entry *res;
2965         u32 ioasc;
2966
2967         wait_for_completion(&cancel_cmd->wait_for_completion);
2968         res = cancel_cmd->res;
2969         cancel_cmd->res = NULL;
2970         ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
2971
2972         /* If the abort task is not timed out we will get a Good completion
2973          * as sense_key, otherwise we may get one of the following responses
2974          * due to subsequent bus reset or device reset. In case IOASC is
2975          * NR_SYNC_REQUIRED, set sync_reqd flag for the corresponding resource
2976          */
2977         if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
2978             ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) {
2979                 if (ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED)
2980                         res->sync_reqd = 1;
2981                 ioasc = 0;
2982         }
2983
2984         /* complete the command here itself */
2985         pmcraid_return_cmd(cancel_cmd);
2986         return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
2987 }
2988
2989 /**
2990  * pmcraid_eh_abort_handler - entry point for aborting a single task on errors
2991  *
2992  * @scsi_cmd:   scsi command struct given by mid-layer. When this is called
2993  *              mid-layer ensures that no other commands are queued. This
2994  *              never gets called in interrupt context, but from a separate eh thread.
2995  *
2996  * Return value:
2997  *       SUCCESS / FAILED
2998  */
2999 static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
3000 {
3001         struct pmcraid_instance *pinstance;
3002         struct pmcraid_cmd *cmd;
3003         struct pmcraid_resource_entry *res;
3004         unsigned long host_lock_flags;
3005         unsigned long pending_lock_flags;
3006         struct pmcraid_cmd *cancel_cmd = NULL;
3007         int cmd_found = 0;
3008         int rc = FAILED;
3009
3010         pinstance =
3011                 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
3012
3013         scmd_printk(KERN_INFO, scsi_cmd,
3014                     "I/O command timed out, aborting it.\n");
3015
3016         res = scsi_cmd->device->hostdata;
3017
3018         if (res == NULL)
3019                 return rc;
3020
3021         /* If we are currently going through reset/reload, return failed.
3022          * This will force the mid-layer to eventually call
3023          * pmcraid_eh_host_reset which will then go to sleep and wait for the
3024          * reset to complete
3025          */
3026         spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags);
3027
3028         if (pinstance->ioa_reset_in_progress ||
3029             pinstance->ioa_state == IOA_STATE_DEAD) {
3030                 spin_unlock_irqrestore(pinstance->host->host_lock,
3031                                        host_lock_flags);
3032                 return rc;
3033         }
3034
3035         /* loop over pending cmd list to find cmd corresponding to this
3036          * scsi_cmd. Note that this command might not have completed yet.
3037          * Locking: all pending commands are protected with
3038          * pending_pool_lock.
3039          */
3040         spin_lock_irqsave(&pinstance->pending_pool_lock, pending_lock_flags);
3041         list_for_each_entry(cmd, &pinstance->pending_cmd_pool, free_list) {
3042
3043                 if (cmd->scsi_cmd == scsi_cmd) {
3044                         cmd_found = 1;
3045                         break;
3046                 }
3047         }
3048
3049         spin_unlock_irqrestore(&pinstance->pending_pool_lock,
3050                                 pending_lock_flags);
3051
3052         /* If the command to be aborted was given to IOA and still pending with
3053          * it, send ABORT_TASK to abort this and wait for its completion
3054          */
3055         if (cmd_found)
3056                 cancel_cmd = pmcraid_abort_cmd(cmd);
3057
3058         spin_unlock_irqrestore(pinstance->host->host_lock,
3059                                host_lock_flags);
3060
3061         if (cancel_cmd) {
3062                 cancel_cmd->res = cmd->scsi_cmd->device->hostdata;
3063                 rc = pmcraid_abort_complete(cancel_cmd);
3064         }
3065
3066         return cmd_found ? rc : SUCCESS;
3067 }
3068
3069 /**
3070  * pmcraid_eh_xxxx_reset_handler - bus/target/device reset handler callbacks
3071  *
3072  * @scmd: pointer to scsi_cmd that was sent to the resource to be reset.
3073  *
3074  * All these routines invoke pmcraid_reset_device with appropriate parameters.
3075  * Since these are called from mid-layer EH thread, no other IO will be queued
3076  * to the resource being reset. However, control path (IOCTL) may be active so
3077  * it is necessary to synchronize IOARRIN writes, which pmcraid_reset_device
3078  * takes care of by locking/unlocking host_lock.
3079  *
3080  * Return value
3081  *      SUCCESS or FAILED
3082  */
3083 static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
3084 {
3085         scmd_printk(KERN_INFO, scmd,
3086                     "resetting device due to an I/O command timeout.\n");
3087         return pmcraid_reset_device(scmd,
3088                                     PMCRAID_INTERNAL_TIMEOUT,
3089                                     RESET_DEVICE_LUN);
3090 }
3091
3092 static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd)
3093 {
3094         scmd_printk(KERN_INFO, scmd,
3095                     "Doing bus reset due to an I/O command timeout.\n");
3096         return pmcraid_reset_device(scmd,
3097                                     PMCRAID_RESET_BUS_TIMEOUT,
3098                                     RESET_DEVICE_BUS);
3099 }
3100
3101 static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
3102 {
3103         scmd_printk(KERN_INFO, scmd,
3104                     "Doing target reset due to an I/O command timeout.\n");
3105         return pmcraid_reset_device(scmd,
3106                                     PMCRAID_INTERNAL_TIMEOUT,
3107                                     RESET_DEVICE_TARGET);
3108 }
3109
3110 /**
3111  * pmcraid_eh_host_reset_handler - adapter reset handler callback
3112  *
3113  * @scmd: pointer to scsi_cmd that was sent to a resource of adapter
3114  *
3115  * Initiates adapter reset to bring it up to operational state
3116  *
3117  * Return value
3118  *      SUCCESS or FAILED
3119  */
3120 static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
3121 {
3122         unsigned long interval = 10000; /* 10 seconds interval */
3123         int waits = jiffies_to_msecs(PMCRAID_RESET_HOST_TIMEOUT) / interval;
3124         struct pmcraid_instance *pinstance =
3125                 (struct pmcraid_instance *)(scmd->device->host->hostdata);
3126
3127
3128         /* wait for an additional 150 seconds in case the firmware comes up
3129          * and completes all the pending commands except the two HCAMs
3130          * (CCN and LDN).
3131          */
3132         while (waits--) {
3133                 if (atomic_read(&pinstance->outstanding_cmds) <=
3134                     PMCRAID_MAX_HCAM_CMD)
3135                         return SUCCESS;
3136                 msleep(interval);
3137         }
3138
3139         dev_err(&pinstance->pdev->dev,
3140                 "Adapter being reset due to an I/O command timeout.\n");
3141         return pmcraid_reset_bringup(pinstance) == 0 ? SUCCESS : FAILED;
3142 }
3143
3144 /**
3145  * pmcraid_init_ioadls - initializes IOADL related fields in IOARCB
3146  * @cmd: pmcraid command struct
3147  * @sgcount: count of scatter-gather elements
3148  *
3149  * Return value
3150  *   returns pointer to pmcraid_ioadl_desc, initialized to point to internal
3151  *   or external IOADLs
3152  */
3153 struct pmcraid_ioadl_desc *
3154 pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
3155 {
3156         struct pmcraid_ioadl_desc *ioadl;
3157         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
3158         int ioadl_count = 0;
3159
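             /* additional command parameters share the add_data area with the
              * embedded IOADL array, so the parameter bytes are counted in
              * 16-byte (IOADL-descriptor-sized) chunks to find the first free
              * IOADL slot
              */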
3160         if (ioarcb->add_cmd_param_length)
3161                 ioadl_count = DIV_ROUND_UP(ioarcb->add_cmd_param_length, 16);
3162         ioarcb->ioadl_length =
3163                 sizeof(struct pmcraid_ioadl_desc) * sgcount;
3164
3165         if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) {
3166                 /* external ioadls start at offset 0x80 from control_block
3167                  * structure, re-using 24 out of the 27 ioadls that are part of
3168                  * the IOARCB. It is necessary to indicate to the firmware that
3169                  * the driver's ioadls are to be treated as external to the IOARCB.
3170                  */
3171                 ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
3172                 ioarcb->ioadl_bus_addr =
3173                         cpu_to_le64((cmd->ioa_cb_bus_addr) +
3174                                 offsetof(struct pmcraid_ioarcb,
3175                                         add_data.u.ioadl[3]));
3176                 ioadl = &ioarcb->add_data.u.ioadl[3];
3177         } else {
3178                 ioarcb->ioadl_bus_addr =
3179                         cpu_to_le64((cmd->ioa_cb_bus_addr) +
3180                                 offsetof(struct pmcraid_ioarcb,
3181                                         add_data.u.ioadl[ioadl_count]));
3182
3183                 ioadl = &ioarcb->add_data.u.ioadl[ioadl_count];
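                     /* the IOARCB bus address is expected to be 32-byte aligned, so
                      * its low-order bits are used to carry the number of embedded
                      * descriptors rounded to units of eight (these bits are cleared
                      * in the external-IOADL case above)
                      */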
3184                 ioarcb->ioarcb_bus_addr |=
3185                                 DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8);
3186         }
3187
3188         return ioadl;
3189 }
3190
3191 /**
3192  * pmcraid_build_ioadl - Build a scatter/gather list and map the buffer
3193  * @pinstance: pointer to adapter instance structure
3194  * @cmd: pmcraid command struct
3195  *
3196  * This function is invoked by queuecommand entry point while sending a command
3197  * to firmware. This builds ioadl descriptors and sets up ioarcb fields.
3198  *
3199  * Return value:
3200  *      0 on success or -1 on failure
3201  */
3202 static int pmcraid_build_ioadl(
3203         struct pmcraid_instance *pinstance,
3204         struct pmcraid_cmd *cmd
3205 )
3206 {
3207         int i, nseg;
3208         struct scatterlist *sglist;
3209
3210         struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
3211         struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
3212         struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
3213
3214         u32 length = scsi_bufflen(scsi_cmd);
3215
3216         if (!length)
3217                 return 0;
3218
3219         nseg = scsi_dma_map(scsi_cmd);
3220
3221         if (nseg < 0) {
3222                 scmd_printk(KERN_ERR, scsi_cmd, "scsi_dma_map failed!\n");
3223                 return -1;
3224         } else if (nseg > PMCRAID_MAX_IOADLS) {
3225                 scsi_dma_unmap(scsi_cmd);
3226                 scmd_printk(KERN_ERR, scsi_cmd,
3227                         "sg count is (%d) more than allowed!\n", nseg);
3228                 return -1;
3229         }
3230
3231         /* Initialize IOARCB data transfer length fields */
3232         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE)
3233                 ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
3234
3235         ioarcb->request_flags0 |= NO_LINK_DESCS;
3236         ioarcb->data_transfer_length = cpu_to_le32(length);
3237         ioadl = pmcraid_init_ioadls(cmd, nseg);
3238
3239         /* Initialize IOADL descriptor addresses */
3240         scsi_for_each_sg(scsi_cmd, sglist, nseg, i) {
3241                 ioadl[i].data_len = cpu_to_le32(sg_dma_len(sglist));
3242                 ioadl[i].address = cpu_to_le64(sg_dma_address(sglist));
3243                 ioadl[i].flags = 0;
3244         }
3245         /* setup last descriptor */
3246         ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
3247
3248         return 0;
3249 }
3250
3251 /**
3252  * pmcraid_free_sglist - Frees an allocated SG buffer list
3253  * @sglist: scatter/gather list pointer
3254  *
3255  * Frees DMA'able memory previously allocated with pmcraid_alloc_sglist
3256  *
3257  * Return value:
3258  *      none
3259  */
3260 static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
3261 {
3262         int i;
3263
3264         for (i = 0; i < sglist->num_sg; i++)
3265                 __free_pages(sg_page(&(sglist->scatterlist[i])),
3266                              sglist->order);
3267
3268         kfree(sglist);
3269 }
3270
3271 /**
3272  * pmcraid_alloc_sglist - Allocates memory for a SG list
3273  * @buflen: buffer length
3274  *
3275  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3276  * list.
3277  *
3278  * Return value
3279  *      pointer to sglist / NULL on failure
3280  */
3281 static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
3282 {
3283         struct pmcraid_sglist *sglist;
3284         struct scatterlist *scatterlist;
3285         struct page *page;
3286         int num_elem, i, j;
3287         int sg_size;
3288         int order;
3289         int bsize_elem;
3290
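             /* split the buffer across at most (PMCRAID_MAX_IOADLS - 1) elements;
              * each element is a power-of-two number of pages large enough to
              * hold its share of the buffer
              */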
3291         sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
3292         order = (sg_size > 0) ? get_order(sg_size) : 0;
3293         bsize_elem = PAGE_SIZE * (1 << order);
3294
3295         /* Determine the actual number of sg entries needed */
3296         if (buflen % bsize_elem)
3297                 num_elem = (buflen / bsize_elem) + 1;
3298         else
3299                 num_elem = buflen / bsize_elem;
3300
3301         /* Allocate a scatter/gather list for the DMA */
3302         sglist = kzalloc(sizeof(struct pmcraid_sglist) +
3303                          (sizeof(struct scatterlist) * (num_elem - 1)),
3304                          GFP_KERNEL);
3305
3306         if (sglist == NULL)
3307                 return NULL;
3308
3309         scatterlist = sglist->scatterlist;
3310         sg_init_table(scatterlist, num_elem);
3311         sglist->order = order;
3312         sglist->num_sg = num_elem;
3313         sg_size = buflen;
3314
3315         for (i = 0; i < num_elem; i++) {
3316                 page = alloc_pages(GFP_KERNEL|GFP_DMA|__GFP_ZERO, order);
3317                 if (!page) {
3318                         for (j = i - 1; j >= 0; j--)
3319                                 __free_pages(sg_page(&scatterlist[j]), order);
3320                         kfree(sglist);
3321                         return NULL;
3322                 }
3323
3324                 sg_set_page(&scatterlist[i], page,
3325                         sg_size < bsize_elem ? sg_size : bsize_elem, 0);
3326                 sg_size -= bsize_elem;
3327         }
3328
3329         return sglist;
3330 }
3331
3332 /**
3333  * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
3334  * @sglist: scatter/gather list pointer
3335  * @buffer: buffer pointer
3336  * @len: buffer length
3337  * @direction: data transfer direction
3338  *
3339  * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist
3340  *
3341  * Return value:
3342  * 0 on success / other on failure
3343  */
3344 static int pmcraid_copy_sglist(
3345         struct pmcraid_sglist *sglist,
3346         unsigned long buffer,
3347         u32 len,
3348         int direction
3349 )
3350 {
3351         struct scatterlist *scatterlist;
3352         void *kaddr;
3353         int bsize_elem;
3354         int i;
3355         int rc = 0;
3356
3357         /* Determine the actual number of bytes per element */
3358         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3359
3360         scatterlist = sglist->scatterlist;
3361
3362         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3363                 struct page *page = sg_page(&scatterlist[i]);
3364
3365                 kaddr = kmap(page);
3366                 if (direction == DMA_TO_DEVICE)
3367                         rc = __copy_from_user(kaddr,
3368                                               (void *)buffer,
3369                                               bsize_elem);
3370                 else
3371                         rc = __copy_to_user((void *)buffer, kaddr, bsize_elem);
3372
3373                 kunmap(page);
3374
3375                 if (rc) {
3376                         pmcraid_err("failed to copy user data into sg list\n");
3377                         return -EFAULT;
3378                 }
3379
3380                 scatterlist[i].length = bsize_elem;
3381         }
3382
3383         if (len % bsize_elem) {
3384                 struct page *page = sg_page(&scatterlist[i]);
3385
3386                 kaddr = kmap(page);
3387
3388                 if (direction == DMA_TO_DEVICE)
3389                         rc = __copy_from_user(kaddr,
3390                                               (void *)buffer,
3391                                               len % bsize_elem);
3392                 else
3393                         rc = __copy_to_user((void *)buffer,
3394                                             kaddr,
3395                                             len % bsize_elem);
3396
3397                 kunmap(page);
3398
3399                 scatterlist[i].length = len % bsize_elem;
3400         }
3401
3402         if (rc) {
3403                 pmcraid_err("failed to copy user data into sg list\n");
3404                 rc = -EFAULT;
3405         }
3406
3407         return rc;
3408 }
3409
3410 /**
3411  * pmcraid_queuecommand - Queue a mid-layer request
3412  * @scsi_cmd: scsi command struct
3413  * @done: done function
3414  *
3415  * This function queues a request generated by the mid-layer. Midlayer calls
3416  * this routine within host->lock. Some of the functions called by queuecommand
3417  * would use cmd block queue locks (free_pool_lock and pending_pool_lock)
3418  *
3419  * Return value:
3420  *        0 on success
3421  *        SCSI_MLQUEUE_DEVICE_BUSY if device is busy
3422  *        SCSI_MLQUEUE_HOST_BUSY if host is busy
3423  */
3424 static int pmcraid_queuecommand_lck(
3425         struct scsi_cmnd *scsi_cmd,
3426         void (*done) (struct scsi_cmnd *)
3427 )
3428 {
3429         struct pmcraid_instance *pinstance;
3430         struct pmcraid_resource_entry *res;
3431         struct pmcraid_ioarcb *ioarcb;
3432         struct pmcraid_cmd *cmd;
3433         u32 fw_version;
3434         int rc = 0;
3435
3436         pinstance =
3437                 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
3438         fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
3439         scsi_cmd->scsi_done = done;
3440         res = scsi_cmd->device->hostdata;
3441         scsi_cmd->result = (DID_OK << 16);
3442
3443         /* if adapter is marked as dead, set result to DID_NO_CONNECT complete
3444          * the command
3445          */
3446         if (pinstance->ioa_state == IOA_STATE_DEAD) {
3447                 pmcraid_info("IOA is dead, but queuecommand is scheduled\n");
3448                 scsi_cmd->result = (DID_NO_CONNECT << 16);
3449                 scsi_cmd->scsi_done(scsi_cmd);
3450                 return 0;
3451         }
3452
3453         /* If IOA reset is in progress, can't queue the commands */
3454         if (pinstance->ioa_reset_in_progress)
3455                 return SCSI_MLQUEUE_HOST_BUSY;
3456
3457         /* Firmware doesn't support SYNCHRONIZE_CACHE command (0x35), complete
3458          * the command here itself with success return
3459          */
3460         if (scsi_cmd->cmnd[0] == SYNCHRONIZE_CACHE) {
3461                 pmcraid_info("SYNC_CACHE(0x35), completing in driver itself\n");
3462                 scsi_cmd->scsi_done(scsi_cmd);
3463                 return 0;
3464         }
3465
3466         /* initialize the command and IOARCB to be sent to IOA */
3467         cmd = pmcraid_get_free_cmd(pinstance);
3468
3469         if (cmd == NULL) {
3470                 pmcraid_err("free command block is not available\n");
3471                 return SCSI_MLQUEUE_HOST_BUSY;
3472         }
3473
3474         cmd->scsi_cmd = scsi_cmd;
3475         ioarcb = &(cmd->ioa_cb->ioarcb);
3476         memcpy(ioarcb->cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
3477         ioarcb->resource_handle = res->cfg_entry.resource_handle;
3478         ioarcb->request_type = REQ_TYPE_SCSI;
3479
3480         /* set hrrq number where the IOA should respond to. Note that all cmds
3481          * generated internally use hrrq_id 0; the exception is the cmd block
3482          * of a scsi_cmd that is re-used (e.g. for cancel/abort), which uses the
3483          * hrrq_id assigned here in queuecommand
3484          */
3485         ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
3486                           pinstance->num_hrrq;
3487         cmd->cmd_done = pmcraid_io_done;
3488
3489         if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) {
3490                 if (scsi_cmd->underflow == 0)
3491                         ioarcb->request_flags0 |= INHIBIT_UL_CHECK;
3492
3493                 if (res->sync_reqd) {
3494                         ioarcb->request_flags0 |= SYNC_COMPLETE;
3495                         res->sync_reqd = 0;
3496                 }
3497
3498                 ioarcb->request_flags0 |= NO_LINK_DESCS;
3499
3500                 if (scsi_cmd->flags & SCMD_TAGGED)
3501                         ioarcb->request_flags1 |= TASK_TAG_SIMPLE;
3502
3503                 if (RES_IS_GSCSI(res->cfg_entry))
3504                         ioarcb->request_flags1 |= DELAY_AFTER_RESET;
3505         }
3506
3507         rc = pmcraid_build_ioadl(pinstance, cmd);
3508
3509         pmcraid_info("command (%d) CDB[0] = %x for %x:%x:%x:%x\n",
3510                      le32_to_cpu(ioarcb->response_handle) >> 2,
3511                      scsi_cmd->cmnd[0], pinstance->host->unique_id,
3512                      RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID :
3513                         PMCRAID_PHYS_BUS_ID,
3514                      RES_IS_VSET(res->cfg_entry) ?
3515                         (fw_version <= PMCRAID_FW_VERSION_1 ?
3516                                 res->cfg_entry.unique_flags1 :
3517                                         res->cfg_entry.array_id & 0xFF) :
3518                         RES_TARGET(res->cfg_entry.resource_address),
3519                      RES_LUN(res->cfg_entry.resource_address));
3520
3521         if (likely(rc == 0)) {
3522                 _pmcraid_fire_command(cmd);
3523         } else {
3524                 pmcraid_err("queuecommand could not build ioadl\n");
3525                 pmcraid_return_cmd(cmd);
3526                 rc = SCSI_MLQUEUE_HOST_BUSY;
3527         }
3528
3529         return rc;
3530 }
3531
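     /* DEF_SCSI_QCMD() generates pmcraid_queuecommand(), a wrapper that takes
      * the host lock and calls pmcraid_queuecommand_lck() defined above.
      */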
3532 static DEF_SCSI_QCMD(pmcraid_queuecommand)
3533
3534 /**
3535  * pmcraid_chr_open - char node "open" entry, allowed only to users with admin access
3536  */
3537 static int pmcraid_chr_open(struct inode *inode, struct file *filep)
3538 {
3539         struct pmcraid_instance *pinstance;
3540
3541         if (!capable(CAP_SYS_ADMIN))
3542                 return -EACCES;
3543
3544         /* Populate the adapter instance pointer for use by ioctl */
3545         pinstance = container_of(inode->i_cdev, struct pmcraid_instance, cdev);
3546         filep->private_data = pinstance;
3547
3548         return 0;
3549 }
3550
3551 /**
3552  * pmcraid_chr_fasync - Async notifier registration from applications
3553  *
3554  * This function adds the calling process to a driver global queue. When an
3555  * event occurs, SIGIO will be sent to all processes in this queue.
3556  */
3557 static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
3558 {
3559         struct pmcraid_instance *pinstance;
3560         int rc;
3561
3562         pinstance = filep->private_data;
3563         mutex_lock(&pinstance->aen_queue_lock);
3564         rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue);
3565         mutex_unlock(&pinstance->aen_queue_lock);
3566
3567         return rc;
3568 }
3569
3570
3571 /**
3572  * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
3573  * commands sent over IOCTL interface
3574  *
3575  * @cmd       : pointer to struct pmcraid_cmd
3576  * @buflen    : length of the request buffer
3577  * @direction : data transfer direction
3578  *
3579  * Return value
3580  *  0 on success, non-zero error code on failure
3581  */
3582 static int pmcraid_build_passthrough_ioadls(
3583         struct pmcraid_cmd *cmd,
3584         int buflen,
3585         int direction
3586 )
3587 {
3588         struct pmcraid_sglist *sglist = NULL;
3589         struct scatterlist *sg = NULL;
3590         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
3591         struct pmcraid_ioadl_desc *ioadl;
3592         int i;
3593
3594         sglist = pmcraid_alloc_sglist(buflen);
3595
3596         if (!sglist) {
3597                 pmcraid_err("can't allocate memory for passthrough SGls\n");
3598                 return -ENOMEM;
3599         }
3600
3601         sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev,
3602                                         sglist->scatterlist,
3603                                         sglist->num_sg, direction);
3604
3605         if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
3606                 dev_err(&cmd->drv_inst->pdev->dev,
3607                         "Failed to map passthrough buffer!\n");
3608                 pmcraid_free_sglist(sglist);
3609                 return -EIO;
3610         }
3611
3612         cmd->sglist = sglist;
3613         ioarcb->request_flags0 |= NO_LINK_DESCS;
3614
3615         ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
3616
3617         /* Initialize IOADL descriptor addresses */
3618         for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
3619                 ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
3620                 ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
3621                 ioadl[i].flags = 0;
3622         }
3623
3624         /* setup the last descriptor */
3625         ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
3626
3627         return 0;
3628 }
3629
3630
3631 /**
3632  * pmcraid_release_passthrough_ioadls - release passthrough ioadls
3633  *
3634  * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
3635  * @buflen: size of the request buffer
3636  * @direction: data transfer direction
3637  *
3638  * Return value
3639  *  none
3640  */
3641 static void pmcraid_release_passthrough_ioadls(
3642         struct pmcraid_cmd *cmd,
3643         int buflen,
3644         int direction
3645 )
3646 {
3647         struct pmcraid_sglist *sglist = cmd->sglist;
3648
3649         if (buflen > 0) {
3650                 pci_unmap_sg(cmd->drv_inst->pdev,
3651                              sglist->scatterlist,
3652                              sglist->num_sg,
3653                              direction);
3654                 pmcraid_free_sglist(sglist);
3655                 cmd->sglist = NULL;
3656         }
3657 }
3658
3659 /**
3660  * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
3661  * @pinstance: pointer to adapter instance structure
3662  * @ioctl_cmd: ioctl code
3663  * @buflen: length of the request buffer
3664  * @arg: pointer to pmcraid_passthrough_ioctl_buffer user buffer
3665  *
3666  * Return value
3667  *  0 on success, non-zero error code on failure
3668  */
3669 static long pmcraid_ioctl_passthrough(
3670         struct pmcraid_instance *pinstance,
3671         unsigned int ioctl_cmd,
3672         unsigned int buflen,
3673         unsigned long arg
3674 )
3675 {
3676         struct pmcraid_passthrough_ioctl_buffer *buffer;
3677         struct pmcraid_ioarcb *ioarcb;
3678         struct pmcraid_cmd *cmd;
3679         struct pmcraid_cmd *cancel_cmd;
3680         unsigned long request_buffer;
3681         unsigned long request_offset;
3682         unsigned long lock_flags;
3683         void *ioasa;
3684         u32 ioasc;
3685         int request_size;
3686         int buffer_size;
3687         u8 access, direction;
3688         int rc = 0;
3689
3690         /* If IOA reset is in progress, wait 10 secs for reset to complete */
3691         if (pinstance->ioa_reset_in_progress) {
3692                 rc = wait_event_interruptible_timeout(
3693                                 pinstance->reset_wait_q,
3694                                 !pinstance->ioa_reset_in_progress,
3695                                 msecs_to_jiffies(10000));
3696
3697                 if (!rc)
3698                         return -ETIMEDOUT;
3699                 else if (rc < 0)
3700                         return -ERESTARTSYS;
3701         }
3702
3703         /* If adapter is not in operational state, return error */
3704         if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
3705                 pmcraid_err("IOA is not operational\n");
3706                 return -ENOTTY;
3707         }
3708
3709         buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
3710         buffer = kmalloc(buffer_size, GFP_KERNEL);
3711
3712         if (!buffer) {
3713                 pmcraid_err("no memory for passthrough buffer\n");
3714                 return -ENOMEM;
3715         }
3716
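             /* the caller's request data lives at the request_buffer offset within
              * the pmcraid_passthrough_ioctl_buffer that userspace passed via arg
              */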
3717         request_offset =
3718             offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
3719
3720         request_buffer = arg + request_offset;
3721
3722         rc = __copy_from_user(buffer,
3723                              (struct pmcraid_passthrough_ioctl_buffer *) arg,
3724                              sizeof(struct pmcraid_passthrough_ioctl_buffer));
3725
3726         ioasa =
3727         (void *)(arg +
3728                 offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
3729
3730         if (rc) {
3731                 pmcraid_err("ioctl: can't copy passthrough buffer\n");
3732                 rc = -EFAULT;
3733                 goto out_free_buffer;
3734         }
3735
3736         request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length);
3737
3738         if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
3739                 access = VERIFY_READ;
3740                 direction = DMA_TO_DEVICE;
3741         } else {
3742                 access = VERIFY_WRITE;
3743                 direction = DMA_FROM_DEVICE;
3744         }
3745
3746         if (request_size > 0) {
3747                 rc = access_ok(access, arg, request_offset + request_size);
3748
3749                 if (!rc) {
3750                         rc = -EFAULT;
3751                         goto out_free_buffer;
3752                 }
3753         } else if (request_size < 0) {
3754                 rc = -EINVAL;
3755                 goto out_free_buffer;
3756         }
3757
3758         /* check if we have any additional command parameters */
3759         if (buffer->ioarcb.add_cmd_param_length > PMCRAID_ADD_CMD_PARAM_LEN) {
3760                 rc = -EINVAL;
3761                 goto out_free_buffer;
3762         }
3763
3764         cmd = pmcraid_get_free_cmd(pinstance);
3765
3766         if (!cmd) {
3767                 pmcraid_err("free command block is not available\n");
3768                 rc = -ENOMEM;
3769                 goto out_free_buffer;
3770         }
3771
3772         cmd->scsi_cmd = NULL;
3773         ioarcb = &(cmd->ioa_cb->ioarcb);
3774
3775         /* Copy the user-provided IOARCB fields one by one */
3776         ioarcb->resource_handle = buffer->ioarcb.resource_handle;
3777         ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
3778         ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
3779         ioarcb->request_type = buffer->ioarcb.request_type;
3780         ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
3781         ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
3782         memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
3783
3784         if (buffer->ioarcb.add_cmd_param_length) {
3785                 ioarcb->add_cmd_param_length =
3786                         buffer->ioarcb.add_cmd_param_length;
3787                 ioarcb->add_cmd_param_offset =
3788                         buffer->ioarcb.add_cmd_param_offset;
3789                 memcpy(ioarcb->add_data.u.add_cmd_params,
3790                         buffer->ioarcb.add_data.u.add_cmd_params,
3791                         buffer->ioarcb.add_cmd_param_length);
3792         }
3793
3794         /* set hrrq number where the IOA should respond to. Note that all cmds
3795          * generated internally use hrrq_id 0; the exception is the cmd block
3796          * of a scsi_cmd that is re-used (e.g. for cancel/abort), which uses the
3797          * hrrq_id assigned here
3798          */
3799         ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
3800                           pinstance->num_hrrq;
3801
3802         if (request_size) {
3803                 rc = pmcraid_build_passthrough_ioadls(cmd,
3804                                                       request_size,
3805                                                       direction);
3806                 if (rc) {
3807                         pmcraid_err("couldn't build passthrough ioadls\n");
3808                         goto out_free_buffer;
3809                 }
3810         } else if (request_size < 0) {
3811                 rc = -EINVAL;
3812                 goto out_free_buffer;
3813         }
3814
3815         /* If data is being written into the device, copy the data from user
3816          * buffers
3817          */
3818         if (direction == DMA_TO_DEVICE && request_size > 0) {
3819                 rc = pmcraid_copy_sglist(cmd->sglist,
3820                                          request_buffer,
3821                                          request_size,
3822                                          direction);
3823                 if (rc) {
3824                         pmcraid_err("failed to copy user buffer\n");
3825                         goto out_free_sglist;
3826                 }
3827         }
3828
3829         /* passthrough ioctl is a blocking command, so put the caller to sleep
3830          * until timeout. Note that a timeout value of 0 means no timeout.
3831          */
3832         cmd->cmd_done = pmcraid_internal_done;
3833         init_completion(&cmd->wait_for_completion);
3834         cmd->completion_req = 1;
3835
3836         pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
3837                      le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
3838                      cmd->ioa_cb->ioarcb.cdb[0],
3839                      le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
3840
3841         spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
3842         _pmcraid_fire_command(cmd);
3843         spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
3844
3845         /* NOTE ! Remove the below line once abort_task is implemented
3846          * in firmware. This line disables ioctl command timeout handling logic
3847          * similar to IO command timeout handling, making ioctl commands wait
3848          * until command completion regardless of the timeout value specified
3849          * in the ioarcb
3850          */
3851         buffer->ioarcb.cmd_timeout = 0;
3852
3853         /* If command timeout is specified put caller to wait till that time,
3854          * otherwise it would be blocking wait. If command gets timed out, it
3855          * will be aborted.
3856          */
3857         if (buffer->ioarcb.cmd_timeout == 0) {
3858                 wait_for_completion(&cmd->wait_for_completion);
3859         } else if (!wait_for_completion_timeout(
3860                         &cmd->wait_for_completion,
3861                         msecs_to_jiffies(buffer->ioarcb.cmd_timeout * 1000))) {
3862
3863                 pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
3864                         le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
3865                         cmd->ioa_cb->ioarcb.cdb[0]);
3866
3867                 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
3868                 cancel_cmd = pmcraid_abort_cmd(cmd);
3869                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
3870
3871                 if (cancel_cmd) {
3872                         wait_for_completion(&cancel_cmd->wait_for_completion);
3873                         ioasc = cancel_cmd->ioa_cb->ioasa.ioasc;
3874                         pmcraid_return_cmd(cancel_cmd);
3875
3876                         /* if abort task couldn't find the command, i.e. it got
3877                          * completed prior to aborting, return good completion.
3878                          * if the command got aborted successfully or there was an
3879                          * IOA reset due to abort task itself getting timed out,
3880                          * then return -ETIMEDOUT
3881                          */
3882                         if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
3883                             PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
3884                                 if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
3885                                         rc = -ETIMEDOUT;
3886                                 goto out_handle_response;
3887                         }
3888                 }
3889
3890                 /* if no command block was available for abort task, or abort task
3891                  * failed to abort the IOARCB, then wait for 150 more seconds and
3892                  * initiate the reset sequence after timeout
3893                  */
3894                 if (!wait_for_completion_timeout(
3895                         &cmd->wait_for_completion,
3896                         msecs_to_jiffies(150 * 1000))) {
3897                         pmcraid_reset_bringup(cmd->drv_inst);
3898                         rc = -ETIMEDOUT;
3899                 }
3900         }
3901
3902 out_handle_response:
3903         /* copy entire IOASA buffer and return IOCTL success.
3904          * If copying IOASA to user-buffer fails, return
3905          * EFAULT
3906          */
3907         if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
3908                 sizeof(struct pmcraid_ioasa))) {
3909                 pmcraid_err("failed to copy ioasa buffer to user\n");
3910                 rc = -EFAULT;
3911         }
3912
3913         /* If the data transfer was from device, copy the data onto user
3914          * buffers
3915          */
3916         else if (direction == DMA_FROM_DEVICE && request_size > 0) {
3917                 rc = pmcraid_copy_sglist(cmd->sglist,
3918                                          request_buffer,
3919                                          request_size,
3920                                          direction);
3921                 if (rc) {
3922                         pmcraid_err("failed to copy user buffer\n");
3923                         rc = -EFAULT;
3924                 }
3925         }
3926
3927 out_free_sglist:
3928         pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
3929         pmcraid_return_cmd(cmd);
3930
3931 out_free_buffer:
3932         kfree(buffer);
3933
3934         return rc;
3935 }
3936
3937
3938
3939
3940 /**
3941  * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
3942  *
3943  * @pinstance: pointer to adapter instance structure
3944  * @cmd: ioctl command passed in
3945  * @buflen: length of user_buffer
3946  * @user_buffer: user buffer pointer
3947  *
3948  * Return Value
3949  *   0 in case of success, otherwise appropriate error code
3950  */
3951 static long pmcraid_ioctl_driver(
3952         struct pmcraid_instance *pinstance,
3953         unsigned int cmd,
3954         unsigned int buflen,
3955         void __user *user_buffer
3956 )
3957 {
3958         int rc = -ENOSYS;
3959
3960         if (!access_ok(VERIFY_READ, user_buffer, _IOC_SIZE(cmd))) {
3961                 pmcraid_err("ioctl_driver: access fault in request buffer\n");
3962                 return -EFAULT;
3963         }
3964
3965         switch (cmd) {
3966         case PMCRAID_IOCTL_RESET_ADAPTER:
3967                 pmcraid_reset_bringup(pinstance);
3968                 rc = 0;
3969                 break;
3970
3971         default:
3972                 break;
3973         }
3974
3975         return rc;
3976 }
3977
3978 /**
3979  * pmcraid_check_ioctl_buffer - check for proper access to user buffer
3980  *
3981  * @cmd: ioctl command
3982  * @arg: user buffer
3983  * @hdr: pointer to kernel memory for pmcraid_ioctl_header
3984  *
3985  * Return Value
3986  *      negative error code if there are access issues, otherwise zero.
3987  *      Upon success, the ioctl header is copied out of the user buffer into *hdr.
3988  */
3989
3990 static int pmcraid_check_ioctl_buffer(
3991         int cmd,
3992         void __user *arg,
3993         struct pmcraid_ioctl_header *hdr
3994 )
3995 {
3996         int rc = 0;
3997         int access = VERIFY_READ;
3998
3999         if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) {
4000                 pmcraid_err("couldn't copy ioctl header from user buffer\n");
4001                 return -EFAULT;
4002         }
4003
4004         /* check for valid driver signature */
4005         rc = memcmp(hdr->signature,
4006                     PMCRAID_IOCTL_SIGNATURE,
4007                     sizeof(hdr->signature));
4008         if (rc) {
4009                 pmcraid_err("signature verification failed\n");
4010                 return -EINVAL;
4011         }
4012
4013         /* check for appropriate buffer access */
4014         if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ)
4015                 access = VERIFY_WRITE;
4016
4017         rc = access_ok(access,
4018                        (arg + sizeof(struct pmcraid_ioctl_header)),
4019                        hdr->buffer_length);
4020         if (!rc) {
4021                 pmcraid_err("access failed for user buffer of size %d\n",
4022                              hdr->buffer_length);
4023                 return -EFAULT;
4024         }
4025
4026         return 0;
4027 }
4028
4029 /**
4030  *  pmcraid_chr_ioctl - char node ioctl entry point
4031  */
4032 static long pmcraid_chr_ioctl(
4033         struct file *filep,
4034         unsigned int cmd,
4035         unsigned long arg
4036 )
4037 {
4038         struct pmcraid_instance *pinstance = NULL;
4039         struct pmcraid_ioctl_header *hdr = NULL;
4040         int retval = -ENOTTY;
4041
4042         hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
4043
4044         if (!hdr) {
4045                 pmcraid_err("failed to allocate memory for ioctl header\n");
4046                 return -ENOMEM;
4047         }
4048
4049         retval = pmcraid_check_ioctl_buffer(cmd, (void *)arg, hdr);
4050
4051         if (retval) {
4052                 pmcraid_info("chr_ioctl: header check failed\n");
4053                 kfree(hdr);
4054                 return retval;
4055         }
4056
4057         pinstance = filep->private_data;
4058
4059         if (!pinstance) {
4060                 pmcraid_info("adapter instance is not found\n");
4061                 kfree(hdr);
4062                 return -ENOTTY;
4063         }
4064
4065         switch (_IOC_TYPE(cmd)) {
4066
4067         case PMCRAID_PASSTHROUGH_IOCTL:
4068                 /* If ioctl code is to download microcode, we need to block
4069                  * mid-layer requests.
4070                  */
4071                 if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
4072                         scsi_block_requests(pinstance->host);
4073
4074                 retval = pmcraid_ioctl_passthrough(pinstance,
4075                                                    cmd,
4076                                                    hdr->buffer_length,
4077                                                    arg);
4078
4079                 if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
4080                         scsi_unblock_requests(pinstance->host);
4081                 break;
4082
4083         case PMCRAID_DRIVER_IOCTL:
4084                 arg += sizeof(struct pmcraid_ioctl_header);
4085                 retval = pmcraid_ioctl_driver(pinstance,
4086                                               cmd,
4087                                               hdr->buffer_length,
4088                                               (void __user *)arg);
4089                 break;
4090
4091         default:
4092                 retval = -ENOTTY;
4093                 break;
4094         }
4095
4096         kfree(hdr);
4097
4098         return retval;
4099 }
4100
4101 /**
4102  * File operations structure for management interface
4103  */
4104 static const struct file_operations pmcraid_fops = {
4105         .owner = THIS_MODULE,
4106         .open = pmcraid_chr_open,
4107         .fasync = pmcraid_chr_fasync,
4108         .unlocked_ioctl = pmcraid_chr_ioctl,
4109 #ifdef CONFIG_COMPAT
4110         .compat_ioctl = pmcraid_chr_ioctl,
4111 #endif
4112         .llseek = noop_llseek,
4113 };
4114
4115
4116
4117
4118 /**
4119  * pmcraid_show_log_level - Display adapter's error logging level
4120  * @dev: class device struct
4121  * @buf: buffer
4122  *
4123  * Return value:
4124  *  number of bytes printed to buffer
4125  */
4126 static ssize_t pmcraid_show_log_level(
4127         struct device *dev,
4128         struct device_attribute *attr,
4129         char *buf)
4130 {
4131         struct Scsi_Host *shost = class_to_shost(dev);
4132         struct pmcraid_instance *pinstance =
4133                 (struct pmcraid_instance *)shost->hostdata;
4134         return snprintf(buf, PAGE_SIZE, "%d\n", pinstance->current_log_level);
4135 }
4136
4137 /**
4138  * pmcraid_store_log_level - Change the adapter's error logging level
4139  * @dev: class device struct
4140  * @buf: buffer
4141  * @count: not used
4142  *
4143  * Return value:
4144  *  number of bytes used from the buffer
4145  */
4146 static ssize_t pmcraid_store_log_level(
4147         struct device *dev,
4148         struct device_attribute *attr,
4149         const char *buf,
4150         size_t count
4151 )
4152 {
4153         struct Scsi_Host *shost;
4154         struct pmcraid_instance *pinstance;
4155         u8 val;
4156
4157         if (kstrtou8(buf, 10, &val))
4158                 return -EINVAL;
4159         /* log-level should be from 0 to 2 */
4160         if (val > 2)
4161                 return -EINVAL;
4162
4163         shost = class_to_shost(dev);
4164         pinstance = (struct pmcraid_instance *)shost->hostdata;
4165         pinstance->current_log_level = val;
4166
4167         return strlen(buf);
4168 }
4169
4170 static struct device_attribute pmcraid_log_level_attr = {
4171         .attr = {
4172                  .name = "log_level",
4173                  .mode = S_IRUGO | S_IWUSR,
4174                  },
4175         .show = pmcraid_show_log_level,
4176         .store = pmcraid_store_log_level,
4177 };
4178
4179 /**
4180  * pmcraid_show_drv_version - Display driver version
4181  * @dev: class device struct
4182  * @buf: buffer
4183  *
4184  * Return value:
4185  *  number of bytes printed to buffer
4186  */
4187 static ssize_t pmcraid_show_drv_version(
4188         struct device *dev,
4189         struct device_attribute *attr,
4190         char *buf
4191 )
4192 {
4193         return snprintf(buf, PAGE_SIZE, "version: %s\n",
4194                         PMCRAID_DRIVER_VERSION);
4195 }
4196
4197 static struct device_attribute pmcraid_driver_version_attr = {
4198         .attr = {
4199                  .name = "drv_version",
4200                  .mode = S_IRUGO,
4201                  },
4202         .show = pmcraid_show_drv_version,
4203 };
4204
4205 /**
4206  * pmcraid_show_adapter_id - Display driver assigned adapter id
4207  * @dev: class device struct
4208  * @buf: buffer
4209  *
4210  * Return value:
4211  *  number of bytes printed to buffer
4212  */
4213 static ssize_t pmcraid_show_adapter_id(
4214         struct device *dev,
4215         struct device_attribute *attr,
4216         char *buf
4217 )
4218 {
4219         struct Scsi_Host *shost = class_to_shost(dev);
4220         struct pmcraid_instance *pinstance =
4221                 (struct pmcraid_instance *)shost->hostdata;
4222         u32 adapter_id = (pinstance->pdev->bus->number << 8) |
4223                 pinstance->pdev->devfn;
4224         u32 aen_group = pmcraid_event_family.id;
4225
4226         return snprintf(buf, PAGE_SIZE,
4227                         "adapter id: %d\nminor: %d\naen group: %d\n",
4228                         adapter_id, MINOR(pinstance->cdev.dev), aen_group);
4229 }
4230
4231 static struct device_attribute pmcraid_adapter_id_attr = {
4232         .attr = {
4233                  .name = "adapter_id",
4234                  .mode = S_IRUGO,
4235                  },
4236         .show = pmcraid_show_adapter_id,
4237 };
4238
4239 static struct device_attribute *pmcraid_host_attrs[] = {
4240         &pmcraid_log_level_attr,
4241         &pmcraid_driver_version_attr,
4242         &pmcraid_adapter_id_attr,
4243         NULL,
4244 };
4245
4246
4247 /* host template structure for pmcraid driver */
4248 static struct scsi_host_template pmcraid_host_template = {
4249         .module = THIS_MODULE,
4250         .name = PMCRAID_DRIVER_NAME,
4251         .queuecommand = pmcraid_queuecommand,
4252         .eh_abort_handler = pmcraid_eh_abort_handler,
4253         .eh_bus_reset_handler = pmcraid_eh_bus_reset_handler,
4254         .eh_target_reset_handler = pmcraid_eh_target_reset_handler,
4255         .eh_device_reset_handler = pmcraid_eh_device_reset_handler,
4256         .eh_host_reset_handler = pmcraid_eh_host_reset_handler,
4257
4258         .slave_alloc = pmcraid_slave_alloc,
4259         .slave_configure = pmcraid_slave_configure,
4260         .slave_destroy = pmcraid_slave_destroy,
4261         .change_queue_depth = pmcraid_change_queue_depth,
4262         .change_queue_type  = scsi_change_queue_type,
4263         .can_queue = PMCRAID_MAX_IO_CMD,
4264         .this_id = -1,
4265         .sg_tablesize = PMCRAID_MAX_IOADLS,
4266         .max_sectors = PMCRAID_IOA_MAX_SECTORS,
4267         .no_write_same = 1,
4268         .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
4269         .use_clustering = ENABLE_CLUSTERING,
4270         .shost_attrs = pmcraid_host_attrs,
4271         .proc_name = PMCRAID_DRIVER_NAME,
4272         .use_blk_tags = 1,
4273 };
4274
4275 /**
4276  * pmcraid_isr_msix - implements MSI-X interrupt handling routine
4277  * @irq: interrupt vector number
4278  * @dev_id: pointer to hrrq_vector
4279  *
4280  * Return Value
4281  *       IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
4282  */
4283
4284 static irqreturn_t pmcraid_isr_msix(int irq, void *dev_id)
4285 {
4286         struct pmcraid_isr_param *hrrq_vector;
4287         struct pmcraid_instance *pinstance;
4288         unsigned long lock_flags;
4289         u32 intrs_val;
4290         int hrrq_id;
4291
4292         hrrq_vector = (struct pmcraid_isr_param *)dev_id;
4293         hrrq_id = hrrq_vector->hrrq_id;
4294         pinstance = hrrq_vector->drv_inst;
4295
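        /* Only the vector servicing HRRQ 0 checks for IOA error and
         * state-transition interrupts; every vector schedules its own
         * per-HRRQ tasklet at the end of this handler.
         */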
4296         if (!hrrq_id) {
4297                 /* Read the interrupt */
4298                 intrs_val = pmcraid_read_interrupts(pinstance);
4299                 if (intrs_val &&
4300                         ((ioread32(pinstance->int_regs.host_ioa_interrupt_reg)
4301                         & DOORBELL_INTR_MSIX_CLR) == 0)) {
4302                         /* Any error interrupt, including unit_check,
4303                          * initiates an IOA reset. On a unit check, tell
4304                          * reset_sequence that the IOA unit checked and
4305                          * prepare for a dump during the reset sequence.
4306                          */
4307                         if (intrs_val & PMCRAID_ERROR_INTERRUPTS) {
4308                                 if (intrs_val & INTRS_IOA_UNIT_CHECK)
4309                                         pinstance->ioa_unit_check = 1;
4310
4311                                 pmcraid_err("ISR: error interrupts: %x initiating reset\n",
4312                                             intrs_val);
4313                                 spin_lock_irqsave(pinstance->host->host_lock,
4314                                         lock_flags);
4315                                 pmcraid_initiate_reset(pinstance);
4316                                 spin_unlock_irqrestore(
4317                                         pinstance->host->host_lock,
4318                                         lock_flags);
4319                         }
4320                         /* If the interrupt was part of IOA initialization,
4321                          * clear it. Delete the timer and wake up the
4322                          * reset engine to proceed with the reset sequence.
4323                          */
4324                         if (intrs_val & INTRS_TRANSITION_TO_OPERATIONAL)
4325                                 pmcraid_clr_trans_op(pinstance);
4326
4327                         /* Clear the interrupt register by writing
4328                          * to host to ioa doorbell. Once done
4329                          * FW will clear the interrupt.
4330                          */
4331                         iowrite32(DOORBELL_INTR_MSIX_CLR,
4332                                 pinstance->int_regs.host_ioa_interrupt_reg);
4333                         ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
4334
4335
4336                 }
4337         }
4338
4339         tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id]));
4340
4341         return IRQ_HANDLED;
4342 }
4343
4344 /**
4345  * pmcraid_isr  - implements legacy interrupt handling routine
4346  *
4347  * @irq: interrupt vector number
4348  * @dev_id: pointer to hrrq_vector
4349  *
4350  * Return Value
4351  *       IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
4352  */
4353 static irqreturn_t pmcraid_isr(int irq, void *dev_id)
4354 {
4355         struct pmcraid_isr_param *hrrq_vector;
4356         struct pmcraid_instance *pinstance;
4357         u32 intrs;
4358         unsigned long lock_flags;
4359         int hrrq_id = 0;
4360
4361         /* In case of legacy interrupt mode where interrupts are shared across
4362          * isrs, it may be possible that the current interrupt is not from IOA
4363          */
4364         if (!dev_id) {
4365                 printk(KERN_INFO "%s(): NULL host pointer\n", __func__);
4366                 return IRQ_NONE;
4367         }
4368         hrrq_vector = (struct pmcraid_isr_param *)dev_id;
4369         pinstance = hrrq_vector->drv_inst;
4370
4371         intrs = pmcraid_read_interrupts(pinstance);
4372
4373         if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0))
4374                 return IRQ_NONE;
4375
4376         /* Any error interrupts including unit_check, initiate IOA reset.
4377          * In case of unit check indicate to reset_sequence that IOA unit
4378          * checked and prepare for a dump during reset sequence
4379          */
4380         if (intrs & PMCRAID_ERROR_INTERRUPTS) {
4381
4382                 if (intrs & INTRS_IOA_UNIT_CHECK)
4383                         pinstance->ioa_unit_check = 1;
4384
4385                 iowrite32(intrs,
4386                           pinstance->int_regs.ioa_host_interrupt_clr_reg);
4387                 pmcraid_err("ISR: error interrupts: %x initiating reset\n",
4388                             intrs);
4389                 intrs = ioread32(
4390                                 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4391                 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
4392                 pmcraid_initiate_reset(pinstance);
4393                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
4394         } else {
4395                 /* If the interrupt was part of IOA initialization,
4396                  * clear it. Delete the timer and wake up the
4397                  * reset engine to proceed with the reset sequence.
4398                  */
4399                 if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
4400                         pmcraid_clr_trans_op(pinstance);
4401                 } else {
4402                         iowrite32(intrs,
4403                                 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4404                         ioread32(
4405                                 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4406
4407                         tasklet_schedule(
4408                                         &(pinstance->isr_tasklet[hrrq_id]));
4409                 }
4410         }
4411
4412         return IRQ_HANDLED;
4413 }
4414
4415
4416 /**
4417  * pmcraid_worker_function -  worker thread function
4418  *
4419  * @workp: pointer to struct work queue
4420  *
4421  * Return Value
4422  *       None
4423  */
4424
4425 static void pmcraid_worker_function(struct work_struct *workp)
4426 {
4427         struct pmcraid_instance *pinstance;
4428         struct pmcraid_resource_entry *res;
4429         struct pmcraid_resource_entry *temp;
4430         struct scsi_device *sdev;
4431         unsigned long lock_flags;
4432         unsigned long host_lock_flags;
4433         u16 fw_version;
4434         u8 bus, target, lun;
4435
4436         pinstance = container_of(workp, struct pmcraid_instance, worker_q);
4437         /* add resources only after host is added into system */
4438         if (!atomic_read(&pinstance->expose_resources))
4439                 return;
4440
4441         fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
4442
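        /* Deleted resources are handled first. resource_lock is dropped
         * across scsi_remove_device()/scsi_add_device() below since those
         * midlayer calls can sleep.
         */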
4443         spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
4444         list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) {
4445
4446                 if (res->change_detected == RES_CHANGE_DEL && res->scsi_dev) {
4447                         sdev = res->scsi_dev;
4448
4449                         /* host_lock must be held before calling
4450                          * scsi_device_get
4451                          */
4452                         spin_lock_irqsave(pinstance->host->host_lock,
4453                                           host_lock_flags);
4454                         if (!scsi_device_get(sdev)) {
4455                                 spin_unlock_irqrestore(
4456                                                 pinstance->host->host_lock,
4457                                                 host_lock_flags);
4458                                 pmcraid_info("deleting %x from midlayer\n",
4459                                              res->cfg_entry.resource_address);
4460                                 list_move_tail(&res->queue,
4461                                                 &pinstance->free_res_q);
4462                                 spin_unlock_irqrestore(
4463                                         &pinstance->resource_lock,
4464                                         lock_flags);
4465                                 scsi_remove_device(sdev);
4466                                 scsi_device_put(sdev);
4467                                 spin_lock_irqsave(&pinstance->resource_lock,
4468                                                    lock_flags);
4469                                 res->change_detected = 0;
4470                         } else {
4471                                 spin_unlock_irqrestore(
4472                                                 pinstance->host->host_lock,
4473                                                 host_lock_flags);
4474                         }
4475                 }
4476         }
4477
4478         list_for_each_entry(res, &pinstance->used_res_q, queue) {
4479
4480                 if (res->change_detected == RES_CHANGE_ADD) {
4481
4482                         if (!pmcraid_expose_resource(fw_version,
4483                                                      &res->cfg_entry))
4484                                 continue;
4485
4486                         if (RES_IS_VSET(res->cfg_entry)) {
4487                                 bus = PMCRAID_VSET_BUS_ID;
4488                                 if (fw_version <= PMCRAID_FW_VERSION_1)
4489                                         target = res->cfg_entry.unique_flags1;
4490                                 else
4491                                         target = res->cfg_entry.array_id & 0xFF;
4492                                 lun = PMCRAID_VSET_LUN_ID;
4493                         } else {
4494                                 bus = PMCRAID_PHYS_BUS_ID;
4495                                 target =
4496                                      RES_TARGET(
4497                                         res->cfg_entry.resource_address);
4498                                 lun = RES_LUN(res->cfg_entry.resource_address);
4499                         }
4500
4501                         res->change_detected = 0;
4502                         spin_unlock_irqrestore(&pinstance->resource_lock,
4503                                                 lock_flags);
4504                         scsi_add_device(pinstance->host, bus, target, lun);
4505                         spin_lock_irqsave(&pinstance->resource_lock,
4506                                            lock_flags);
4507                 }
4508         }
4509
4510         spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
4511 }
4512
4513 /**
4514  * pmcraid_tasklet_function - Tasklet function
4515  *
4516  * @instance: pointer to msix param structure
4517  *
4518  * Return Value
4519  *      None
4520  */
4521 static void pmcraid_tasklet_function(unsigned long instance)
4522 {
4523         struct pmcraid_isr_param *hrrq_vector;
4524         struct pmcraid_instance *pinstance;
4525         unsigned long hrrq_lock_flags;
4526         unsigned long pending_lock_flags;
4527         unsigned long host_lock_flags;
4528         spinlock_t *lockp; /* hrrq buffer lock */
4529         int id;
4530         __le32 resp;
4531
4532         hrrq_vector = (struct pmcraid_isr_param *)instance;
4533         pinstance = hrrq_vector->drv_inst;
4534         id = hrrq_vector->hrrq_id;
4535         lockp = &(pinstance->hrrq_lock[id]);
4536
4537         /* loop through each of the commands responded by IOA. Each HRRQ buf is
4538          * protected by its own lock. Traversals must be done within this lock
4539          * as there may be multiple tasklets running on multiple CPUs. Note
4540          * that the lock is held just for picking up the response handle and
4541          * manipulating hrrq_curr/toggle_bit values.
4542          */
4543         spin_lock_irqsave(lockp, hrrq_lock_flags);
4544
4545         resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
4546
4547         while ((resp & HRRQ_TOGGLE_BIT) ==
4548                 pinstance->host_toggle_bit[id]) {
4549
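                /* The low two bits of the response (including the toggle
                 * bit) are control bits; the remaining bits index into
                 * cmd_list, hence the shift by two.
                 */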
4550                 int cmd_index = resp >> 2;
4551                 struct pmcraid_cmd *cmd = NULL;
4552
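                /* Advance to the next HRRQ entry; wrapping back to
                 * hrrq_start flips the expected toggle bit so that stale
                 * entries from the previous pass are not re-processed.
                 */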
4553                 if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) {
4554                         pinstance->hrrq_curr[id]++;
4555                 } else {
4556                         pinstance->hrrq_curr[id] = pinstance->hrrq_start[id];
4557                         pinstance->host_toggle_bit[id] ^= 1u;
4558                 }
4559
4560                 if (cmd_index >= PMCRAID_MAX_CMD) {
4561                         /* In case of invalid response handle, log message */
4562                         pmcraid_err("Invalid response handle %d\n", cmd_index);
4563                         resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
4564                         continue;
4565                 }
4566
4567                 cmd = pinstance->cmd_list[cmd_index];
4568                 spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4569
4570                 spin_lock_irqsave(&pinstance->pending_pool_lock,
4571                                    pending_lock_flags);
4572                 list_del(&cmd->free_list);
4573                 spin_unlock_irqrestore(&pinstance->pending_pool_lock,
4574                                         pending_lock_flags);
4575                 del_timer(&cmd->timer);
4576                 atomic_dec(&pinstance->outstanding_cmds);
4577
4578                 if (cmd->cmd_done == pmcraid_ioa_reset) {
4579                         spin_lock_irqsave(pinstance->host->host_lock,
4580                                           host_lock_flags);
4581                         cmd->cmd_done(cmd);
4582                         spin_unlock_irqrestore(pinstance->host->host_lock,
4583                                                host_lock_flags);
4584                 } else if (cmd->cmd_done != NULL) {
4585                         cmd->cmd_done(cmd);
4586                 }
4587                 /* loop over until we are done with all responses */
4588                 spin_lock_irqsave(lockp, hrrq_lock_flags);
4589                 resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
4590         }
4591
4592         spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4593 }
4594
4595 /**
4596  * pmcraid_unregister_interrupt_handler - de-register interrupts handlers
4597  * @pinstance: pointer to adapter instance structure
4598  *
4599  * This routine un-registers the registered interrupt handlers and
4600  * also frees the irqs/vectors.
4601  *
4602  * Return Value
4603  *      None
4604  */
4605 static
4606 void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
4607 {
4608         int i;
4609
4610         for (i = 0; i < pinstance->num_hrrq; i++)
4611                 free_irq(pinstance->hrrq_vector[i].vector,
4612                          &(pinstance->hrrq_vector[i]));
4613
4614         if (pinstance->interrupt_mode) {
4615                 pci_disable_msix(pinstance->pdev);
4616                 pinstance->interrupt_mode = 0;
4617         }
4618 }
4619
4620 /**
4621  * pmcraid_register_interrupt_handler - registers interrupt handler
4622  * @pinstance: pointer to per-adapter instance structure
4623  *
4624  * Return Value
4625  *      0 on success, non-zero error code otherwise.
4626  */
4627 static int
4628 pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
4629 {
4630         int rc;
4631         struct pci_dev *pdev = pinstance->pdev;
4632
4633         if ((pmcraid_enable_msix) &&
4634                 (pci_find_capability(pdev, PCI_CAP_ID_MSIX))) {
4635                 int num_hrrq = PMCRAID_NUM_MSIX_VECTORS;
4636                 struct msix_entry entries[PMCRAID_NUM_MSIX_VECTORS];
4637                 int i;
4638                 for (i = 0; i < PMCRAID_NUM_MSIX_VECTORS; i++)
4639                         entries[i].entry = i;
4640
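                /* pci_enable_msix_range() returns the number of vectors
                 * actually granted (between 1 and num_hrrq) or a negative
                 * errno, in which case the driver falls back to a single
                 * legacy IRQ below.
                 */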
4641                 num_hrrq = pci_enable_msix_range(pdev, entries, 1, num_hrrq);
4642                 if (num_hrrq < 0)
4643                         goto pmcraid_isr_legacy;
4644
4645                 for (i = 0; i < num_hrrq; i++) {
4646                         pinstance->hrrq_vector[i].hrrq_id = i;
4647                         pinstance->hrrq_vector[i].drv_inst = pinstance;
4648                         pinstance->hrrq_vector[i].vector = entries[i].vector;
4649                         rc = request_irq(pinstance->hrrq_vector[i].vector,
4650                                         pmcraid_isr_msix, 0,
4651                                         PMCRAID_DRIVER_NAME,
4652                                         &(pinstance->hrrq_vector[i]));
4653
4654                         if (rc) {
4655                                 int j;
4656                                 for (j = 0; j < i; j++)
4657                                         free_irq(entries[j].vector,
4658                                                  &(pinstance->hrrq_vector[j]));
4659                                 pci_disable_msix(pdev);
4660                                 goto pmcraid_isr_legacy;
4661                         }
4662                 }
4663
4664                 pinstance->num_hrrq = num_hrrq;
4665                 pinstance->interrupt_mode = 1;
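                /* Tell the IOA to generate MSI-X interrupts; the read-back
                 * flushes the posted doorbell write.
                 */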
4666                 iowrite32(DOORBELL_INTR_MODE_MSIX,
4667                           pinstance->int_regs.host_ioa_interrupt_reg);
4668                 ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
4669                 goto pmcraid_isr_out;
4670         }
4671
4672 pmcraid_isr_legacy:
4673         /* If MSI-X registration failed, fall back to legacy mode where
4674          * only one hrrq entry will be used.
4675          */
4676         pinstance->hrrq_vector[0].hrrq_id = 0;
4677         pinstance->hrrq_vector[0].drv_inst = pinstance;
4678         pinstance->hrrq_vector[0].vector = pdev->irq;
4679         pinstance->num_hrrq = 1;
4680
4681         rc = request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
4682                          PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
4683 pmcraid_isr_out:
4684         return rc;
4685 }
4686
4687 /**
4688  * pmcraid_release_cmd_blocks - release buffers allocated for command blocks
4689  * @pinstance: per adapter instance structure pointer
4690  * @max_index: number of buffer blocks to release
4691  *
4692  * Return Value
4693  *  None
4694  */
4695 static void
4696 pmcraid_release_cmd_blocks(struct pmcraid_instance *pinstance, int max_index)
4697 {
4698         int i;
4699         for (i = 0; i < max_index; i++) {
4700                 kmem_cache_free(pinstance->cmd_cachep, pinstance->cmd_list[i]);
4701                 pinstance->cmd_list[i] = NULL;
4702         }
4703         kmem_cache_destroy(pinstance->cmd_cachep);
4704         pinstance->cmd_cachep = NULL;
4705 }
4706
4707 /**
4708  * pmcraid_release_control_blocks - releases buffers allocated for control blocks
4709  * @pinstance: pointer to per adapter instance structure
4710  * @max_index: number of buffers (from 0 onwards) to release
4711  *
4712  * This function assumes that the command blocks for which control blocks are
4713  * linked are not released.
4714  *
4715  * Return Value
4716  *       None
4717  */
4718 static void
4719 pmcraid_release_control_blocks(
4720         struct pmcraid_instance *pinstance,
4721         int max_index
4722 )
4723 {
4724         int i;
4725
4726         if (pinstance->control_pool == NULL)
4727                 return;
4728
4729         for (i = 0; i < max_index; i++) {
4730                 pci_pool_free(pinstance->control_pool,
4731                               pinstance->cmd_list[i]->ioa_cb,
4732                               pinstance->cmd_list[i]->ioa_cb_bus_addr);
4733                 pinstance->cmd_list[i]->ioa_cb = NULL;
4734                 pinstance->cmd_list[i]->ioa_cb_bus_addr = 0;
4735         }
4736         pci_pool_destroy(pinstance->control_pool);
4737         pinstance->control_pool = NULL;
4738 }
4739
4740 /**
4741  * pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures
4742  * @pinstance: pointer to per adapter instance structure
4743  *
4744  * Allocates memory for command blocks using kernel slab allocator.
4745  *
4746  * Return Value
4747  *      0 in case of success; -ENOMEM in case of failure
4748  */
4749 static int pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
4750 {
4751         int i;
4752
4753         sprintf(pinstance->cmd_pool_name, "pmcraid_cmd_pool_%d",
4754                 pinstance->host->unique_id);
4755
4756
4757         pinstance->cmd_cachep = kmem_cache_create(
4758                                         pinstance->cmd_pool_name,
4759                                         sizeof(struct pmcraid_cmd), 0,
4760                                         SLAB_HWCACHE_ALIGN, NULL);
4761         if (!pinstance->cmd_cachep)
4762                 return -ENOMEM;
4763
4764         for (i = 0; i < PMCRAID_MAX_CMD; i++) {
4765                 pinstance->cmd_list[i] =
4766                         kmem_cache_alloc(pinstance->cmd_cachep, GFP_KERNEL);
4767                 if (!pinstance->cmd_list[i]) {
4768                         pmcraid_release_cmd_blocks(pinstance, i);
4769                         return -ENOMEM;
4770                 }
4771         }
4772         return 0;
4773 }
4774
4775 /**
4776  * pmcraid_allocate_control_blocks - allocates memory control blocks
4777  * @pinstance : pointer to per adapter instance structure
4778  *
4779  * This function allocates PCI memory for DMAable buffers like IOARCB, IOADLs
4780  * and IOASAs. This is called after command blocks are already allocated.
4781  *
4782  * Return Value
4783  *  0 in case it can allocate all control blocks, otherwise -ENOMEM
4784  */
4785 static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
4786 {
4787         int i;
4788
4789         sprintf(pinstance->ctl_pool_name, "pmcraid_control_pool_%d",
4790                 pinstance->host->unique_id);
4791
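        /* Control blocks (IOARCB + IOADLs + IOASA) come from a DMA pool
         * created with PMCRAID_IOARCB_ALIGNMENT so the bus addresses handed
         * to the IOA are suitably aligned.
         */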
4792         pinstance->control_pool =
4793                 pci_pool_create(pinstance->ctl_pool_name,
4794                                 pinstance->pdev,
4795                                 sizeof(struct pmcraid_control_block),
4796                                 PMCRAID_IOARCB_ALIGNMENT, 0);
4797
4798         if (!pinstance->control_pool)
4799                 return -ENOMEM;
4800
4801         for (i = 0; i < PMCRAID_MAX_CMD; i++) {
4802                 pinstance->cmd_list[i]->ioa_cb =
4803                         pci_pool_alloc(
4804                                 pinstance->control_pool,
4805                                 GFP_KERNEL,
4806                                 &(pinstance->cmd_list[i]->ioa_cb_bus_addr));
4807
4808                 if (!pinstance->cmd_list[i]->ioa_cb) {
4809                         pmcraid_release_control_blocks(pinstance, i);
4810                         return -ENOMEM;
4811                 }
4812                 memset(pinstance->cmd_list[i]->ioa_cb, 0,
4813                         sizeof(struct pmcraid_control_block));
4814         }
4815         return 0;
4816 }
4817
4818 /**
4819  * pmcraid_release_host_rrqs - release memory allocated for hrrq buffer(s)
4820  * @pinstance: pointer to per adapter instance structure
4821  * @maxindex: size of hrrq buffer pointer array
4822  *
4823  * Return Value
4824  *      None
4825  */
4826 static void
4827 pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
4828 {
4829         int i;
4830         for (i = 0; i < maxindex; i++) {
4831
4832                 pci_free_consistent(pinstance->pdev,
4833                                     HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
4834                                     pinstance->hrrq_start[i],
4835                                     pinstance->hrrq_start_bus_addr[i]);
4836
4837                 /* reset pointers and toggle bit to zeros */
4838                 pinstance->hrrq_start[i] = NULL;
4839                 pinstance->hrrq_start_bus_addr[i] = 0;
4840                 pinstance->host_toggle_bit[i] = 0;
4841         }
4842 }
4843
4844 /**
4845  * pmcraid_allocate_host_rrqs - Allocate and initialize host RRQ buffers
4846  * @pinstance: pointer to per adapter instance structure
4847  *
4848  * Return value
4849  *      0 if hrrq buffers are allocated, -ENOMEM otherwise.
4850  */
4851 static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
4852 {
4853         int i, buffer_size;
4854
4855         buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
4856
4857         for (i = 0; i < pinstance->num_hrrq; i++) {
4858                 pinstance->hrrq_start[i] =
4859                         pci_alloc_consistent(
4860                                         pinstance->pdev,
4861                                         buffer_size,
4862                                         &(pinstance->hrrq_start_bus_addr[i]));
4863
4864                 if (pinstance->hrrq_start[i] == NULL) {
4865                         pmcraid_err("pci_alloc failed for hrrq vector : %d\n",
4866                                     i);
4867                         pmcraid_release_host_rrqs(pinstance, i);
4868                         return -ENOMEM;
4869                 }
4870
4871                 memset(pinstance->hrrq_start[i], 0, buffer_size);
4872                 pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
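                /* hrrq_end marks the last valid entry of the ring; once
                 * hrrq_curr reaches it, the response tasklet wraps back to
                 * hrrq_start and flips host_toggle_bit.
                 */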
4873                 pinstance->hrrq_end[i] =
4874                         pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
4875                 pinstance->host_toggle_bit[i] = 1;
4876                 spin_lock_init(&pinstance->hrrq_lock[i]);
4877         }
4878         return 0;
4879 }
4880
4881 /**
4882  * pmcraid_release_hcams - release HCAM buffers
4883  *
4884  * @pinstance: pointer to per adapter instance structure
4885  *
4886  * Return value
4887  *  none
4888  */
4889 static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
4890 {
4891         if (pinstance->ccn.msg != NULL) {
4892                 pci_free_consistent(pinstance->pdev,
4893                                     PMCRAID_AEN_HDR_SIZE +
4894                                     sizeof(struct pmcraid_hcam_ccn_ext),
4895                                     pinstance->ccn.msg,
4896                                     pinstance->ccn.baddr);
4897
4898                 pinstance->ccn.msg = NULL;
4899                 pinstance->ccn.hcam = NULL;
4900                 pinstance->ccn.baddr = 0;
4901         }
4902
4903         if (pinstance->ldn.msg != NULL) {
4904                 pci_free_consistent(pinstance->pdev,
4905                                     PMCRAID_AEN_HDR_SIZE +
4906                                     sizeof(struct pmcraid_hcam_ldn),
4907                                     pinstance->ldn.msg,
4908                                     pinstance->ldn.baddr);
4909
4910                 pinstance->ldn.msg = NULL;
4911                 pinstance->ldn.hcam = NULL;
4912                 pinstance->ldn.baddr = 0;
4913         }
4914 }
4915
4916 /**
4917  * pmcraid_allocate_hcams - allocates HCAM buffers
4918  * @pinstance : pointer to per adapter instance structure
4919  *
4920  * Return Value:
4921  *   0 in case of successful allocation, non-zero otherwise
4922  */
4923 static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
4924 {
4925         pinstance->ccn.msg = pci_alloc_consistent(
4926                                         pinstance->pdev,
4927                                         PMCRAID_AEN_HDR_SIZE +
4928                                         sizeof(struct pmcraid_hcam_ccn_ext),
4929                                         &(pinstance->ccn.baddr));
4930
4931         pinstance->ldn.msg = pci_alloc_consistent(
4932                                         pinstance->pdev,
4933                                         PMCRAID_AEN_HDR_SIZE +
4934                                         sizeof(struct pmcraid_hcam_ldn),
4935                                         &(pinstance->ldn.baddr));
4936
4937         if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
4938                 pmcraid_release_hcams(pinstance);
4939         } else {
4940                 pinstance->ccn.hcam =
4941                         (void *)pinstance->ccn.msg + PMCRAID_AEN_HDR_SIZE;
4942                 pinstance->ldn.hcam =
4943                         (void *)pinstance->ldn.msg + PMCRAID_AEN_HDR_SIZE;
4944
4945                 atomic_set(&pinstance->ccn.ignore, 0);
4946                 atomic_set(&pinstance->ldn.ignore, 0);
4947         }
4948
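        /* pmcraid_release_hcams() clears both message pointers on failure,
         * so checking ldn.msg alone covers either allocation failing.
         */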
4949         return (pinstance->ldn.msg == NULL) ? -ENOMEM : 0;
4950 }
4951
4952 /**
4953  * pmcraid_release_config_buffers - release config.table buffers
4954  * @pinstance: pointer to per adapter instance structure
4955  *
4956  * Return Value
4957  *       none
4958  */
4959 static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
4960 {
4961         if (pinstance->cfg_table != NULL &&
4962             pinstance->cfg_table_bus_addr != 0) {
4963                 pci_free_consistent(pinstance->pdev,
4964                                     sizeof(struct pmcraid_config_table),
4965                                     pinstance->cfg_table,
4966                                     pinstance->cfg_table_bus_addr);
4967                 pinstance->cfg_table = NULL;
4968                 pinstance->cfg_table_bus_addr = 0;
4969         }
4970
4971         if (pinstance->res_entries != NULL) {
4972                 int i;
4973
4974                 for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
4975                         list_del(&pinstance->res_entries[i].queue);
4976                 kfree(pinstance->res_entries);
4977                 pinstance->res_entries = NULL;
4978         }
4979
4980         pmcraid_release_hcams(pinstance);
4981 }
4982
4983 /**
4984  * pmcraid_allocate_config_buffers - allocates DMAable memory for config table
4985  * @pinstance : pointer to per adapter instance structure
4986  *
4987  * Return Value
4988  *      0 for successful allocation, -ENOMEM for any failure
4989  */
4990 static int pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
4991 {
4992         int i;
4993
4994         pinstance->res_entries =
4995                         kzalloc(sizeof(struct pmcraid_resource_entry) *
4996                                 PMCRAID_MAX_RESOURCES, GFP_KERNEL);
4997
4998         if (NULL == pinstance->res_entries) {
4999                 pmcraid_err("failed to allocate memory for resource table\n");
5000                 return -ENOMEM;
5001         }
5002
5003         for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
5004                 list_add_tail(&pinstance->res_entries[i].queue,
5005                               &pinstance->free_res_q);
5006
5007         pinstance->cfg_table =
5008                 pci_alloc_consistent(pinstance->pdev,
5009                                      sizeof(struct pmcraid_config_table),
5010                                      &pinstance->cfg_table_bus_addr);
5011
5012         if (NULL == pinstance->cfg_table) {
5013                 pmcraid_err("couldn't alloc DMA memory for config table\n");
5014                 pmcraid_release_config_buffers(pinstance);
5015                 return -ENOMEM;
5016         }
5017
5018         if (pmcraid_allocate_hcams(pinstance)) {
5019                 pmcraid_err("could not alloc DMA memory for HCAMS\n");
5020                 pmcraid_release_config_buffers(pinstance);
5021                 return -ENOMEM;
5022         }
5023
5024         return 0;
5025 }
5026
5027 /**
5028  * pmcraid_init_tasklets - registers tasklets for response handling
5029  *
5030  * @pinstance: pointer to adapter instance structure
5031  *
5032  * Return value
5033  *      none
5034  */
5035 static void pmcraid_init_tasklets(struct pmcraid_instance *pinstance)
5036 {
5037         int i;
5038         for (i = 0; i < pinstance->num_hrrq; i++)
5039                 tasklet_init(&pinstance->isr_tasklet[i],
5040                              pmcraid_tasklet_function,
5041                              (unsigned long)&pinstance->hrrq_vector[i]);
5042 }
5043
5044 /**
5045  * pmcraid_kill_tasklets - destroys tasklets registered for response handling
5046  *
5047  * @pinstance: pointer to adapter instance structure
5048  *
5049  * Return value
5050  *      none
5051  */
5052 static void pmcraid_kill_tasklets(struct pmcraid_instance *pinstance)
5053 {
5054         int i;
5055         for (i = 0; i < pinstance->num_hrrq; i++)
5056                 tasklet_kill(&pinstance->isr_tasklet[i]);
5057 }
5058
5059 /**
5060  * pmcraid_release_buffers - release per-adapter buffers allocated
5061  *
5062  * @pinstance: pointer to adapter soft state
5063  *
5064  * Return Value
5065  *      none
5066  */
5067 static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
5068 {
5069         pmcraid_release_config_buffers(pinstance);
5070         pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD);
5071         pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
5072         pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
5073
5074         if (pinstance->inq_data != NULL) {
5075                 pci_free_consistent(pinstance->pdev,
5076                                     sizeof(struct pmcraid_inquiry_data),
5077                                     pinstance->inq_data,
5078                                     pinstance->inq_data_baddr);
5079
5080                 pinstance->inq_data = NULL;
5081                 pinstance->inq_data_baddr = 0;
5082         }
5083
5084         if (pinstance->timestamp_data != NULL) {
5085                 pci_free_consistent(pinstance->pdev,
5086                                     sizeof(struct pmcraid_timestamp_data),
5087                                     pinstance->timestamp_data,
5088                                     pinstance->timestamp_data_baddr);
5089
5090                 pinstance->timestamp_data = NULL;
5091                 pinstance->timestamp_data_baddr = 0;
5092         }
5093 }
5094
5095 /**
5096  * pmcraid_init_buffers - allocates memory and initializes various structures
5097  * @pinstance: pointer to per adapter instance structure
5098  *
5099  * This routine pre-allocates memory based on the type of block as below:
5100  * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator,
5101  * IOARCBs(PMCRAID_MAX_CMD)  : DMAable memory, using pci pool allocator
5102  * config-table entries      : DMAable memory using pci_alloc_consistent
5103  * HostRRQs                  : DMAable memory, using pci_alloc_consistent
5104  *
5105  * Return Value
5106  *       0 in case all of the blocks are allocated, -ENOMEM otherwise.
5107  */
5108 static int pmcraid_init_buffers(struct pmcraid_instance *pinstance)
5109 {
5110         int i;
5111
5112         if (pmcraid_allocate_host_rrqs(pinstance)) {
5113                 pmcraid_err("couldn't allocate memory for %d host rrqs\n",
5114                              pinstance->num_hrrq);
5115                 return -ENOMEM;
5116         }
5117
5118         if (pmcraid_allocate_config_buffers(pinstance)) {
5119                 pmcraid_err("couldn't allocate memory for config buffers\n");
5120                 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
5121                 return -ENOMEM;
5122         }
5123
5124         if (pmcraid_allocate_cmd_blocks(pinstance)) {
5125                 pmcraid_err("couldn't allocate memory for cmd blocks\n");
5126                 pmcraid_release_config_buffers(pinstance);
5127                 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
5128                 return -ENOMEM;
5129         }
5130
5131         if (pmcraid_allocate_control_blocks(pinstance)) {
5132                 pmcraid_err("couldn't allocate memory for control blocks\n");
5133                 pmcraid_release_config_buffers(pinstance);
5134                 pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
5135                 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
5136                 return -ENOMEM;
5137         }
5138
5139         /* allocate DMAable memory for page D0 INQUIRY buffer */
5140         pinstance->inq_data = pci_alloc_consistent(
5141                                         pinstance->pdev,
5142                                         sizeof(struct pmcraid_inquiry_data),
5143                                         &pinstance->inq_data_baddr);
5144
5145         if (pinstance->inq_data == NULL) {
5146                 pmcraid_err("couldn't allocate DMA memory for INQUIRY\n");
5147                 pmcraid_release_buffers(pinstance);
5148                 return -ENOMEM;
5149         }
5150
5151         /* allocate DMAable memory for set timestamp data buffer */
5152         pinstance->timestamp_data = pci_alloc_consistent(
5153                                         pinstance->pdev,
5154                                         sizeof(struct pmcraid_timestamp_data),
5155                                         &pinstance->timestamp_data_baddr);
5156
5157         if (pinstance->timestamp_data == NULL) {
5158                 pmcraid_err(
5159                         "couldn't allocate DMA memory for set timestamp\n");
5160                 pmcraid_release_buffers(pinstance);
5161                 return -ENOMEM;
5162         }
5163
5164
5165         /* Initialize all the command blocks and add them to free pool. No
5166          * need to lock (free_pool_lock) as this is done in initialization
5167          * itself
5168          */
5169         for (i = 0; i < PMCRAID_MAX_CMD; i++) {
5170                 struct pmcraid_cmd *cmdp = pinstance->cmd_list[i];
5171                 pmcraid_init_cmdblk(cmdp, i);
5172                 cmdp->drv_inst = pinstance;
5173                 list_add_tail(&cmdp->free_list, &pinstance->free_cmd_pool);
5174         }
5175
5176         return 0;
5177 }
5178
5179 /**
5180  * pmcraid_reinit_buffers - resets various buffer pointers
5181  * @pinstance: pointer to adapter instance
5182  * Return value
5183  *      none
5184  */
5185 static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance)
5186 {
5187         int i;
5188         int buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
5189
5190         for (i = 0; i < pinstance->num_hrrq; i++) {
5191                 memset(pinstance->hrrq_start[i], 0, buffer_size);
5192                 pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
5193                 pinstance->hrrq_end[i] =
5194                         pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
5195                 pinstance->host_toggle_bit[i] = 1;
5196         }
5197 }
5198
5199 /**
5200  * pmcraid_init_instance - initialize per instance data structure
5201  * @pdev: pointer to pci device structure
5202  * @host: pointer to Scsi_Host structure
5203  * @mapped_pci_addr: memory mapped IOA configuration registers
5204  *
5205  * Return Value
5206  *       0 on success, non-zero in case of any failure
5207  */
5208 static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
5209                                  void __iomem *mapped_pci_addr)
5210 {
5211         struct pmcraid_instance *pinstance =
5212                 (struct pmcraid_instance *)host->hostdata;
5213
5214         pinstance->host = host;
5215         pinstance->pdev = pdev;
5216
5217         /* Initialize register addresses */
5218         pinstance->mapped_dma_addr = mapped_pci_addr;
5219
5220         /* Initialize chip-specific details */
5221         {
5222                 struct pmcraid_chip_details *chip_cfg = pinstance->chip_cfg;
5223                 struct pmcraid_interrupts *pint_regs = &pinstance->int_regs;
5224
5225                 pinstance->ioarrin = mapped_pci_addr + chip_cfg->ioarrin;
5226
5227                 pint_regs->ioa_host_interrupt_reg =
5228                         mapped_pci_addr + chip_cfg->ioa_host_intr;
5229                 pint_regs->ioa_host_interrupt_clr_reg =
5230                         mapped_pci_addr + chip_cfg->ioa_host_intr_clr;
5231                 pint_regs->ioa_host_msix_interrupt_reg =
5232                         mapped_pci_addr + chip_cfg->ioa_host_msix_intr;
5233                 pint_regs->host_ioa_interrupt_reg =
5234                         mapped_pci_addr + chip_cfg->host_ioa_intr;
5235                 pint_regs->host_ioa_interrupt_clr_reg =
5236                         mapped_pci_addr + chip_cfg->host_ioa_intr_clr;
5237
5238                 /* Current version of firmware exposes interrupt mask set
5239                  * and mask clr registers through memory mapped bar0.
5240                  */
5241                 pinstance->mailbox = mapped_pci_addr + chip_cfg->mailbox;
5242                 pinstance->ioa_status = mapped_pci_addr + chip_cfg->ioastatus;
5243                 pint_regs->ioa_host_interrupt_mask_reg =
5244                         mapped_pci_addr + chip_cfg->ioa_host_mask;
5245                 pint_regs->ioa_host_interrupt_mask_clr_reg =
5246                         mapped_pci_addr + chip_cfg->ioa_host_mask_clr;
5247                 pint_regs->global_interrupt_mask_reg =
5248                         mapped_pci_addr + chip_cfg->global_intr_mask;
5249         }
5250
5251         pinstance->ioa_reset_attempts = 0;
5252         init_waitqueue_head(&pinstance->reset_wait_q);
5253
5254         atomic_set(&pinstance->outstanding_cmds, 0);
5255         atomic_set(&pinstance->last_message_id, 0);
5256         atomic_set(&pinstance->expose_resources, 0);
5257
5258         INIT_LIST_HEAD(&pinstance->free_res_q);
5259         INIT_LIST_HEAD(&pinstance->used_res_q);
5260         INIT_LIST_HEAD(&pinstance->free_cmd_pool);
5261         INIT_LIST_HEAD(&pinstance->pending_cmd_pool);
5262
5263         spin_lock_init(&pinstance->free_pool_lock);
5264         spin_lock_init(&pinstance->pending_pool_lock);
5265         spin_lock_init(&pinstance->resource_lock);
5266         mutex_init(&pinstance->aen_queue_lock);
5267
5268         /* Work-queue (Shared) for deferred processing error handling */
5269         INIT_WORK(&pinstance->worker_q, pmcraid_worker_function);
5270
5271         /* Initialize the default log_level */
5272         pinstance->current_log_level = pmcraid_log_level;
5273
5274         /* Setup variables required for reset engine */
5275         pinstance->ioa_state = IOA_STATE_UNKNOWN;
5276         pinstance->reset_cmd = NULL;
5277         return 0;
5278 }
5279
5280 /**
5281  * pmcraid_shutdown - shutdown adapter controller.
5282  * @pdev: pci device struct
5283  *
5284  * Issues an adapter shutdown to the card and waits for its completion
5285  *
5286  * Return value
5287  *        none
5288  */
5289 static void pmcraid_shutdown(struct pci_dev *pdev)
5290 {
5291         struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5292         pmcraid_reset_bringdown(pinstance);
5293 }
5294
5295
5296 /**
5297  * pmcraid_get_minor - returns unused minor number from minor number bitmap
5298  */
5299 static unsigned short pmcraid_get_minor(void)
5300 {
5301         int minor;
5302
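        /* Scan the bitmap for the lowest unused minor and mark it in use. */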
5303         minor = find_first_zero_bit(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
5304         __set_bit(minor, pmcraid_minor);
5305         return minor;
5306 }
5307
5308 /**
5309  * pmcraid_release_minor - releases given minor back to minor number bitmap
5310  */
5311 static void pmcraid_release_minor(unsigned short minor)
5312 {
5313         __clear_bit(minor, pmcraid_minor);
5314 }
5315
5316 /**
5317  * pmcraid_setup_chrdev - allocates a minor number and registers a char device
5318  *
5319  * @pinstance: pointer to adapter instance for which to register device
5320  *
5321  * Return value
5322  *      0 in case of success, otherwise non-zero
5323  */
5324 static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
5325 {
5326         int minor;
5327         int error;
5328
5329         minor = pmcraid_get_minor();
5330         cdev_init(&pinstance->cdev, &pmcraid_fops);
5331         pinstance->cdev.owner = THIS_MODULE;
5332
5333         error = cdev_add(&pinstance->cdev, MKDEV(pmcraid_major, minor), 1);
5334
5335         if (error)
5336                 pmcraid_release_minor(minor);
5337         else
5338                 device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
5339                               NULL, "%s%u", PMCRAID_DEVFILE, minor);
5340         return error;
5341 }
5342
5343 /**
5344  * pmcraid_release_chrdev - unregisters per-adapter management interface
5345  *
5346  * @pinstance: pointer to adapter instance structure
5347  *
5348  * Return value
5349  *  none
5350  */
5351 static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
5352 {
5353         pmcraid_release_minor(MINOR(pinstance->cdev.dev));
5354         device_destroy(pmcraid_class,
5355                        MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev)));
5356         cdev_del(&pinstance->cdev);
5357 }
5358
5359 /**
5360  * pmcraid_remove - IOA hot plug remove entry point
5361  * @pdev: pci device struct
5362  *
5363  * Return value
5364  *        none
5365  */
5366 static void pmcraid_remove(struct pci_dev *pdev)
5367 {
5368         struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5369
5370         /* remove the management interface (/dev file) for this device */
5371         pmcraid_release_chrdev(pinstance);
5372
5373         /* remove host template from scsi midlayer */
5374         scsi_remove_host(pinstance->host);
5375
5376         /* block requests from mid-layer */
5377         scsi_block_requests(pinstance->host);
5378
5379         /* initiate shutdown adapter */
5380         pmcraid_shutdown(pdev);
5381
5382         pmcraid_disable_interrupts(pinstance, ~0);
5383         flush_work(&pinstance->worker_q);
5384
5385         pmcraid_kill_tasklets(pinstance);
5386         pmcraid_unregister_interrupt_handler(pinstance);
5387         pmcraid_release_buffers(pinstance);
5388         iounmap(pinstance->mapped_dma_addr);
5389         pci_release_regions(pdev);
5390         scsi_host_put(pinstance->host);
5391         pci_disable_device(pdev);
5392
5393         return;
5394 }
5395
5396 #ifdef CONFIG_PM
5397 /**
5398  * pmcraid_suspend - driver suspend entry point for power management
5399  * @pdev:   PCI device structure
5400  * @state:  PCI power state to suspend routine
5401  *
5402  * Return Value - 0 always
5403  */
5404 static int pmcraid_suspend(struct pci_dev *pdev, pm_message_t state)
5405 {
5406         struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5407
5408         pmcraid_shutdown(pdev);
5409         pmcraid_disable_interrupts(pinstance, ~0);
5410         pmcraid_kill_tasklets(pinstance);
5411         pci_set_drvdata(pinstance->pdev, pinstance);
5412         pmcraid_unregister_interrupt_handler(pinstance);
5413         pci_save_state(pdev);
5414         pci_disable_device(pdev);
5415         pci_set_power_state(pdev, pci_choose_state(pdev, state));
5416
5417         return 0;
5418 }
5419
5420 /**
5421  * pmcraid_resume - driver resume entry point PCI power management
5422  * @pdev: PCI device structure
5423  *
5424  * Return Value - 0 in case of success. Error code in case of any failure
5425  */
5426 static int pmcraid_resume(struct pci_dev *pdev)
5427 {
5428         struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5429         struct Scsi_Host *host = pinstance->host;
5430         int rc;
5431
5432         pci_set_power_state(pdev, PCI_D0);
5433         pci_enable_wake(pdev, PCI_D0, 0);
5434         pci_restore_state(pdev);
5435
5436         rc = pci_enable_device(pdev);
5437
5438         if (rc) {
5439                 dev_err(&pdev->dev, "resume: Enable device failed\n");
5440                 return rc;
5441         }
5442
5443         pci_set_master(pdev);
5444
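        /* Prefer a 64-bit DMA mask; fall back to a 32-bit mask when
         * dma_addr_t itself is only 32 bits wide or the 64-bit mask is
         * rejected by the platform.
         */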
5445         if ((sizeof(dma_addr_t) == 4) ||
5446              pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
5447                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5448
5449         if (rc == 0)
5450                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5451
5452         if (rc != 0) {
5453                 dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n");
5454                 goto disable_device;
5455         }
5456
5457         pmcraid_disable_interrupts(pinstance, ~0);
5458         atomic_set(&pinstance->outstanding_cmds, 0);
5459         rc = pmcraid_register_interrupt_handler(pinstance);
5460
5461         if (rc) {
5462                 dev_err(&pdev->dev,
5463                         "resume: couldn't register interrupt handlers\n");
5464                 rc = -ENODEV;
5465                 goto release_host;
5466         }
5467
5468         pmcraid_init_tasklets(pinstance);
5469         pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
5470
5471         /* Start with hard reset sequence which brings up IOA to operational
5472          * state as well as completes the reset sequence.
5473          */
5474         pinstance->ioa_hard_reset = 1;
5475
5476         /* Start IOA firmware initialization and bring card to Operational
5477          * state.
5478          */
5479         if (pmcraid_reset_bringup(pinstance)) {
5480                 dev_err(&pdev->dev, "couldn't initialize IOA\n");
5481                 rc = -ENODEV;
5482                 goto release_tasklets;
5483         }
5484
5485         return 0;
5486
5487 release_tasklets:
5488         pmcraid_disable_interrupts(pinstance, ~0);
5489         pmcraid_kill_tasklets(pinstance);
5490         pmcraid_unregister_interrupt_handler(pinstance);
5491
5492 release_host:
5493         scsi_host_put(host);
5494
5495 disable_device:
5496         pci_disable_device(pdev);
5497
5498         return rc;
5499 }
5500
5501 #else
5502
5503 #define pmcraid_suspend NULL
5504 #define pmcraid_resume  NULL
5505
5506 #endif /* CONFIG_PM */
5507
5508 /**
5509  * pmcraid_complete_ioa_reset - Called by either timer or tasklet during
5510  *                              completion of the ioa reset
5511  * @cmd: pointer to reset command block
5512  */
5513 static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd)
5514 {
5515         struct pmcraid_instance *pinstance = cmd->drv_inst;
5516         unsigned long flags;
5517
5518         spin_lock_irqsave(pinstance->host->host_lock, flags);
5519         pmcraid_ioa_reset(cmd);
5520         spin_unlock_irqrestore(pinstance->host->host_lock, flags);
5521         scsi_unblock_requests(pinstance->host);
5522         schedule_work(&pinstance->worker_q);
5523 }
5524
5525 /**
5526  * pmcraid_set_supported_devs - sends SET SUPPORTED DEVICES to IOAFP
5527  *
5528  * @cmd: pointer to pmcraid_cmd structure
5529  *
5530  * Return Value
5531  *  None
5532  */
5533 static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd)
5534 {
5535         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
5536         void (*cmd_done) (struct pmcraid_cmd *) = pmcraid_complete_ioa_reset;
5537
5538         pmcraid_reinit_cmdblk(cmd);
5539
5540         ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
5541         ioarcb->request_type = REQ_TYPE_IOACMD;
5542         ioarcb->cdb[0] = PMCRAID_SET_SUPPORTED_DEVICES;
5543         ioarcb->cdb[1] = ALL_DEVICES_SUPPORTED;
5544
5545         /* If this was called as part of resource table reinitialization due to
5546          * lost CCN, it is enough to return the command block back to free pool
5547          * as part of set_supported_devs completion function.
5548          */
5549         if (cmd->drv_inst->reinit_cfg_table) {
5550                 cmd->drv_inst->reinit_cfg_table = 0;
5551                 cmd->release = 1;
5552                 cmd_done = pmcraid_reinit_cfgtable_done;
5553         }
5554
5555         /* we will be done with the reset sequence after set supported devices,
5556          * setup the done function to return the command block back to free
5557          * pool
5558          */
5559         pmcraid_send_cmd(cmd,
5560                          cmd_done,
5561                          PMCRAID_SET_SUP_DEV_TIMEOUT,
5562                          pmcraid_timeout_handler);
5563         return;
5564 }
5565
5566 /**
5567  * pmcraid_set_timestamp - set the timestamp to IOAFP
5568  *
5569  * @cmd: pointer to pmcraid_cmd structure
5570  *
5571  * Return Value
5572  *  None
5573  */
5574 static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd)
5575 {
5576         struct pmcraid_instance *pinstance = cmd->drv_inst;
5577         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
5578         __be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN);
5579         struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
5580
5581         struct timeval tv;
5582         __le64 timestamp;
5583
5584         do_gettimeofday(&tv);
5585         timestamp = (u64)tv.tv_sec * 1000;
5586
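        /* Store the millisecond timestamp as six bytes, least-significant
         * byte first, in the buffer that is DMAed to the IOA below.
         */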
5587         pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp);
5588         pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8);
5589         pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16);
5590         pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24);
5591         pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32);
5592         pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp)  >> 40);
5593
5594         pmcraid_reinit_cmdblk(cmd);
5595         ioarcb->request_type = REQ_TYPE_SCSI;
5596         ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
5597         ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP;
5598         ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION;
5599         memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len));
5600
5601         ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
5602                                         offsetof(struct pmcraid_ioarcb,
5603                                                 add_data.u.ioadl[0]));
5604         ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
5605         ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
5606
5607         ioarcb->request_flags0 |= NO_LINK_DESCS;
5608         ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
5609         ioarcb->data_transfer_length =
5610                 cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
5611         ioadl = &(ioarcb->add_data.u.ioadl[0]);
5612         ioadl->flags = IOADL_FLAGS_LAST_DESC;
5613         ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr);
5614         ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
5615
5616         if (!pinstance->timestamp_error) {
5617                 pinstance->timestamp_error = 0;
5618                 pmcraid_send_cmd(cmd, pmcraid_set_supported_devs,
5619                          PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
5620         } else {
5621                 pmcraid_send_cmd(cmd, pmcraid_return_cmd,
5622                          PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
5623                 return;
5624         }
5625 }
5626
5627
5628 /**
5629  * pmcraid_init_res_table - Initialize the resource table
5630  * @cmd:  pointer to pmcraid command struct
5631  *
5632  * This function looks through the existing resource table, comparing
5633  * it with the config table. This function will take care of old/new
5634  * devices and schedule adding/removing them from the mid-layer
5635  * as appropriate.
5636  *
5637  * Return value
5638  *       None
5639  */
5640 static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
5641 {
5642         struct pmcraid_instance *pinstance = cmd->drv_inst;
5643         struct pmcraid_resource_entry *res, *temp;
5644         struct pmcraid_config_table_entry *cfgte;
5645         unsigned long lock_flags;
5646         int found, rc, i;
5647         u16 fw_version;
5648         LIST_HEAD(old_res);
5649
5650         if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED)
5651                 pmcraid_err("IOA requires microcode download\n");
5652
5653         fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
5654
5655         /* resource list is protected by pinstance->resource_lock.
5656          * init_res_table can be called from probe (user-thread) or runtime
5657          * reset (timer/tasklet)
5658          */
5659         spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
5660
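        /* park all currently used resources on old_res; entries that are
         * still present in the new config table are moved back below
         */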
5661         list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue)
5662                 list_move_tail(&res->queue, &old_res);
5663
5664         for (i = 0; i < pinstance->cfg_table->num_entries; i++) {
5665                 if (fw_version <= PMCRAID_FW_VERSION_1)
5667                         cfgte = &pinstance->cfg_table->entries[i];
5668                 else
5669                         cfgte = (struct pmcraid_config_table_entry *)
5670                                         &pinstance->cfg_table->entries_ext[i];
5671
5672                 if (!pmcraid_expose_resource(fw_version, cfgte))
5673                         continue;
5674
5675                 found = 0;
5676
5677                 /* If this entry was already detected and initialized */
5678                 list_for_each_entry_safe(res, temp, &old_res, queue) {
5679
5680                         rc = memcmp(&res->cfg_entry.resource_address,
5681                                     &cfgte->resource_address,
5682                                     sizeof(cfgte->resource_address));
5683                         if (!rc) {
5684                                 list_move_tail(&res->queue,
5685                                                 &pinstance->used_res_q);
5686                                 found = 1;
5687                                 break;
5688                         }
5689                 }
5690
5691                 /* If this is a new entry, initialize it and add it to the queue */
5692                 if (!found) {
5693
5694                         if (list_empty(&pinstance->free_res_q)) {
5695                                 pmcraid_err("Too many devices attached\n");
5696                                 break;
5697                         }
5698
5699                         found = 1;
5700                         res = list_entry(pinstance->free_res_q.next,
5701                                          struct pmcraid_resource_entry, queue);
5702
5703                         res->scsi_dev = NULL;
5704                         res->change_detected = RES_CHANGE_ADD;
5705                         res->reset_progress = 0;
5706                         list_move_tail(&res->queue, &pinstance->used_res_q);
5707                 }
5708
5709                 /* copy new configuration table entry details into driver
5710                  * maintained resource entry
5711                  */
5712                 if (found) {
5713                         memcpy(&res->cfg_entry, cfgte,
5714                                         pinstance->config_table_entry_size);
5715                         pmcraid_info("New res type:%x, vset:%x, addr:%x:\n",
5716                                  res->cfg_entry.resource_type,
5717                                  (fw_version <= PMCRAID_FW_VERSION_1 ?
5718                                         res->cfg_entry.unique_flags1 :
5719                                                 res->cfg_entry.array_id & 0xFF),
5720                                  le32_to_cpu(res->cfg_entry.resource_address));
5721                 }
5722         }
5723
5724         /* Detect any deleted entries, mark them for deletion from mid-layer */
5725         list_for_each_entry_safe(res, temp, &old_res, queue) {
5726
5727                 if (res->scsi_dev) {
5728                         res->change_detected = RES_CHANGE_DEL;
5729                         res->cfg_entry.resource_handle =
5730                                 PMCRAID_INVALID_RES_HANDLE;
5731                         list_move_tail(&res->queue, &pinstance->used_res_q);
5732                 } else {
5733                         list_move_tail(&res->queue, &pinstance->free_res_q);
5734                 }
5735         }
5736
5737         /* release the resource list lock */
5738         spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
5739         pmcraid_set_timestamp(cmd);
5740 }
5741
5742 /**
5743  * pmcraid_querycfg - Send a Query IOA Config to the adapter.
5744  * @cmd: pointer to pmcraid_cmd struct
5745  *
5746  * This function sends a Query IOA Configuration command to the adapter to
5747  * retrieve the IOA configuration table.
5748  *
5749  * Return value:
5750  *      none
5751  */
5752 static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
5753 {
5754         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
5755         struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
5756         struct pmcraid_instance *pinstance = cmd->drv_inst;
5757         __be32 cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table));
5758
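        /* firmware newer than PMCRAID_FW_VERSION_1 reports extended config
         * table entries, so pick the matching entry size
         */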
5759         if (be16_to_cpu(pinstance->inq_data->fw_version) <=
5760                                         PMCRAID_FW_VERSION_1)
5761                 pinstance->config_table_entry_size =
5762                         sizeof(struct pmcraid_config_table_entry);
5763         else
5764                 pinstance->config_table_entry_size =
5765                         sizeof(struct pmcraid_config_table_entry_ext);
5766
5767         ioarcb->request_type = REQ_TYPE_IOACMD;
5768         ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
5769
5770         ioarcb->cdb[0] = PMCRAID_QUERY_IOA_CONFIG;
5771
5772         /* firmware requires a 4-byte length field, specified in big-endian format */
5773         memcpy(&(ioarcb->cdb[10]), &cfg_table_size, sizeof(cfg_table_size));
5774
5775         /* Since entire config table can be described by single IOADL, it can
5776          * be part of IOARCB itself
5777          */
5778         ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
5779                                         offsetof(struct pmcraid_ioarcb,
5780                                                 add_data.u.ioadl[0]));
5781         ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
5782         ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
5783
5784         ioarcb->request_flags0 |= NO_LINK_DESCS;
5785         ioarcb->data_transfer_length =
5786                 cpu_to_le32(sizeof(struct pmcraid_config_table));
5787
5788         ioadl = &(ioarcb->add_data.u.ioadl[0]);
5789         ioadl->flags = IOADL_FLAGS_LAST_DESC;
5790         ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
5791         ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
5792
5793         pmcraid_send_cmd(cmd, pmcraid_init_res_table,
5794                          PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
5795 }
5796
5797
5798 /**
5799  * pmcraid_probe - PCI probe entry point for PMC MaxRAID controller driver
5800  * @pdev: pointer to pci device structure
5801  * @dev_id: pointer to device ids structure
5802  *
5803  * Return Value
5804  *      returns 0 if the device is claimed and successfully configured.
5805  *      returns non-zero error code in case of any failure
5806  */
5807 static int pmcraid_probe(struct pci_dev *pdev,
5808                          const struct pci_device_id *dev_id)
5809 {
5810         struct pmcraid_instance *pinstance;
5811         struct Scsi_Host *host;
5812         void __iomem *mapped_pci_addr;
5813         int rc = PCIBIOS_SUCCESSFUL;
5814
5815         if (atomic_read(&pmcraid_adapter_count) >= PMCRAID_MAX_ADAPTERS) {
5816                 pmcraid_err
5817                         ("maximum number(%d) of supported adapters reached\n",
5818                          atomic_read(&pmcraid_adapter_count));
5819                 return -ENOMEM;
5820         }
5821
5822         atomic_inc(&pmcraid_adapter_count);
5823         rc = pci_enable_device(pdev);
5824
5825         if (rc) {
5826                 dev_err(&pdev->dev, "Cannot enable adapter\n");
5827                 atomic_dec(&pmcraid_adapter_count);
5828                 return rc;
5829         }
5830
5831         dev_info(&pdev->dev,
5832                 "Found new IOA(%x:%x), Total IOA count: %d\n",
5833                  pdev->vendor, pdev->device,
5834                  atomic_read(&pmcraid_adapter_count));
5835
5836         rc = pci_request_regions(pdev, PMCRAID_DRIVER_NAME);
5837
5838         if (rc < 0) {
5839                 dev_err(&pdev->dev,
5840                         "Couldn't register memory range of registers\n");
5841                 goto out_disable_device;
5842         }
5843
5844         mapped_pci_addr = pci_iomap(pdev, 0, 0);
5845
5846         if (!mapped_pci_addr) {
5847                 dev_err(&pdev->dev, "Couldn't map PCI registers memory\n");
5848                 rc = -ENOMEM;
5849                 goto out_release_regions;
5850         }
5851
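        /* enable PCI bus mastering so the adapter can DMA to and from
         * host memory
         */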
5852         pci_set_master(pdev);
5853
5854         /* Firmware requires the system bus address of the IOARCB to be within
5855          * the 32-bit addressable range even though it has a 64-bit IOARRIN
5856          * register. Firmware supports 64-bit streaming DMA buffers, but
5857          * coherent buffers must be 32-bit addressable. The consistent DMA mask
5858          * is set to 32 bits below, so pci_alloc_consistent returns memory
5859          * within 4GB, which keeps coherent buffers in a range the firmware accepts.
5860          */
5861         if ((sizeof(dma_addr_t) == 4) ||
5862             pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
5863                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5864
5865         /* firmware expects 32-bit DMA addresses for the IOARRIN register; set a
5866          * 32-bit coherent mask so pci_alloc_consistent returns addresses within 4GB
5867          */
5868         if (rc == 0)
5869                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5870
5871         if (rc != 0) {
5872                 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5873                 goto cleanup_nomem;
5874         }
5875
5876         host = scsi_host_alloc(&pmcraid_host_template,
5877                                 sizeof(struct pmcraid_instance));
5878
5879         if (!host) {
5880                 dev_err(&pdev->dev, "scsi_host_alloc failed!\n");
5881                 rc = -ENOMEM;
5882                 goto cleanup_nomem;
5883         }
5884
5885         host->max_id = PMCRAID_MAX_NUM_TARGETS_PER_BUS;
5886         host->max_lun = PMCRAID_MAX_NUM_LUNS_PER_TARGET;
5887         host->unique_id = host->host_no;
5888         host->max_channel = PMCRAID_MAX_BUS_TO_SCAN;
5889         host->max_cmd_len = PMCRAID_MAX_CDB_LEN;
5890
5891         /* zero out entire instance structure */
5892         pinstance = (struct pmcraid_instance *)host->hostdata;
5893         memset(pinstance, 0, sizeof(*pinstance));
5894
5895         pinstance->chip_cfg =
5896                 (struct pmcraid_chip_details *)(dev_id->driver_data);
5897
5898         rc = pmcraid_init_instance(pdev, host, mapped_pci_addr);
5899
5900         if (rc < 0) {
5901                 dev_err(&pdev->dev, "failed to initialize adapter instance\n");
5902                 goto out_scsi_host_put;
5903         }
5904
5905         pci_set_drvdata(pdev, pinstance);
5906
5907         /* Save PCI config-space for use following the reset */
5908         rc = pci_save_state(pinstance->pdev);
5909
5910         if (rc != 0) {
5911                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
5912                 goto out_scsi_host_put;
5913         }
5914
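        /* mask all adapter interrupts until the interrupt handler is
         * registered and the IOA bring-up sequence is started
         */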
5915         pmcraid_disable_interrupts(pinstance, ~0);
5916
5917         rc = pmcraid_register_interrupt_handler(pinstance);
5918
5919         if (rc) {
5920                 dev_err(&pdev->dev, "couldn't register interrupt handler\n");
5921                 goto out_scsi_host_put;
5922         }
5923
5924         pmcraid_init_tasklets(pinstance);
5925
5926         /* allocate various buffers used by the LLD */
5927         rc = pmcraid_init_buffers(pinstance);
5928
5929         if (rc) {
5930                 pmcraid_err("couldn't allocate memory blocks\n");
5931                 goto out_unregister_isr;
5932         }
5933
5934         /* check the reset type required */
5935         pmcraid_reset_type(pinstance);
5936
5937         pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
5938
5939         /* Start IOA firmware initialization and bring card to Operational
5940          * state.
5941          */
5942         pmcraid_info("starting IOA initialization sequence\n");
5943         if (pmcraid_reset_bringup(pinstance)) {
5944                 dev_err(&pdev->dev, "couldn't initialize IOA\n");
5945                 rc = 1;
5946                 goto out_release_bufs;
5947         }
5948
5949         /* Add adapter instance into mid-layer list */
5950         rc = scsi_add_host(pinstance->host, &pdev->dev);
5951         if (rc != 0) {
5952                 pmcraid_err("couldn't add host into mid-layer: %d\n", rc);
5953                 goto out_release_bufs;
5954         }
5955
5956         scsi_scan_host(pinstance->host);
5957
5958         rc = pmcraid_setup_chrdev(pinstance);
5959
5960         if (rc != 0) {
5961                 pmcraid_err("couldn't create mgmt interface, error: %x\n",
5962                              rc);
5963                 goto out_remove_host;
5964         }
5965
5966         /* Schedule the worker thread to handle CCNs and take care of adding
5967          * devices to and removing them from the OS
5968          */
5969         atomic_set(&pinstance->expose_resources, 1);
5970         schedule_work(&pinstance->worker_q);
5971         return rc;
5972
5973 out_remove_host:
5974         scsi_remove_host(host);
5975
5976 out_release_bufs:
5977         pmcraid_release_buffers(pinstance);
5978
5979 out_unregister_isr:
5980         pmcraid_kill_tasklets(pinstance);
5981         pmcraid_unregister_interrupt_handler(pinstance);
5982
5983 out_scsi_host_put:
5984         scsi_host_put(host);
5985
5986 cleanup_nomem:
5987         iounmap(mapped_pci_addr);
5988
5989 out_release_regions:
5990         pci_release_regions(pdev);
5991
5992 out_disable_device:
5993         atomic_dec(&pmcraid_adapter_count);
5994         pci_disable_device(pdev);
5995         return -ENODEV;
5996 }
5997
5998 /*
5999  * PCI driver structure of the pmcraid driver
6000  */
6001 static struct pci_driver pmcraid_driver = {
6002         .name = PMCRAID_DRIVER_NAME,
6003         .id_table = pmcraid_pci_table,
6004         .probe = pmcraid_probe,
6005         .remove = pmcraid_remove,
6006         .suspend = pmcraid_suspend,
6007         .resume = pmcraid_resume,
6008         .shutdown = pmcraid_shutdown
6009 };
6010
6011 /**
6012  * pmcraid_init - module load entry point
6013  */
6014 static int __init pmcraid_init(void)
6015 {
6016         dev_t dev;
6017         int error;
6018
6019         pmcraid_info("%s Device Driver version: %s\n",
6020                          PMCRAID_DRIVER_NAME, PMCRAID_DRIVER_VERSION);
6021
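        /* reserve a dynamic major number and one minor per supported adapter
         * for the character devices created later by pmcraid_setup_chrdev()
         */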
6022         error = alloc_chrdev_region(&dev, 0,
6023                                     PMCRAID_MAX_ADAPTERS,
6024                                     PMCRAID_DEVFILE);
6025
6026         if (error) {
6027                 pmcraid_err("failed to get a major number for adapters\n");
6028                 goto out_init;
6029         }
6030
6031         pmcraid_major = MAJOR(dev);
6032         pmcraid_class = class_create(THIS_MODULE, PMCRAID_DEVFILE);
6033
6034         if (IS_ERR(pmcraid_class)) {
6035                 error = PTR_ERR(pmcraid_class);
6036                 pmcraid_err("failed to register with sysfs, error = %x\n",
6037                             error);
6038                 goto out_unreg_chrdev;
6039         }
6040
6041         error = pmcraid_netlink_init();
6042
6043         if (error) {
6044                 /* undo class_create() before releasing the chrdev region */
6045                 class_destroy(pmcraid_class);
6046                 goto out_unreg_chrdev;
6047         }
6045
6046         error = pci_register_driver(&pmcraid_driver);
6047
6048         if (error == 0)
6049                 goto out_init;
6050
6051         pmcraid_err("failed to register pmcraid driver, error = %x\n",
6052                      error);
6053         class_destroy(pmcraid_class);
6054         pmcraid_netlink_release();
6055
6056 out_unreg_chrdev:
6057         unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS);
6058
6059 out_init:
6060         return error;
6061 }
6062
6063 /**
6064  * pmcraid_exit - module unload entry point
6065  */
6066 static void __exit pmcraid_exit(void)
6067 {
6068         pmcraid_netlink_release();
6069         unregister_chrdev_region(MKDEV(pmcraid_major, 0),
6070                                  PMCRAID_MAX_ADAPTERS);
6071         pci_unregister_driver(&pmcraid_driver);
6072         class_destroy(pmcraid_class);
6073 }
6074
6075 module_init(pmcraid_init);
6076 module_exit(pmcraid_exit);