ipr: set coherent DMA mask
[firefly-linux-kernel-4.4.55.git] drivers/scsi/ipr.c
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};
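
/*
 * Editorial note: the undesignated inner brace groups above rely on C's
 * positional initialization to fill the next member in declaration order,
 * here the embedded register-offset structure of struct ipr_chip_cfg_t.
 */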

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
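
/*
 * Editorial example (hypothetical values, not from the original source):
 * the parameters above are given at module load time, e.g.
 *
 *   modprobe ipr max_speed=2 number_of_msix=4 log_level=2
 *
 * fastfail and debug are registered with S_IRUGO | S_IWUSR, so they can
 * also be changed at runtime through /sys/module/ipr/parameters/.
 */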

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
301         "No ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
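
/*
 * Editorial note (an assumption about the match logic defined elsewhere in
 * this driver, not stated here): the second field is a per-byte compare
 * vector for the product ID, where an 'X' requests an exact byte comparison
 * and other characters such as '*' appear to act as wildcard positions.
 */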

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:       trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        trace_entry = &ioa_cfg->trace[atomic_add_return
                        (1, &ioa_cfg->trace_index) % IPR_NUM_TRACE_ENTRIES];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
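
/*
 * Editorial note: the trace buffer is used as a lock-free ring.
 * atomic_add_return(1, &trace_index) hands each caller a unique counter
 * value and the modulo by IPR_NUM_TRACE_ENTRIES wraps it, so with e.g.
 * 4 entries successive calls land in slots 1, 2, 3, 0, 1, ... The
 * trailing wmb() orders the entry's stores before any later writes.
 */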

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:   interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
        /* Read back to flush the posted MMIO writes */
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:    ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required, then OR the
 * appropriate size bits into the IOARCB address.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
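
/*
 * Editorial worked example (assuming the 16-byte ipr_ioadl64_desc): nine
 * scatter/gather entries need 9 * 16 = 144 bytes of IOADL, more than the
 * 128 bytes that fit in a 256-byte IOARCB, so bit 0x4 is OR'd into the
 * address to request the 512-byte IOARCB format instead.
 */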

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:    ipr command struct
 * @done:       done function
 * @timeout_func:       timeout function
 * @timeout:    timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      none
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        if (ioa_cfg->hrrq_num == 1)
                return 0;
        else
                return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}
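
/*
 * Editorial note: HRRQ 0 is reserved for initialization and internal
 * commands (IPR_INIT_HRRQ), so the round robin above only distributes
 * across queues 1..hrrq_num-1; with hrrq_num = 4, successive callers
 * get indices 1, 2, 3, 1, 2, 3, ...
 */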

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:       HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
        int i;
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
        for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
                p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

        return buffer;
}
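
/*
 * Editorial example: a res_path of { 0x00, 0x02, 0x0a, 0xff, ... } is
 * formatted as "00-02-0A"; the 0xff byte terminates the path.
 */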

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:    ioa config struct
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
                                 u8 *res_path, char *buffer, int len)
{
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
        /* Pass only the space remaining after the "host_no/" prefix */
        __ipr_format_res_path(res_path, p, len - (p - buffer));
        return buffer;
}
1283
1284 /**
1285  * ipr_update_res_entry - Update the resource entry.
1286  * @res:        resource entry struct
1287  * @cfgtew:     config table entry wrapper struct
1288  *
1289  * Return value:
1290  *      none
1291  **/
1292 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1293                                  struct ipr_config_table_entry_wrapper *cfgtew)
1294 {
1295         char buffer[IPR_MAX_RES_PATH_LENGTH];
1296         unsigned int proto;
1297         int new_path = 0;
1298
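             /* SIS64 and legacy adapters report different config table entry
              * layouts, so decode whichever format this adapter uses.
              */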
1299         if (res->ioa_cfg->sis64) {
1300                 res->flags = cfgtew->u.cfgte64->flags;
1301                 res->res_flags = cfgtew->u.cfgte64->res_flags;
1302                 res->type = cfgtew->u.cfgte64->res_type;
1303
1304                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1305                         sizeof(struct ipr_std_inq_data));
1306
1307                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1308                 proto = cfgtew->u.cfgte64->proto;
1309                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1310                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1311
1312                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1313                         sizeof(res->dev_lun.scsi_lun));
1314
1315                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1316                                         sizeof(res->res_path))) {
1317                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1318                                 sizeof(res->res_path));
1319                         new_path = 1;
1320                 }
1321
1322                 if (res->sdev && new_path)
1323                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1324                                     ipr_format_res_path(res->ioa_cfg,
1325                                         res->res_path, buffer, sizeof(buffer)));
1326         } else {
1327                 res->flags = cfgtew->u.cfgte->flags;
1328                 if (res->flags & IPR_IS_IOA_RESOURCE)
1329                         res->type = IPR_RES_TYPE_IOAFP;
1330                 else
1331                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1332
1333                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1334                         sizeof(struct ipr_std_inq_data));
1335
1336                 res->qmodel = IPR_QUEUEING_MODEL(res);
1337                 proto = cfgtew->u.cfgte->proto;
1338                 res->res_handle = cfgtew->u.cfgte->res_handle;
1339         }
1340
1341         ipr_update_ata_class(res, proto);
1342 }
1343
1344 /**
1345  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1346  *                        for the resource.
1347  * @res:        resource entry struct
1349  *
1350  * Return value:
1351  *      none
1352  **/
1353 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1354 {
1355         struct ipr_resource_entry *gscsi_res = NULL;
1356         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1357
1358         if (!ioa_cfg->sis64)
1359                 return;
1360
1361         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1362                 clear_bit(res->target, ioa_cfg->array_ids);
1363         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1364                 clear_bit(res->target, ioa_cfg->vset_ids);
1365         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
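                     /*
                      * Generic SCSI devices may be reachable through more than
                      * one resource entry; only release the target id once no
                      * other entry still references the same device id.
                      */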
1366                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1367                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1368                                 return;
1369                 clear_bit(res->target, ioa_cfg->target_ids);
1370
1371         } else if (res->bus == 0)
1372                 clear_bit(res->target, ioa_cfg->target_ids);
1373 }
1374
1375 /**
1376  * ipr_handle_config_change - Handle a config change from the adapter
1377  * @ioa_cfg:    ioa config struct
1378  * @hostrcb:    hostrcb
1379  *
1380  * Return value:
1381  *      none
1382  **/
1383 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1384                                      struct ipr_hostrcb *hostrcb)
1385 {
1386         struct ipr_resource_entry *res = NULL;
1387         struct ipr_config_table_entry_wrapper cfgtew;
1388         __be32 cc_res_handle;
1390         u32 is_ndn = 1;
1391
1392         if (ioa_cfg->sis64) {
1393                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1394                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1395         } else {
1396                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1397                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1398         }
1399
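             /*
              * If the reported handle is already on the used resource queue,
              * this notification updates a known device rather than adding a
              * new one.
              */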
1400         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1401                 if (res->res_handle == cc_res_handle) {
1402                         is_ndn = 0;
1403                         break;
1404                 }
1405         }
1406
1407         if (is_ndn) {
1408                 if (list_empty(&ioa_cfg->free_res_q)) {
1409                         ipr_send_hcam(ioa_cfg,
1410                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1411                                       hostrcb);
1412                         return;
1413                 }
1414
1415                 res = list_entry(ioa_cfg->free_res_q.next,
1416                                  struct ipr_resource_entry, queue);
1417
1418                 list_del(&res->queue);
1419                 ipr_init_res_entry(res, &cfgtew);
1420                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1421         }
1422
1423         ipr_update_res_entry(res, &cfgtew);
1424
1425         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1426                 if (res->sdev) {
1427                         res->del_from_ml = 1;
1428                         res->res_handle = IPR_INVALID_RES_HANDLE;
1429                         if (ioa_cfg->allow_ml_add_del)
1430                                 schedule_work(&ioa_cfg->work_q);
1431                 } else {
1432                         ipr_clear_res_target(res);
1433                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1434                 }
1435         } else if (!res->sdev || res->del_from_ml) {
1436                 res->add_to_ml = 1;
1437                 if (ioa_cfg->allow_ml_add_del)
1438                         schedule_work(&ioa_cfg->work_q);
1439         }
1440
1441         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1442 }
1443
1444 /**
1445  * ipr_process_ccn - Op done function for a CCN.
1446  * @ipr_cmd:    ipr command struct
1447  *
1448  * This function is the op done function for a configuration change
1449  * notification HCAM (host controlled async message) from the adapter.
1450  *
1451  * Return value:
1452  *      none
1453  **/
1454 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1455 {
1456         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1457         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1458         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1459
1460         list_del(&hostrcb->queue);
1461         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1462
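             /*
              * A non-zero IOASC means the HCAM itself failed; log it (unless
              * the adapter was simply reset) and re-arm the notification.
              */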
1463         if (ioasc) {
1464                 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1465                         dev_err(&ioa_cfg->pdev->dev,
1466                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1467
1468                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1469         } else {
1470                 ipr_handle_config_change(ioa_cfg, hostrcb);
1471         }
1472 }
1473
1474 /**
1475  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1476  * @i:          index into buffer
1477  * @buf:                string to modify
1478  *
1479  * This function will strip all trailing whitespace, pad the end
1480  * of the string with a single space, and NULL terminate the string.
1481  *
1482  * Return value:
1483  *      new length of string
1484  **/
1485 static int strip_and_pad_whitespace(int i, char *buf)
1486 {
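             /* e.g. "IBM     " becomes "IBM " and the returned length is the
              * offset of the terminating '\0', where the next field is copied.
              */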
1487         while (i && buf[i] == ' ')
1488                 i--;
1489         buf[i+1] = ' ';
1490         buf[i+2] = '\0';
1491         return i + 2;
1492 }
1493
1494 /**
1495  * ipr_log_vpd_compact - Log the passed VPD compactly.
1496  * @prefix:             string to print at start of printk
1497  * @hostrcb:    hostrcb pointer
1498  * @vpd:                vendor/product id/sn struct
1499  *
1500  * Return value:
1501  *      none
1502  **/
1503 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1504                                 struct ipr_vpd *vpd)
1505 {
1506         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
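             /* +3: one pad space after the vendor id, one after the product
              * id, and the trailing '\0'.
              */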
1507         int i = 0;
1508
1509         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1510         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1511
1512         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1513         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1514
1515         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1516         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1517
1518         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1519 }
1520
1521 /**
1522  * ipr_log_vpd - Log the passed VPD to the error log.
1523  * @vpd:                vendor/product id/sn struct
1524  *
1525  * Return value:
1526  *      none
1527  **/
1528 static void ipr_log_vpd(struct ipr_vpd *vpd)
1529 {
1530         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1531                     + IPR_SERIAL_NUM_LEN];
1532
1533         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1534         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1535                IPR_PROD_ID_LEN);
1536         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1537         ipr_err("Vendor/Product ID: %s\n", buffer);
1538
1539         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1540         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1541         ipr_err("    Serial Number: %s\n", buffer);
1542 }
1543
1544 /**
1545  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1546  * @prefix:             string to print at start of printk
1547  * @hostrcb:    hostrcb pointer
1548  * @vpd:                vendor/product id/sn/wwn struct
1549  *
1550  * Return value:
1551  *      none
1552  **/
1553 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1554                                     struct ipr_ext_vpd *vpd)
1555 {
1556         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1557         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1558                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1559 }
1560
1561 /**
1562  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1563  * @vpd:                vendor/product id/sn/wwn struct
1564  *
1565  * Return value:
1566  *      none
1567  **/
1568 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1569 {
1570         ipr_log_vpd(&vpd->vpd);
1571         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1572                 be32_to_cpu(vpd->wwid[1]));
1573 }
1574
1575 /**
1576  * ipr_log_enhanced_cache_error - Log a cache error.
1577  * @ioa_cfg:    ioa config struct
1578  * @hostrcb:    hostrcb struct
1579  *
1580  * Return value:
1581  *      none
1582  **/
1583 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1584                                          struct ipr_hostrcb *hostrcb)
1585 {
1586         struct ipr_hostrcb_type_12_error *error;
1587
1588         if (ioa_cfg->sis64)
1589                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1590         else
1591                 error = &hostrcb->hcam.u.error.u.type_12_error;
1592
1593         ipr_err("-----Current Configuration-----\n");
1594         ipr_err("Cache Directory Card Information:\n");
1595         ipr_log_ext_vpd(&error->ioa_vpd);
1596         ipr_err("Adapter Card Information:\n");
1597         ipr_log_ext_vpd(&error->cfc_vpd);
1598
1599         ipr_err("-----Expected Configuration-----\n");
1600         ipr_err("Cache Directory Card Information:\n");
1601         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1602         ipr_err("Adapter Card Information:\n");
1603         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1604
1605         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1606                      be32_to_cpu(error->ioa_data[0]),
1607                      be32_to_cpu(error->ioa_data[1]),
1608                      be32_to_cpu(error->ioa_data[2]));
1609 }
1610
1611 /**
1612  * ipr_log_cache_error - Log a cache error.
1613  * @ioa_cfg:    ioa config struct
1614  * @hostrcb:    hostrcb struct
1615  *
1616  * Return value:
1617  *      none
1618  **/
1619 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1620                                 struct ipr_hostrcb *hostrcb)
1621 {
1622         struct ipr_hostrcb_type_02_error *error =
1623                 &hostrcb->hcam.u.error.u.type_02_error;
1624
1625         ipr_err("-----Current Configuration-----\n");
1626         ipr_err("Cache Directory Card Information:\n");
1627         ipr_log_vpd(&error->ioa_vpd);
1628         ipr_err("Adapter Card Information:\n");
1629         ipr_log_vpd(&error->cfc_vpd);
1630
1631         ipr_err("-----Expected Configuration-----\n");
1632         ipr_err("Cache Directory Card Information:\n");
1633         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1634         ipr_err("Adapter Card Information:\n");
1635         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1636
1637         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1638                      be32_to_cpu(error->ioa_data[0]),
1639                      be32_to_cpu(error->ioa_data[1]),
1640                      be32_to_cpu(error->ioa_data[2]));
1641 }
1642
1643 /**
1644  * ipr_log_enhanced_config_error - Log a configuration error.
1645  * @ioa_cfg:    ioa config struct
1646  * @hostrcb:    hostrcb struct
1647  *
1648  * Return value:
1649  *      none
1650  **/
1651 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1652                                           struct ipr_hostrcb *hostrcb)
1653 {
1654         int errors_logged, i;
1655         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1656         struct ipr_hostrcb_type_13_error *error;
1657
1658         error = &hostrcb->hcam.u.error.u.type_13_error;
1659         errors_logged = be32_to_cpu(error->errors_logged);
1660
1661         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1662                 be32_to_cpu(error->errors_detected), errors_logged);
1663
1664         dev_entry = error->dev;
1665
1666         for (i = 0; i < errors_logged; i++, dev_entry++) {
1667                 ipr_err_separator;
1668
1669                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1670                 ipr_log_ext_vpd(&dev_entry->vpd);
1671
1672                 ipr_err("-----New Device Information-----\n");
1673                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1674
1675                 ipr_err("Cache Directory Card Information:\n");
1676                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1677
1678                 ipr_err("Adapter Card Information:\n");
1679                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1680         }
1681 }
1682
1683 /**
1684  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1685  * @ioa_cfg:    ioa config struct
1686  * @hostrcb:    hostrcb struct
1687  *
1688  * Return value:
1689  *      none
1690  **/
1691 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1692                                        struct ipr_hostrcb *hostrcb)
1693 {
1694         int errors_logged, i;
1695         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1696         struct ipr_hostrcb_type_23_error *error;
1697         char buffer[IPR_MAX_RES_PATH_LENGTH];
1698
1699         error = &hostrcb->hcam.u.error64.u.type_23_error;
1700         errors_logged = be32_to_cpu(error->errors_logged);
1701
1702         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1703                 be32_to_cpu(error->errors_detected), errors_logged);
1704
1705         dev_entry = error->dev;
1706
1707         for (i = 0; i < errors_logged; i++, dev_entry++) {
1708                 ipr_err_separator;
1709
1710                 ipr_err("Device %d : %s", i + 1,
1711                         __ipr_format_res_path(dev_entry->res_path,
1712                                               buffer, sizeof(buffer)));
1713                 ipr_log_ext_vpd(&dev_entry->vpd);
1714
1715                 ipr_err("-----New Device Information-----\n");
1716                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1717
1718                 ipr_err("Cache Directory Card Information:\n");
1719                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1720
1721                 ipr_err("Adapter Card Information:\n");
1722                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1723         }
1724 }
1725
1726 /**
1727  * ipr_log_config_error - Log a configuration error.
1728  * @ioa_cfg:    ioa config struct
1729  * @hostrcb:    hostrcb struct
1730  *
1731  * Return value:
1732  *      none
1733  **/
1734 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1735                                  struct ipr_hostrcb *hostrcb)
1736 {
1737         int errors_logged, i;
1738         struct ipr_hostrcb_device_data_entry *dev_entry;
1739         struct ipr_hostrcb_type_03_error *error;
1740
1741         error = &hostrcb->hcam.u.error.u.type_03_error;
1742         errors_logged = be32_to_cpu(error->errors_logged);
1743
1744         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1745                 be32_to_cpu(error->errors_detected), errors_logged);
1746
1747         dev_entry = error->dev;
1748
1749         for (i = 0; i < errors_logged; i++, dev_entry++) {
1750                 ipr_err_separator;
1751
1752                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1753                 ipr_log_vpd(&dev_entry->vpd);
1754
1755                 ipr_err("-----New Device Information-----\n");
1756                 ipr_log_vpd(&dev_entry->new_vpd);
1757
1758                 ipr_err("Cache Directory Card Information:\n");
1759                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1760
1761                 ipr_err("Adapter Card Information:\n");
1762                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1763
1764                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1765                         be32_to_cpu(dev_entry->ioa_data[0]),
1766                         be32_to_cpu(dev_entry->ioa_data[1]),
1767                         be32_to_cpu(dev_entry->ioa_data[2]),
1768                         be32_to_cpu(dev_entry->ioa_data[3]),
1769                         be32_to_cpu(dev_entry->ioa_data[4]));
1770         }
1771 }
1772
1773 /**
1774  * ipr_log_enhanced_array_error - Log an array configuration error.
1775  * @ioa_cfg:    ioa config struct
1776  * @hostrcb:    hostrcb struct
1777  *
1778  * Return value:
1779  *      none
1780  **/
1781 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1782                                          struct ipr_hostrcb *hostrcb)
1783 {
1784         int i, num_entries;
1785         struct ipr_hostrcb_type_14_error *error;
1786         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1787         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1788
1789         error = &hostrcb->hcam.u.error.u.type_14_error;
1790
1791         ipr_err_separator;
1792
1793         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1794                 error->protection_level,
1795                 ioa_cfg->host->host_no,
1796                 error->last_func_vset_res_addr.bus,
1797                 error->last_func_vset_res_addr.target,
1798                 error->last_func_vset_res_addr.lun);
1799
1800         ipr_err_separator;
1801
1802         array_entry = error->array_member;
1803         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1804                             ARRAY_SIZE(error->array_member));
1805
1806         for (i = 0; i < num_entries; i++, array_entry++) {
1807                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1808                         continue;
1809
1810                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1811                         ipr_err("Exposed Array Member %d:\n", i);
1812                 else
1813                         ipr_err("Array Member %d:\n", i);
1814
1815                 ipr_log_ext_vpd(&array_entry->vpd);
1816                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1817                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1818                                  "Expected Location");
1819
1820                 ipr_err_separator;
1821         }
1822 }
1823
1824 /**
1825  * ipr_log_array_error - Log an array configuration error.
1826  * @ioa_cfg:    ioa config struct
1827  * @hostrcb:    hostrcb struct
1828  *
1829  * Return value:
1830  *      none
1831  **/
1832 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1833                                 struct ipr_hostrcb *hostrcb)
1834 {
1835         int i;
1836         struct ipr_hostrcb_type_04_error *error;
1837         struct ipr_hostrcb_array_data_entry *array_entry;
1838         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1839
1840         error = &hostrcb->hcam.u.error.u.type_04_error;
1841
1842         ipr_err_separator;
1843
1844         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1845                 error->protection_level,
1846                 ioa_cfg->host->host_no,
1847                 error->last_func_vset_res_addr.bus,
1848                 error->last_func_vset_res_addr.target,
1849                 error->last_func_vset_res_addr.lun);
1850
1851         ipr_err_separator;
1852
1853         array_entry = error->array_member;
1854
1855         for (i = 0; i < 18; i++) {
1856                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1857                         continue;
1858
1859                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1860                         ipr_err("Exposed Array Member %d:\n", i);
1861                 else
1862                         ipr_err("Array Member %d:\n", i);
1863
1864                 ipr_log_vpd(&array_entry->vpd);
1865
1866                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1867                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1868                                  "Expected Location");
1869
1870                 ipr_err_separator;
1871
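                     /*
                      * The 18 members are split across two fixed-size arrays
                      * in the hostrcb, so switch arrays after entry 9.
                      */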
1872                 if (i == 9)
1873                         array_entry = error->array_member2;
1874                 else
1875                         array_entry++;
1876         }
1877 }
1878
1879 /**
1880  * ipr_log_hex_data - Log additional hex IOA error data.
1881  * @ioa_cfg:    ioa config struct
1882  * @data:               IOA error data
1883  * @len:                data length
1884  *
1885  * Return value:
1886  *      none
1887  **/
1888 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1889 {
1890         int i;
1891
1892         if (len == 0)
1893                 return;
1894
1895         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1896                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1897
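             /* i indexes 32-bit big-endian words, printed four per line; the
              * first column is the byte offset of the line's first word.
              */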
1898         for (i = 0; i < len / 4; i += 4) {
1899                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1900                         be32_to_cpu(data[i]),
1901                         be32_to_cpu(data[i+1]),
1902                         be32_to_cpu(data[i+2]),
1903                         be32_to_cpu(data[i+3]));
1904         }
1905 }
1906
1907 /**
1908  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1909  * @ioa_cfg:    ioa config struct
1910  * @hostrcb:    hostrcb struct
1911  *
1912  * Return value:
1913  *      none
1914  **/
1915 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1916                                             struct ipr_hostrcb *hostrcb)
1917 {
1918         struct ipr_hostrcb_type_17_error *error;
1919
1920         if (ioa_cfg->sis64)
1921                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1922         else
1923                 error = &hostrcb->hcam.u.error.u.type_17_error;
1924
1925         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1926         strim(error->failure_reason);
1927
1928         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1929                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1930         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
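             /* Dump whatever follows the fixed portion of the type 17
              * error overlay as raw hex.
              */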
1931         ipr_log_hex_data(ioa_cfg, error->data,
1932                          be32_to_cpu(hostrcb->hcam.length) -
1933                          (offsetof(struct ipr_hostrcb_error, u) +
1934                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1935 }
1936
1937 /**
1938  * ipr_log_dual_ioa_error - Log a dual adapter error.
1939  * @ioa_cfg:    ioa config struct
1940  * @hostrcb:    hostrcb struct
1941  *
1942  * Return value:
1943  *      none
1944  **/
1945 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1946                                    struct ipr_hostrcb *hostrcb)
1947 {
1948         struct ipr_hostrcb_type_07_error *error;
1949
1950         error = &hostrcb->hcam.u.error.u.type_07_error;
1951         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1952         strim(error->failure_reason);
1953
1954         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1955                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1956         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1957         ipr_log_hex_data(ioa_cfg, error->data,
1958                          be32_to_cpu(hostrcb->hcam.length) -
1959                          (offsetof(struct ipr_hostrcb_error, u) +
1960                           offsetof(struct ipr_hostrcb_type_07_error, data)));
1961 }
1962
1963 static const struct {
1964         u8 active;
1965         char *desc;
1966 } path_active_desc[] = {
1967         { IPR_PATH_NO_INFO, "Path" },
1968         { IPR_PATH_ACTIVE, "Active path" },
1969         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1970 };
1971
1972 static const struct {
1973         u8 state;
1974         char *desc;
1975 } path_state_desc[] = {
1976         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1977         { IPR_PATH_HEALTHY, "is healthy" },
1978         { IPR_PATH_DEGRADED, "is degraded" },
1979         { IPR_PATH_FAILED, "is failed" }
1980 };
1981
1982 /**
1983  * ipr_log_fabric_path - Log a fabric path error
1984  * @hostrcb:    hostrcb struct
1985  * @fabric:             fabric descriptor
1986  *
1987  * Return value:
1988  *      none
1989  **/
1990 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1991                                 struct ipr_hostrcb_fabric_desc *fabric)
1992 {
1993         int i, j;
1994         u8 path_state = fabric->path_state;
1995         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1996         u8 state = path_state & IPR_PATH_STATE_MASK;
1997
1998         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1999                 if (path_active_desc[i].active != active)
2000                         continue;
2001
2002                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2003                         if (path_state_desc[j].state != state)
2004                                 continue;
2005
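                             /*
                              * A cascade or phy value of 0xff means that field
                              * does not apply to this path, so omit it from
                              * the message.
                              */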
2006                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2007                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2008                                              path_active_desc[i].desc, path_state_desc[j].desc,
2009                                              fabric->ioa_port);
2010                         } else if (fabric->cascaded_expander == 0xff) {
2011                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2012                                              path_active_desc[i].desc, path_state_desc[j].desc,
2013                                              fabric->ioa_port, fabric->phy);
2014                         } else if (fabric->phy == 0xff) {
2015                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2016                                              path_active_desc[i].desc, path_state_desc[j].desc,
2017                                              fabric->ioa_port, fabric->cascaded_expander);
2018                         } else {
2019                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2020                                              path_active_desc[i].desc, path_state_desc[j].desc,
2021                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2022                         }
2023                         return;
2024                 }
2025         }
2026
2027         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2028                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2029 }
2030
2031 /**
2032  * ipr_log64_fabric_path - Log a fabric path error
2033  * @hostrcb:    hostrcb struct
2034  * @fabric:             fabric descriptor
2035  *
2036  * Return value:
2037  *      none
2038  **/
2039 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2040                                   struct ipr_hostrcb64_fabric_desc *fabric)
2041 {
2042         int i, j;
2043         u8 path_state = fabric->path_state;
2044         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2045         u8 state = path_state & IPR_PATH_STATE_MASK;
2046         char buffer[IPR_MAX_RES_PATH_LENGTH];
2047
2048         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2049                 if (path_active_desc[i].active != active)
2050                         continue;
2051
2052                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2053                         if (path_state_desc[j].state != state)
2054                                 continue;
2055
2056                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2057                                      path_active_desc[i].desc, path_state_desc[j].desc,
2058                                      ipr_format_res_path(hostrcb->ioa_cfg,
2059                                                 fabric->res_path,
2060                                                 buffer, sizeof(buffer)));
2061                         return;
2062                 }
2063         }
2064
2065         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2066                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2067                                     buffer, sizeof(buffer)));
2068 }
2069
2070 static const struct {
2071         u8 type;
2072         char *desc;
2073 } path_type_desc[] = {
2074         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2075         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2076         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2077         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2078 };
2079
2080 static const struct {
2081         u8 status;
2082         char *desc;
2083 } path_status_desc[] = {
2084         { IPR_PATH_CFG_NO_PROB, "Functional" },
2085         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2086         { IPR_PATH_CFG_FAILED, "Failed" },
2087         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2088         { IPR_PATH_NOT_DETECTED, "Missing" },
2089         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2090 };
2091
2092 static const char *link_rate[] = {
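             /* Indexed by the 4-bit phy link rate field; 0x8 and 0x9 encode
              * the negotiated 1.5 and 3.0 Gbps rates.
              */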
2093         "unknown",
2094         "disabled",
2095         "phy reset problem",
2096         "spinup hold",
2097         "port selector",
2098         "unknown",
2099         "unknown",
2100         "unknown",
2101         "1.5Gbps",
2102         "3.0Gbps",
2103         "unknown",
2104         "unknown",
2105         "unknown",
2106         "unknown",
2107         "unknown",
2108         "unknown"
2109 };
2110
2111 /**
2112  * ipr_log_path_elem - Log a fabric path element.
2113  * @hostrcb:    hostrcb struct
2114  * @cfg:                fabric path element struct
2115  *
2116  * Return value:
2117  *      none
2118  **/
2119 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2120                               struct ipr_hostrcb_config_element *cfg)
2121 {
2122         int i, j;
2123         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2124         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2125
2126         if (type == IPR_PATH_CFG_NOT_EXIST)
2127                 return;
2128
2129         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2130                 if (path_type_desc[i].type != type)
2131                         continue;
2132
2133                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2134                         if (path_status_desc[j].status != status)
2135                                 continue;
2136
2137                         if (type == IPR_PATH_CFG_IOA_PORT) {
2138                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2139                                              path_status_desc[j].desc, path_type_desc[i].desc,
2140                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2141                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2142                         } else {
2143                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2144                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2145                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2146                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2147                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2148                                 } else if (cfg->cascaded_expander == 0xff) {
2149                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2150                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2151                                                      path_type_desc[i].desc, cfg->phy,
2152                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2153                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2154                                 } else if (cfg->phy == 0xff) {
2155                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2156                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2157                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2158                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2159                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2160                                 } else {
2161                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s, "
2162                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2163                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2164                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2165                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2166                                 }
2167                         }
2168                         return;
2169                 }
2170         }
2171
2172         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2173                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2174                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2175                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2176 }
2177
2178 /**
2179  * ipr_log64_path_elem - Log a fabric path element.
2180  * @hostrcb:    hostrcb struct
2181  * @cfg:                fabric path element struct
2182  *
2183  * Return value:
2184  *      none
2185  **/
2186 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2187                                 struct ipr_hostrcb64_config_element *cfg)
2188 {
2189         int i, j;
2190         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2191         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2192         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2193         char buffer[IPR_MAX_RES_PATH_LENGTH];
2194
2195         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2196                 return;
2197
2198         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2199                 if (path_type_desc[i].type != type)
2200                         continue;
2201
2202                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2203                         if (path_status_desc[j].status != status)
2204                                 continue;
2205
2206                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2207                                      path_status_desc[j].desc, path_type_desc[i].desc,
2208                                      ipr_format_res_path(hostrcb->ioa_cfg,
2209                                         cfg->res_path, buffer, sizeof(buffer)),
2210                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2211                                         be32_to_cpu(cfg->wwid[0]),
2212                                         be32_to_cpu(cfg->wwid[1]));
2213                         return;
2214                 }
2215         }
2216         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2217                      "WWN=%08X%08X\n", cfg->type_status,
2218                      ipr_format_res_path(hostrcb->ioa_cfg,
2219                         cfg->res_path, buffer, sizeof(buffer)),
2220                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2221                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2222 }
2223
2224 /**
2225  * ipr_log_fabric_error - Log a fabric error.
2226  * @ioa_cfg:    ioa config struct
2227  * @hostrcb:    hostrcb struct
2228  *
2229  * Return value:
2230  *      none
2231  **/
2232 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2233                                  struct ipr_hostrcb *hostrcb)
2234 {
2235         struct ipr_hostrcb_type_20_error *error;
2236         struct ipr_hostrcb_fabric_desc *fabric;
2237         struct ipr_hostrcb_config_element *cfg;
2238         int i, add_len;
2239
2240         error = &hostrcb->hcam.u.error.u.type_20_error;
2241         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2242         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2243
2244         add_len = be32_to_cpu(hostrcb->hcam.length) -
2245                 (offsetof(struct ipr_hostrcb_error, u) +
2246                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2247
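             /*
              * Fabric descriptors are variable length; advance by each
              * descriptor's own length field, then dump whatever trails the
              * last descriptor as raw hex.
              */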
2248         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2249                 ipr_log_fabric_path(hostrcb, fabric);
2250                 for_each_fabric_cfg(fabric, cfg)
2251                         ipr_log_path_elem(hostrcb, cfg);
2252
2253                 add_len -= be16_to_cpu(fabric->length);
2254                 fabric = (struct ipr_hostrcb_fabric_desc *)
2255                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2256         }
2257
2258         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2259 }
2260
2261 /**
2262  * ipr_log_sis64_array_error - Log a sis64 array error.
2263  * @ioa_cfg:    ioa config struct
2264  * @hostrcb:    hostrcb struct
2265  *
2266  * Return value:
2267  *      none
2268  **/
2269 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2270                                       struct ipr_hostrcb *hostrcb)
2271 {
2272         int i, num_entries;
2273         struct ipr_hostrcb_type_24_error *error;
2274         struct ipr_hostrcb64_array_data_entry *array_entry;
2275         char buffer[IPR_MAX_RES_PATH_LENGTH];
2276         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2277
2278         error = &hostrcb->hcam.u.error64.u.type_24_error;
2279
2280         ipr_err_separator;
2281
2282         ipr_err("RAID %s Array Configuration: %s\n",
2283                 error->protection_level,
2284                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2285                         buffer, sizeof(buffer)));
2286
2287         ipr_err_separator;
2288
2289         array_entry = error->array_member;
2290         num_entries = min_t(u32, error->num_entries,
2291                             ARRAY_SIZE(error->array_member));
2292
2293         for (i = 0; i < num_entries; i++, array_entry++) {
2294
2295                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2296                         continue;
2297
2298                 if (error->exposed_mode_adn == i)
2299                         ipr_err("Exposed Array Member %d:\n", i);
2300                 else
2301                         ipr_err("Array Member %d:\n", i);
2302
2303                 ipr_err("Array Member %d:\n", i);
2304                 ipr_log_ext_vpd(&array_entry->vpd);
2305                 ipr_err("Current Location: %s\n",
2306                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2307                                 buffer, sizeof(buffer)));
2308                 ipr_err("Expected Location: %s\n",
2309                          ipr_format_res_path(ioa_cfg,
2310                                 array_entry->expected_res_path,
2311                                 buffer, sizeof(buffer)));
2312
2313                 ipr_err_separator;
2314         }
2315 }
2316
2317 /**
2318  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2319  * @ioa_cfg:    ioa config struct
2320  * @hostrcb:    hostrcb struct
2321  *
2322  * Return value:
2323  *      none
2324  **/
2325 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2326                                        struct ipr_hostrcb *hostrcb)
2327 {
2328         struct ipr_hostrcb_type_30_error *error;
2329         struct ipr_hostrcb64_fabric_desc *fabric;
2330         struct ipr_hostrcb64_config_element *cfg;
2331         int i, add_len;
2332
2333         error = &hostrcb->hcam.u.error64.u.type_30_error;
2334
2335         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2336         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2337
2338         add_len = be32_to_cpu(hostrcb->hcam.length) -
2339                 (offsetof(struct ipr_hostrcb64_error, u) +
2340                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2341
2342         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2343                 ipr_log64_fabric_path(hostrcb, fabric);
2344                 for_each_fabric_cfg(fabric, cfg)
2345                         ipr_log64_path_elem(hostrcb, cfg);
2346
2347                 add_len -= be16_to_cpu(fabric->length);
2348                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2349                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2350         }
2351
2352         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2353 }
2354
2355 /**
2356  * ipr_log_generic_error - Log an adapter error.
2357  * @ioa_cfg:    ioa config struct
2358  * @hostrcb:    hostrcb struct
2359  *
2360  * Return value:
2361  *      none
2362  **/
2363 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2364                                   struct ipr_hostrcb *hostrcb)
2365 {
2366         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2367                          be32_to_cpu(hostrcb->hcam.length));
2368 }
2369
2370 /**
2371  * ipr_log_sis64_device_error - Log a sis64 device error.
2372  * @ioa_cfg:    ioa config struct
2373  * @hostrcb:    hostrcb struct
2374  *
2375  * Return value:
2376  *      none
2377  **/
2378 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2379                                          struct ipr_hostrcb *hostrcb)
2380 {
2381         struct ipr_hostrcb_type_21_error *error;
2382         char buffer[IPR_MAX_RES_PATH_LENGTH];
2383
2384         error = &hostrcb->hcam.u.error64.u.type_21_error;
2385
2386         ipr_err("-----Failing Device Information-----\n");
2387         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2388                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2389                 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2390         ipr_err("Device Resource Path: %s\n",
2391                 __ipr_format_res_path(error->res_path,
2392                                       buffer, sizeof(buffer)));
2393         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2394         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2395         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2396         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2397         ipr_err("SCSI Sense Data:\n");
2398         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2399         ipr_err("SCSI Command Descriptor Block: \n");
2400         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2401
2402         ipr_err("Additional IOA Data:\n");
2403         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2404 }
2405
2406 /**
2407  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2408  * @ioasc:      IOASC
2409  *
2410  * This function will return the index into the ipr_error_table
2411  * for the specified IOASC. If the IOASC is not in the table,
2412  * 0 will be returned, which points to the entry used for unknown errors.
2413  *
2414  * Return value:
2415  *      index into the ipr_error_table
2416  **/
2417 static u32 ipr_get_error(u32 ioasc)
2418 {
2419         int i;
2420
2421         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2422                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2423                         return i;
2424
2425         return 0;
2426 }
2427
2428 /**
2429  * ipr_handle_log_data - Log an adapter error.
2430  * @ioa_cfg:    ioa config struct
2431  * @hostrcb:    hostrcb struct
2432  *
2433  * This function logs an adapter error to the system.
2434  *
2435  * Return value:
2436  *      none
2437  **/
2438 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2439                                 struct ipr_hostrcb *hostrcb)
2440 {
2441         u32 ioasc;
2442         int error_index;
2443         struct ipr_hostrcb_type_21_error *error;
2444
2445         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2446                 return;
2447
2448         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2449                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2450
2451         if (ioa_cfg->sis64)
2452                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2453         else
2454                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2455
2456         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2457             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2458                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2459                 scsi_report_bus_reset(ioa_cfg->host,
2460                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2461         }
2462
2463         error_index = ipr_get_error(ioasc);
2464
2465         if (!ipr_error_table[error_index].log_hcam)
2466                 return;
2467
2468         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2469             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2470                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2471
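                     /*
                      * The sense key lives in byte 2 of the sense data; at the
                      * default log level, suppress hardware command failures
                      * that are expected illegal requests.
                      */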
2472                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2473                     ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2474                         return;
2475         }
2476
2477         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2478
2479         /* Set indication we have logged an error */
2480         ioa_cfg->errors_logged++;
2481
2482         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2483                 return;
2484         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2485                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2486
2487         switch (hostrcb->hcam.overlay_id) {
2488         case IPR_HOST_RCB_OVERLAY_ID_2:
2489                 ipr_log_cache_error(ioa_cfg, hostrcb);
2490                 break;
2491         case IPR_HOST_RCB_OVERLAY_ID_3:
2492                 ipr_log_config_error(ioa_cfg, hostrcb);
2493                 break;
2494         case IPR_HOST_RCB_OVERLAY_ID_4:
2495         case IPR_HOST_RCB_OVERLAY_ID_6:
2496                 ipr_log_array_error(ioa_cfg, hostrcb);
2497                 break;
2498         case IPR_HOST_RCB_OVERLAY_ID_7:
2499                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2500                 break;
2501         case IPR_HOST_RCB_OVERLAY_ID_12:
2502                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2503                 break;
2504         case IPR_HOST_RCB_OVERLAY_ID_13:
2505                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2506                 break;
2507         case IPR_HOST_RCB_OVERLAY_ID_14:
2508         case IPR_HOST_RCB_OVERLAY_ID_16:
2509                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2510                 break;
2511         case IPR_HOST_RCB_OVERLAY_ID_17:
2512                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2513                 break;
2514         case IPR_HOST_RCB_OVERLAY_ID_20:
2515                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2516                 break;
2517         case IPR_HOST_RCB_OVERLAY_ID_21:
2518                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2519                 break;
2520         case IPR_HOST_RCB_OVERLAY_ID_23:
2521                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2522                 break;
2523         case IPR_HOST_RCB_OVERLAY_ID_24:
2524         case IPR_HOST_RCB_OVERLAY_ID_26:
2525                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2526                 break;
2527         case IPR_HOST_RCB_OVERLAY_ID_30:
2528                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2529                 break;
2530         case IPR_HOST_RCB_OVERLAY_ID_1:
2531         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2532         default:
2533                 ipr_log_generic_error(ioa_cfg, hostrcb);
2534                 break;
2535         }
2536 }
2537
2538 /**
2539  * ipr_process_error - Op done function for an adapter error log.
2540  * @ipr_cmd:    ipr command struct
2541  *
2542  * This function is the op done function for an error log HCAM (host
2543  * controlled async message) from the adapter. It will log the error
2544  * and send the HCAM back to the adapter.
2545  *
2546  * Return value:
2547  *      none
2548  **/
2549 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2550 {
2551         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2552         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2553         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2554         u32 fd_ioasc;
2555
2556         if (ioa_cfg->sis64)
2557                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2558         else
2559                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2560
2561         list_del(&hostrcb->queue);
2562         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2563
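             /*
              * On success, log the error; if the failing device IOASC says an
              * IOA reset is required, kick off an abbreviated reset as well.
              */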
2564         if (!ioasc) {
2565                 ipr_handle_log_data(ioa_cfg, hostrcb);
2566                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2567                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2568         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2569                 dev_err(&ioa_cfg->pdev->dev,
2570                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2571         }
2572
2573         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2574 }
2575
2576 /**
2577  * ipr_timeout -  An internally generated op has timed out.
2578  * @ipr_cmd:    ipr command struct
2579  *
2580  * This function blocks host requests and initiates an
2581  * adapter reset.
2582  *
2583  * Return value:
2584  *      none
2585  **/
2586 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2587 {
2588         unsigned long lock_flags = 0;
2589         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2590
2591         ENTER;
2592         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2593
2594         ioa_cfg->errors_logged++;
2595         dev_err(&ioa_cfg->pdev->dev,
2596                 "Adapter being reset due to command timeout.\n");
2597
2598         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2599                 ioa_cfg->sdt_state = GET_DUMP;
2600
2601         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2602                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2603
2604         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2605         LEAVE;
2606 }
2607
2608 /**
2609  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2610  * @ipr_cmd:    ipr command struct
2611  *
2612  * This function blocks host requests and initiates an
2613  * adapter reset.
2614  *
2615  * Return value:
2616  *      none
2617  **/
2618 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2619 {
2620         unsigned long lock_flags = 0;
2621         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2622
2623         ENTER;
2624         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2625
2626         ioa_cfg->errors_logged++;
2627         dev_err(&ioa_cfg->pdev->dev,
2628                 "Adapter timed out transitioning to operational.\n");
2629
2630         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2631                 ioa_cfg->sdt_state = GET_DUMP;
2632
2633         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2634                 if (ipr_fastfail)
2635                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2636                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2637         }
2638
2639         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2640         LEAVE;
2641 }
2642
2643 /**
2644  * ipr_find_ses_entry - Find matching SES in SES table
2645  * @res:        resource entry struct of SES
2646  *
2647  * Return value:
2648  *      pointer to SES table entry / NULL on failure
2649  **/
2650 static const struct ipr_ses_table_entry *
2651 ipr_find_ses_entry(struct ipr_resource_entry *res)
2652 {
2653         int i, j, matches;
2654         struct ipr_std_inq_vpids *vpids;
2655         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2656
2657         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2658                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2659                         if (ste->compare_product_id_byte[j] == 'X') {
2660                                 vpids = &res->std_inq_data.vpids;
2661                                 if (vpids->product_id[j] == ste->product_id[j])
2662                                         matches++;
2663                                 else
2664                                         break;
2665                         } else
2666                                 matches++;
2667                 }
2668
2669                 if (matches == IPR_PROD_ID_LEN)
2670                         return ste;
2671         }
2672
2673         return NULL;
2674 }
2675
2676 /**
2677  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2678  * @ioa_cfg:    ioa config struct
2679  * @bus:                SCSI bus
2680  * @bus_width:  bus width
2681  *
2682  * Return value:
2683  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2684  *      SCSI bus speed in units of 100KHz, e.g. 1600 is 160 MHz.
2685  *      For a 2-byte wide SCSI bus, the maximum transfer rate in
2686  *      MB/sec is twice the bus clock in MHz (e.g. for a wide
2687  *      enabled bus, max 160 MHz = max 320 MB/sec).
2688 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2689 {
2690         struct ipr_resource_entry *res;
2691         const struct ipr_ses_table_entry *ste;
2692         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2693
2694         /* Loop through each config table entry in the config table buffer */
2695         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2696                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2697                         continue;
2698
2699                 if (bus != res->bus)
2700                         continue;
2701
2702                 if (!(ste = ipr_find_ses_entry(res)))
2703                         continue;
2704
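                     /*
                      * Scale the enclosure's speed limit (MB/sec) into bus
                      * clock units of 100KHz by dividing out the number of
                      * bytes moved per transfer cycle.
                      */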
2705                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2706         }
2707
2708         return max_xfer_rate;
2709 }
2710
2711 /**
2712  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2713  * @ioa_cfg:            ioa config struct
2714  * @max_delay:          max delay in microseconds to wait
2715  *
2716  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2717  *
2718  * Return value:
2719  *      0 on success / other on failure
2720  **/
2721 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2722 {
2723         volatile u32 pcii_reg;
2724         int delay = 1;
2725
2726         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2727         while (delay < max_delay) {
2728                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2729
2730                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2731                         return 0;
2732
2733                 /* udelay cannot be used if delay is more than a few milliseconds */
2734                 if ((delay / 1000) > MAX_UDELAY_MS)
2735                         mdelay(delay / 1000);
2736                 else
2737                         udelay(delay);
2738
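                     /* Exponential backoff: double the poll interval each pass */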
2739                 delay += delay;
2740         }
2741         return -EIO;
2742 }
2743
2744 /**
2745  * ipr_get_sis64_dump_data_section - Dump IOA memory
2746  * @ioa_cfg:                    ioa config struct
2747  * @start_addr:                 adapter address to dump
2748  * @dest:                       destination kernel buffer
2749  * @length_in_words:            length to dump in 4 byte words
2750  *
2751  * Return value:
2752  *      0 on success
2753  **/
2754 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2755                                            u32 start_addr,
2756                                            __be32 *dest, u32 length_in_words)
2757 {
2758         int i;
2759
2760         for (i = 0; i < length_in_words; i++) {
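                     /*
                      * Select the adapter address to read, then pull the
                      * word back through the dump data window register.
                      */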
2761                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2762                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2763                 dest++;
2764         }
2765
2766         return 0;
2767 }
2768
2769 /**
2770  * ipr_get_ldump_data_section - Dump IOA memory
2771  * @ioa_cfg:                    ioa config struct
2772  * @start_addr:                 adapter address to dump
2773  * @dest:                       destination kernel buffer
2774  * @length_in_words:            length to dump in 4 byte words
2775  *
2776  * Return value:
2777  *      0 on success / -EIO on failure
2778  **/
2779 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2780                                       u32 start_addr,
2781                                       __be32 *dest, u32 length_in_words)
2782 {
2783         volatile u32 temp_pcii_reg;
2784         int i, delay = 0;
2785
2786         if (ioa_cfg->sis64)
2787                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2788                                                        dest, length_in_words);
2789
2790         /* Write IOA interrupt reg starting LDUMP state  */
2791         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2792                ioa_cfg->regs.set_uproc_interrupt_reg32);
2793
2794         /* Wait for IO debug acknowledge */
2795         if (ipr_wait_iodbg_ack(ioa_cfg,
2796                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2797                 dev_err(&ioa_cfg->pdev->dev,
2798                         "IOA dump long data transfer timeout\n");
2799                 return -EIO;
2800         }
2801
2802         /* Signal LDUMP interlocked - clear IO debug ack */
2803         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2804                ioa_cfg->regs.clr_interrupt_reg);
2805
2806         /* Write Mailbox with starting address */
2807         writel(start_addr, ioa_cfg->ioa_mailbox);
2808
2809         /* Signal address valid - clear IOA Reset alert */
2810         writel(IPR_UPROCI_RESET_ALERT,
2811                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2812
2813         for (i = 0; i < length_in_words; i++) {
2814                 /* Wait for IO debug acknowledge */
2815                 if (ipr_wait_iodbg_ack(ioa_cfg,
2816                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2817                         dev_err(&ioa_cfg->pdev->dev,
2818                                 "IOA dump short data transfer timeout\n");
2819                         return -EIO;
2820                 }
2821
2822                 /* Read data from mailbox and increment destination pointer */
2823                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2824                 dest++;
2825
2826                 /* For all but the last word of data, signal data received */
2827                 if (i < (length_in_words - 1)) {
2828                         /* Signal dump data received - Clear IO debug Ack */
2829                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2830                                ioa_cfg->regs.clr_interrupt_reg);
2831                 }
2832         }
2833
2834         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2835         writel(IPR_UPROCI_RESET_ALERT,
2836                ioa_cfg->regs.set_uproc_interrupt_reg32);
2837
2838         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2839                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2840
2841         /* Signal dump data received - Clear IO debug Ack */
2842         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2843                ioa_cfg->regs.clr_interrupt_reg);
2844
2845         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2846         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2847                 temp_pcii_reg =
2848                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2849
2850                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2851                         return 0;
2852
2853                 udelay(10);
2854                 delay += 10;
2855         }
2856
2857         return 0;
2858 }
2859
2860 #ifdef CONFIG_SCSI_IPR_DUMP
2861 /**
2862  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2863  * @ioa_cfg:            ioa config struct
2864  * @pci_address:        adapter address
2865  * @length:             length of data to copy
2866  *
2867  * Copy data from PCI adapter to kernel buffer.
2868  * Note: length MUST be a 4 byte multiple
2869  * Return value:
2870  *      0 on success / other on failure
2871  **/
2872 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2873                         unsigned long pci_address, u32 length)
2874 {
2875         int bytes_copied = 0;
2876         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2877         __be32 *page;
2878         unsigned long lock_flags = 0;
2879         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2880
2881         if (ioa_cfg->sis64)
2882                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2883         else
2884                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2885
2886         while (bytes_copied < length &&
2887                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
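                     /*
                      * Grab a fresh page whenever the current one is full
                      * (or none has been allocated yet); otherwise keep
                      * filling the most recently allocated page.
                      */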
2888                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2889                     ioa_dump->page_offset == 0) {
2890                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2891
2892                         if (!page) {
2893                                 ipr_trace;
2894                                 return bytes_copied;
2895                         }
2896
2897                         ioa_dump->page_offset = 0;
2898                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2899                         ioa_dump->next_page_index++;
2900                 } else
2901                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2902
2903                 rem_len = length - bytes_copied;
2904                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2905                 cur_len = min(rem_len, rem_page_len);
2906
2907                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2908                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2909                         rc = -EIO;
2910                 } else {
2911                         rc = ipr_get_ldump_data_section(ioa_cfg,
2912                                                         pci_address + bytes_copied,
2913                                                         &page[ioa_dump->page_offset / 4],
2914                                                         (cur_len / sizeof(u32)));
2915                 }
2916                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2917
2918                 if (!rc) {
2919                         ioa_dump->page_offset += cur_len;
2920                         bytes_copied += cur_len;
2921                 } else {
2922                         ipr_trace;
2923                         break;
2924                 }
2925                 schedule();
2926         }
2927
2928         return bytes_copied;
2929 }
2930
2931 /**
2932  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2933  * @hdr:        dump entry header struct
2934  *
2935  * Return value:
2936  *      nothing
2937  **/
2938 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2939 {
2940         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2941         hdr->num_elems = 1;
2942         hdr->offset = sizeof(*hdr);
2943         hdr->status = IPR_DUMP_STATUS_SUCCESS;
2944 }
2945
2946 /**
2947  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2948  * @ioa_cfg:    ioa config struct
2949  * @driver_dump:        driver dump struct
2950  *
2951  * Return value:
2952  *      nothing
2953  **/
2954 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2955                                    struct ipr_driver_dump *driver_dump)
2956 {
2957         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2958
2959         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2960         driver_dump->ioa_type_entry.hdr.len =
2961                 sizeof(struct ipr_dump_ioa_type_entry) -
2962                 sizeof(struct ipr_dump_entry_header);
2963         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2964         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2965         driver_dump->ioa_type_entry.type = ioa_cfg->type;
2966         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2967                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2968                 ucode_vpd->minor_release[1];
2969         driver_dump->hdr.num_entries++;
2970 }
2971
2972 /**
2973  * ipr_dump_version_data - Fill in the driver version in the dump.
2974  * @ioa_cfg:    ioa config struct
2975  * @driver_dump:        driver dump struct
2976  *
2977  * Return value:
2978  *      nothing
2979  **/
2980 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2981                                   struct ipr_driver_dump *driver_dump)
2982 {
2983         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2984         driver_dump->version_entry.hdr.len =
2985                 sizeof(struct ipr_dump_version_entry) -
2986                 sizeof(struct ipr_dump_entry_header);
2987         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2988         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2989         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2990         driver_dump->hdr.num_entries++;
2991 }
2992
2993 /**
2994  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2995  * @ioa_cfg:    ioa config struct
2996  * @driver_dump:        driver dump struct
2997  *
2998  * Return value:
2999  *      nothing
3000  **/
3001 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3002                                    struct ipr_driver_dump *driver_dump)
3003 {
3004         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3005         driver_dump->trace_entry.hdr.len =
3006                 sizeof(struct ipr_dump_trace_entry) -
3007                 sizeof(struct ipr_dump_entry_header);
3008         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3009         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3010         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3011         driver_dump->hdr.num_entries++;
3012 }
3013
3014 /**
3015  * ipr_dump_location_data - Fill in the IOA location in the dump.
3016  * @ioa_cfg:    ioa config struct
3017  * @driver_dump:        driver dump struct
3018  *
3019  * Return value:
3020  *      nothing
3021  **/
3022 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3023                                    struct ipr_driver_dump *driver_dump)
3024 {
3025         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3026         driver_dump->location_entry.hdr.len =
3027                 sizeof(struct ipr_dump_location_entry) -
3028                 sizeof(struct ipr_dump_entry_header);
3029         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3030         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3031         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3032         driver_dump->hdr.num_entries++;
3033 }
3034
3035 /**
3036  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3037  * @ioa_cfg:    ioa config struct
3038  * @dump:               dump struct
3039  *
3040  * Return value:
3041  *      nothing
3042  **/
3043 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3044 {
3045         unsigned long start_addr, sdt_word;
3046         unsigned long lock_flags = 0;
3047         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3048         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3049         u32 num_entries, max_num_entries, start_off, end_off;
3050         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3051         struct ipr_sdt *sdt;
3052         int valid = 1;
3053         int i;
3054
3055         ENTER;
3056
3057         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3058
3059         if (ioa_cfg->sdt_state != READ_DUMP) {
3060                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3061                 return;
3062         }
3063
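             /*
              * On SIS64 adapters, drop the lock and give the IOA time to
              * build the dump table before reading the mailbox.
              */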
3064         if (ioa_cfg->sis64) {
3065                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3066                 ssleep(IPR_DUMP_DELAY_SECONDS);
3067                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3068         }
3069
3070         start_addr = readl(ioa_cfg->ioa_mailbox);
3071
3072         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3073                 dev_err(&ioa_cfg->pdev->dev,
3074                         "Invalid dump table format: %lx\n", start_addr);
3075                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3076                 return;
3077         }
3078
3079         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3080
3081         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3082
3083         /* Initialize the overall dump header */
3084         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3085         driver_dump->hdr.num_entries = 1;
3086         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3087         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3088         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3089         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3090
3091         ipr_dump_version_data(ioa_cfg, driver_dump);
3092         ipr_dump_location_data(ioa_cfg, driver_dump);
3093         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3094         ipr_dump_trace_data(ioa_cfg, driver_dump);
3095
3096         /* Update dump_header */
3097         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3098
3099         /* IOA Dump entry */
3100         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3101         ioa_dump->hdr.len = 0;
3102         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3103         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3104
3105         /* First entries in sdt are actually a list of dump addresses and
3106          * lengths to gather the real dump data.  sdt represents the pointer
3107          * to the ioa generated dump table.  Dump data will be extracted based
3108          * on entries in this table */
3109         sdt = &ioa_dump->sdt;
3110
3111         if (ioa_cfg->sis64) {
3112                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3113                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3114         } else {
3115                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3116                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3117         }
3118
3119         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3120                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3121         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3122                                         bytes_to_copy / sizeof(__be32));
3123
3124         /* Smart Dump table is ready to use and the first entry is valid */
3125         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3126             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3127                 dev_err(&ioa_cfg->pdev->dev,
3128                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3129                         rc, be32_to_cpu(sdt->hdr.state));
3130                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3131                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3132                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3133                 return;
3134         }
3135
3136         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3137
3138         if (num_entries > max_num_entries)
3139                 num_entries = max_num_entries;
3140
3141         /* Update dump length to the actual data to be copied */
3142         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3143         if (ioa_cfg->sis64)
3144                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3145         else
3146                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3147
3148         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3149
3150         for (i = 0; i < num_entries; i++) {
3151                 if (ioa_dump->hdr.len > max_dump_size) {
3152                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3153                         break;
3154                 }
3155
3156                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3157                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3158                         if (ioa_cfg->sis64)
3159                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3160                         else {
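                                     /*
                                      * Format 2 start tokens mix format bits
                                      * with the address; mask the token down
                                      * to its address portion.
                                      */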
3161                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3162                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3163
3164                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3165                                         bytes_to_copy = end_off - start_off;
3166                                 else
3167                                         valid = 0;
3168                         }
3169                         if (valid) {
3170                                 if (bytes_to_copy > max_dump_size) {
3171                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3172                                         continue;
3173                                 }
3174
3175                                 /* Copy data from adapter to driver buffers */
3176                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3177                                                             bytes_to_copy);
3178
3179                                 ioa_dump->hdr.len += bytes_copied;
3180
3181                                 if (bytes_copied != bytes_to_copy) {
3182                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3183                                         break;
3184                                 }
3185                         }
3186                 }
3187         }
3188
3189         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3190
3191         /* Update dump_header */
3192         driver_dump->hdr.len += ioa_dump->hdr.len;
3193         wmb();
3194         ioa_cfg->sdt_state = DUMP_OBTAINED;
3195         LEAVE;
3196 }
3197
3198 #else
3199 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3200 #endif
3201
3202 /**
3203  * ipr_release_dump - Free adapter dump memory
3204  * @kref:       kref struct
3205  *
3206  * Return value:
3207  *      nothing
3208  **/
3209 static void ipr_release_dump(struct kref *kref)
3210 {
3211         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3212         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3213         unsigned long lock_flags = 0;
3214         int i;
3215
3216         ENTER;
3217         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3218         ioa_cfg->dump = NULL;
3219         ioa_cfg->sdt_state = INACTIVE;
3220         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3221
3222         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3223                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3224
3225         vfree(dump->ioa_dump.ioa_data);
3226         kfree(dump);
3227         LEAVE;
3228 }
3229
3230 /**
3231  * ipr_worker_thread - Worker thread
3232  * @work:               work struct
3233  *
3234  * Called at task level from a work thread. This function takes care
3235  * of adding and removing devices from the mid-layer as configuration
3236  * changes are detected by the adapter.
3237  *
3238  * Return value:
3239  *      nothing
3240  **/
3241 static void ipr_worker_thread(struct work_struct *work)
3242 {
3243         unsigned long lock_flags;
3244         struct ipr_resource_entry *res;
3245         struct scsi_device *sdev;
3246         struct ipr_dump *dump;
3247         struct ipr_ioa_cfg *ioa_cfg =
3248                 container_of(work, struct ipr_ioa_cfg, work_q);
3249         u8 bus, target, lun;
3250         int did_work;
3251
3252         ENTER;
3253         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3254
3255         if (ioa_cfg->sdt_state == READ_DUMP) {
3256                 dump = ioa_cfg->dump;
3257                 if (!dump) {
3258                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3259                         return;
3260                 }
3261                 kref_get(&dump->kref);
3262                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3263                 ipr_get_ioa_dump(ioa_cfg, dump);
3264                 kref_put(&dump->kref, ipr_release_dump);
3265
3266                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3267                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3268                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3269                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3270                 return;
3271         }
3272
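             /*
              * Mid-layer add/remove calls must be made without the host
              * lock held, which can invalidate our position in the
              * resource list; restart the scan after each change.
              */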
3273 restart:
3274         do {
3275                 did_work = 0;
3276                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3277                     !ioa_cfg->allow_ml_add_del) {
3278                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3279                         return;
3280                 }
3281
3282                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3283                         if (res->del_from_ml && res->sdev) {
3284                                 did_work = 1;
3285                                 sdev = res->sdev;
3286                                 if (!scsi_device_get(sdev)) {
3287                                         if (!res->add_to_ml)
3288                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3289                                         else
3290                                                 res->del_from_ml = 0;
3291                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3292                                         scsi_remove_device(sdev);
3293                                         scsi_device_put(sdev);
3294                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3295                                 }
3296                                 break;
3297                         }
3298                 }
3299         } while (did_work);
3300
3301         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3302                 if (res->add_to_ml) {
3303                         bus = res->bus;
3304                         target = res->target;
3305                         lun = res->lun;
3306                         res->add_to_ml = 0;
3307                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3308                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3309                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3310                         goto restart;
3311                 }
3312         }
3313
3314         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3315         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3316         LEAVE;
3317 }
3318
3319 #ifdef CONFIG_SCSI_IPR_TRACE
3320 /**
3321  * ipr_read_trace - Dump the adapter trace
3322  * @filp:               open sysfs file
3323  * @kobj:               kobject struct
3324  * @bin_attr:           bin_attribute struct
3325  * @buf:                buffer
3326  * @off:                offset
3327  * @count:              buffer size
3328  *
3329  * Return value:
3330  *      number of bytes printed to buffer
3331  **/
3332 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3333                               struct bin_attribute *bin_attr,
3334                               char *buf, loff_t off, size_t count)
3335 {
3336         struct device *dev = container_of(kobj, struct device, kobj);
3337         struct Scsi_Host *shost = class_to_shost(dev);
3338         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3339         unsigned long lock_flags = 0;
3340         ssize_t ret;
3341
3342         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3343         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3344                                 IPR_TRACE_SIZE);
3345         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3346
3347         return ret;
3348 }
3349
3350 static struct bin_attribute ipr_trace_attr = {
3351         .attr = {
3352                 .name = "trace",
3353                 .mode = S_IRUGO,
3354         },
3355         .size = 0,
3356         .read = ipr_read_trace,
3357 };
3358 #endif
3359
3360 /**
3361  * ipr_show_fw_version - Show the firmware version
3362  * @dev:        class device struct
3363  * @buf:        buffer
3364  *
3365  * Return value:
3366  *      number of bytes printed to buffer
3367  **/
3368 static ssize_t ipr_show_fw_version(struct device *dev,
3369                                    struct device_attribute *attr, char *buf)
3370 {
3371         struct Scsi_Host *shost = class_to_shost(dev);
3372         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3373         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3374         unsigned long lock_flags = 0;
3375         int len;
3376
3377         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3378         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3379                        ucode_vpd->major_release, ucode_vpd->card_type,
3380                        ucode_vpd->minor_release[0],
3381                        ucode_vpd->minor_release[1]);
3382         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3383         return len;
3384 }
3385
3386 static struct device_attribute ipr_fw_version_attr = {
3387         .attr = {
3388                 .name =         "fw_version",
3389                 .mode =         S_IRUGO,
3390         },
3391         .show = ipr_show_fw_version,
3392 };
3393
3394 /**
3395  * ipr_show_log_level - Show the adapter's error logging level
3396  * @dev:        class device struct
3397  * @buf:        buffer
3398  *
3399  * Return value:
3400  *      number of bytes printed to buffer
3401  **/
3402 static ssize_t ipr_show_log_level(struct device *dev,
3403                                    struct device_attribute *attr, char *buf)
3404 {
3405         struct Scsi_Host *shost = class_to_shost(dev);
3406         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3407         unsigned long lock_flags = 0;
3408         int len;
3409
3410         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3411         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3412         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3413         return len;
3414 }
3415
3416 /**
3417  * ipr_store_log_level - Change the adapter's error logging level
3418  * @dev:        class device struct
3419  * @buf:        buffer
3420  *
3421  * Return value:
3422  *      number of bytes consumed from buffer
3423  **/
3424 static ssize_t ipr_store_log_level(struct device *dev,
3425                                    struct device_attribute *attr,
3426                                    const char *buf, size_t count)
3427 {
3428         struct Scsi_Host *shost = class_to_shost(dev);
3429         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3430         unsigned long lock_flags = 0;
3431
3432         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3433         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3434         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3435         return strlen(buf);
3436 }
3437
3438 static struct device_attribute ipr_log_level_attr = {
3439         .attr = {
3440                 .name =         "log_level",
3441                 .mode =         S_IRUGO | S_IWUSR,
3442         },
3443         .show = ipr_show_log_level,
3444         .store = ipr_store_log_level
3445 };
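
     /*
      * Typical usage from user space (the sysfs path and value are
      * illustrative):
      *
      *   echo 4 > /sys/class/scsi_host/host0/log_level
      */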
3446
3447 /**
3448  * ipr_store_diagnostics - IOA Diagnostics interface
3449  * @dev:        device struct
3450  * @buf:        buffer
3451  * @count:      buffer size
3452  *
3453  * This function will reset the adapter and wait a reasonable
3454  * amount of time for any errors that the adapter might log.
3455  *
3456  * Return value:
3457  *      count on success / other on failure
3458  **/
3459 static ssize_t ipr_store_diagnostics(struct device *dev,
3460                                      struct device_attribute *attr,
3461                                      const char *buf, size_t count)
3462 {
3463         struct Scsi_Host *shost = class_to_shost(dev);
3464         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3465         unsigned long lock_flags = 0;
3466         int rc = count;
3467
3468         if (!capable(CAP_SYS_ADMIN))
3469                 return -EACCES;
3470
3471         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3472         while (ioa_cfg->in_reset_reload) {
3473                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3474                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3475                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3476         }
3477
3478         ioa_cfg->errors_logged = 0;
3479         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3480
3481         if (ioa_cfg->in_reset_reload) {
3482                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3483                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3484
3485                 /* Wait for a second for any errors to be logged */
3486                 msleep(1000);
3487         } else {
3488                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3489                 return -EIO;
3490         }
3491
3492         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3493         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3494                 rc = -EIO;
3495         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3496
3497         return rc;
3498 }
3499
3500 static struct device_attribute ipr_diagnostics_attr = {
3501         .attr = {
3502                 .name =         "run_diagnostics",
3503                 .mode =         S_IWUSR,
3504         },
3505         .store = ipr_store_diagnostics
3506 };
3507
3508 /**
3509  * ipr_show_adapter_state - Show the adapter's state
3510  * @dev:        device struct
3511  * @buf:        buffer
3512  *
3513  * Return value:
3514  *      number of bytes printed to buffer
3515  **/
3516 static ssize_t ipr_show_adapter_state(struct device *dev,
3517                                       struct device_attribute *attr, char *buf)
3518 {
3519         struct Scsi_Host *shost = class_to_shost(dev);
3520         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3521         unsigned long lock_flags = 0;
3522         int len;
3523
3524         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3525         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3526                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3527         else
3528                 len = snprintf(buf, PAGE_SIZE, "online\n");
3529         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3530         return len;
3531 }
3532
3533 /**
3534  * ipr_store_adapter_state - Change adapter state
3535  * @dev:        device struct
3536  * @buf:        buffer
3537  * @count:      buffer size
3538  *
3539  * This function will change the adapter's state.
3540  *
3541  * Return value:
3542  *      count on success / other on failure
3543  **/
3544 static ssize_t ipr_store_adapter_state(struct device *dev,
3545                                        struct device_attribute *attr,
3546                                        const char *buf, size_t count)
3547 {
3548         struct Scsi_Host *shost = class_to_shost(dev);
3549         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3550         unsigned long lock_flags;
3551         int result = count, i;
3552
3553         if (!capable(CAP_SYS_ADMIN))
3554                 return -EACCES;
3555
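             /*
              * Only a transition from dead to "online" is acted on: mark
              * every HRRQ alive, then reset the adapter to bring it back.
              */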
3556         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3557         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3558             !strncmp(buf, "online", 6)) {
3559                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3560                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3561                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3562                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3563                 }
3564                 wmb();
3565                 ioa_cfg->reset_retries = 0;
3566                 ioa_cfg->in_ioa_bringdown = 0;
3567                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3568         }
3569         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3570         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3571
3572         return result;
3573 }
3574
3575 static struct device_attribute ipr_ioa_state_attr = {
3576         .attr = {
3577                 .name =         "online_state",
3578                 .mode =         S_IRUGO | S_IWUSR,
3579         },
3580         .show = ipr_show_adapter_state,
3581         .store = ipr_store_adapter_state
3582 };
3583
3584 /**
3585  * ipr_store_reset_adapter - Reset the adapter
3586  * @dev:        device struct
3587  * @buf:        buffer
3588  * @count:      buffer size
3589  *
3590  * This function will reset the adapter.
3591  *
3592  * Return value:
3593  *      count on success / other on failure
3594  **/
3595 static ssize_t ipr_store_reset_adapter(struct device *dev,
3596                                        struct device_attribute *attr,
3597                                        const char *buf, size_t count)
3598 {
3599         struct Scsi_Host *shost = class_to_shost(dev);
3600         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3601         unsigned long lock_flags;
3602         int result = count;
3603
3604         if (!capable(CAP_SYS_ADMIN))
3605                 return -EACCES;
3606
3607         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3608         if (!ioa_cfg->in_reset_reload)
3609                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3610         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3611         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3612
3613         return result;
3614 }
3615
3616 static struct device_attribute ipr_ioa_reset_attr = {
3617         .attr = {
3618                 .name =         "reset_host",
3619                 .mode =         S_IWUSR,
3620         },
3621         .store = ipr_store_reset_adapter
3622 };
3623
3624 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3625 /**
3626  * ipr_show_iopoll_weight - Show ipr polling mode
3627  * @dev:        class device struct
3628  * @buf:        buffer
3629  *
3630  * Return value:
3631  *      number of bytes printed to buffer
3632  **/
3633 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3634                                    struct device_attribute *attr, char *buf)
3635 {
3636         struct Scsi_Host *shost = class_to_shost(dev);
3637         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3638         unsigned long lock_flags = 0;
3639         int len;
3640
3641         spin_lock_irqsave(shost->host_lock, lock_flags);
3642         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3643         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3644
3645         return len;
3646 }
3647
3648 /**
3649  * ipr_store_iopoll_weight - Change the adapter's polling mode
3650  * @dev:        class device struct
3651  * @buf:        buffer
3652  *
3653  * Return value:
3654  *      number of bytes consumed from buffer
3655  **/
3656 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3657                                         struct device_attribute *attr,
3658                                         const char *buf, size_t count)
3659 {
3660         struct Scsi_Host *shost = class_to_shost(dev);
3661         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3662         unsigned long user_iopoll_weight;
3663         unsigned long lock_flags = 0;
3664         int i;
3665
3666         if (!ioa_cfg->sis64) {
3667                 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3668                 return -EINVAL;
3669         }
3670         if (kstrtoul(buf, 10, &user_iopoll_weight))
3671                 return -EINVAL;
3672
3673         if (user_iopoll_weight > 256) {
3674                 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
3675                 return -EINVAL;
3676         }
3677
3678         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3679                 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll weight unchanged: the requested weight matches the current setting\n");
3680                 return strlen(buf);
3681         }
3682
3683         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3684                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3685                         blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3686         }
3687
3688         spin_lock_irqsave(shost->host_lock, lock_flags);
3689         ioa_cfg->iopoll_weight = user_iopoll_weight;
3690         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3691                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3692                         blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3693                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3694                         blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3695                 }
3696         }
3697         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3698
3699         return strlen(buf);
3700 }
3701
3702 static struct device_attribute ipr_iopoll_weight_attr = {
3703         .attr = {
3704                 .name =         "iopoll_weight",
3705                 .mode =         S_IRUGO | S_IWUSR,
3706         },
3707         .show = ipr_show_iopoll_weight,
3708         .store = ipr_store_iopoll_weight
3709 };
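
     /*
      * Typical usage (path illustrative): switch the adapter's completion
      * processing to polled mode with a weight of 64:
      *
      *   echo 64 > /sys/class/scsi_host/host0/iopoll_weight
      */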
3710
3711 /**
3712  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3713  * @buf_len:            buffer length
3714  *
3715  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3716  * list to use for microcode download
3717  *
3718  * Return value:
3719  *      pointer to sglist / NULL on failure
3720  **/
3721 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3722 {
3723         int sg_size, order, bsize_elem, num_elem, i, j;
3724         struct ipr_sglist *sglist;
3725         struct scatterlist *scatterlist;
3726         struct page *page;
3727
3728         /* Get the minimum size per scatter/gather element */
3729         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3730
3731         /* Get the actual size per element */
3732         order = get_order(sg_size);
3733
3734         /* Determine the actual number of bytes per element */
3735         bsize_elem = PAGE_SIZE * (1 << order);
3736
3737         /* Determine the actual number of sg entries needed */
3738         if (buf_len % bsize_elem)
3739                 num_elem = (buf_len / bsize_elem) + 1;
3740         else
3741                 num_elem = buf_len / bsize_elem;
3742
3743         /* Allocate a scatter/gather list for the DMA */
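             /*
              * struct ipr_sglist embeds one scatterlist entry, so only
              * num_elem - 1 additional entries need to be allocated.
              */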
3744         sglist = kzalloc(sizeof(struct ipr_sglist) +
3745                          (sizeof(struct scatterlist) * (num_elem - 1)),
3746                          GFP_KERNEL);
3747
3748         if (sglist == NULL) {
3749                 ipr_trace;
3750                 return NULL;
3751         }
3752
3753         scatterlist = sglist->scatterlist;
3754         sg_init_table(scatterlist, num_elem);
3755
3756         sglist->order = order;
3757         sglist->num_sg = num_elem;
3758
3759         /* Allocate a bunch of sg elements */
3760         for (i = 0; i < num_elem; i++) {
3761                 page = alloc_pages(GFP_KERNEL, order);
3762                 if (!page) {
3763                         ipr_trace;
3764
3765                         /* Free up what we already allocated */
3766                         for (j = i - 1; j >= 0; j--)
3767                                 __free_pages(sg_page(&scatterlist[j]), order);
3768                         kfree(sglist);
3769                         return NULL;
3770                 }
3771
3772                 sg_set_page(&scatterlist[i], page, 0, 0);
3773         }
3774
3775         return sglist;
3776 }
3777
3778 /**
3779  * ipr_free_ucode_buffer - Frees a microcode download buffer
3780  * @sglist:             scatter/gather list pointer
3781  *
3782  * Free a DMA'able ucode download buffer previously allocated with
3783  * ipr_alloc_ucode_buffer
3784  *
3785  * Return value:
3786  *      nothing
3787  **/
3788 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3789 {
3790         int i;
3791
3792         for (i = 0; i < sglist->num_sg; i++)
3793                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3794
3795         kfree(sglist);
3796 }
3797
3798 /**
3799  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3800  * @sglist:             scatter/gather list pointer
3801  * @buffer:             buffer pointer
3802  * @len:                buffer length
3803  *
3804  * Copy a microcode image from a user buffer into a buffer allocated by
3805  * ipr_alloc_ucode_buffer
3806  *
3807  * Return value:
3808  *      0 on success / other on failure
3809  **/
3810 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3811                                  u8 *buffer, u32 len)
3812 {
3813         int bsize_elem, i, result = 0;
3814         struct scatterlist *scatterlist;
3815         void *kaddr;
3816
3817         /* Determine the actual number of bytes per element */
3818         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3819
3820         scatterlist = sglist->scatterlist;
3821
3822         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3823                 struct page *page = sg_page(&scatterlist[i]);
3824
3825                 kaddr = kmap(page);
3826                 memcpy(kaddr, buffer, bsize_elem);
3827                 kunmap(page);
3828
3829                 scatterlist[i].length = bsize_elem;
3830
3831                 if (result != 0) {
3832                         ipr_trace;
3833                         return result;
3834                 }
3835         }
3836
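             /* Copy whatever partial element remains at the tail */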
3837         if (len % bsize_elem) {
3838                 struct page *page = sg_page(&scatterlist[i]);
3839
3840                 kaddr = kmap(page);
3841                 memcpy(kaddr, buffer, len % bsize_elem);
3842                 kunmap(page);
3843
3844                 scatterlist[i].length = len % bsize_elem;
3845         }
3846
3847         sglist->buffer_len = len;
3848         return result;
3849 }
3850
3851 /**
3852  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3853  * @ipr_cmd:            ipr command struct
3854  * @sglist:             scatter/gather list
3855  *
3856  * Builds a microcode download IOA data list (IOADL).
3857  *
3858  **/
3859 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3860                                     struct ipr_sglist *sglist)
3861 {
3862         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3863         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3864         struct scatterlist *scatterlist = sglist->scatterlist;
3865         int i;
3866
3867         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3868         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3869         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3870
3871         ioarcb->ioadl_len =
3872                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3873         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3874                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3875                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3876                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3877         }
3878
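             /* Flag the final descriptor so the IOA knows the list ends here */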
3879         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3880 }
3881
3882 /**
3883  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3884  * @ipr_cmd:    ipr command struct
3885  * @sglist:             scatter/gather list
3886  *
3887  * Builds a microcode download IOA data list (IOADL).
3888  *
3889  **/
3890 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3891                                   struct ipr_sglist *sglist)
3892 {
3893         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3894         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3895         struct scatterlist *scatterlist = sglist->scatterlist;
3896         int i;
3897
3898         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3899         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3900         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3901
3902         ioarcb->ioadl_len =
3903                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3904
3905         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3906                 ioadl[i].flags_and_data_len =
3907                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3908                 ioadl[i].address =
3909                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3910         }
3911
3912         ioadl[i-1].flags_and_data_len |=
3913                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3914 }
3915
3916 /**
3917  * ipr_update_ioa_ucode - Update IOA's microcode
3918  * @ioa_cfg:    ioa config struct
3919  * @sglist:             scatter/gather list
3920  *
3921  * Initiate an adapter reset to update the IOA's microcode
3922  *
3923  * Return value:
3924  *      0 on success / -EIO on failure
3925  **/
3926 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3927                                 struct ipr_sglist *sglist)
3928 {
3929         unsigned long lock_flags;
3930
3931         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3932         while (ioa_cfg->in_reset_reload) {
3933                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3934                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3935                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3936         }
3937
3938         if (ioa_cfg->ucode_sglist) {
3939                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3940                 dev_err(&ioa_cfg->pdev->dev,
3941                         "Microcode download already in progress\n");
3942                 return -EIO;
3943         }
3944
3945         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3946                                         sglist->scatterlist, sglist->num_sg,
3947                                         DMA_TO_DEVICE);
3948
3949         if (!sglist->num_dma_sg) {
3950                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3951                 dev_err(&ioa_cfg->pdev->dev,
3952                         "Failed to map microcode download buffer!\n");
3953                 return -EIO;
3954         }
3955
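             /*
              * Stash the mapped list where the reset job can find it; the
              * microcode download itself is driven by the adapter reset
              * sequence, so kick off a reset and wait for it to finish.
              */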
3956         ioa_cfg->ucode_sglist = sglist;
3957         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3958         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3959         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3960
3961         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3962         ioa_cfg->ucode_sglist = NULL;
3963         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3964         return 0;
3965 }
3966
3967 /**
3968  * ipr_store_update_fw - Update the firmware on the adapter
3969  * @dev:        device struct
3970  * @buf:        buffer
3971  * @count:      buffer size
3972  *
3973  * This function will update the firmware on the adapter.
3974  *
3975  * Return value:
3976  *      count on success / other on failure
3977  **/
3978 static ssize_t ipr_store_update_fw(struct device *dev,
3979                                    struct device_attribute *attr,
3980                                    const char *buf, size_t count)
3981 {
3982         struct Scsi_Host *shost = class_to_shost(dev);
3983         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3984         struct ipr_ucode_image_header *image_hdr;
3985         const struct firmware *fw_entry;
3986         struct ipr_sglist *sglist;
3987         char fname[100];
3988         char *src;
3989         int result, dnld_size;
3990
3991         if (!capable(CAP_SYS_ADMIN))
3992                 return -EACCES;
3993
3994         snprintf(fname, sizeof(fname), "%s", buf);
3995         fname[strcspn(fname, "\n")] = '\0';     /* strip any trailing newline */
3996
3997         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3998                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3999                 return -EIO;
4000         }
4001
4002         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4003
4004         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4005         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4006         sglist = ipr_alloc_ucode_buffer(dnld_size);
4007
4008         if (!sglist) {
4009                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4010                 release_firmware(fw_entry);
4011                 return -ENOMEM;
4012         }
4013
4014         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4015
4016         if (result) {
4017                 dev_err(&ioa_cfg->pdev->dev,
4018                         "Microcode buffer copy to DMA buffer failed\n");
4019                 goto out;
4020         }
4021
4022         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4023
4024         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4025
4026         if (!result)
4027                 result = count;
4028 out:
4029         ipr_free_ucode_buffer(sglist);
4030         release_firmware(fw_entry);
4031         return result;
4032 }
4033
4034 static struct device_attribute ipr_update_fw_attr = {
4035         .attr = {
4036                 .name =         "update_fw",
4037                 .mode =         S_IWUSR,
4038         },
4039         .store = ipr_store_update_fw
4040 };
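
     /*
      * Typical usage (file name illustrative; the image must be reachable
      * by request_firmware(), e.g. under /lib/firmware):
      *
      *   echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
      */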
4041
4042 /**
4043  * ipr_show_fw_type - Show the adapter's firmware type.
4044  * @dev:        class device struct
4045  * @buf:        buffer
4046  *
4047  * Return value:
4048  *      number of bytes printed to buffer
4049  **/
4050 static ssize_t ipr_show_fw_type(struct device *dev,
4051                                 struct device_attribute *attr, char *buf)
4052 {
4053         struct Scsi_Host *shost = class_to_shost(dev);
4054         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4055         unsigned long lock_flags = 0;
4056         int len;
4057
4058         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4059         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4060         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4061         return len;
4062 }
4063
4064 static struct device_attribute ipr_ioa_fw_type_attr = {
4065         .attr = {
4066                 .name =         "fw_type",
4067                 .mode =         S_IRUGO,
4068         },
4069         .show = ipr_show_fw_type
4070 };
4071
4072 static struct device_attribute *ipr_ioa_attrs[] = {
4073         &ipr_fw_version_attr,
4074         &ipr_log_level_attr,
4075         &ipr_diagnostics_attr,
4076         &ipr_ioa_state_attr,
4077         &ipr_ioa_reset_attr,
4078         &ipr_update_fw_attr,
4079         &ipr_ioa_fw_type_attr,
4080         &ipr_iopoll_weight_attr,
4081         NULL,
4082 };
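     /* These adapter-level attributes are hooked up through the driver's
      * scsi_host_template and appear under each adapter's scsi_host sysfs
      * directory (e.g. /sys/class/scsi_host/host0/). */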
4083
4084 #ifdef CONFIG_SCSI_IPR_DUMP
4085 /**
4086  * ipr_read_dump - Dump the adapter
4087  * @filp:               open sysfs file
4088  * @kobj:               kobject struct
4089  * @bin_attr:           bin_attribute struct
4090  * @buf:                buffer
4091  * @off:                offset
4092  * @count:              buffer size
4093  *
4094  * Return value:
4095  *      number of bytes read / negative errno on failure
4096  **/
4097 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4098                              struct bin_attribute *bin_attr,
4099                              char *buf, loff_t off, size_t count)
4100 {
4101         struct device *cdev = container_of(kobj, struct device, kobj);
4102         struct Scsi_Host *shost = class_to_shost(cdev);
4103         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4104         struct ipr_dump *dump;
4105         unsigned long lock_flags = 0;
4106         char *src;
4107         int len, sdt_end;
4108         size_t rc = count;
4109
4110         if (!capable(CAP_SYS_ADMIN))
4111                 return -EACCES;
4112
4113         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4114         dump = ioa_cfg->dump;
4115
4116         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4117                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4118                 return 0;
4119         }
4120         kref_get(&dump->kref);
4121         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4122
4123         if (off > dump->driver_dump.hdr.len) {
4124                 kref_put(&dump->kref, ipr_release_dump);
4125                 return 0;
4126         }
4127
4128         if (off + count > dump->driver_dump.hdr.len) {
4129                 count = dump->driver_dump.hdr.len - off;
4130                 rc = count;
4131         }
4132
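             /*
              * The dump image is three consecutive regions: the driver dump
              * structure itself, the IOA's SDT (dump table), and finally the
              * captured IOA data, which lives in an array of separately
              * allocated pages.  Each stage below copies the part of its
              * region that overlaps [off, off + count) and then rebases off
              * before falling through to the next region.
              */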
4133         if (count && off < sizeof(dump->driver_dump)) {
4134                 if (off + count > sizeof(dump->driver_dump))
4135                         len = sizeof(dump->driver_dump) - off;
4136                 else
4137                         len = count;
4138                 src = (u8 *)&dump->driver_dump + off;
4139                 memcpy(buf, src, len);
4140                 buf += len;
4141                 off += len;
4142                 count -= len;
4143         }
4144
4145         off -= sizeof(dump->driver_dump);
4146
4147         if (ioa_cfg->sis64)
4148                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4149                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4150                            sizeof(struct ipr_sdt_entry));
4151         else
4152                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4153                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4154
4155         if (count && off < sdt_end) {
4156                 if (off + count > sdt_end)
4157                         len = sdt_end - off;
4158                 else
4159                         len = count;
4160                 src = (u8 *)&dump->ioa_dump + off;
4161                 memcpy(buf, src, len);
4162                 buf += len;
4163                 off += len;
4164                 count -= len;
4165         }
4166
4167         off -= sdt_end;
4168
4169         while (count) {
4170                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4171                         len = PAGE_ALIGN(off) - off;
4172                 else
4173                         len = count;
4174                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4175                 src += off & ~PAGE_MASK;
4176                 memcpy(buf, src, len);
4177                 buf += len;
4178                 off += len;
4179                 count -= len;
4180         }
4181
4182         kref_put(&dump->kref, ipr_release_dump);
4183         return rc;
4184 }
4185
4186 /**
4187  * ipr_alloc_dump - Prepare for adapter dump
4188  * @ioa_cfg:    ioa config struct
4189  *
4190  * Return value:
4191  *      0 on success / other on failure
4192  **/
4193 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4194 {
4195         struct ipr_dump *dump;
4196         __be32 **ioa_data;
4197         unsigned long lock_flags = 0;
4198
4199         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4200
4201         if (!dump) {
4202                 ipr_err("Dump memory allocation failed\n");
4203                 return -ENOMEM;
4204         }
4205
4206         if (ioa_cfg->sis64)
4207                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4208         else
4209                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4210
4211         if (!ioa_data) {
4212                 ipr_err("Dump memory allocation failed\n");
4213                 kfree(dump);
4214                 return -ENOMEM;
4215         }
4216
4217         dump->ioa_dump.ioa_data = ioa_data;
4218
4219         kref_init(&dump->kref);
4220         dump->ioa_cfg = ioa_cfg;
4221
4222         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4223
4224         if (INACTIVE != ioa_cfg->sdt_state) {
4225                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4226                 vfree(dump->ioa_dump.ioa_data);
4227                 kfree(dump);
4228                 return 0;
4229         }
4230
4231         ioa_cfg->dump = dump;
4232         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4233         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4234                 ioa_cfg->dump_taken = 1;
4235                 schedule_work(&ioa_cfg->work_q);
4236         }
4237         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4238
4239         return 0;
4240 }
4241
4242 /**
4243  * ipr_free_dump - Free adapter dump memory
4244  * @ioa_cfg:    ioa config struct
4245  *
4246  * Return value:
4247  *      0 on success / other on failure
4248  **/
4249 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4250 {
4251         struct ipr_dump *dump;
4252         unsigned long lock_flags = 0;
4253
4254         ENTER;
4255
4256         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4257         dump = ioa_cfg->dump;
4258         if (!dump) {
4259                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4260                 return 0;
4261         }
4262
4263         ioa_cfg->dump = NULL;
4264         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4265
4266         kref_put(&dump->kref, ipr_release_dump);
4267
4268         LEAVE;
4269         return 0;
4270 }
4271
4272 /**
4273  * ipr_write_dump - Setup dump state of adapter
4274  * @filp:               open sysfs file
4275  * @kobj:               kobject struct
4276  * @bin_attr:           bin_attribute struct
4277  * @buf:                buffer
4278  * @off:                offset
4279  * @count:              buffer size
4280  *
4281  * Return value:
4282  *      count on success / negative errno on failure
4283  **/
4284 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4285                               struct bin_attribute *bin_attr,
4286                               char *buf, loff_t off, size_t count)
4287 {
4288         struct device *cdev = container_of(kobj, struct device, kobj);
4289         struct Scsi_Host *shost = class_to_shost(cdev);
4290         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4291         int rc;
4292
4293         if (!capable(CAP_SYS_ADMIN))
4294                 return -EACCES;
4295
4296         if (buf[0] == '1')
4297                 rc = ipr_alloc_dump(ioa_cfg);
4298         else if (buf[0] == '0')
4299                 rc = ipr_free_dump(ioa_cfg);
4300         else
4301                 return -EINVAL;
4302
4303         if (rc)
4304                 return rc;
4305         else
4306                 return count;
4307 }
4308
4309 static struct bin_attribute ipr_dump_attr = {
4310         .attr = {
4311                 .name = "dump",
4312                 .mode = S_IRUSR | S_IWUSR,
4313         },
4314         .size = 0,
4315         .read = ipr_read_dump,
4316         .write = ipr_write_dump
4317 };
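     /*
      * Dump control sketch (host number illustrative): writing '1' arms dump
      * collection via ipr_alloc_dump(); once the driver has actually
      * obtained a dump, it can be read back from the same file, and writing
      * '0' releases the memory:
      *
      *	echo 1 > /sys/class/scsi_host/host0/dump
      *	dd if=/sys/class/scsi_host/host0/dump of=/tmp/ioa.dump
      *	echo 0 > /sys/class/scsi_host/host0/dump
      */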
4318 #else
4319 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4320 #endif
4321
4322 /**
4323  * ipr_change_queue_depth - Change the device's queue depth
4324  * @sdev:       scsi device struct
4325  * @qdepth:     depth to set
4326  * @reason:     calling context
4327  *
4328  * Return value:
4329  *      actual depth set / negative errno on failure
4330  **/
4331 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4332                                   int reason)
4333 {
4334         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4335         struct ipr_resource_entry *res;
4336         unsigned long lock_flags = 0;
4337
4338         if (reason != SCSI_QDEPTH_DEFAULT)
4339                 return -EOPNOTSUPP;
4340
4341         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4342         res = (struct ipr_resource_entry *)sdev->hostdata;
4343
4344         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4345                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4346         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4347
4348         scsi_adjust_queue_depth(sdev, qdepth);
4349         return sdev->queue_depth;
4350 }
4351
4352 /**
4353  * ipr_change_queue_type - Change the device's queue type
4354  * @sdev:               scsi device struct
4355  * @tag_type:   type of tags to use
4356  *
4357  * Return value:
4358  *      actual queue type set
4359  **/
4360 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4361 {
4362         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4363         struct ipr_resource_entry *res;
4364         unsigned long lock_flags = 0;
4365
4366         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4367         res = (struct ipr_resource_entry *)sdev->hostdata;
4368         if (res && ipr_is_gscsi(res))
4369                 tag_type = scsi_change_queue_type(sdev, tag_type);
4370         else
4371                 tag_type = 0;
4372         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4373         return tag_type;
4374 }
4375
4376 /**
4377  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4378  * @dev:        device struct
4379  * @attr:       device attribute structure
4380  * @buf:        buffer
4381  *
4382  * Return value:
4383  *      number of bytes printed to buffer
4384  **/
4385 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4386 {
4387         struct scsi_device *sdev = to_scsi_device(dev);
4388         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4389         struct ipr_resource_entry *res;
4390         unsigned long lock_flags = 0;
4391         ssize_t len = -ENXIO;
4392
4393         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4394         res = (struct ipr_resource_entry *)sdev->hostdata;
4395         if (res)
4396                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4397         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4398         return len;
4399 }
4400
4401 static struct device_attribute ipr_adapter_handle_attr = {
4402         .attr = {
4403                 .name =         "adapter_handle",
4404                 .mode =         S_IRUSR,
4405         },
4406         .show = ipr_show_adapter_handle
4407 };
4408
4409 /**
4410  * ipr_show_resource_path - Show the resource path or the resource address for
4411  *                          this device.
4412  * @dev:        device struct
4413  * @attr:       device attribute structure
4414  * @buf:        buffer
4415  *
4416  * Return value:
4417  *      number of bytes printed to buffer
4418  **/
4419 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4420 {
4421         struct scsi_device *sdev = to_scsi_device(dev);
4422         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4423         struct ipr_resource_entry *res;
4424         unsigned long lock_flags = 0;
4425         ssize_t len = -ENXIO;
4426         char buffer[IPR_MAX_RES_PATH_LENGTH];
4427
4428         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4429         res = (struct ipr_resource_entry *)sdev->hostdata;
4430         if (res && ioa_cfg->sis64)
4431                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4432                                __ipr_format_res_path(res->res_path, buffer,
4433                                                      sizeof(buffer)));
4434         else if (res)
4435                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4436                                res->bus, res->target, res->lun);
4437
4438         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4439         return len;
4440 }
4441
4442 static struct device_attribute ipr_resource_path_attr = {
4443         .attr = {
4444                 .name =         "resource_path",
4445                 .mode =         S_IRUGO,
4446         },
4447         .show = ipr_show_resource_path
4448 };
4449
4450 /**
4451  * ipr_show_device_id - Show the device_id for this device.
4452  * @dev:        device struct
4453  * @attr:       device attribute structure
4454  * @buf:        buffer
4455  *
4456  * Return value:
4457  *      number of bytes printed to buffer
4458  **/
4459 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4460 {
4461         struct scsi_device *sdev = to_scsi_device(dev);
4462         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4463         struct ipr_resource_entry *res;
4464         unsigned long lock_flags = 0;
4465         ssize_t len = -ENXIO;
4466
4467         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4468         res = (struct ipr_resource_entry *)sdev->hostdata;
4469         if (res && ioa_cfg->sis64)
4470                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4471         else if (res)
4472                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4473
4474         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4475         return len;
4476 }
4477
4478 static struct device_attribute ipr_device_id_attr = {
4479         .attr = {
4480                 .name =         "device_id",
4481                 .mode =         S_IRUGO,
4482         },
4483         .show = ipr_show_device_id
4484 };
4485
4486 /**
4487  * ipr_show_resource_type - Show the resource type for this device.
4488  * @dev:        device struct
4489  * @attr:       device attribute structure
4490  * @buf:        buffer
4491  *
4492  * Return value:
4493  *      number of bytes printed to buffer
4494  **/
4495 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4496 {
4497         struct scsi_device *sdev = to_scsi_device(dev);
4498         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4499         struct ipr_resource_entry *res;
4500         unsigned long lock_flags = 0;
4501         ssize_t len = -ENXIO;
4502
4503         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4504         res = (struct ipr_resource_entry *)sdev->hostdata;
4505
4506         if (res)
4507                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4508
4509         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4510         return len;
4511 }
4512
4513 static struct device_attribute ipr_resource_type_attr = {
4514         .attr = {
4515                 .name =         "resource_type",
4516                 .mode =         S_IRUGO,
4517         },
4518         .show = ipr_show_resource_type
4519 };
4520
4521 static struct device_attribute *ipr_dev_attrs[] = {
4522         &ipr_adapter_handle_attr,
4523         &ipr_resource_path_attr,
4524         &ipr_device_id_attr,
4525         &ipr_resource_type_attr,
4526         NULL,
4527 };
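     /* Per-device attributes, registered through the scsi_host_template and
      * surfaced in each SCSI device's sysfs directory (e.g.
      * /sys/bus/scsi/devices/<h:c:t:l>/). */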
4528
4529 /**
4530  * ipr_biosparam - Return the HSC mapping
4531  * @sdev:                       scsi device struct
4532  * @block_device:       block device pointer
4533  * @capacity:           capacity of the device
4534  * @parm:                       Array containing returned HSC values.
4535  *
4536  * This function generates the HSC parms that fdisk uses.
4537  * We want to make sure we return something that places partitions
4538  * on 4k boundaries for best performance with the IOA.
4539  *
4540  * Return value:
4541  *      0 on success
4542  **/
4543 static int ipr_biosparam(struct scsi_device *sdev,
4544                          struct block_device *block_device,
4545                          sector_t capacity, int *parm)
4546 {
4547         int heads, sectors;
4548         sector_t cylinders;
4549
4550         heads = 128;
4551         sectors = 32;
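             /* 128 heads * 32 sectors = 4096 sectors per cylinder, so
              * cylinder-aligned partitions created by fdisk also land on
              * 4k boundaries. */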
4552
4553         cylinders = capacity;
4554         sector_div(cylinders, (128 * 32));
4555
4556         /* return result */
4557         parm[0] = heads;
4558         parm[1] = sectors;
4559         parm[2] = cylinders;
4560
4561         return 0;
4562 }
4563
4564 /**
4565  * ipr_find_starget - Find target based on bus/target.
4566  * @starget:    scsi target struct
4567  *
4568  * Return value:
4569  *      resource entry pointer if found / NULL if not found
4570  **/
4571 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4572 {
4573         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4574         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4575         struct ipr_resource_entry *res;
4576
4577         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4578                 if ((res->bus == starget->channel) &&
4579                     (res->target == starget->id)) {
4580                         return res;
4581                 }
4582         }
4583
4584         return NULL;
4585 }
4586
4587 static struct ata_port_info sata_port_info;
4588
4589 /**
4590  * ipr_target_alloc - Prepare for commands to a SCSI target
4591  * @starget:    scsi target struct
4592  *
4593  * If the device is a SATA device, this function allocates an
4594  * ATA port with libata, else it does nothing.
4595  *
4596  * Return value:
4597  *      0 on success / non-0 on failure
4598  **/
4599 static int ipr_target_alloc(struct scsi_target *starget)
4600 {
4601         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4602         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4603         struct ipr_sata_port *sata_port;
4604         struct ata_port *ap;
4605         struct ipr_resource_entry *res;
4606         unsigned long lock_flags;
4607
4608         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4609         res = ipr_find_starget(starget);
4610         starget->hostdata = NULL;
4611
4612         if (res && ipr_is_gata(res)) {
4613                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4614                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4615                 if (!sata_port)
4616                         return -ENOMEM;
4617
4618                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4619                 if (ap) {
4620                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4621                         sata_port->ioa_cfg = ioa_cfg;
4622                         sata_port->ap = ap;
4623                         sata_port->res = res;
4624
4625                         res->sata_port = sata_port;
4626                         ap->private_data = sata_port;
4627                         starget->hostdata = sata_port;
4628                 } else {
4629                         kfree(sata_port);
4630                         return -ENOMEM;
4631                 }
4632         }
4633         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4634
4635         return 0;
4636 }
4637
4638 /**
4639  * ipr_target_destroy - Destroy a SCSI target
4640  * @starget:    scsi target struct
4641  *
4642  * If the device was a SATA device, this function frees the libata
4643  * ATA port, else it does nothing.
4644  *
4645  **/
4646 static void ipr_target_destroy(struct scsi_target *starget)
4647 {
4648         struct ipr_sata_port *sata_port = starget->hostdata;
4649         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4650         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4651
4652         if (ioa_cfg->sis64) {
4653                 if (!ipr_find_starget(starget)) {
4654                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4655                                 clear_bit(starget->id, ioa_cfg->array_ids);
4656                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4657                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4658                         else if (starget->channel == 0)
4659                                 clear_bit(starget->id, ioa_cfg->target_ids);
4660                 }
4661         }
4662
4663         if (sata_port) {
4664                 starget->hostdata = NULL;
4665                 ata_sas_port_destroy(sata_port->ap);
4666                 kfree(sata_port);
4667         }
4668 }
4669
4670 /**
4671  * ipr_find_sdev - Find device based on bus/target/lun.
4672  * @sdev:       scsi device struct
4673  *
4674  * Return value:
4675  *      resource entry pointer if found / NULL if not found
4676  **/
4677 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4678 {
4679         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4680         struct ipr_resource_entry *res;
4681
4682         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4683                 if ((res->bus == sdev->channel) &&
4684                     (res->target == sdev->id) &&
4685                     (res->lun == sdev->lun))
4686                         return res;
4687         }
4688
4689         return NULL;
4690 }
4691
4692 /**
4693  * ipr_slave_destroy - Unconfigure a SCSI device
4694  * @sdev:       scsi device struct
4695  *
4696  * Return value:
4697  *      nothing
4698  **/
4699 static void ipr_slave_destroy(struct scsi_device *sdev)
4700 {
4701         struct ipr_resource_entry *res;
4702         struct ipr_ioa_cfg *ioa_cfg;
4703         unsigned long lock_flags = 0;
4704
4705         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4706
4707         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4708         res = (struct ipr_resource_entry *) sdev->hostdata;
4709         if (res) {
4710                 if (res->sata_port)
4711                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4712                 sdev->hostdata = NULL;
4713                 res->sdev = NULL;
4714                 res->sata_port = NULL;
4715         }
4716         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4717 }
4718
4719 /**
4720  * ipr_slave_configure - Configure a SCSI device
4721  * @sdev:       scsi device struct
4722  *
4723  * This function configures the specified scsi device.
4724  *
4725  * Return value:
4726  *      0 on success
4727  **/
4728 static int ipr_slave_configure(struct scsi_device *sdev)
4729 {
4730         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4731         struct ipr_resource_entry *res;
4732         struct ata_port *ap = NULL;
4733         unsigned long lock_flags = 0;
4734         char buffer[IPR_MAX_RES_PATH_LENGTH];
4735
4736         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4737         res = sdev->hostdata;
4738         if (res) {
4739                 if (ipr_is_af_dasd_device(res))
4740                         sdev->type = TYPE_RAID;
4741                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4742                         sdev->scsi_level = 4;
4743                         sdev->no_uld_attach = 1;
4744                 }
4745                 if (ipr_is_vset_device(res)) {
4746                         blk_queue_rq_timeout(sdev->request_queue,
4747                                              IPR_VSET_RW_TIMEOUT);
4748                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4749                 }
4750                 if (ipr_is_gata(res) && res->sata_port)
4751                         ap = res->sata_port->ap;
4752                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4753
4754                 if (ap) {
4755                         scsi_adjust_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4756                         ata_sas_slave_configure(sdev, ap);
4757                 }
4758
4759                 if (ioa_cfg->sis64)
4760                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4761                                     ipr_format_res_path(ioa_cfg,
4762                                 res->res_path, buffer, sizeof(buffer)));
4763                 return 0;
4764         }
4765         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4766         return 0;
4767 }
4768
4769 /**
4770  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4771  * @sdev:       scsi device struct
4772  *
4773  * This function initializes an ATA port so that future commands
4774  * sent through queuecommand will work.
4775  *
4776  * Return value:
4777  *      0 on success
4778  **/
4779 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4780 {
4781         struct ipr_sata_port *sata_port = NULL;
4782         int rc = -ENXIO;
4783
4784         ENTER;
4785         if (sdev->sdev_target)
4786                 sata_port = sdev->sdev_target->hostdata;
4787         if (sata_port) {
4788                 rc = ata_sas_port_init(sata_port->ap);
4789                 if (rc == 0)
4790                         rc = ata_sas_sync_probe(sata_port->ap);
4791         }
4792
4793         if (rc)
4794                 ipr_slave_destroy(sdev);
4795
4796         LEAVE;
4797         return rc;
4798 }
4799
4800 /**
4801  * ipr_slave_alloc - Prepare for commands to a device.
4802  * @sdev:       scsi device struct
4803  *
4804  * This function saves a pointer to the resource entry
4805  * in the scsi device struct if the device exists. We
4806  * can then use this pointer in ipr_queuecommand when
4807  * handling new commands.
4808  *
4809  * Return value:
4810  *      0 on success / -ENXIO if device does not exist
4811  **/
4812 static int ipr_slave_alloc(struct scsi_device *sdev)
4813 {
4814         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4815         struct ipr_resource_entry *res;
4816         unsigned long lock_flags;
4817         int rc = -ENXIO;
4818
4819         sdev->hostdata = NULL;
4820
4821         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4822
4823         res = ipr_find_sdev(sdev);
4824         if (res) {
4825                 res->sdev = sdev;
4826                 res->add_to_ml = 0;
4827                 res->in_erp = 0;
4828                 sdev->hostdata = res;
4829                 if (!ipr_is_naca_model(res))
4830                         res->needs_sync_complete = 1;
4831                 rc = 0;
4832                 if (ipr_is_gata(res)) {
4833                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4834                         return ipr_ata_slave_alloc(sdev);
4835                 }
4836         }
4837
4838         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4839
4840         return rc;
4841 }
4842
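     /**
      * ipr_eh_host_reset - Reset the host adapter
      * @cmd:	scsi command struct
      *
      * Return value:
      *	SUCCESS / FAILED
      **/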
4843 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4844 {
4845         struct ipr_ioa_cfg *ioa_cfg;
4846         unsigned long lock_flags = 0;
4847         int rc = SUCCESS;
4848
4849         ENTER;
4850         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4851         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4852
4853         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4854                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4855                 dev_err(&ioa_cfg->pdev->dev,
4856                         "Adapter being reset as a result of error recovery.\n");
4857
4858                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4859                         ioa_cfg->sdt_state = GET_DUMP;
4860         }
4861
4862         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4863         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4864         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4865
4866         /* If we got hit with a host reset while we were already resetting
4867          * the adapter for some reason, and that reset failed, report this
              * host reset as failed as well. */
4868         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4869                 ipr_trace;
4870                 rc = FAILED;
4871         }
4872
4873         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4874         LEAVE;
4875         return rc;
4876 }
4877
4878 /**
4879  * ipr_device_reset - Reset the device
4880  * @ioa_cfg:    ioa config struct
4881  * @res:                resource entry struct
4882  *
4883  * This function issues a device reset to the affected device.
4884  * If the device is a SCSI device, a LUN reset will be sent
4885  * to the device first. If that does not work, a target reset
4886  * will be sent. If the device is a SATA device, a PHY reset will
4887  * be sent.
4888  *
4889  * Return value:
4890  *      0 on success / non-zero on failure
4891  **/
4892 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4893                             struct ipr_resource_entry *res)
4894 {
4895         struct ipr_cmnd *ipr_cmd;
4896         struct ipr_ioarcb *ioarcb;
4897         struct ipr_cmd_pkt *cmd_pkt;
4898         struct ipr_ioarcb_ata_regs *regs;
4899         u32 ioasc;
4900
4901         ENTER;
4902         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4903         ioarcb = &ipr_cmd->ioarcb;
4904         cmd_pkt = &ioarcb->cmd_pkt;
4905
4906         if (ipr_cmd->ioa_cfg->sis64) {
4907                 regs = &ipr_cmd->i.ata_ioadl.regs;
4908                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4909         } else
4910                 regs = &ioarcb->u.add_data.u.regs;
4911
4912         ioarcb->res_handle = res->res_handle;
4913         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4914         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4915         if (ipr_is_gata(res)) {
4916                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4917                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4918                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4919         }
4920
4921         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4922         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4923         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4924         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4925                 if (ipr_cmd->ioa_cfg->sis64)
4926                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4927                                sizeof(struct ipr_ioasa_gata));
4928                 else
4929                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4930                                sizeof(struct ipr_ioasa_gata));
4931         }
4932
4933         LEAVE;
4934         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4935 }
4936
4937 /**
4938  * ipr_sata_reset - Reset the SATA port
4939  * @link:       SATA link to reset
4940  * @classes:    class of the attached device
      * @deadline:   deadline jiffies for the reset (unused here)
4941  *
4942  * This function issues a SATA phy reset to the affected ATA link.
4943  *
4944  * Return value:
4945  *      0 on success / non-zero on failure
4946  **/
4947 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4948                                 unsigned long deadline)
4949 {
4950         struct ipr_sata_port *sata_port = link->ap->private_data;
4951         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4952         struct ipr_resource_entry *res;
4953         unsigned long lock_flags = 0;
4954         int rc = -ENXIO;
4955
4956         ENTER;
4957         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4958         while (ioa_cfg->in_reset_reload) {
4959                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4960                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4961                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4962         }
4963
4964         res = sata_port->res;
4965         if (res) {
4966                 rc = ipr_device_reset(ioa_cfg, res);
4967                 *classes = res->ata_class;
4968         }
4969
4970         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4971         LEAVE;
4972         return rc;
4973 }
4974
4975 /**
4976  * __ipr_eh_dev_reset - Reset the device
4977  * @scsi_cmd:   scsi command struct
4978  *
4979  * This function issues a device reset to the affected device.
4980  * A LUN reset will be sent to the device first. If that does
4981  * not work, a target reset will be sent.
4982  *
4983  * Return value:
4984  *      SUCCESS / FAILED
4985  **/
4986 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4987 {
4988         struct ipr_cmnd *ipr_cmd;
4989         struct ipr_ioa_cfg *ioa_cfg;
4990         struct ipr_resource_entry *res;
4991         struct ata_port *ap;
4992         int rc = 0;
4993         struct ipr_hrr_queue *hrrq;
4994
4995         ENTER;
4996         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4997         res = scsi_cmd->device->hostdata;
4998
4999         if (!res)
5000                 return FAILED;
5001
5002         /*
5003          * If we are currently going through reset/reload, return failed. This will force the
5004          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5005          * reset to complete
5006          */
5007         if (ioa_cfg->in_reset_reload)
5008                 return FAILED;
5009         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5010                 return FAILED;
5011
5012         for_each_hrrq(hrrq, ioa_cfg) {
5013                 spin_lock(&hrrq->_lock);
5014                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5015                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5016                                 if (ipr_cmd->scsi_cmd)
5017                                         ipr_cmd->done = ipr_scsi_eh_done;
5018                                 if (ipr_cmd->qc)
5019                                         ipr_cmd->done = ipr_sata_eh_done;
5020                                 if (ipr_cmd->qc &&
5021                                     !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5022                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5023                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5024                                 }
5025                         }
5026                 }
5027                 spin_unlock(&hrrq->_lock);
5028         }
5029         res->resetting_device = 1;
5030         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5031
5032         if (ipr_is_gata(res) && res->sata_port) {
5033                 ap = res->sata_port->ap;
5034                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5035                 ata_std_error_handler(ap);
5036                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5037
5038                 for_each_hrrq(hrrq, ioa_cfg) {
5039                         spin_lock(&hrrq->_lock);
5040                         list_for_each_entry(ipr_cmd,
5041                                             &hrrq->hrrq_pending_q, queue) {
5042                                 if (ipr_cmd->ioarcb.res_handle ==
5043                                     res->res_handle) {
5044                                         rc = -EIO;
5045                                         break;
5046                                 }
5047                         }
5048                         spin_unlock(&hrrq->_lock);
5049                 }
5050         } else
5051                 rc = ipr_device_reset(ioa_cfg, res);
5052         res->resetting_device = 0;
5053         res->reset_occurred = 1;
5054
5055         LEAVE;
5056         return rc ? FAILED : SUCCESS;
5057 }
5058
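     /**
      * ipr_eh_dev_reset - Device reset entry point for the SCSI midlayer
      * @cmd:	scsi command struct
      *
      * Grabs the host lock and calls __ipr_eh_dev_reset().
      *
      * Return value:
      *	SUCCESS / FAILED
      **/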
5059 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5060 {
5061         int rc;
5062
5063         spin_lock_irq(cmd->device->host->host_lock);
5064         rc = __ipr_eh_dev_reset(cmd);
5065         spin_unlock_irq(cmd->device->host->host_lock);
5066
5067         return rc;
5068 }
5069
5070 /**
5071  * ipr_bus_reset_done - Op done function for bus reset.
5072  * @ipr_cmd:    ipr command struct
5073  *
5074  * This function is the op done function for a bus reset
5075  *
5076  * Return value:
5077  *      none
5078  **/
5079 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5080 {
5081         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5082         struct ipr_resource_entry *res;
5083
5084         ENTER;
5085         if (!ioa_cfg->sis64)
5086                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5087                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5088                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5089                                 break;
5090                         }
5091                 }
5092
5093         /*
5094          * If abort has not completed, indicate the reset has, else call the
5095          * abort's done function to wake the sleeping eh thread
5096          */
5097         if (ipr_cmd->sibling->sibling)
5098                 ipr_cmd->sibling->sibling = NULL;
5099         else
5100                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5101
5102         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5103         LEAVE;
5104 }
5105
5106 /**
5107  * ipr_abort_timeout - An abort task has timed out
5108  * @ipr_cmd:    ipr command struct
5109  *
5110  * This function handles when an abort task times out. If this
5111  * happens we issue a bus reset since we have resources tied
5112  * up that must be freed before returning to the midlayer.
5113  *
5114  * Return value:
5115  *      none
5116  **/
5117 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5118 {
5119         struct ipr_cmnd *reset_cmd;
5120         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5121         struct ipr_cmd_pkt *cmd_pkt;
5122         unsigned long lock_flags = 0;
5123
5124         ENTER;
5125         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5126         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5127                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5128                 return;
5129         }
5130
5131         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5132         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5133         ipr_cmd->sibling = reset_cmd;
5134         reset_cmd->sibling = ipr_cmd;
5135         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5136         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5137         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5138         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5139         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5140
5141         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5142         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5143         LEAVE;
5144 }
5145
5146 /**
5147  * ipr_cancel_op - Cancel specified op
5148  * @scsi_cmd:   scsi command struct
5149  *
5150  * This function cancels specified op.
5151  *
5152  * Return value:
5153  *      SUCCESS / FAILED
5154  **/
5155 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5156 {
5157         struct ipr_cmnd *ipr_cmd;
5158         struct ipr_ioa_cfg *ioa_cfg;
5159         struct ipr_resource_entry *res;
5160         struct ipr_cmd_pkt *cmd_pkt;
5161         u32 ioasc, int_reg;
5162         int op_found = 0;
5163         struct ipr_hrr_queue *hrrq;
5164
5165         ENTER;
5166         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5167         res = scsi_cmd->device->hostdata;
5168
5169         /* If we are currently going through reset/reload, return failed.
5170          * This will force the mid-layer to call ipr_eh_host_reset,
5171          * which will then go to sleep and wait for the reset to complete
5172          */
5173         if (ioa_cfg->in_reset_reload ||
5174             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5175                 return FAILED;
5176         if (!res)
5177                 return FAILED;
5178
5179         /*
5180          * If we are aborting a timed out op, chances are that the timeout
5181          * was caused by an EEH error that has not yet been detected. In
5182          * such cases, reading a register will trigger the EEH recovery
              * infrastructure.
5183          */
5184         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5185
5186         if (!ipr_is_gscsi(res))
5187                 return FAILED;
5188
5189         for_each_hrrq(hrrq, ioa_cfg) {
5190                 spin_lock(&hrrq->_lock);
5191                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5192                         if (ipr_cmd->scsi_cmd == scsi_cmd) {
5193                                 ipr_cmd->done = ipr_scsi_eh_done;
5194                                 op_found = 1;
5195                                 break;
5196                         }
5197                 }
5198                 spin_unlock(&hrrq->_lock);
5199         }
5200
5201         if (!op_found)
5202                 return SUCCESS;
5203
5204         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5205         ipr_cmd->ioarcb.res_handle = res->res_handle;
5206         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5207         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5208         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5209         ipr_cmd->u.sdev = scsi_cmd->device;
5210
5211         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5212                     scsi_cmd->cmnd[0]);
5213         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5214         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5215
5216         /*
5217          * If the abort task timed out and we sent a bus reset, we will get
5218          * one of the following responses to the abort
5219          */
5220         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5221                 ioasc = 0;
5222                 ipr_trace;
5223         }
5224
5225         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5226         if (!ipr_is_naca_model(res))
5227                 res->needs_sync_complete = 1;
5228
5229         LEAVE;
5230         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5231 }
5232
5233 /**
5234  * ipr_eh_abort - Abort a single op
5235  * @scsi_cmd:   scsi command struct
5236  *
5237  * Return value:
5238  *      SUCCESS / FAILED
5239  **/
5240 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5241 {
5242         unsigned long flags;
5243         int rc;
5244
5245         ENTER;
5246
5247         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5248         rc = ipr_cancel_op(scsi_cmd);
5249         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5250
5251         LEAVE;
5252         return rc;
5253 }
5254
5255 /**
5256  * ipr_handle_other_interrupt - Handle "other" interrupts
5257  * @ioa_cfg:    ioa config struct
5258  * @int_reg:    interrupt register
5259  *
5260  * Return value:
5261  *      IRQ_NONE / IRQ_HANDLED
5262  **/
5263 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5264                                               u32 int_reg)
5265 {
5266         irqreturn_t rc = IRQ_HANDLED;
5267         u32 int_mask_reg;
5268
5269         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5270         int_reg &= ~int_mask_reg;
5271
5272         /* If no operational interrupt is pending, ignore it, unless this
5273          * is SIS 64, in which case it may be a stage change interrupt.
5274          */
5275         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5276                 if (ioa_cfg->sis64) {
5277                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5278                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5279                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5280
5281                                 /* clear stage change */
5282                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5283                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5284                                 list_del(&ioa_cfg->reset_cmd->queue);
5285                                 del_timer(&ioa_cfg->reset_cmd->timer);
5286                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5287                                 return IRQ_HANDLED;
5288                         }
5289                 }
5290
5291                 return IRQ_NONE;
5292         }
5293
5294         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5295                 /* Mask the interrupt */
5296                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5297
5298                 /* Clear the interrupt */
5299                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5300                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5301
5302                 list_del(&ioa_cfg->reset_cmd->queue);
5303                 del_timer(&ioa_cfg->reset_cmd->timer);
5304                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5305         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5306                 if (ioa_cfg->clear_isr) {
5307                         if (ipr_debug && printk_ratelimit())
5308                                 dev_err(&ioa_cfg->pdev->dev,
5309                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5310                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5311                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5312                         return IRQ_NONE;
5313                 }
5314         } else {
5315                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5316                         ioa_cfg->ioa_unit_checked = 1;
5317                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5318                         dev_err(&ioa_cfg->pdev->dev,
5319                                 "No Host RRQ. 0x%08X\n", int_reg);
5320                 else
5321                         dev_err(&ioa_cfg->pdev->dev,
5322                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5323
5324                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5325                         ioa_cfg->sdt_state = GET_DUMP;
5326
5327                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5328                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5329         }
5330
5331         return rc;
5332 }
5333
5334 /**
5335  * ipr_isr_eh - Interrupt service routine error handler
5336  * @ioa_cfg:    ioa config struct
5337  * @msg:        message to log
      * @number:     value logged along with the message
5338  *
5339  * Return value:
5340  *      none
5341  **/
5342 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5343 {
5344         ioa_cfg->errors_logged++;
5345         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5346
5347         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5348                 ioa_cfg->sdt_state = GET_DUMP;
5349
5350         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5351 }
5352
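     /**
      * ipr_process_hrrq - Process responses from a host request response queue
      * @hrr_queue:	HRR queue to drain
      * @budget:	maximum number of responses to process, or -1 for no limit
      * @doneq:	list to which completed commands are moved
      *
      * Called with the queue's lock held.
      *
      * Return value:
      *	number of responses processed
      **/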
5353 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5354                                                 struct list_head *doneq)
5355 {
5356         u32 ioasc;
5357         u16 cmd_index;
5358         struct ipr_cmnd *ipr_cmd;
5359         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5360         int num_hrrq = 0;
5361
5362         /* If interrupts are disabled, ignore the interrupt */
5363         if (!hrr_queue->allow_interrupts)
5364                 return 0;
5365
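             /*
              * The IOA writes each response entry with the toggle value the
              * host currently expects, so an entry is valid only while its
              * toggle bit matches hrr_queue->toggle_bit; the expected value
              * is flipped whenever the queue wraps below.
              */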
5366         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5367                hrr_queue->toggle_bit) {
5368
5369                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5370                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5371                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5372
5373                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5374                              cmd_index < hrr_queue->min_cmd_id)) {
5375                         ipr_isr_eh(ioa_cfg,
5376                                 "Invalid response handle from IOA: ",
5377                                 cmd_index);
5378                         break;
5379                 }
5380
5381                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5382                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5383
5384                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5385
5386                 list_move_tail(&ipr_cmd->queue, doneq);
5387
5388                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5389                         hrr_queue->hrrq_curr++;
5390                 } else {
5391                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5392                         hrr_queue->toggle_bit ^= 1u;
5393                 }
5394                 num_hrrq++;
5395                 if (budget > 0 && num_hrrq >= budget)
5396                         break;
5397         }
5398
5399         return num_hrrq;
5400 }
5401
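     /**
      * ipr_iopoll - blk_iopoll callback for polled completion processing
      * @iop:	blk_iopoll struct embedded in the HRR queue
      * @budget:	maximum number of completions to process per poll
      *
      * Return value:
      *	number of operations completed
      **/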
5402 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5403 {
5404         struct ipr_ioa_cfg *ioa_cfg;
5405         struct ipr_hrr_queue *hrrq;
5406         struct ipr_cmnd *ipr_cmd, *temp;
5407         unsigned long hrrq_flags;
5408         int completed_ops;
5409         LIST_HEAD(doneq);
5410
5411         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5412         ioa_cfg = hrrq->ioa_cfg;
5413
5414         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5415         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5416
5417         if (completed_ops < budget)
5418                 blk_iopoll_complete(iop);
5419         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5420
5421         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5422                 list_del(&ipr_cmd->queue);
5423                 del_timer(&ipr_cmd->timer);
5424                 ipr_cmd->fast_done(ipr_cmd);
5425         }
5426
5427         return completed_ops;
5428 }
5429
5430 /**
5431  * ipr_isr - Interrupt service routine
5432  * @irq:        irq number
5433  * @devp:       pointer to the HRR queue for this interrupt
5434  *
5435  * Return value:
5436  *      IRQ_NONE / IRQ_HANDLED
5437  **/
5438 static irqreturn_t ipr_isr(int irq, void *devp)
5439 {
5440         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5441         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5442         unsigned long hrrq_flags = 0;
5443         u32 int_reg = 0;
5444         int num_hrrq = 0;
5445         int irq_none = 0;
5446         struct ipr_cmnd *ipr_cmd, *temp;
5447         irqreturn_t rc = IRQ_NONE;
5448         LIST_HEAD(doneq);
5449
5450         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5451         /* If interrupts are disabled, ignore the interrupt */
5452         if (!hrrq->allow_interrupts) {
5453                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5454                 return IRQ_NONE;
5455         }
5456
5457         while (1) {
5458                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5459                         rc =  IRQ_HANDLED;
5460
5461                         if (!ioa_cfg->clear_isr)
5462                                 break;
5463
5464                         /* Clear the PCI interrupt */
5465                         num_hrrq = 0;
5466                         do {
5467                                 writel(IPR_PCII_HRRQ_UPDATED,
5468                                      ioa_cfg->regs.clr_interrupt_reg32);
5469                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5470                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5471                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5472
5473                 } else if (rc == IRQ_NONE && irq_none == 0) {
5474                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5475                         irq_none++;
5476                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5477                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5478                         ipr_isr_eh(ioa_cfg,
5479                                 "Error clearing HRRQ: ", num_hrrq);
5480                         rc = IRQ_HANDLED;
5481                         break;
5482                 } else
5483                         break;
5484         }
5485
5486         if (unlikely(rc == IRQ_NONE))
5487                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5488
5489         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5490         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5491                 list_del(&ipr_cmd->queue);
5492                 del_timer(&ipr_cmd->timer);
5493                 ipr_cmd->fast_done(ipr_cmd);
5494         }
5495         return rc;
5496 }
5497
5498 /**
5499  * ipr_isr_mhrrq - Interrupt service routine for secondary HRR queues
5500  * @irq:        irq number
5501  * @devp:       pointer to the HRR queue for this interrupt
5502  *
5503  * Return value:
5504  *      IRQ_NONE / IRQ_HANDLED
5505  **/
5506 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5507 {
5508         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5509         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5510         unsigned long hrrq_flags = 0;
5511         struct ipr_cmnd *ipr_cmd, *temp;
5512         irqreturn_t rc = IRQ_NONE;
5513         LIST_HEAD(doneq);
5514
5515         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5516
5517         /* If interrupts are disabled, ignore the interrupt */
5518         if (!hrrq->allow_interrupts) {
5519                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5520                 return IRQ_NONE;
5521         }
5522
5523         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5524                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5525                        hrrq->toggle_bit) {
5526                         if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5527                                 blk_iopoll_sched(&hrrq->iopoll);
5528                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5529                         return IRQ_HANDLED;
5530                 }
5531         } else {
5532                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5533                         hrrq->toggle_bit)
5534
5535                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5536                                 rc =  IRQ_HANDLED;
5537         }
5538
5539         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5540
5541         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5542                 list_del(&ipr_cmd->queue);
5543                 del_timer(&ipr_cmd->timer);
5544                 ipr_cmd->fast_done(ipr_cmd);
5545         }
5546         return rc;
5547 }
5548
5549 /**
5550  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5551  * @ioa_cfg:    ioa config struct
5552  * @ipr_cmd:    ipr command struct
5553  *
5554  * Return value:
5555  *      0 on success / -1 on failure
5556  **/
5557 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5558                              struct ipr_cmnd *ipr_cmd)
5559 {
5560         int i, nseg;
5561         struct scatterlist *sg;
5562         u32 length;
5563         u32 ioadl_flags = 0;
5564         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5565         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5566         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5567
5568         length = scsi_bufflen(scsi_cmd);
5569         if (!length)
5570                 return 0;
5571
5572         nseg = scsi_dma_map(scsi_cmd);
5573         if (nseg < 0) {
5574                 if (printk_ratelimit())
5575                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5576                 return -1;
5577         }
5578
5579         ipr_cmd->dma_use_sg = nseg;
5580
5581         ioarcb->data_transfer_length = cpu_to_be32(length);
5582         ioarcb->ioadl_len =
5583                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5584
5585         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5586                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5587                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5588         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5589                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5590
5591         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5592                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5593                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5594                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5595         }
5596
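        /* Flag the last descriptor so the IOA knows where the chain ends */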
5597         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5598         return 0;
5599 }
5600
5601 /**
5602  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5603  * @ioa_cfg:    ioa config struct
5604  * @ipr_cmd:    ipr command struct
5605  *
5606  * Return value:
5607  *      0 on success / -1 on failure
5608  **/
5609 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5610                            struct ipr_cmnd *ipr_cmd)
5611 {
5612         int i, nseg;
5613         struct scatterlist *sg;
5614         u32 length;
5615         u32 ioadl_flags = 0;
5616         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5617         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5618         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5619
5620         length = scsi_bufflen(scsi_cmd);
5621         if (!length)
5622                 return 0;
5623
5624         nseg = scsi_dma_map(scsi_cmd);
5625         if (nseg < 0) {
5626                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5627                 return -1;
5628         }
5629
5630         ipr_cmd->dma_use_sg = nseg;
5631
5632         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5633                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5634                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5635                 ioarcb->data_transfer_length = cpu_to_be32(length);
5636                 ioarcb->ioadl_len =
5637                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5638         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5639                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5640                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5641                 ioarcb->read_ioadl_len =
5642                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5643         }
5644
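        /*
         * A short S/G list fits in the IOARCB's additional data area,
         * so the adapter can fetch the descriptors together with the
         * IOARCB instead of through a separate DMA.
         */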
5645         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5646                 ioadl = ioarcb->u.add_data.u.ioadl;
5647                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5648                                     offsetof(struct ipr_ioarcb, u.add_data));
5649                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5650         }
5651
5652         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5653                 ioadl[i].flags_and_data_len =
5654                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5655                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5656         }
5657
5658         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5659         return 0;
5660 }
5661
5662 /**
5663  * ipr_erp_done - Process completion of ERP for a device
5664  * @ipr_cmd:            ipr command struct
5665  *
5666  * This function copies the sense buffer into the scsi_cmd
 * struct and calls the scsi_done function.
5668  *
5669  * Return value:
5670  *      nothing
5671  **/
5672 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5673 {
5674         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5675         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5676         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5677
5678         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5679                 scsi_cmd->result |= (DID_ERROR << 16);
5680                 scmd_printk(KERN_ERR, scsi_cmd,
5681                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5682         } else {
5683                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5684                        SCSI_SENSE_BUFFERSIZE);
5685         }
5686
5687         if (res) {
5688                 if (!ipr_is_naca_model(res))
5689                         res->needs_sync_complete = 1;
5690                 res->in_erp = 0;
5691         }
5692         scsi_dma_unmap(ipr_cmd->scsi_cmd);
5693         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5694         scsi_cmd->scsi_done(scsi_cmd);
5695 }
5696
5697 /**
5698  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5699  * @ipr_cmd:    ipr command struct
5700  *
5701  * Return value:
5702  *      none
5703  **/
5704 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5705 {
5706         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5707         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5708         dma_addr_t dma_addr = ipr_cmd->dma_addr;
5709
5710         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5711         ioarcb->data_transfer_length = 0;
5712         ioarcb->read_data_transfer_length = 0;
5713         ioarcb->ioadl_len = 0;
5714         ioarcb->read_ioadl_len = 0;
5715         ioasa->hdr.ioasc = 0;
5716         ioasa->hdr.residual_data_len = 0;
5717
5718         if (ipr_cmd->ioa_cfg->sis64)
5719                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5720                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5721         else {
5722                 ioarcb->write_ioadl_addr =
5723                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5724                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5725         }
5726 }
5727
5728 /**
5729  * ipr_erp_request_sense - Send request sense to a device
5730  * @ipr_cmd:    ipr command struct
5731  *
5732  * This function sends a request sense to a device as a result
5733  * of a check condition.
5734  *
5735  * Return value:
5736  *      nothing
5737  **/
5738 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5739 {
5740         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5741         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5742
5743         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5744                 ipr_erp_done(ipr_cmd);
5745                 return;
5746         }
5747
5748         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5749
5750         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5751         cmd_pkt->cdb[0] = REQUEST_SENSE;
5752         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5753         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5754         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5755         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5756
5757         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5758                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5759
5760         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5761                    IPR_REQUEST_SENSE_TIMEOUT * 2);
5762 }
5763
5764 /**
5765  * ipr_erp_cancel_all - Send cancel all to a device
5766  * @ipr_cmd:    ipr command struct
5767  *
5768  * This function sends a cancel all to a device to clear the
5769  * queue. If we are running TCQ on the device, QERR is set to 1,
5770  * which means all outstanding ops have been dropped on the floor.
5771  * Cancel all will return them to us.
5772  *
5773  * Return value:
5774  *      nothing
5775  **/
5776 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5777 {
5778         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5779         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5780         struct ipr_cmd_pkt *cmd_pkt;
5781
5782         res->in_erp = 1;
5783
5784         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5785
5786         if (!scsi_get_tag_type(scsi_cmd->device)) {
5787                 ipr_erp_request_sense(ipr_cmd);
5788                 return;
5789         }
5790
5791         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5792         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5793         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5794
5795         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5796                    IPR_CANCEL_ALL_TIMEOUT);
5797 }
5798
5799 /**
5800  * ipr_dump_ioasa - Dump contents of IOASA
5801  * @ioa_cfg:    ioa config struct
5802  * @ipr_cmd:    ipr command struct
5803  * @res:                resource entry struct
5804  *
5805  * This function is invoked by the interrupt handler when ops
5806  * fail. It will log the IOASA if appropriate. Only called
5807  * for GPDD ops.
5808  *
5809  * Return value:
5810  *      none
5811  **/
5812 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5813                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5814 {
5815         int i;
5816         u16 data_len;
5817         u32 ioasc, fd_ioasc;
5818         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5819         __be32 *ioasa_data = (__be32 *)ioasa;
5820         int error_index;
5821
5822         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5823         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5824
        if (ioasc == 0)
5826                 return;
5827
5828         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5829                 return;
5830
5831         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5832                 error_index = ipr_get_error(fd_ioasc);
5833         else
5834                 error_index = ipr_get_error(ioasc);
5835
5836         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5837                 /* Don't log an error if the IOA already logged one */
5838                 if (ioasa->hdr.ilid != 0)
5839                         return;
5840
5841                 if (!ipr_is_gscsi(res))
5842                         return;
5843
5844                 if (ipr_error_table[error_index].log_ioasa == 0)
5845                         return;
5846         }
5847
5848         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5849
5850         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5851         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5852                 data_len = sizeof(struct ipr_ioasa64);
5853         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5854                 data_len = sizeof(struct ipr_ioasa);
5855
5856         ipr_err("IOASA Dump:\n");
5857
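        /* Dump the raw IOASA, four big endian words per line */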
5858         for (i = 0; i < data_len / 4; i += 4) {
5859                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5860                         be32_to_cpu(ioasa_data[i]),
5861                         be32_to_cpu(ioasa_data[i+1]),
5862                         be32_to_cpu(ioasa_data[i+2]),
5863                         be32_to_cpu(ioasa_data[i+3]));
5864         }
5865 }
5866
5867 /**
5868  * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:    ipr command struct
5871  *
5872  * Return value:
5873  *      none
5874  **/
5875 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5876 {
5877         u32 failing_lba;
5878         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5879         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5880         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5881         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5882
5883         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5884
5885         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5886                 return;
5887
5888         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5889
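        /*
         * A volume set LBA above 32 bits cannot be represented in
         * fixed format sense data, so build descriptor format (0x72)
         * sense data carrying the full 64-bit failing LBA.
         */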
5890         if (ipr_is_vset_device(res) &&
5891             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5892             ioasa->u.vset.failing_lba_hi != 0) {
5893                 sense_buf[0] = 0x72;
5894                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5895                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5896                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5897
5898                 sense_buf[7] = 12;
5899                 sense_buf[8] = 0;
5900                 sense_buf[9] = 0x0A;
5901                 sense_buf[10] = 0x80;
5902
5903                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5904
5905                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5906                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5907                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5908                 sense_buf[15] = failing_lba & 0x000000ff;
5909
5910                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5911
5912                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5913                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5914                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5915                 sense_buf[19] = failing_lba & 0x000000ff;
5916         } else {
5917                 sense_buf[0] = 0x70;
5918                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5919                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5920                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5921
5922                 /* Illegal request */
5923                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5924                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5925                         sense_buf[7] = 10;      /* additional length */
5926
5927                         /* IOARCB was in error */
5928                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5929                                 sense_buf[15] = 0xC0;
5930                         else    /* Parameter data was invalid */
5931                                 sense_buf[15] = 0x80;
5932
5933                         sense_buf[16] =
5934                             ((IPR_FIELD_POINTER_MASK &
5935                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5936                         sense_buf[17] =
5937                             (IPR_FIELD_POINTER_MASK &
5938                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5939                 } else {
5940                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5941                                 if (ipr_is_vset_device(res))
5942                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5943                                 else
5944                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5945
5946                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
5947                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5948                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5949                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5950                                 sense_buf[6] = failing_lba & 0x000000ff;
5951                         }
5952
5953                         sense_buf[7] = 6;       /* additional length */
5954                 }
5955         }
5956 }
5957
5958 /**
5959  * ipr_get_autosense - Copy autosense data to sense buffer
5960  * @ipr_cmd:    ipr command struct
5961  *
5962  * This function copies the autosense buffer to the buffer
5963  * in the scsi_cmd, if there is autosense available.
5964  *
5965  * Return value:
5966  *      1 if autosense was available / 0 if not
5967  **/
5968 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5969 {
5970         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5971         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5972
5973         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5974                 return 0;
5975
5976         if (ipr_cmd->ioa_cfg->sis64)
5977                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5978                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5979                            SCSI_SENSE_BUFFERSIZE));
5980         else
5981                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5982                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5983                            SCSI_SENSE_BUFFERSIZE));
5984         return 1;
5985 }
5986
5987 /**
5988  * ipr_erp_start - Process an error response for a SCSI op
5989  * @ioa_cfg:    ioa config struct
5990  * @ipr_cmd:    ipr command struct
5991  *
5992  * This function determines whether or not to initiate ERP
5993  * on the affected device.
5994  *
5995  * Return value:
5996  *      nothing
5997  **/
5998 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5999                               struct ipr_cmnd *ipr_cmd)
6000 {
6001         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6002         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6003         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6004         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6005
6006         if (!res) {
6007                 ipr_scsi_eh_done(ipr_cmd);
6008                 return;
6009         }
6010
6011         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6012                 ipr_gen_sense(ipr_cmd);
6013
6014         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6015
6016         switch (masked_ioasc) {
6017         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6018                 if (ipr_is_naca_model(res))
6019                         scsi_cmd->result |= (DID_ABORT << 16);
6020                 else
6021                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6022                 break;
6023         case IPR_IOASC_IR_RESOURCE_HANDLE:
6024         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6025                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6026                 break;
6027         case IPR_IOASC_HW_SEL_TIMEOUT:
6028                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6029                 if (!ipr_is_naca_model(res))
6030                         res->needs_sync_complete = 1;
6031                 break;
6032         case IPR_IOASC_SYNC_REQUIRED:
6033                 if (!res->in_erp)
6034                         res->needs_sync_complete = 1;
6035                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6036                 break;
6037         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6038         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6039                 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6040                 break;
6041         case IPR_IOASC_BUS_WAS_RESET:
6042         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6043                 /*
6044                  * Report the bus reset and ask for a retry. The device
                 * will give a CC/UA on the next command.
6046                  */
6047                 if (!res->resetting_device)
6048                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6049                 scsi_cmd->result |= (DID_ERROR << 16);
6050                 if (!ipr_is_naca_model(res))
6051                         res->needs_sync_complete = 1;
6052                 break;
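        /*
         * Pass the device status up to the midlayer. For a check
         * condition with no autosense data, start ERP to retrieve
         * the sense data explicitly.
         */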
6053         case IPR_IOASC_HW_DEV_BUS_STATUS:
6054                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6055                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6056                         if (!ipr_get_autosense(ipr_cmd)) {
6057                                 if (!ipr_is_naca_model(res)) {
6058                                         ipr_erp_cancel_all(ipr_cmd);
6059                                         return;
6060                                 }
6061                         }
6062                 }
6063                 if (!ipr_is_naca_model(res))
6064                         res->needs_sync_complete = 1;
6065                 break;
6066         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6067                 break;
6068         default:
6069                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6070                         scsi_cmd->result |= (DID_ERROR << 16);
6071                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6072                         res->needs_sync_complete = 1;
6073                 break;
6074         }
6075
6076         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6077         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6078         scsi_cmd->scsi_done(scsi_cmd);
6079 }
6080
6081 /**
6082  * ipr_scsi_done - mid-layer done function
6083  * @ipr_cmd:    ipr command struct
6084  *
6085  * This function is invoked by the interrupt handler for
6086  * ops generated by the SCSI mid-layer
6087  *
6088  * Return value:
6089  *      none
6090  **/
6091 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6092 {
6093         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6094         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6095         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6096         unsigned long hrrq_flags;
6097
6098         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6099
6100         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6101                 scsi_dma_unmap(scsi_cmd);
6102
6103                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6104                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6105                 scsi_cmd->scsi_done(scsi_cmd);
6106                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6107         } else {
6108                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6109                 ipr_erp_start(ioa_cfg, ipr_cmd);
6110                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6111         }
6112 }
6113
6114 /**
6115  * ipr_queuecommand - Queue a mid-layer request
6116  * @shost:              scsi host struct
6117  * @scsi_cmd:   scsi command struct
6118  *
6119  * This function queues a request generated by the mid-layer.
6120  *
6121  * Return value:
6122  *      0 on success
6123  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6124  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6125  **/
6126 static int ipr_queuecommand(struct Scsi_Host *shost,
6127                             struct scsi_cmnd *scsi_cmd)
6128 {
6129         struct ipr_ioa_cfg *ioa_cfg;
6130         struct ipr_resource_entry *res;
6131         struct ipr_ioarcb *ioarcb;
6132         struct ipr_cmnd *ipr_cmd;
6133         unsigned long hrrq_flags, lock_flags;
6134         int rc;
6135         struct ipr_hrr_queue *hrrq;
6136         int hrrq_id;
6137
6138         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6139
6140         scsi_cmd->result = (DID_OK << 16);
6141         res = scsi_cmd->device->hostdata;
6142
6143         if (ipr_is_gata(res) && res->sata_port) {
6144                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6145                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6146                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6147                 return rc;
6148         }
6149
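        /* Spread commands across the host RRQs when more than one is configured */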
6150         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6151         hrrq = &ioa_cfg->hrrq[hrrq_id];
6152
6153         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6154         /*
         * We are currently blocking all devices due to a host reset.
         * We have told the midlayer to stop giving us new requests,
         * but ERP ops don't count. FIXME
6158          */
6159         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6160                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6161                 return SCSI_MLQUEUE_HOST_BUSY;
6162         }
6163
6164         /*
6165          * FIXME - Create scsi_set_host_offline interface
6166          *  and the ioa_is_dead check can be removed
6167          */
6168         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6169                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6170                 goto err_nodev;
6171         }
6172
6173         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6174         if (ipr_cmd == NULL) {
6175                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6176                 return SCSI_MLQUEUE_HOST_BUSY;
6177         }
6178         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6179
6180         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6181         ioarcb = &ipr_cmd->ioarcb;
6182
6183         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6184         ipr_cmd->scsi_cmd = scsi_cmd;
6185         ipr_cmd->done = ipr_scsi_eh_done;
6186
6187         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6188                 if (scsi_cmd->underflow == 0)
6189                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6190
6191                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6192                 if (ipr_is_gscsi(res) && res->reset_occurred) {
6193                         res->reset_occurred = 0;
6194                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6195                 }
6196                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6197                 if (scsi_cmd->flags & SCMD_TAGGED)
6198                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6199                 else
6200                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6201         }
6202
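        /*
         * Vendor specific CDBs (opcode 0xC0 and above) that are not
         * for a generic SCSI device are commands to the IOA itself.
         */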
6203         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6204             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6205                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6206         }
6207
6208         if (ioa_cfg->sis64)
6209                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6210         else
6211                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6212
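        /*
         * Re-check adapter state under the lock; a reset may have
         * been triggered while the command was being built.
         */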
6213         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6214         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6215                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6216                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6217                 if (!rc)
6218                         scsi_dma_unmap(scsi_cmd);
6219                 return SCSI_MLQUEUE_HOST_BUSY;
6220         }
6221
6222         if (unlikely(hrrq->ioa_is_dead)) {
6223                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6224                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6225                 scsi_dma_unmap(scsi_cmd);
6226                 goto err_nodev;
6227         }
6228
6229         ioarcb->res_handle = res->res_handle;
6230         if (res->needs_sync_complete) {
6231                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6232                 res->needs_sync_complete = 0;
6233         }
6234         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6235         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6236         ipr_send_command(ipr_cmd);
6237         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6238         return 0;
6239
6240 err_nodev:
6241         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6242         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6243         scsi_cmd->result = (DID_NO_CONNECT << 16);
6244         scsi_cmd->scsi_done(scsi_cmd);
6245         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6246         return 0;
6247 }
6248
6249 /**
6250  * ipr_ioctl - IOCTL handler
6251  * @sdev:       scsi device struct
6252  * @cmd:        IOCTL cmd
6253  * @arg:        IOCTL arg
6254  *
6255  * Return value:
6256  *      0 on success / other on failure
6257  **/
6258 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6259 {
6260         struct ipr_resource_entry *res;
6261
6262         res = (struct ipr_resource_entry *)sdev->hostdata;
6263         if (res && ipr_is_gata(res)) {
6264                 if (cmd == HDIO_GET_IDENTITY)
6265                         return -ENOTTY;
6266                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6267         }
6268
6269         return -EINVAL;
6270 }
6271
6272 /**
 * ipr_ioa_info - Get information about the card/driver
 * @host:       scsi host struct
6275  *
6276  * Return value:
6277  *      pointer to buffer with description string
6278  **/
6279 static const char *ipr_ioa_info(struct Scsi_Host *host)
6280 {
6281         static char buffer[512];
6282         struct ipr_ioa_cfg *ioa_cfg;
6283         unsigned long lock_flags = 0;
6284
6285         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6286
6287         spin_lock_irqsave(host->host_lock, lock_flags);
6288         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6289         spin_unlock_irqrestore(host->host_lock, lock_flags);
6290
6291         return buffer;
6292 }
6293
6294 static struct scsi_host_template driver_template = {
6295         .module = THIS_MODULE,
6296         .name = "IPR",
6297         .info = ipr_ioa_info,
6298         .ioctl = ipr_ioctl,
6299         .queuecommand = ipr_queuecommand,
6300         .eh_abort_handler = ipr_eh_abort,
6301         .eh_device_reset_handler = ipr_eh_dev_reset,
6302         .eh_host_reset_handler = ipr_eh_host_reset,
6303         .slave_alloc = ipr_slave_alloc,
6304         .slave_configure = ipr_slave_configure,
6305         .slave_destroy = ipr_slave_destroy,
6306         .target_alloc = ipr_target_alloc,
6307         .target_destroy = ipr_target_destroy,
6308         .change_queue_depth = ipr_change_queue_depth,
6309         .change_queue_type = ipr_change_queue_type,
6310         .bios_param = ipr_biosparam,
6311         .can_queue = IPR_MAX_COMMANDS,
6312         .this_id = -1,
6313         .sg_tablesize = IPR_MAX_SGLIST,
6314         .max_sectors = IPR_IOA_MAX_SECTORS,
6315         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6316         .use_clustering = ENABLE_CLUSTERING,
6317         .shost_attrs = ipr_ioa_attrs,
6318         .sdev_attrs = ipr_dev_attrs,
6319         .proc_name = IPR_NAME,
6320         .no_write_same = 1,
6321         .use_blk_tags = 1,
6322 };
6323
6324 /**
6325  * ipr_ata_phy_reset - libata phy_reset handler
6326  * @ap:         ata port to reset
6327  *
6328  **/
6329 static void ipr_ata_phy_reset(struct ata_port *ap)
6330 {
6331         unsigned long flags;
6332         struct ipr_sata_port *sata_port = ap->private_data;
6333         struct ipr_resource_entry *res = sata_port->res;
6334         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6335         int rc;
6336
6337         ENTER;
6338         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6339         while (ioa_cfg->in_reset_reload) {
6340                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6341                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6342                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6343         }
6344
6345         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6346                 goto out_unlock;
6347
6348         rc = ipr_device_reset(ioa_cfg, res);
6349
6350         if (rc) {
6351                 ap->link.device[0].class = ATA_DEV_NONE;
6352                 goto out_unlock;
6353         }
6354
6355         ap->link.device[0].class = res->ata_class;
6356         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6357                 ap->link.device[0].class = ATA_DEV_NONE;
6358
6359 out_unlock:
6360         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6361         LEAVE;
6362 }
6363
6364 /**
6365  * ipr_ata_post_internal - Cleanup after an internal command
6366  * @qc: ATA queued command
6367  *
6368  * Return value:
6369  *      none
6370  **/
6371 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6372 {
6373         struct ipr_sata_port *sata_port = qc->ap->private_data;
6374         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6375         struct ipr_cmnd *ipr_cmd;
6376         struct ipr_hrr_queue *hrrq;
6377         unsigned long flags;
6378
6379         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6380         while (ioa_cfg->in_reset_reload) {
6381                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6382                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6383                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6384         }
6385
6386         for_each_hrrq(hrrq, ioa_cfg) {
6387                 spin_lock(&hrrq->_lock);
6388                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6389                         if (ipr_cmd->qc == qc) {
6390                                 ipr_device_reset(ioa_cfg, sata_port->res);
6391                                 break;
6392                         }
6393                 }
6394                 spin_unlock(&hrrq->_lock);
6395         }
6396         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6397 }
6398
6399 /**
6400  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6401  * @regs:       destination
6402  * @tf: source ATA taskfile
6403  *
6404  * Return value:
6405  *      none
6406  **/
6407 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6408                              struct ata_taskfile *tf)
6409 {
6410         regs->feature = tf->feature;
6411         regs->nsect = tf->nsect;
6412         regs->lbal = tf->lbal;
6413         regs->lbam = tf->lbam;
6414         regs->lbah = tf->lbah;
6415         regs->device = tf->device;
6416         regs->command = tf->command;
6417         regs->hob_feature = tf->hob_feature;
6418         regs->hob_nsect = tf->hob_nsect;
6419         regs->hob_lbal = tf->hob_lbal;
6420         regs->hob_lbam = tf->hob_lbam;
6421         regs->hob_lbah = tf->hob_lbah;
6422         regs->ctl = tf->ctl;
6423 }
6424
6425 /**
6426  * ipr_sata_done - done function for SATA commands
6427  * @ipr_cmd:    ipr command struct
6428  *
6429  * This function is invoked by the interrupt handler for
6430  * ops generated by the SCSI mid-layer to SATA devices
6431  *
6432  * Return value:
6433  *      none
6434  **/
6435 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6436 {
6437         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6438         struct ata_queued_cmd *qc = ipr_cmd->qc;
6439         struct ipr_sata_port *sata_port = qc->ap->private_data;
6440         struct ipr_resource_entry *res = sata_port->res;
6441         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6442
6443         spin_lock(&ipr_cmd->hrrq->_lock);
6444         if (ipr_cmd->ioa_cfg->sis64)
6445                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6446                        sizeof(struct ipr_ioasa_gata));
6447         else
6448                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6449                        sizeof(struct ipr_ioasa_gata));
6450         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6451
6452         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6453                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6454
6455         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6456                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6457         else
6458                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6459         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6460         spin_unlock(&ipr_cmd->hrrq->_lock);
6461         ata_qc_complete(qc);
6462 }
6463
6464 /**
6465  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6466  * @ipr_cmd:    ipr command struct
6467  * @qc:         ATA queued command
6468  *
6469  **/
6470 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6471                                   struct ata_queued_cmd *qc)
6472 {
6473         u32 ioadl_flags = 0;
6474         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6475         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6476         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6477         int len = qc->nbytes;
6478         struct scatterlist *sg;
6479         unsigned int si;
6480         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6481
6482         if (len == 0)
6483                 return;
6484
6485         if (qc->dma_dir == DMA_TO_DEVICE) {
6486                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6487                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6488         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6489                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6490
6491         ioarcb->data_transfer_length = cpu_to_be32(len);
6492         ioarcb->ioadl_len =
6493                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6494         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6495                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6496
6497         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6498                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6499                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6500                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6501
6502                 last_ioadl64 = ioadl64;
6503                 ioadl64++;
6504         }
6505
6506         if (likely(last_ioadl64))
6507                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6508 }
6509
6510 /**
6511  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6512  * @ipr_cmd:    ipr command struct
6513  * @qc:         ATA queued command
6514  *
6515  **/
6516 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6517                                 struct ata_queued_cmd *qc)
6518 {
6519         u32 ioadl_flags = 0;
6520         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6521         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6522         struct ipr_ioadl_desc *last_ioadl = NULL;
6523         int len = qc->nbytes;
6524         struct scatterlist *sg;
6525         unsigned int si;
6526
6527         if (len == 0)
6528                 return;
6529
6530         if (qc->dma_dir == DMA_TO_DEVICE) {
6531                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6532                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6533                 ioarcb->data_transfer_length = cpu_to_be32(len);
6534                 ioarcb->ioadl_len =
6535                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6536         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6537                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6538                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6539                 ioarcb->read_ioadl_len =
6540                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6541         }
6542
6543         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6544                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6545                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6546
6547                 last_ioadl = ioadl;
6548                 ioadl++;
6549         }
6550
6551         if (likely(last_ioadl))
6552                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6553 }
6554
6555 /**
6556  * ipr_qc_defer - Get a free ipr_cmd
6557  * @qc: queued command
6558  *
6559  * Return value:
 *      0 on success
6561  **/
6562 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6563 {
6564         struct ata_port *ap = qc->ap;
6565         struct ipr_sata_port *sata_port = ap->private_data;
6566         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6567         struct ipr_cmnd *ipr_cmd;
6568         struct ipr_hrr_queue *hrrq;
6569         int hrrq_id;
6570
6571         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6572         hrrq = &ioa_cfg->hrrq[hrrq_id];
6573
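        /*
         * Reserve a command block now and stash it in lldd_task;
         * ipr_qc_issue() will claim it when the qc is issued.
         */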
6574         qc->lldd_task = NULL;
6575         spin_lock(&hrrq->_lock);
6576         if (unlikely(hrrq->ioa_is_dead)) {
6577                 spin_unlock(&hrrq->_lock);
6578                 return 0;
6579         }
6580
6581         if (unlikely(!hrrq->allow_cmds)) {
6582                 spin_unlock(&hrrq->_lock);
6583                 return ATA_DEFER_LINK;
6584         }
6585
6586         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6587         if (ipr_cmd == NULL) {
6588                 spin_unlock(&hrrq->_lock);
6589                 return ATA_DEFER_LINK;
6590         }
6591
6592         qc->lldd_task = ipr_cmd;
6593         spin_unlock(&hrrq->_lock);
6594         return 0;
6595 }
6596
6597 /**
6598  * ipr_qc_issue - Issue a SATA qc to a device
6599  * @qc: queued command
6600  *
6601  * Return value:
 *      0 on success
6603  **/
6604 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6605 {
6606         struct ata_port *ap = qc->ap;
6607         struct ipr_sata_port *sata_port = ap->private_data;
6608         struct ipr_resource_entry *res = sata_port->res;
6609         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6610         struct ipr_cmnd *ipr_cmd;
6611         struct ipr_ioarcb *ioarcb;
6612         struct ipr_ioarcb_ata_regs *regs;
6613
6614         if (qc->lldd_task == NULL)
6615                 ipr_qc_defer(qc);
6616
6617         ipr_cmd = qc->lldd_task;
6618         if (ipr_cmd == NULL)
6619                 return AC_ERR_SYSTEM;
6620
6621         qc->lldd_task = NULL;
6622         spin_lock(&ipr_cmd->hrrq->_lock);
6623         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6624                         ipr_cmd->hrrq->ioa_is_dead)) {
6625                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6626                 spin_unlock(&ipr_cmd->hrrq->_lock);
6627                 return AC_ERR_SYSTEM;
6628         }
6629
6630         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6631         ioarcb = &ipr_cmd->ioarcb;
6632
6633         if (ioa_cfg->sis64) {
6634                 regs = &ipr_cmd->i.ata_ioadl.regs;
6635                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6636         } else
6637                 regs = &ioarcb->u.add_data.u.regs;
6638
6639         memset(regs, 0, sizeof(*regs));
6640         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6641
6642         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6643         ipr_cmd->qc = qc;
6644         ipr_cmd->done = ipr_sata_done;
6645         ipr_cmd->ioarcb.res_handle = res->res_handle;
6646         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6647         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6648         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6649         ipr_cmd->dma_use_sg = qc->n_elem;
6650
6651         if (ioa_cfg->sis64)
6652                 ipr_build_ata_ioadl64(ipr_cmd, qc);
6653         else
6654                 ipr_build_ata_ioadl(ipr_cmd, qc);
6655
6656         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6657         ipr_copy_sata_tf(regs, &qc->tf);
6658         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6659         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6660
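        /* Translate the ATA/ATAPI protocol into IOA transfer flags */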
6661         switch (qc->tf.protocol) {
6662         case ATA_PROT_NODATA:
6663         case ATA_PROT_PIO:
6664                 break;
6665
6666         case ATA_PROT_DMA:
6667                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6668                 break;
6669
6670         case ATAPI_PROT_PIO:
6671         case ATAPI_PROT_NODATA:
6672                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6673                 break;
6674
6675         case ATAPI_PROT_DMA:
6676                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6677                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6678                 break;
6679
6680         default:
6681                 WARN_ON(1);
6682                 spin_unlock(&ipr_cmd->hrrq->_lock);
6683                 return AC_ERR_INVALID;
6684         }
6685
6686         ipr_send_command(ipr_cmd);
6687         spin_unlock(&ipr_cmd->hrrq->_lock);
6688
6689         return 0;
6690 }
6691
6692 /**
6693  * ipr_qc_fill_rtf - Read result TF
6694  * @qc: ATA queued command
6695  *
6696  * Return value:
6697  *      true
6698  **/
6699 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6700 {
6701         struct ipr_sata_port *sata_port = qc->ap->private_data;
6702         struct ipr_ioasa_gata *g = &sata_port->ioasa;
6703         struct ata_taskfile *tf = &qc->result_tf;
6704
6705         tf->feature = g->error;
6706         tf->nsect = g->nsect;
6707         tf->lbal = g->lbal;
6708         tf->lbam = g->lbam;
6709         tf->lbah = g->lbah;
6710         tf->device = g->device;
6711         tf->command = g->status;
6712         tf->hob_nsect = g->hob_nsect;
6713         tf->hob_lbal = g->hob_lbal;
6714         tf->hob_lbam = g->hob_lbam;
6715         tf->hob_lbah = g->hob_lbah;
6716
6717         return true;
6718 }
6719
6720 static struct ata_port_operations ipr_sata_ops = {
6721         .phy_reset = ipr_ata_phy_reset,
6722         .hardreset = ipr_sata_reset,
6723         .post_internal_cmd = ipr_ata_post_internal,
6724         .qc_prep = ata_noop_qc_prep,
6725         .qc_defer = ipr_qc_defer,
6726         .qc_issue = ipr_qc_issue,
6727         .qc_fill_rtf = ipr_qc_fill_rtf,
6728         .port_start = ata_sas_port_start,
6729         .port_stop = ata_sas_port_stop
6730 };
6731
6732 static struct ata_port_info sata_port_info = {
6733         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6734         .pio_mask       = ATA_PIO4_ONLY,
6735         .mwdma_mask     = ATA_MWDMA2,
6736         .udma_mask      = ATA_UDMA6,
6737         .port_ops       = &ipr_sata_ops
6738 };
6739
6740 #ifdef CONFIG_PPC_PSERIES
6741 static const u16 ipr_blocked_processors[] = {
6742         PVR_NORTHSTAR,
6743         PVR_PULSAR,
6744         PVR_POWER4,
6745         PVR_ICESTAR,
6746         PVR_SSTAR,
6747         PVR_POWER4p,
6748         PVR_630,
6749         PVR_630p
6750 };
6751
6752 /**
6753  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6754  * @ioa_cfg:    ioa cfg struct
6755  *
6756  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6757  * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
6759  *
6760  * Return value:
6761  *      1 if adapter is not supported / 0 if adapter is supported
6762  **/
6763 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6764 {
6765         int i;
6766
6767         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6768                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6769                         if (pvr_version_is(ipr_blocked_processors[i]))
6770                                 return 1;
6771                 }
6772         }
6773         return 0;
6774 }
6775 #else
6776 #define ipr_invalid_adapter(ioa_cfg) 0
6777 #endif
6778
6779 /**
6780  * ipr_ioa_bringdown_done - IOA bring down completion.
6781  * @ipr_cmd:    ipr command struct
6782  *
6783  * This function processes the completion of an adapter bring down.
6784  * It wakes any reset sleepers.
6785  *
6786  * Return value:
6787  *      IPR_RC_JOB_RETURN
6788  **/
6789 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6790 {
6791         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6792         int i;
6793
6794         ENTER;
6795         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6796                 ipr_trace;
6797                 spin_unlock_irq(ioa_cfg->host->host_lock);
6798                 scsi_unblock_requests(ioa_cfg->host);
6799                 spin_lock_irq(ioa_cfg->host->host_lock);
6800         }
6801
6802         ioa_cfg->in_reset_reload = 0;
6803         ioa_cfg->reset_retries = 0;
6804         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6805                 spin_lock(&ioa_cfg->hrrq[i]._lock);
6806                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6807                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6808         }
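        /* Make the ioa_is_dead stores visible before waking reset waiters */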
6809         wmb();
6810
6811         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6812         wake_up_all(&ioa_cfg->reset_wait_q);
6813         LEAVE;
6814
6815         return IPR_RC_JOB_RETURN;
6816 }
6817
6818 /**
6819  * ipr_ioa_reset_done - IOA reset completion.
6820  * @ipr_cmd:    ipr command struct
6821  *
6822  * This function processes the completion of an adapter reset.
6823  * It schedules any necessary mid-layer add/removes and
6824  * wakes any reset sleepers.
6825  *
6826  * Return value:
6827  *      IPR_RC_JOB_RETURN
6828  **/
6829 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6830 {
6831         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6832         struct ipr_resource_entry *res;
6833         struct ipr_hostrcb *hostrcb, *temp;
6834         int i = 0, j;
6835
6836         ENTER;
6837         ioa_cfg->in_reset_reload = 0;
6838         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6839                 spin_lock(&ioa_cfg->hrrq[j]._lock);
6840                 ioa_cfg->hrrq[j].allow_cmds = 1;
6841                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6842         }
6843         wmb();
6844         ioa_cfg->reset_cmd = NULL;
6845         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6846
6847         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6848                 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6849                         ipr_trace;
6850                         break;
6851                 }
6852         }
6853         schedule_work(&ioa_cfg->work_q);
6854
6855         list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6856                 list_del(&hostrcb->queue);
6857                 if (i++ < IPR_NUM_LOG_HCAMS)
6858                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6859                 else
6860                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6861         }
6862
6863         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6864         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6865
6866         ioa_cfg->reset_retries = 0;
6867         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6868         wake_up_all(&ioa_cfg->reset_wait_q);
6869
6870         spin_unlock(ioa_cfg->host->host_lock);
6871         scsi_unblock_requests(ioa_cfg->host);
6872         spin_lock(ioa_cfg->host->host_lock);
6873
6874         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6875                 scsi_block_requests(ioa_cfg->host);
6876
6877         LEAVE;
6878         return IPR_RC_JOB_RETURN;
6879 }
6880
6881 /**
6882  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6883  * @supported_dev:      supported device struct
6884  * @vpids:                      vendor product id struct
6885  *
6886  * Return value:
6887  *      none
6888  **/
6889 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6890                                  struct ipr_std_inq_vpids *vpids)
6891 {
6892         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6893         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6894         supported_dev->num_records = 1;
6895         supported_dev->data_length =
6896                 cpu_to_be16(sizeof(struct ipr_supported_device));
6897         supported_dev->reserved = 0;
6898 }
6899
6900 /**
6901  * ipr_set_supported_devs - Send Set Supported Devices for a device
6902  * @ipr_cmd:    ipr command struct
6903  *
6904  * This function sends a Set Supported Devices to the adapter
6905  *
6906  * Return value:
6907  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6908  **/
6909 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6910 {
6911         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6912         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6913         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6914         struct ipr_resource_entry *res = ipr_cmd->u.res;
6915
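        /*
         * Report one disk per pass. On non-SIS64 adapters, job_step
         * is re-armed below so the walk resumes here after each
         * command completes.
         */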
6916         ipr_cmd->job_step = ipr_ioa_reset_done;
6917
6918         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6919                 if (!ipr_is_scsi_disk(res))
6920                         continue;
6921
6922                 ipr_cmd->u.res = res;
6923                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6924
6925                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6926                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6927                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6928
6929                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6930                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6931                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6932                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6933
6934                 ipr_init_ioadl(ipr_cmd,
6935                                ioa_cfg->vpd_cbs_dma +
6936                                  offsetof(struct ipr_misc_cbs, supp_dev),
6937                                sizeof(struct ipr_supported_device),
6938                                IPR_IOADL_FLAGS_WRITE_LAST);
6939
6940                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6941                            IPR_SET_SUP_DEVICE_TIMEOUT);
6942
6943                 if (!ioa_cfg->sis64)
6944                         ipr_cmd->job_step = ipr_set_supported_devs;
6945                 LEAVE;
6946                 return IPR_RC_JOB_RETURN;
6947         }
6948
6949         LEAVE;
6950         return IPR_RC_JOB_CONTINUE;
6951 }
6952
6953 /**
6954  * ipr_get_mode_page - Locate specified mode page
6955  * @mode_pages: mode page buffer
6956  * @page_code:  page code to find
6957  * @len:                minimum required length for mode page
6958  *
6959  * Return value:
6960  *      pointer to mode page / NULL on failure
6961  **/
6962 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6963                                u32 page_code, u32 len)
6964 {
6965         struct ipr_mode_page_hdr *mode_hdr;
6966         u32 page_length;
6967         u32 length;
6968
6969         if (!mode_pages || (mode_pages->hdr.length == 0))
6970                 return NULL;
6971
6972         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6973         mode_hdr = (struct ipr_mode_page_hdr *)
6974                 (mode_pages->data + mode_pages->hdr.block_desc_len);
6975
6976         while (length) {
6977                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6978                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6979                                 return mode_hdr;
6980                         break;
6981                 } else {
6982                         page_length = (sizeof(struct ipr_mode_page_hdr) +
6983                                        mode_hdr->page_length);
6984                         length -= page_length;
6985                         mode_hdr = (struct ipr_mode_page_hdr *)
6986                                 ((unsigned long)mode_hdr + page_length);
6987                 }
6988         }
6989         return NULL;
6990 }
6991
6992 /**
6993  * ipr_check_term_power - Check for term power errors
6994  * @ioa_cfg:    ioa config struct
6995  * @mode_pages: IOAFP mode pages buffer
6996  *
6997  * Check the IOAFP's mode page 28 for term power errors
6998  *
6999  * Return value:
7000  *      nothing
7001  **/
7002 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7003                                  struct ipr_mode_pages *mode_pages)
7004 {
7005         int i;
7006         int entry_length;
7007         struct ipr_dev_bus_entry *bus;
7008         struct ipr_mode_page28 *mode_page;
7009
7010         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7011                                       sizeof(struct ipr_mode_page28));
7012
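        /*
         * Note: ipr_get_mode_page() returns NULL when the page is not
         * found. This path assumes the IOAFP always reports page 28;
         * the dereferences below are unconditional, so a missing page
         * would oops here.
         */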
7013         entry_length = mode_page->entry_length;
7014
7015         bus = mode_page->bus;
7016
7017         for (i = 0; i < mode_page->num_entries; i++) {
7018                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7019                         dev_err(&ioa_cfg->pdev->dev,
7020                                 "Term power is absent on scsi bus %d\n",
7021                                 bus->res_addr.bus);
7022                 }
7023
7024                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7025         }
7026 }
7027
7028 /**
7029  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7030  * @ioa_cfg:    ioa config struct
7031  *
7032  * Looks through the config table checking for SES devices. If
7033  * an SES device appears in the SES table with a maximum SCSI
7034  * bus speed, that bus's speed is limited accordingly.
7035  *
7036  * Return value:
7037  *      none
7038  **/
7039 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7040 {
7041         u32 max_xfer_rate;
7042         int i;
7043
7044         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7045                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7046                                                        ioa_cfg->bus_attr[i].bus_width);
7047
7048                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7049                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7050         }
7051 }
7052
7053 /**
7054  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7055  * @ioa_cfg:    ioa config struct
7056  * @mode_pages: mode page 28 buffer
7057  *
7058  * Updates mode page 28 based on driver configuration
7059  *
7060  * Return value:
7061  *      none
7062  **/
7063 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7064                                           struct ipr_mode_pages *mode_pages)
7065 {
7066         int i, entry_length;
7067         struct ipr_dev_bus_entry *bus;
7068         struct ipr_bus_attributes *bus_attr;
7069         struct ipr_mode_page28 *mode_page;
7070
7071         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7072                                       sizeof(struct ipr_mode_page28));
7073
7074         entry_length = mode_page->entry_length;
7075
7076         /* Loop for each device bus entry */
7077         for (i = 0, bus = mode_page->bus;
7078              i < mode_page->num_entries;
7079              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7080                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7081                         dev_err(&ioa_cfg->pdev->dev,
7082                                 "Invalid resource address reported: 0x%08X\n",
7083                                 IPR_GET_PHYS_LOC(bus->res_addr));
7084                         continue;
7085                 }
7086
7087                 bus_attr = &ioa_cfg->bus_attr[i];
7088                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7089                 bus->bus_width = bus_attr->bus_width;
7090                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7091                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7092                 if (bus_attr->qas_enabled)
7093                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7094                 else
7095                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7096         }
7097 }
7098
7099 /**
7100  * ipr_build_mode_select - Build a mode select command
7101  * @ipr_cmd:    ipr command struct
7102  * @res_handle: resource handle to send command to
7103  * @parm:               Byte 1 of the Mode Select CDB (PF/SP flags)
7104  * @dma_addr:   DMA buffer address
7105  * @xfer_len:   data transfer length
7106  *
7107  * Return value:
7108  *      none
7109  **/
7110 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7111                                   __be32 res_handle, u8 parm,
7112                                   dma_addr_t dma_addr, u8 xfer_len)
7113 {
7114         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7115
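        /*
         * MODE SELECT(6) CDB: cdb[1] carries the PF/SP flags (callers
         * pass 0x11 = PF | SP) and cdb[4] the parameter list length.
         */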
7116         ioarcb->res_handle = res_handle;
7117         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7118         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7119         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7120         ioarcb->cmd_pkt.cdb[1] = parm;
7121         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7122
7123         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7124 }
7125
7126 /**
7127  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7128  * @ipr_cmd:    ipr command struct
7129  *
7130  * This function sets up the SCSI bus attributes and sends
7131  * a Mode Select for Page 28 to activate them.
7132  *
7133  * Return value:
7134  *      IPR_RC_JOB_RETURN
7135  **/
7136 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7137 {
7138         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7139         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7140         int length;
7141
7142         ENTER;
7143         ipr_scsi_bus_speed_limit(ioa_cfg);
7144         ipr_check_term_power(ioa_cfg, mode_pages);
7145         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
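        /*
         * Send the entire mode parameter list back: the total length
         * is the header's length byte plus the byte itself. The MODE
         * DATA LENGTH field is reserved for MODE SELECT per SPC, so
         * it is zeroed before the buffer is written back.
         */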
7146         length = mode_pages->hdr.length + 1;
7147         mode_pages->hdr.length = 0;
7148
7149         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7150                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7151                               length);
7152
7153         ipr_cmd->job_step = ipr_set_supported_devs;
7154         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7155                                     struct ipr_resource_entry, queue);
7156         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7157
7158         LEAVE;
7159         return IPR_RC_JOB_RETURN;
7160 }
7161
7162 /**
7163  * ipr_build_mode_sense - Builds a mode sense command
7164  * @ipr_cmd:    ipr command struct
7165  * @res_handle: resource handle to send command to
7166  * @parm:               Byte 2 of mode sense command
7167  * @dma_addr:   DMA address of mode sense buffer
7168  * @xfer_len:   Size of DMA buffer
7169  *
7170  * Return value:
7171  *      none
7172  **/
7173 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7174                                  __be32 res_handle,
7175                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7176 {
7177         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7178
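        /*
         * MODE SENSE(6) CDB: cdb[2] is the PC/PAGE CODE byte (callers
         * pass a bare page code, so PC = 0 requests current values).
         * DBD in cdb[1] is left zero, so block descriptors are
         * returned and must be skipped via hdr.block_desc_len.
         */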
7179         ioarcb->res_handle = res_handle;
7180         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7181         ioarcb->cmd_pkt.cdb[2] = parm;
7182         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7183         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7184
7185         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7186 }
7187
7188 /**
7189  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7190  * @ipr_cmd:    ipr command struct
7191  *
7192  * This function handles the failure of an IOA bringup command.
7193  *
7194  * Return value:
7195  *      IPR_RC_JOB_RETURN
7196  **/
7197 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7198 {
7199         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7200         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7201
7202         dev_err(&ioa_cfg->pdev->dev,
7203                 "0x%02X failed with IOASC: 0x%08X\n",
7204                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7205
7206         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7207         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7208         return IPR_RC_JOB_RETURN;
7209 }
7210
7211 /**
7212  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7213  * @ipr_cmd:    ipr command struct
7214  *
7215  * This function handles the failure of a Mode Sense to the IOAFP.
7216  * Some adapters do not handle all mode pages.
7217  *
7218  * Return value:
7219  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7220  **/
7221 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7222 {
7223         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7224         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7225
7226         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7227                 ipr_cmd->job_step = ipr_set_supported_devs;
7228                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7229                                             struct ipr_resource_entry, queue);
7230                 return IPR_RC_JOB_CONTINUE;
7231         }
7232
7233         return ipr_reset_cmd_failed(ipr_cmd);
7234 }
7235
7236 /**
7237  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7238  * @ipr_cmd:    ipr command struct
7239  *
7240  * This function sends a Page 28 mode sense to the IOA to
7241  * retrieve SCSI bus attributes.
7242  *
7243  * Return value:
7244  *      IPR_RC_JOB_RETURN
7245  **/
7246 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7247 {
7248         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7249
7250         ENTER;
7251         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7252                              0x28, ioa_cfg->vpd_cbs_dma +
7253                              offsetof(struct ipr_misc_cbs, mode_pages),
7254                              sizeof(struct ipr_mode_pages));
7255
7256         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7257         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7258
7259         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7260
7261         LEAVE;
7262         return IPR_RC_JOB_RETURN;
7263 }
7264
7265 /**
7266  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7267  * @ipr_cmd:    ipr command struct
7268  *
7269  * This function enables dual IOA RAID support if possible.
7270  *
7271  * Return value:
7272  *      IPR_RC_JOB_RETURN
7273  **/
7274 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7275 {
7276         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7277         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7278         struct ipr_mode_page24 *mode_page;
7279         int length;
7280
7281         ENTER;
7282         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7283                                       sizeof(struct ipr_mode_page24));
7284
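        /*
         * Page 24 is optional; when present, the dual-IOA advanced
         * function bit is set to enable dual IOA RAID. If the lookup
         * fails, the unmodified mode pages are simply written back.
         */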
7285         if (mode_page)
7286                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7287
7288         length = mode_pages->hdr.length + 1;
7289         mode_pages->hdr.length = 0;
7290
7291         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7292                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7293                               length);
7294
7295         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7296         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7297
7298         LEAVE;
7299         return IPR_RC_JOB_RETURN;
7300 }
7301
7302 /**
7303  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7304  * @ipr_cmd:    ipr command struct
7305  *
7306  * This function handles the failure of a Mode Sense to the IOAFP.
7307  * Some adapters do not handle all mode pages.
7308  *
7309  * Return value:
7310  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7311  **/
7312 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7313 {
7314         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7315
7316         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7317                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7318                 return IPR_RC_JOB_CONTINUE;
7319         }
7320
7321         return ipr_reset_cmd_failed(ipr_cmd);
7322 }
7323
7324 /**
7325  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7326  * @ipr_cmd:    ipr command struct
7327  *
7328  * This function sends a mode sense to the IOA to retrieve
7329  * the IOA Advanced Function Control mode page.
7330  *
7331  * Return value:
7332  *      IPR_RC_JOB_RETURN
7333  **/
7334 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7335 {
7336         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7337
7338         ENTER;
7339         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7340                              0x24, ioa_cfg->vpd_cbs_dma +
7341                              offsetof(struct ipr_misc_cbs, mode_pages),
7342                              sizeof(struct ipr_mode_pages));
7343
7344         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7345         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7346
7347         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7348
7349         LEAVE;
7350         return IPR_RC_JOB_RETURN;
7351 }
7352
7353 /**
7354  * ipr_init_res_table - Initialize the resource table
7355  * @ipr_cmd:    ipr command struct
7356  *
7357  * This function looks through the existing resource table, comparing
7358  * it with the config table. This function will take care of old/new
7359  * devices and schedule adding/removing them from the mid-layer
7360  * as appropriate.
7361  *
7362  * Return value:
7363  *      IPR_RC_JOB_CONTINUE
7364  **/
7365 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7366 {
7367         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7368         struct ipr_resource_entry *res, *temp;
7369         struct ipr_config_table_entry_wrapper cfgtew;
7370         int entries, found, flag, i;
7371         LIST_HEAD(old_res);
7372
7373         ENTER;
7374         if (ioa_cfg->sis64)
7375                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7376         else
7377                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7378
7379         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7380                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7381
7382         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7383                 list_move_tail(&res->queue, &old_res);
7384
7385         if (ioa_cfg->sis64)
7386                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7387         else
7388                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7389
7390         for (i = 0; i < entries; i++) {
7391                 if (ioa_cfg->sis64)
7392                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7393                 else
7394                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7395                 found = 0;
7396
7397                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7398                         if (ipr_is_same_device(res, &cfgtew)) {
7399                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7400                                 found = 1;
7401                                 break;
7402                         }
7403                 }
7404
7405                 if (!found) {
7406                         if (list_empty(&ioa_cfg->free_res_q)) {
7407                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7408                                 break;
7409                         }
7410
7411                         found = 1;
7412                         res = list_entry(ioa_cfg->free_res_q.next,
7413                                          struct ipr_resource_entry, queue);
7414                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7415                         ipr_init_res_entry(res, &cfgtew);
7416                         res->add_to_ml = 1;
7417                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7418                         res->sdev->allow_restart = 1;
7419
7420                 if (found)
7421                         ipr_update_res_entry(res, &cfgtew);
7422         }
7423
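        /*
         * Anything left on old_res has vanished from the config table.
         * Entries with a scsi_device are flagged for mid-layer removal
         * (with an invalidated handle) and kept on used_res_q until the
         * worker thread removes them; the rest go back to the free list.
         */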
7424         list_for_each_entry_safe(res, temp, &old_res, queue) {
7425                 if (res->sdev) {
7426                         res->del_from_ml = 1;
7427                         res->res_handle = IPR_INVALID_RES_HANDLE;
7428                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7429                 }
7430         }
7431
7432         list_for_each_entry_safe(res, temp, &old_res, queue) {
7433                 ipr_clear_res_target(res);
7434                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7435         }
7436
7437         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7438                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7439         else
7440                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7441
7442         LEAVE;
7443         return IPR_RC_JOB_CONTINUE;
7444 }
7445
7446 /**
7447  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7448  * @ipr_cmd:    ipr command struct
7449  *
7450  * This function sends a Query IOA Configuration command
7451  * to the adapter to retrieve the IOA configuration table.
7452  *
7453  * Return value:
7454  *      IPR_RC_JOB_RETURN
7455  **/
7456 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7457 {
7458         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7459         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7460         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7461         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7462
7463         ENTER;
7464         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7465                 ioa_cfg->dual_raid = 1;
7466         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7467                  ucode_vpd->major_release, ucode_vpd->card_type,
7468                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7469         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7470         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7471
7472         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7473         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7474         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7475         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7476
7477         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7478                        IPR_IOADL_FLAGS_READ_LAST);
7479
7480         ipr_cmd->job_step = ipr_init_res_table;
7481
7482         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7483
7484         LEAVE;
7485         return IPR_RC_JOB_RETURN;
7486 }
7487
7488 /**
7489  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7490  * @ipr_cmd:    ipr command struct
 * @flags:      inquiry CDB byte 1 flags (EVPD bit for VPD pages)
 * @page:       page code to inquire
 * @dma_addr:   DMA address of the inquiry response buffer
 * @xfer_len:   size of the response buffer
7491  *
7492  * This utility function sends an inquiry to the adapter.
7493  *
7494  * Return value:
7495  *      none
7496  **/
7497 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7498                               dma_addr_t dma_addr, u8 xfer_len)
7499 {
7500         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7501
7502         ENTER;
7503         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7504         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7505
7506         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7507         ioarcb->cmd_pkt.cdb[1] = flags;
7508         ioarcb->cmd_pkt.cdb[2] = page;
7509         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7510
7511         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7512
7513         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7514         LEAVE;
7515 }
7516
7517 /**
7518  * ipr_inquiry_page_supported - Is the given inquiry page supported
7519  * @page0:              inquiry page 0 buffer
7520  * @page:               page code.
7521  *
7522  * This function determines if the specified inquiry page is supported.
7523  *
7524  * Return value:
7525  *      1 if page is supported / 0 if not
7526  **/
7527 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7528 {
7529         int i;
7530
7531         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7532                 if (page0->page[i] == page)
7533                         return 1;
7534
7535         return 0;
7536 }
7537
7538 /**
7539  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7540  * @ipr_cmd:    ipr command struct
7541  *
7542  * This function sends a Page 0xD0 inquiry to the adapter
7543  * to retrieve adapter capabilities.
7544  *
7545  * Return value:
7546  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7547  **/
7548 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7549 {
7550         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7551         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7552         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7553
7554         ENTER;
7555         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7556         memset(cap, 0, sizeof(*cap));
7557
7558         if (ipr_inquiry_page_supported(page0, 0xD0)) {
7559                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7560                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7561                                   sizeof(struct ipr_inquiry_cap));
7562                 return IPR_RC_JOB_RETURN;
7563         }
7564
7565         LEAVE;
7566         return IPR_RC_JOB_CONTINUE;
7567 }
7568
7569 /**
7570  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7571  * @ipr_cmd:    ipr command struct
7572  *
7573  * This function sends a Page 3 inquiry to the adapter
7574  * to retrieve software VPD information.
7575  *
7576  * Return value:
7577  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7578  **/
7579 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7580 {
7581         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7582
7583         ENTER;
7584
7585         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7586
7587         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7588                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7589                           sizeof(struct ipr_inquiry_page3));
7590
7591         LEAVE;
7592         return IPR_RC_JOB_RETURN;
7593 }
7594
7595 /**
7596  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7597  * @ipr_cmd:    ipr command struct
7598  *
7599  * This function sends a Page 0 inquiry to the adapter
7600  * to retrieve supported inquiry pages.
7601  *
7602  * Return value:
7603  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7604  **/
7605 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7606 {
7607         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7608         char type[5];
7609
7610         ENTER;
7611
7612         /* Grab the type out of the VPD and store it away */
7613         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7614         type[4] = '\0';
7615         ioa_cfg->type = simple_strtoul(type, NULL, 16);
7616
7617         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7618
7619         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7620                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7621                           sizeof(struct ipr_inquiry_page0));
7622
7623         LEAVE;
7624         return IPR_RC_JOB_RETURN;
7625 }
7626
7627 /**
7628  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7629  * @ipr_cmd:    ipr command struct
7630  *
7631  * This function sends a standard inquiry to the adapter.
7632  *
7633  * Return value:
7634  *      IPR_RC_JOB_RETURN
7635  **/
7636 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7637 {
7638         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7639
7640         ENTER;
7641         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7642
7643         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7644                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7645                           sizeof(struct ipr_ioa_vpd));
7646
7647         LEAVE;
7648         return IPR_RC_JOB_RETURN;
7649 }
7650
7651 /**
7652  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7653  * @ipr_cmd:    ipr command struct
7654  *
7655  * This function sends an Identify Host Request Response Queue
7656  * command to establish the HRRQ with the adapter.
7657  *
7658  * Return value:
7659  *      IPR_RC_JOB_RETURN
7660  **/
7661 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7662 {
7663         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7664         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7665         struct ipr_hrr_queue *hrrq;
7666
7667         ENTER;
7668         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7669         dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7670
7671         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7672                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7673
7674                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7675                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7676
7677                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7678                 if (ioa_cfg->sis64)
7679                         ioarcb->cmd_pkt.cdb[1] = 0x1;
7680
7681                 if (ioa_cfg->nvectors == 1)
7682                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7683                 else
7684                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7685
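                /*
                 * Identify HRRQ CDB layout: bytes 2-5 carry bits 31:0
                 * of the queue DMA address, bytes 7-8 the queue length
                 * in bytes (4 bytes per entry), and on SIS-64 bytes
                 * 10-13 carry address bits 63:32. Bytes 9 and 14
                 * select the queue index when multiple HRRQs are
                 * enabled.
                 */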
7686                 ioarcb->cmd_pkt.cdb[2] =
7687                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7688                 ioarcb->cmd_pkt.cdb[3] =
7689                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7690                 ioarcb->cmd_pkt.cdb[4] =
7691                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7692                 ioarcb->cmd_pkt.cdb[5] =
7693                         ((u64) hrrq->host_rrq_dma) & 0xff;
7694                 ioarcb->cmd_pkt.cdb[7] =
7695                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7696                 ioarcb->cmd_pkt.cdb[8] =
7697                         (sizeof(u32) * hrrq->size) & 0xff;
7698
7699                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7700                         ioarcb->cmd_pkt.cdb[9] =
7701                                         ioa_cfg->identify_hrrq_index;
7702
7703                 if (ioa_cfg->sis64) {
7704                         ioarcb->cmd_pkt.cdb[10] =
7705                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7706                         ioarcb->cmd_pkt.cdb[11] =
7707                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7708                         ioarcb->cmd_pkt.cdb[12] =
7709                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7710                         ioarcb->cmd_pkt.cdb[13] =
7711                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7712                 }
7713
7714                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7715                         ioarcb->cmd_pkt.cdb[14] =
7716                                         ioa_cfg->identify_hrrq_index;
7717
7718                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7719                            IPR_INTERNAL_TIMEOUT);
7720
7721                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7722                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7723
7724                 LEAVE;
7725                 return IPR_RC_JOB_RETURN;
7726         }
7727
7728         LEAVE;
7729         return IPR_RC_JOB_CONTINUE;
7730 }
7731
7732 /**
7733  * ipr_reset_timer_done - Adapter reset timer function
7734  * @ipr_cmd:    ipr command struct
7735  *
7736  * Description: This function is used in adapter reset processing
7737  * for timing events. If the reset_cmd pointer in the IOA
7738  * config struct does not point to this command, we are doing
7739  * nested resets and fail_all_ops will take care of freeing the
7740  * command block.
7741  *
7742  * Return value:
7743  *      none
7744  **/
7745 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7746 {
7747         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7748         unsigned long lock_flags = 0;
7749
7750         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7751
7752         if (ioa_cfg->reset_cmd == ipr_cmd) {
7753                 list_del(&ipr_cmd->queue);
7754                 ipr_cmd->done(ipr_cmd);
7755         }
7756
7757         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7758 }
7759
7760 /**
7761  * ipr_reset_start_timer - Start a timer for adapter reset job
7762  * @ipr_cmd:    ipr command struct
7763  * @timeout:    timeout value
7764  *
7765  * Description: This function is used in adapter reset processing
7766  * for timing events. If the reset_cmd pointer in the IOA
7767  * config struct does not point to this command, we are doing
7768  * nested resets and fail_all_ops will take care of freeing the
7769  * command block.
7770  *
7771  * Return value:
7772  *      none
7773  **/
7774 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7775                                   unsigned long timeout)
7776 {
7778         ENTER;
7779         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7780         ipr_cmd->done = ipr_reset_ioa_job;
7781
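        /*
         * Park the command on the pending queue with ->done wired to
         * the reset job router, so that when the timer fires (or
         * nested reset handling completes the command) the state
         * machine resumes at the current job_step.
         */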
7782         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7783         ipr_cmd->timer.expires = jiffies + timeout;
7784         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7785         add_timer(&ipr_cmd->timer);
7786 }
7787
7788 /**
7789  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7790  * @ioa_cfg:    ioa cfg struct
7791  *
7792  * Return value:
7793  *      nothing
7794  **/
7795 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7796 {
7797         struct ipr_hrr_queue *hrrq;
7798
7799         for_each_hrrq(hrrq, ioa_cfg) {
7800                 spin_lock(&hrrq->_lock);
7801                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7802
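                /*
                 * The toggle bit flips each time hrrq_curr wraps past
                 * hrrq_end; a completion entry is treated as new only
                 * while its toggle bit matches hrrq->toggle_bit, which
                 * lets the host tell fresh entries from stale ones
                 * left over from before the reset.
                 */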
7803                 /* Initialize Host RRQ pointers */
7804                 hrrq->hrrq_start = hrrq->host_rrq;
7805                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7806                 hrrq->hrrq_curr = hrrq->hrrq_start;
7807                 hrrq->toggle_bit = 1;
7808                 spin_unlock(&hrrq->_lock);
7809         }
7810         wmb();
7811
7812         ioa_cfg->identify_hrrq_index = 0;
7813         if (ioa_cfg->hrrq_num == 1)
7814                 atomic_set(&ioa_cfg->hrrq_index, 0);
7815         else
7816                 atomic_set(&ioa_cfg->hrrq_index, 1);
7817
7818         /* Zero out config table */
7819         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7820 }
7821
7822 /**
7823  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7824  * @ipr_cmd:    ipr command struct
7825  *
7826  * Return value:
7827  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7828  **/
7829 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7830 {
7831         unsigned long stage, stage_time;
7832         u32 feedback;
7833         volatile u32 int_reg;
7834         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7835         u64 maskval = 0;
7836
7837         feedback = readl(ioa_cfg->regs.init_feedback_reg);
7838         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7839         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7840
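        /*
         * SIS-64 initialization is staged: the init feedback register
         * reports the current IPL stage and a per-stage time budget,
         * and the operational timeout is re-armed for each stage
         * rather than using one flat transop timeout.
         */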
7841         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7842
7843         /* sanity check the stage_time value */
7844         if (stage_time == 0)
7845                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7846         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7847                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7848         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7849                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7850
7851         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7852                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7853                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7854                 stage_time = ioa_cfg->transop_timeout;
7855                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7856         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7857                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7858                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7859                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7860                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
7861                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7862                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7863                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7864                         return IPR_RC_JOB_CONTINUE;
7865                 }
7866         }
7867
7868         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7869         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7870         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7871         ipr_cmd->done = ipr_reset_ioa_job;
7872         add_timer(&ipr_cmd->timer);
7873
7874         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7875
7876         return IPR_RC_JOB_RETURN;
7877 }
7878
7879 /**
7880  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7881  * @ipr_cmd:    ipr command struct
7882  *
7883  * This function reinitializes some control blocks and
7884  * enables destructive diagnostics on the adapter.
7885  *
7886  * Return value:
7887  *      IPR_RC_JOB_RETURN
7888  **/
7889 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7890 {
7891         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7892         volatile u32 int_reg;
7893         volatile u64 maskval;
7894         int i;
7895
7896         ENTER;
7897         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7898         ipr_init_ioa_mem(ioa_cfg);
7899
7900         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7901                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7902                 ioa_cfg->hrrq[i].allow_interrupts = 1;
7903                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7904         }
7905         wmb();
7906         if (ioa_cfg->sis64) {
7907                 /* Set the adapter to the correct endian mode. */
7908                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7909                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7910         }
7911
7912         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7913
7914         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7915                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7916                        ioa_cfg->regs.clr_interrupt_mask_reg32);
7917                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7918                 return IPR_RC_JOB_CONTINUE;
7919         }
7920
7921         /* Enable destructive diagnostics on IOA */
7922         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7923
7924         if (ioa_cfg->sis64) {
7925                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7926                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7927                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7928         } else
7929                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7930
7931         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7932
7933         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7934
7935         if (ioa_cfg->sis64) {
7936                 ipr_cmd->job_step = ipr_reset_next_stage;
7937                 return IPR_RC_JOB_CONTINUE;
7938         }
7939
7940         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7941         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7942         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7943         ipr_cmd->done = ipr_reset_ioa_job;
7944         add_timer(&ipr_cmd->timer);
7945         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7946
7947         LEAVE;
7948         return IPR_RC_JOB_RETURN;
7949 }
7950
7951 /**
7952  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7953  * @ipr_cmd:    ipr command struct
7954  *
7955  * This function is invoked when an adapter dump has run out
7956  * of processing time.
7957  *
7958  * Return value:
7959  *      IPR_RC_JOB_CONTINUE
7960  **/
7961 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7962 {
7963         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7964
7965         if (ioa_cfg->sdt_state == GET_DUMP)
7966                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7967         else if (ioa_cfg->sdt_state == READ_DUMP)
7968                 ioa_cfg->sdt_state = ABORT_DUMP;
7969
7970         ioa_cfg->dump_timeout = 1;
7971         ipr_cmd->job_step = ipr_reset_alert;
7972
7973         return IPR_RC_JOB_CONTINUE;
7974 }
7975
7976 /**
7977  * ipr_unit_check_no_data - Log a unit check/no data error log
7978  * @ioa_cfg:            ioa config struct
7979  *
7980  * Logs an error indicating the adapter unit checked, but for some
7981  * reason, we were unable to fetch the unit check buffer.
7982  *
7983  * Return value:
7984  *      nothing
7985  **/
7986 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7987 {
7988         ioa_cfg->errors_logged++;
7989         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7990 }
7991
7992 /**
7993  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7994  * @ioa_cfg:            ioa config struct
7995  *
7996  * Fetches the unit check buffer from the adapter by clocking the data
7997  * through the mailbox register.
7998  *
7999  * Return value:
8000  *      nothing
8001  **/
8002 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8003 {
8004         unsigned long mailbox;
8005         struct ipr_hostrcb *hostrcb;
8006         struct ipr_uc_sdt sdt;
8007         int rc, length;
8008         u32 ioasc;
8009
8010         mailbox = readl(ioa_cfg->ioa_mailbox);
8011
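        /*
         * The mailbox register holds the address of a smart dump
         * table (SDT) in adapter memory; ipr_get_ldump_data_section()
         * clocks that data out a word at a time. The first SDT entry
         * then locates the unit check buffer itself.
         */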
8012         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8013                 ipr_unit_check_no_data(ioa_cfg);
8014                 return;
8015         }
8016
8017         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8018         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8019                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8020
8021         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8022             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8023             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8024                 ipr_unit_check_no_data(ioa_cfg);
8025                 return;
8026         }
8027
8028         /* Find length of the first sdt entry (UC buffer) */
8029         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8030                 length = be32_to_cpu(sdt.entry[0].end_token);
8031         else
8032                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8033                           be32_to_cpu(sdt.entry[0].start_token)) &
8034                           IPR_FMT2_MBX_ADDR_MASK;
8035
8036         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8037                              struct ipr_hostrcb, queue);
8038         list_del(&hostrcb->queue);
8039         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8040
8041         rc = ipr_get_ldump_data_section(ioa_cfg,
8042                                         be32_to_cpu(sdt.entry[0].start_token),
8043                                         (__be32 *)&hostrcb->hcam,
8044                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8045
8046         if (!rc) {
8047                 ipr_handle_log_data(ioa_cfg, hostrcb);
8048                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8049                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8050                     ioa_cfg->sdt_state == GET_DUMP)
8051                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8052         } else
8053                 ipr_unit_check_no_data(ioa_cfg);
8054
8055         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8056 }
8057
8058 /**
8059  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8060  * @ipr_cmd:    ipr command struct
8061  *
8062  * Description: This function fetches the unit check buffer from the adapter.
8063  *
8064  * Return value:
8065  *      IPR_RC_JOB_RETURN
8066  **/
8067 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8068 {
8069         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8070
8071         ENTER;
8072         ioa_cfg->ioa_unit_checked = 0;
8073         ipr_get_unit_check_buffer(ioa_cfg);
8074         ipr_cmd->job_step = ipr_reset_alert;
8075         ipr_reset_start_timer(ipr_cmd, 0);
8076
8077         LEAVE;
8078         return IPR_RC_JOB_RETURN;
8079 }
8080
8081 /**
8082  * ipr_reset_restore_cfg_space - Restore PCI config space.
8083  * @ipr_cmd:    ipr command struct
8084  *
8085  * Description: This function restores the saved PCI config space of
8086  * the adapter, fails all outstanding ops back to the callers, and
8087  * fetches the dump/unit check if applicable to this reset.
8088  *
8089  * Return value:
8090  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8091  **/
8092 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8093 {
8094         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8095         u32 int_reg;
8096
8097         ENTER;
8098         ioa_cfg->pdev->state_saved = true;
8099         pci_restore_state(ioa_cfg->pdev);
8100
8101         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8102                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8103                 return IPR_RC_JOB_CONTINUE;
8104         }
8105
8106         ipr_fail_all_ops(ioa_cfg);
8107
8108         if (ioa_cfg->sis64) {
8109                 /* Set the adapter to the correct endian mode. */
8110                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8111                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8112         }
8113
8114         if (ioa_cfg->ioa_unit_checked) {
8115                 if (ioa_cfg->sis64) {
8116                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8117                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8118                         return IPR_RC_JOB_RETURN;
8119                 } else {
8120                         ioa_cfg->ioa_unit_checked = 0;
8121                         ipr_get_unit_check_buffer(ioa_cfg);
8122                         ipr_cmd->job_step = ipr_reset_alert;
8123                         ipr_reset_start_timer(ipr_cmd, 0);
8124                         return IPR_RC_JOB_RETURN;
8125                 }
8126         }
8127
8128         if (ioa_cfg->in_ioa_bringdown) {
8129                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8130         } else {
8131                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8132
8133                 if (GET_DUMP == ioa_cfg->sdt_state) {
8134                         ioa_cfg->sdt_state = READ_DUMP;
8135                         ioa_cfg->dump_timeout = 0;
8136                         if (ioa_cfg->sis64)
8137                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8138                         else
8139                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8140                         ipr_cmd->job_step = ipr_reset_wait_for_dump;
8141                         schedule_work(&ioa_cfg->work_q);
8142                         return IPR_RC_JOB_RETURN;
8143                 }
8144         }
8145
8146         LEAVE;
8147         return IPR_RC_JOB_CONTINUE;
8148 }
8149
8150 /**
8151  * ipr_reset_bist_done - BIST has completed on the adapter.
8152  * @ipr_cmd:    ipr command struct
8153  *
8154  * Description: Unblock config space and resume the reset process.
8155  *
8156  * Return value:
8157  *      IPR_RC_JOB_CONTINUE
8158  **/
8159 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8160 {
8161         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8162
8163         ENTER;
8164         if (ioa_cfg->cfg_locked)
8165                 pci_cfg_access_unlock(ioa_cfg->pdev);
8166         ioa_cfg->cfg_locked = 0;
8167         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8168         LEAVE;
8169         return IPR_RC_JOB_CONTINUE;
8170 }
8171
8172 /**
8173  * ipr_reset_start_bist - Run BIST on the adapter.
8174  * @ipr_cmd:    ipr command struct
8175  *
8176  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8177  *
8178  * Return value:
8179  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8180  **/
8181 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8182 {
8183         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8184         int rc = PCIBIOS_SUCCESSFUL;
8185
8186         ENTER;
8187         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8188                 writel(IPR_UPROCI_SIS64_START_BIST,
8189                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8190         else
8191                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8192
8193         if (rc == PCIBIOS_SUCCESSFUL) {
8194                 ipr_cmd->job_step = ipr_reset_bist_done;
8195                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8196                 rc = IPR_RC_JOB_RETURN;
8197         } else {
8198                 if (ioa_cfg->cfg_locked)
8199                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8200                 ioa_cfg->cfg_locked = 0;
8201                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8202                 rc = IPR_RC_JOB_CONTINUE;
8203         }
8204
8205         LEAVE;
8206         return rc;
8207 }
8208
8209 /**
8210  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8211  * @ipr_cmd:    ipr command struct
8212  *
8213  * Description: This clears PCI reset to the adapter and delays two seconds.
8214  *
8215  * Return value:
8216  *      IPR_RC_JOB_RETURN
8217  **/
8218 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8219 {
8220         ENTER;
8221         pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8222         ipr_cmd->job_step = ipr_reset_bist_done;
8223         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8224         LEAVE;
8225         return IPR_RC_JOB_RETURN;
8226 }
8227
8228 /**
8229  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8230  * @ipr_cmd:    ipr command struct
8231  *
8232  * Description: This asserts PCI reset to the adapter.
8233  *
8234  * Return value:
8235  *      IPR_RC_JOB_RETURN
8236  **/
8237 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8238 {
8239         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8240         struct pci_dev *pdev = ioa_cfg->pdev;
8241
8242         ENTER;
8243         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8244         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8245         ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8246         LEAVE;
8247         return IPR_RC_JOB_RETURN;
8248 }
8249
8250 /**
8251  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8252  * @ipr_cmd:    ipr command struct
8253  *
8254  * Description: This attempts to block config access to the IOA.
8255  *
8256  * Return value:
8257  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8258  **/
8259 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8260 {
8261         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8262         int rc = IPR_RC_JOB_CONTINUE;
8263
8264         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8265                 ioa_cfg->cfg_locked = 1;
8266                 ipr_cmd->job_step = ioa_cfg->reset;
8267         } else {
8268                 if (ipr_cmd->u.time_left) {
8269                         rc = IPR_RC_JOB_RETURN;
8270                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8271                         ipr_reset_start_timer(ipr_cmd,
8272                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8273                 } else {
8274                         ipr_cmd->job_step = ioa_cfg->reset;
8275                         dev_err(&ioa_cfg->pdev->dev,
8276                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8277                 }
8278         }
8279
8280         return rc;
8281 }
8282
8283 /**
8284  * ipr_reset_block_config_access - Block config access to the IOA
8285  * @ipr_cmd:    ipr command struct
8286  *
8287  * Description: This attempts to block config access to the IOA
8288  *
8289  * Return value:
8290  *      IPR_RC_JOB_CONTINUE
8291  **/
8292 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8293 {
8294         ipr_cmd->ioa_cfg->cfg_locked = 0;
8295         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8296         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8297         return IPR_RC_JOB_CONTINUE;
8298 }
8299
8300 /**
8301  * ipr_reset_allowed - Query whether or not IOA can be reset
8302  * @ioa_cfg:    ioa config struct
8303  *
8304  * Return value:
8305  *      0 if reset not allowed / non-zero if reset is allowed
8306  **/
8307 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8308 {
8309         volatile u32 temp_reg;
8310
8311         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8312         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8313 }
8314
8315 /**
8316  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8317  * @ipr_cmd:    ipr command struct
8318  *
8319  * Description: This function waits for adapter permission to run BIST,
8320  * then runs BIST. If the adapter does not give permission after a
8321  * reasonable time, we will reset the adapter anyway. The risk of
8322  * resetting the adapter without warning it is losing the
8323  * persistent error log on the adapter. If the adapter is reset
8324  * while it is writing to its flash, the flash segment will have
8325  * bad ECC and be zeroed.
8326  *
8327  * Return value:
8328  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8329  **/
8330 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8331 {
8332         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8333         int rc = IPR_RC_JOB_RETURN;
8334
8335         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8336                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8337                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8338         } else {
8339                 ipr_cmd->job_step = ipr_reset_block_config_access;
8340                 rc = IPR_RC_JOB_CONTINUE;
8341         }
8342
8343         return rc;
8344 }
8345
8346 /**
8347  * ipr_reset_alert - Alert the adapter of a pending reset
8348  * @ipr_cmd:    ipr command struct
8349  *
8350  * Description: This function alerts the adapter that it will be reset.
8351  * If memory space is not currently enabled, proceed directly
8352  * to running BIST on the adapter. The timer must always be started
8353  * so we guarantee we do not run BIST from ipr_isr.
8354  *
8355  * Return value:
8356  *      IPR_RC_JOB_RETURN
8357  **/
8358 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8359 {
8360         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8361         u16 cmd_reg;
8362         int rc;
8363
8364         ENTER;
8365         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8366
8367         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8368                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8369                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8370                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8371         } else {
8372                 ipr_cmd->job_step = ipr_reset_block_config_access;
8373         }
8374
8375         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8376         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8377
8378         LEAVE;
8379         return IPR_RC_JOB_RETURN;
8380 }
8381
8382 /**
8383  * ipr_reset_ucode_download_done - Microcode download completion
8384  * @ipr_cmd:    ipr command struct
8385  *
8386  * Description: This function unmaps the microcode download buffer.
8387  *
8388  * Return value:
8389  *      IPR_RC_JOB_CONTINUE
8390  **/
8391 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8392 {
8393         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8394         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8395
8396         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8397                      sglist->num_sg, DMA_TO_DEVICE);
8398
8399         ipr_cmd->job_step = ipr_reset_alert;
8400         return IPR_RC_JOB_CONTINUE;
8401 }
8402
8403 /**
8404  * ipr_reset_ucode_download - Download microcode to the adapter
8405  * @ipr_cmd:    ipr command struct
8406  *
8407  * Description: This function checks to see if there is microcode
8408  * to download to the adapter. If there is, a download is performed.
8409  *
8410  * Return value:
8411  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8412  **/
8413 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8414 {
8415         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8416         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8417
8418         ENTER;
8419         ipr_cmd->job_step = ipr_reset_alert;
8420
8421         if (!sglist)
8422                 return IPR_RC_JOB_CONTINUE;
8423
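        /*
         * Microcode is sent as a SCSI WRITE BUFFER addressed to the
         * IOA resource handle; IPR_WR_BUF_DOWNLOAD_AND_SAVE in cdb[1]
         * asks the adapter to save the image to flash, and cdb[6..8]
         * carry the 24-bit image length.
         */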
8424         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8425         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8426         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8427         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8428         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8429         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8430         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8431
8432         if (ioa_cfg->sis64)
8433                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8434         else
8435                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8436         ipr_cmd->job_step = ipr_reset_ucode_download_done;
8437
8438         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8439                    IPR_WRITE_BUFFER_TIMEOUT);
8440
8441         LEAVE;
8442         return IPR_RC_JOB_RETURN;
8443 }
8444
8445 /**
8446  * ipr_reset_shutdown_ioa - Shutdown the adapter
8447  * @ipr_cmd:    ipr command struct
8448  *
8449  * Description: This function issues an adapter shutdown of the
8450  * specified type to the specified adapter as part of the
8451  * adapter reset job.
8452  *
8453  * Return value:
8454  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8455  **/
8456 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8457 {
8458         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8459         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8460         unsigned long timeout;
8461         int rc = IPR_RC_JOB_CONTINUE;
8462
8463         ENTER;
8464         if (shutdown_type != IPR_SHUTDOWN_NONE &&
8465                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8466                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8467                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8468                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8469                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8470
8471                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8472                         timeout = IPR_SHUTDOWN_TIMEOUT;
8473                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8474                         timeout = IPR_INTERNAL_TIMEOUT;
8475                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8476                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8477                 else
8478                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8479
8480                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8481
8482                 rc = IPR_RC_JOB_RETURN;
8483                 ipr_cmd->job_step = ipr_reset_ucode_download;
8484         } else
8485                 ipr_cmd->job_step = ipr_reset_alert;
8486
8487         LEAVE;
8488         return rc;
8489 }
8490
8491 /**
8492  * ipr_reset_ioa_job - Adapter reset job
8493  * @ipr_cmd:    ipr command struct
8494  *
8495  * Description: This function is the job router for the adapter reset job.
8496  *
8497  * Return value:
8498  *      none
8499  **/
8500 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8501 {
8502         u32 rc, ioasc;
8503         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8504
8505         do {
8506                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8507
8508                 if (ioa_cfg->reset_cmd != ipr_cmd) {
8509                         /*
8510                          * We are doing nested adapter resets and this is
8511                          * not the current reset job.
8512                          */
8513                         list_add_tail(&ipr_cmd->queue,
8514                                         &ipr_cmd->hrrq->hrrq_free_q);
8515                         return;
8516                 }
8517
8518                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8519                         rc = ipr_cmd->job_step_failed(ipr_cmd);
8520                         if (rc == IPR_RC_JOB_RETURN)
8521                                 return;
8522                 }
8523
8524                 ipr_reinit_ipr_cmnd(ipr_cmd);
8525                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8526                 rc = ipr_cmd->job_step(ipr_cmd);
8527         } while (rc == IPR_RC_JOB_CONTINUE);
8528 }
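
/*
 * Editor's sketch (standalone, hypothetical names): the router above drives
 * a state machine through job_step function pointers.  A step that finishes
 * synchronously points job_step at its successor and returns CONTINUE, so
 * the do/while loop runs the next step immediately; a step that arms an
 * asynchronous completion returns RETURN and the completion re-enters the
 * router later.  A user-space mirror of that control flow:
 */
#include <stdio.h>

enum { RC_JOB_CONTINUE, RC_JOB_RETURN };

struct job {
        int (*step)(struct job *);      /* analogue of ipr_cmd->job_step */
};

static int step_done(struct job *j)
{
        printf("reset complete\n");
        return RC_JOB_RETURN;           /* nothing left to run synchronously */
}

static int step_alert(struct job *j)
{
        printf("alert adapter\n");
        j->step = step_done;            /* queue the successor... */
        return RC_JOB_CONTINUE;         /* ...and let the router call it now */
}

static void run_job(struct job *j)      /* mirrors ipr_reset_ioa_job's loop */
{
        while (j->step(j) == RC_JOB_CONTINUE)
                ;
}

int main(void)
{
        struct job j = { .step = step_alert };

        run_job(&j);
        return 0;
}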
8529
8530 /**
8531  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8532  * @ioa_cfg:            ioa config struct
8533  * @job_step:           first job step of reset job
8534  * @shutdown_type:      shutdown type
8535  *
8536  * Description: This function will initiate the reset of the given adapter
8537  * starting at the selected job step.
8538  * If the caller needs to wait on the completion of the reset,
8539  * the caller must sleep on the reset_wait_q.
8540  *
8541  * Return value:
8542  *      none
8543  **/
8544 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8545                                     int (*job_step) (struct ipr_cmnd *),
8546                                     enum ipr_shutdown_type shutdown_type)
8547 {
8548         struct ipr_cmnd *ipr_cmd;
8549         int i;
8550
8551         ioa_cfg->in_reset_reload = 1;
8552         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8553                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8554                 ioa_cfg->hrrq[i].allow_cmds = 0;
8555                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8556         }
8557         wmb();
8558         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8559                 scsi_block_requests(ioa_cfg->host);
8560
8561         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8562         ioa_cfg->reset_cmd = ipr_cmd;
8563         ipr_cmd->job_step = job_step;
8564         ipr_cmd->u.shutdown_type = shutdown_type;
8565
8566         ipr_reset_ioa_job(ipr_cmd);
8567 }
8568
8569 /**
8570  * ipr_initiate_ioa_reset - Initiate an adapter reset
8571  * @ioa_cfg:            ioa config struct
8572  * @shutdown_type:      shutdown type
8573  *
8574  * Description: This function will initiate the reset of the given adapter.
8575  * If the caller needs to wait on the completion of the reset,
8576  * the caller must sleep on the reset_wait_q.
8577  *
8578  * Return value:
8579  *      none
8580  **/
8581 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8582                                    enum ipr_shutdown_type shutdown_type)
8583 {
8584         int i;
8585
8586         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8587                 return;
8588
8589         if (ioa_cfg->in_reset_reload) {
8590                 if (ioa_cfg->sdt_state == GET_DUMP)
8591                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8592                 else if (ioa_cfg->sdt_state == READ_DUMP)
8593                         ioa_cfg->sdt_state = ABORT_DUMP;
8594         }
8595
8596         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8597                 dev_err(&ioa_cfg->pdev->dev,
8598                         "IOA taken offline - error recovery failed\n");
8599
8600                 ioa_cfg->reset_retries = 0;
8601                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8602                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8603                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
8604                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8605                 }
8606                 wmb();
8607
8608                 if (ioa_cfg->in_ioa_bringdown) {
8609                         ioa_cfg->reset_cmd = NULL;
8610                         ioa_cfg->in_reset_reload = 0;
8611                         ipr_fail_all_ops(ioa_cfg);
8612                         wake_up_all(&ioa_cfg->reset_wait_q);
8613
8614                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8615                                 spin_unlock_irq(ioa_cfg->host->host_lock);
8616                                 scsi_unblock_requests(ioa_cfg->host);
8617                                 spin_lock_irq(ioa_cfg->host->host_lock);
8618                         }
8619                         return;
8620                 } else {
8621                         ioa_cfg->in_ioa_bringdown = 1;
8622                         shutdown_type = IPR_SHUTDOWN_NONE;
8623                 }
8624         }
8625
8626         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8627                                 shutdown_type);
8628 }
8629
8630 /**
8631  * ipr_reset_freeze - Hold off all I/O activity
8632  * @ipr_cmd:    ipr command struct
8633  *
8634  * Description: If the PCI slot is frozen, hold off all I/O
8635  * activity; then, as soon as the slot is available again,
8636  * initiate an adapter reset.
8637  */
8638 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8639 {
8640         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8641         int i;
8642
8643         /* Disallow new interrupts, avoid loop */
8644         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8645                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8646                 ioa_cfg->hrrq[i].allow_interrupts = 0;
8647                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8648         }
8649         wmb();
8650         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8651         ipr_cmd->done = ipr_reset_ioa_job;
8652         return IPR_RC_JOB_RETURN;
8653 }
8654
8655 /**
8656  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8657  * @pdev:       PCI device struct
8658  *
8659  * Description: This routine is called to tell us that the MMIO
8660  * access to the IOA has been restored
8661  */
8662 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8663 {
8664         unsigned long flags = 0;
8665         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8666
8667         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8668         if (!ioa_cfg->probe_done)
8669                 pci_save_state(pdev);
8670         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8671         return PCI_ERS_RESULT_NEED_RESET;
8672 }
8673
8674 /**
8675  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8676  * @pdev:       PCI device struct
8677  *
8678  * Description: This routine is called to tell us that the PCI bus
8679  * is down. Can't do anything here, except put the device driver
8680  * into a holding pattern, waiting for the PCI bus to come back.
8681  */
8682 static void ipr_pci_frozen(struct pci_dev *pdev)
8683 {
8684         unsigned long flags = 0;
8685         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8686
8687         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8688         if (ioa_cfg->probe_done)
8689                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8690         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8691 }
8692
8693 /**
8694  * ipr_pci_slot_reset - Called when PCI slot has been reset.
8695  * @pdev:       PCI device struct
8696  *
8697  * Description: This routine is called by the pci error recovery
8698  * code after the PCI slot has been reset, just before we
8699  * should resume normal operations.
8700  */
8701 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8702 {
8703         unsigned long flags = 0;
8704         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8705
8706         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8707         if (ioa_cfg->probe_done) {
8708                 if (ioa_cfg->needs_warm_reset)
8709                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8710                 else
8711                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8712                                                 IPR_SHUTDOWN_NONE);
8713         } else
8714                 wake_up_all(&ioa_cfg->eeh_wait_q);
8715         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8716         return PCI_ERS_RESULT_RECOVERED;
8717 }
8718
8719 /**
8720  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8721  * @pdev:       PCI device struct
8722  *
8723  * Description: This routine is called when the PCI bus has
8724  * permanently failed.
8725  */
8726 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8727 {
8728         unsigned long flags = 0;
8729         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8730         int i;
8731
8732         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8733         if (ioa_cfg->probe_done) {
8734                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8735                         ioa_cfg->sdt_state = ABORT_DUMP;
8736                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8737                 ioa_cfg->in_ioa_bringdown = 1;
8738                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8739                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8740                         ioa_cfg->hrrq[i].allow_cmds = 0;
8741                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8742                 }
8743                 wmb();
8744                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8745         } else
8746                 wake_up_all(&ioa_cfg->eeh_wait_q);
8747         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8748 }
8749
8750 /**
8751  * ipr_pci_error_detected - Called when a PCI error is detected.
8752  * @pdev:       PCI device struct
8753  * @state:      PCI channel state
8754  *
8755  * Description: Called when a PCI error is detected.
8756  *
8757  * Return value:
8758  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8759  */
8760 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8761                                                pci_channel_state_t state)
8762 {
8763         switch (state) {
8764         case pci_channel_io_frozen:
8765                 ipr_pci_frozen(pdev);
8766                 return PCI_ERS_RESULT_CAN_RECOVER;
8767         case pci_channel_io_perm_failure:
8768                 ipr_pci_perm_failure(pdev);
8769                 return PCI_ERS_RESULT_DISCONNECT;
8771         default:
8772                 break;
8773         }
8774         return PCI_ERS_RESULT_NEED_RESET;
8775 }
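
/*
 * Editor's note: a hedged sketch of how the four EEH callbacks above are
 * typically tied together; the ipr driver's actual pci_error_handlers
 * table is defined elsewhere in this file and may differ in detail.
 */
#if 0   /* illustration only */
static const struct pci_error_handlers ipr_err_handler_sketch = {
        .error_detected = ipr_pci_error_detected,       /* first notification  */
        .mmio_enabled   = ipr_pci_mmio_enabled,         /* MMIO back, pre-reset */
        .slot_reset     = ipr_pci_slot_reset,           /* slot reset finished  */
};
#endif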
8776
8777 /**
8778  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa()
8779  * @ioa_cfg:    ioa cfg struct
8780  *
8781  * Description: This is the second phase of adapter initialization.
8782  * This function takes care of initializing the adapter to the point
8783  * where it can accept new commands.
8784  *
8785  * Return value:
8786  *      0 on success / -EIO on failure
8787  **/
8788 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8789 {
8790         int rc = 0;
8791         unsigned long host_lock_flags = 0;
8792
8793         ENTER;
8794         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8795         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
8796         ioa_cfg->probe_done = 1;
8797         if (ioa_cfg->needs_hard_reset) {
8798                 ioa_cfg->needs_hard_reset = 0;
8799                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8800         } else
8801                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8802                                         IPR_SHUTDOWN_NONE);
8803         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8804         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8805         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8806
8807         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8808                 rc = -EIO;
8809         } else if (ipr_invalid_adapter(ioa_cfg)) {
8810                 if (!ipr_testmode)
8811                         rc = -EIO;
8812
8813                 dev_err(&ioa_cfg->pdev->dev,
8814                         "Adapter not supported in this hardware configuration.\n");
8815         }
8816
8817         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8818
8819         LEAVE;
8820         return rc;
8821 }
8822
8823 /**
8824  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8825  * @ioa_cfg:    ioa config struct
8826  *
8827  * Return value:
8828  *      none
8829  **/
8830 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8831 {
8832         int i;
8833
8834         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8835                 if (ioa_cfg->ipr_cmnd_list[i])
8836                         dma_pool_free(ioa_cfg->ipr_cmd_pool,
8837                                       ioa_cfg->ipr_cmnd_list[i],
8838                                       ioa_cfg->ipr_cmnd_list_dma[i]);
8839
8840                 ioa_cfg->ipr_cmnd_list[i] = NULL;
8841         }
8842
8843         if (ioa_cfg->ipr_cmd_pool)
8844                 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
8845
8846         kfree(ioa_cfg->ipr_cmnd_list);
8847         kfree(ioa_cfg->ipr_cmnd_list_dma);
8848         ioa_cfg->ipr_cmnd_list = NULL;
8849         ioa_cfg->ipr_cmnd_list_dma = NULL;
8850         ioa_cfg->ipr_cmd_pool = NULL;
8851 }
8852
8853 /**
8854  * ipr_free_mem - Frees memory allocated for an adapter
8855  * @ioa_cfg:    ioa cfg struct
8856  *
8857  * Return value:
8858  *      nothing
8859  **/
8860 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8861 {
8862         int i;
8863
8864         kfree(ioa_cfg->res_entries);
8865         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
8866                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8867         ipr_free_cmd_blks(ioa_cfg);
8868
8869         for (i = 0; i < ioa_cfg->hrrq_num; i++)
8870                 dma_free_coherent(&ioa_cfg->pdev->dev,
8871                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
8872                                   ioa_cfg->hrrq[i].host_rrq,
8873                                   ioa_cfg->hrrq[i].host_rrq_dma);
8874
8875         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
8876                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
8877
8878         for (i = 0; i < IPR_NUM_HCAMS; i++) {
8879                 dma_free_coherent(&ioa_cfg->pdev->dev,
8880                                   sizeof(struct ipr_hostrcb),
8881                                   ioa_cfg->hostrcb[i],
8882                                   ioa_cfg->hostrcb_dma[i]);
8883         }
8884
8885         ipr_free_dump(ioa_cfg);
8886         kfree(ioa_cfg->trace);
8887 }
8888
8889 /**
8890  * ipr_free_all_resources - Free all allocated resources for an adapter.
8891  * @ioa_cfg:    ioa config struct
8892  *
8893  * This function frees all allocated resources for the
8894  * specified adapter.
8895  *
8896  * Return value:
8897  *      none
8898  **/
8899 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8900 {
8901         struct pci_dev *pdev = ioa_cfg->pdev;
8902
8903         ENTER;
8904         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8905             ioa_cfg->intr_flag == IPR_USE_MSIX) {
8906                 int i;
8907                 for (i = 0; i < ioa_cfg->nvectors; i++)
8908                         free_irq(ioa_cfg->vectors_info[i].vec,
8909                                 &ioa_cfg->hrrq[i]);
8910         } else
8911                 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8912
8913         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
8914                 pci_disable_msi(pdev);
8915                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8916         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
8917                 pci_disable_msix(pdev);
8918                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8919         }
8920
8921         iounmap(ioa_cfg->hdw_dma_regs);
8922         pci_release_regions(pdev);
8923         ipr_free_mem(ioa_cfg);
8924         scsi_host_put(ioa_cfg->host);
8925         pci_disable_device(pdev);
8926         LEAVE;
8927 }
8928
8929 /**
8930  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8931  * @ioa_cfg:    ioa config struct
8932  *
8933  * Return value:
8934  *      0 on success / -ENOMEM on allocation failure
8935  **/
8936 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8937 {
8938         struct ipr_cmnd *ipr_cmd;
8939         struct ipr_ioarcb *ioarcb;
8940         dma_addr_t dma_addr;
8941         int i, entries_each_hrrq, hrrq_id = 0;
8942
8943         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
8944                                                 sizeof(struct ipr_cmnd), 512, 0);
8945
8946         if (!ioa_cfg->ipr_cmd_pool)
8947                 return -ENOMEM;
8948
8949         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8950         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8951
8952         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8953                 ipr_free_cmd_blks(ioa_cfg);
8954                 return -ENOMEM;
8955         }
8956
8957         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8958                 if (ioa_cfg->hrrq_num > 1) {
8959                         if (i == 0) {
8960                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8961                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
8962                                 ioa_cfg->hrrq[i].max_cmd_id =
8963                                         (entries_each_hrrq - 1);
8964                         } else {
8965                                 entries_each_hrrq =
8966                                         IPR_NUM_BASE_CMD_BLKS/
8967                                         (ioa_cfg->hrrq_num - 1);
8968                                 ioa_cfg->hrrq[i].min_cmd_id =
8969                                         IPR_NUM_INTERNAL_CMD_BLKS +
8970                                         (i - 1) * entries_each_hrrq;
8971                                 ioa_cfg->hrrq[i].max_cmd_id =
8972                                         (IPR_NUM_INTERNAL_CMD_BLKS +
8973                                         i * entries_each_hrrq - 1);
8974                         }
8975                 } else {
8976                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
8977                         ioa_cfg->hrrq[i].min_cmd_id = 0;
8978                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8979                 }
8980                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8981         }
8982
8983         BUG_ON(ioa_cfg->hrrq_num == 0);
8984
8985         i = IPR_NUM_CMD_BLKS -
8986                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8987         if (i > 0) {
8988                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8989                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8990         }
8991
8992         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8993                 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8994
8995                 if (!ipr_cmd) {
8996                         ipr_free_cmd_blks(ioa_cfg);
8997                         return -ENOMEM;
8998                 }
8999
9000                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9001                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9002                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9003
9004                 ioarcb = &ipr_cmd->ioarcb;
9005                 ipr_cmd->dma_addr = dma_addr;
9006                 if (ioa_cfg->sis64)
9007                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9008                 else
9009                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9010
9011                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9012                 if (ioa_cfg->sis64) {
9013                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9014                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9015                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9016                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9017                 } else {
9018                         ioarcb->write_ioadl_addr =
9019                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9020                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9021                         ioarcb->ioasa_host_pci_addr =
9022                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9023                 }
9024                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9025                 ipr_cmd->cmd_index = i;
9026                 ipr_cmd->ioa_cfg = ioa_cfg;
9027                 ipr_cmd->sense_buffer_dma = dma_addr +
9028                         offsetof(struct ipr_cmnd, sense_buffer);
9029
9030                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9031                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9032                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9033                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9034                         hrrq_id++;
9035         }
9036
9037         return 0;
9038 }
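
/*
 * Editor's sketch (standalone, stand-in constants): the command-id
 * partitioning above gives HRRQ 0 the internal command blocks and splits
 * the base blocks evenly across the remaining queues; the integer-division
 * remainder is folded into the last queue.  Worked example:
 */
#include <stdio.h>

#define N_INTERNAL      5       /* stand-in for IPR_NUM_INTERNAL_CMD_BLKS */
#define N_BASE          100     /* stand-in for IPR_NUM_BASE_CMD_BLKS */
#define N_TOTAL         (N_INTERNAL + N_BASE)

int main(void)
{
        int hrrq_num = 4, i, last_max = 0;

        for (i = 0; i < hrrq_num; i++) {
                int per, min, max;

                if (i == 0) {                   /* internal commands only */
                        per = N_INTERNAL;
                        min = 0;
                        max = per - 1;
                } else {                        /* even split of the rest */
                        per = N_BASE / (hrrq_num - 1);
                        min = N_INTERNAL + (i - 1) * per;
                        max = N_INTERNAL + i * per - 1;
                }
                last_max = max;
                printf("hrrq[%d]: cmd ids %d..%d\n", i, min, max);
        }
        printf("remainder folded into last hrrq: %d\n",
               N_TOTAL - last_max - 1);
        return 0;
}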
9039
9040 /**
9041  * ipr_alloc_mem - Allocate memory for an adapter
9042  * @ioa_cfg:    ioa config struct
9043  *
9044  * Return value:
9045  *      0 on success / non-zero for error
9046  **/
9047 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9048 {
9049         struct pci_dev *pdev = ioa_cfg->pdev;
9050         int i, rc = -ENOMEM;
9051
9052         ENTER;
9053         ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9054                                        ioa_cfg->max_devs_supported, GFP_KERNEL);
9055
9056         if (!ioa_cfg->res_entries)
9057                 goto out;
9058
9059         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9060                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9061                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9062         }
9063
9064         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9065                                               sizeof(struct ipr_misc_cbs),
9066                                               &ioa_cfg->vpd_cbs_dma,
9067                                               GFP_KERNEL);
9068
9069         if (!ioa_cfg->vpd_cbs)
9070                 goto out_free_res_entries;
9071
9072         if (ipr_alloc_cmd_blks(ioa_cfg))
9073                 goto out_free_vpd_cbs;
9074
9075         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9076                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9077                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9078                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9079                                         GFP_KERNEL);
9080
9081                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9082                         while (--i >= 0)
9083                                 dma_free_coherent(&pdev->dev,
9084                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9085                                         ioa_cfg->hrrq[i].host_rrq,
9086                                         ioa_cfg->hrrq[i].host_rrq_dma);
9087                         goto out_ipr_free_cmd_blocks;
9088                 }
9089                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9090         }
9091
9092         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9093                                                   ioa_cfg->cfg_table_size,
9094                                                   &ioa_cfg->cfg_table_dma,
9095                                                   GFP_KERNEL);
9096
9097         if (!ioa_cfg->u.cfg_table)
9098                 goto out_free_host_rrq;
9099
9100         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9101                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9102                                                          sizeof(struct ipr_hostrcb),
9103                                                          &ioa_cfg->hostrcb_dma[i],
9104                                                          GFP_KERNEL);
9105
9106                 if (!ioa_cfg->hostrcb[i])
9107                         goto out_free_hostrcb_dma;
9108
9109                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9110                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9111                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9112                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9113         }
9114
9115         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9116                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9117
9118         if (!ioa_cfg->trace)
9119                 goto out_free_hostrcb_dma;
9120
9121         rc = 0;
9122 out:
9123         LEAVE;
9124         return rc;
9125
9126 out_free_hostrcb_dma:
9127         while (i-- > 0) {
9128                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9129                                   ioa_cfg->hostrcb[i],
9130                                   ioa_cfg->hostrcb_dma[i]);
9131         }
9132         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9133                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9134 out_free_host_rrq:
9135         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9136                 dma_free_coherent(&pdev->dev,
9137                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9138                                   ioa_cfg->hrrq[i].host_rrq,
9139                                   ioa_cfg->hrrq[i].host_rrq_dma);
9140         }
9141 out_ipr_free_cmd_blocks:
9142         ipr_free_cmd_blks(ioa_cfg);
9143 out_free_vpd_cbs:
9144         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9145                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9146 out_free_res_entries:
9147         kfree(ioa_cfg->res_entries);
9148         goto out;
9149 }
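
/*
 * Editor's sketch (standalone): the error handling above uses the classic
 * goto-unwind idiom, with one label per allocation so a failure frees
 * everything acquired so far in reverse order.  A minimal user-space
 * version of the same shape:
 */
#include <stdio.h>
#include <stdlib.h>

static int demo_alloc(void)
{
        char *a, *b, *c;
        int rc = -1;

        a = malloc(16);
        if (!a)
                goto out;
        b = malloc(16);
        if (!b)
                goto out_free_a;
        c = malloc(16);
        if (!c)
                goto out_free_b;

        /* ... use a, b and c ... */
        free(c);
        free(b);
        free(a);
        rc = 0;
out:
        return rc;

out_free_b:
        free(b);
out_free_a:
        free(a);
        goto out;
}

int main(void)
{
        return demo_alloc() ? 1 : 0;
}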
9150
9151 /**
9152  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9153  * @ioa_cfg:    ioa config struct
9154  *
9155  * Return value:
9156  *      none
9157  **/
9158 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9159 {
9160         int i;
9161
9162         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9163                 ioa_cfg->bus_attr[i].bus = i;
9164                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9165                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9166                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9167                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9168                 else
9169                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9170         }
9171 }
9172
9173 /**
9174  * ipr_init_regs - Initialize IOA registers
9175  * @ioa_cfg:    ioa config struct
9176  *
9177  * Return value:
9178  *      none
9179  **/
9180 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9181 {
9182         const struct ipr_interrupt_offsets *p;
9183         struct ipr_interrupts *t;
9184         void __iomem *base;
9185
9186         p = &ioa_cfg->chip_cfg->regs;
9187         t = &ioa_cfg->regs;
9188         base = ioa_cfg->hdw_dma_regs;
9189
9190         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9191         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9192         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9193         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9194         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9195         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9196         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9197         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9198         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9199         t->ioarrin_reg = base + p->ioarrin_reg;
9200         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9201         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9202         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9203         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9204         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9205         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9206
9207         if (ioa_cfg->sis64) {
9208                 t->init_feedback_reg = base + p->init_feedback_reg;
9209                 t->dump_addr_reg = base + p->dump_addr_reg;
9210                 t->dump_data_reg = base + p->dump_data_reg;
9211                 t->endian_swap_reg = base + p->endian_swap_reg;
9212         }
9213 }
9214
9215 /**
9216  * ipr_init_ioa_cfg - Initialize IOA config struct
9217  * @ioa_cfg:    ioa config struct
9218  * @host:               scsi host struct
9219  * @pdev:               PCI dev struct
9220  *
9221  * Return value:
9222  *      none
9223  **/
9224 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9225                              struct Scsi_Host *host, struct pci_dev *pdev)
9226 {
9227         int i;
9228
9229         ioa_cfg->host = host;
9230         ioa_cfg->pdev = pdev;
9231         ioa_cfg->log_level = ipr_log_level;
9232         ioa_cfg->doorbell = IPR_DOORBELL;
9233         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9234         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9235         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9236         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9237         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9238         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9239
9240         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9241         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9242         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9243         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9244         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9245         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9246         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9247         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9248         ioa_cfg->sdt_state = INACTIVE;
9249
9250         ipr_initialize_bus_attr(ioa_cfg);
9251         ioa_cfg->max_devs_supported = ipr_max_devs;
9252
9253         if (ioa_cfg->sis64) {
9254                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9255                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9256                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9257                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9258                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9259                                            + ((sizeof(struct ipr_config_table_entry64)
9260                                                * ioa_cfg->max_devs_supported)));
9261         } else {
9262                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9263                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9264                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9265                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9266                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9267                                            + ((sizeof(struct ipr_config_table_entry)
9268                                                * ioa_cfg->max_devs_supported)));
9269         }
9270
9271         host->max_channel = IPR_MAX_BUS_TO_SCAN;
9272         host->unique_id = host->host_no;
9273         host->max_cmd_len = IPR_MAX_CDB_LEN;
9274         host->can_queue = ioa_cfg->max_cmds;
9275         pci_set_drvdata(pdev, ioa_cfg);
9276
9277         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9278                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9279                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9280                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9281                 if (i == 0)
9282                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9283                 else
9284                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9285         }
9286 }
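
/*
 * Editor's sketch (standalone, hypothetical sizes): the config table size
 * computed above is just one header plus one table entry per supported
 * device.  With stand-in struct sizes:
 */
#include <stdio.h>

struct fake_hdr64   { unsigned char b[16]; };   /* stand-in for ipr_config_table_hdr64 */
struct fake_entry64 { unsigned char b[64]; };   /* stand-in for ipr_config_table_entry64 */

int main(void)
{
        unsigned int max_devs = 1024;   /* hypothetical max_devs_supported */
        unsigned long size = sizeof(struct fake_hdr64) +
                             sizeof(struct fake_entry64) * max_devs;

        printf("cfg_table_size = %lu bytes for %u devices\n", size, max_devs);
        return 0;
}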
9287
9288 /**
9289  * ipr_get_chip_info - Find adapter chip information
9290  * @dev_id:             PCI device id struct
9291  *
9292  * Return value:
9293  *      ptr to chip information on success / NULL on failure
9294  **/
9295 static const struct ipr_chip_t *
9296 ipr_get_chip_info(const struct pci_device_id *dev_id)
9297 {
9298         int i;
9299
9300         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9301                 if (ipr_chip[i].vendor == dev_id->vendor &&
9302                     ipr_chip[i].device == dev_id->device)
9303                         return &ipr_chip[i];
9304         return NULL;
9305 }
9306
9307 /**
9308  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9309  *                                              during probe time
9310  * @ioa_cfg:    ioa config struct
9311  *
9312  * Return value:
9313  *      None
9314  **/
9315 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9316 {
9317         struct pci_dev *pdev = ioa_cfg->pdev;
9318
9319         if (pci_channel_offline(pdev)) {
9320                 wait_event_timeout(ioa_cfg->eeh_wait_q,
9321                                    !pci_channel_offline(pdev),
9322                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9323                 pci_restore_state(pdev);
9324         }
9325 }
9326
9327 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9328 {
9329         struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9330         int i, vectors;
9331
9332         for (i = 0; i < ARRAY_SIZE(entries); ++i)
9333                 entries[i].entry = i;
9334
9335         vectors = pci_enable_msix_range(ioa_cfg->pdev,
9336                                         entries, 1, ipr_number_of_msix);
9337         if (vectors < 0) {
9338                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9339                 return vectors;
9340         }
9341
9342         for (i = 0; i < vectors; i++)
9343                 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9344         ioa_cfg->nvectors = vectors;
9345
9346         return 0;
9347 }
9348
9349 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9350 {
9351         int i, vectors;
9352
9353         vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9354         if (vectors < 0) {
9355                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9356                 return vectors;
9357         }
9358
9359         for (i = 0; i < vectors; i++)
9360                 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9361         ioa_cfg->nvectors = vectors;
9362
9363         return 0;
9364 }
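
/*
 * Editor's sketch (standalone, mocked): both helpers above rely on the
 * range contract of pci_enable_msi_range()/pci_enable_msix_range(): ask
 * for between min and max vectors, get back how many were actually
 * enabled, or a negative errno if even min cannot be met.  A mock of that
 * contract, where 'avail' stands in for what the platform can grant:
 */
#include <stdio.h>
#include <errno.h>

static int mock_enable_range(int avail, int min, int max)
{
        if (avail < min)
                return -ENOSPC;         /* can't satisfy even the minimum */
        return avail < max ? avail : max;
}

int main(void)
{
        printf("%d\n", mock_enable_range(16, 1, 2));    /* -> 2 */
        printf("%d\n", mock_enable_range(1, 1, 2));     /* -> 1 */
        printf("%d\n", mock_enable_range(0, 1, 2));     /* -> -ENOSPC */
        return 0;
}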
9365
9366 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9367 {
9368         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9369
9370         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9371                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9372                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9373                 ioa_cfg->vectors_info[vec_idx].
9374                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9375         }
9376 }
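
/*
 * Editor's sketch (standalone): the per-vector names built above.  Note
 * that snprintf() already NUL-terminates within the given size, so the
 * explicit terminating store in the driver is belt and braces.
 */
#include <stdio.h>

int main(void)
{
        char desc[32];  /* stand-in for vectors_info[].desc */
        int host_no = 2, vec_idx;

        for (vec_idx = 0; vec_idx < 4; vec_idx++) {
                snprintf(desc, sizeof(desc) - 1, "host%d-%d", host_no, vec_idx);
                printf("%s\n", desc);   /* host2-0, host2-1, host2-2, host2-3 */
        }
        return 0;
}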
9377
9378 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9379 {
9380         int i, rc;
9381
9382         for (i = 1; i < ioa_cfg->nvectors; i++) {
9383                 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9384                         ipr_isr_mhrrq,
9385                         0,
9386                         ioa_cfg->vectors_info[i].desc,
9387                         &ioa_cfg->hrrq[i]);
9388                 if (rc) {
9389                         while (--i >= 0)
9390                                 free_irq(ioa_cfg->vectors_info[i].vec,
9391                                         &ioa_cfg->hrrq[i]);
9392                         return rc;
9393                 }
9394         }
9395         return 0;
9396 }
9397
9398 /**
9399  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9400  * @pdev:               PCI device struct
9401  *
9402  * Description: Simply set the msi_received flag to 1 indicating that
9403  * Message Signaled Interrupts are supported.
9404  *
9405  * Return value:
9406  *      0 on success / non-zero on failure
9407  **/
9408 static irqreturn_t ipr_test_intr(int irq, void *devp)
9409 {
9410         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9411         unsigned long lock_flags = 0;
9412         irqreturn_t rc = IRQ_HANDLED;
9413
9414         dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
9415         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9416
9417         ioa_cfg->msi_received = 1;
9418         wake_up(&ioa_cfg->msi_wait_q);
9419
9420         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9421         return rc;
9422 }
9423
9424 /**
9425  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9426  * @ioa_cfg:            ioa config struct
9426  * @pdev:               PCI device struct
9427  *
9428  * Description: The return value from pci_enable_msi_range() cannot always be
9429  * trusted.  This routine sets up and initiates a test interrupt to determine
9430  * if the interrupt is received via the ipr_test_intr() service routine.
9431  * If the test fails, the driver will fall back to LSI.
9432  *
9433  * Return value:
9434  *      0 on success / non-zero on failure
9435  **/
9436 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9437 {
9438         int rc;
9439         volatile u32 int_reg;
9440         unsigned long lock_flags = 0;
9441
9442         ENTER;
9443
9444         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9445         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9446         ioa_cfg->msi_received = 0;
9447         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9448         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9449         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9450         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9451
9452         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9453                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9454         else
9455                 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9456         if (rc) {
9457                 dev_err(&pdev->dev, "Cannot assign irq %d\n", pdev->irq);
9458                 return rc;
9459         } else if (ipr_debug)
9460                 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9461
9462         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9463         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9464         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9465         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9466         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9467
9468         if (!ioa_cfg->msi_received) {
9469                 /* MSI test failed */
9470                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9471                 rc = -EOPNOTSUPP;
9472         } else if (ipr_debug)
9473                 dev_info(&pdev->dev, "MSI test succeeded.\n");
9474
9475         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9476
9477         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9478                 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9479         else
9480                 free_irq(pdev->irq, ioa_cfg);
9481
9482         LEAVE;
9483
9484         return rc;
9485 }
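
/*
 * Editor's sketch (standalone, pthreads as a user-space analogue): the MSI
 * test above is a flag-plus-waitqueue handshake between process context
 * (wait_event_timeout on msi_wait_q) and the interrupt handler (set the
 * flag, wake the waiter).  Compile with -pthread; the timeout is omitted
 * for brevity.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int msi_received;                /* analogue of ioa_cfg->msi_received */

static void *fake_irq(void *arg)        /* plays the role of ipr_test_intr() */
{
        usleep(1000);                   /* "hardware" latency */
        pthread_mutex_lock(&lock);
        msi_received = 1;
        pthread_cond_signal(&cond);     /* like wake_up(&msi_wait_q) */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, fake_irq, NULL);
        pthread_mutex_lock(&lock);
        while (!msi_received)           /* like wait_event_timeout(), sans timeout */
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        printf("test interrupt received\n");
        return 0;
}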
9486
9487 /**
9487  * ipr_probe_ioa - Allocates memory and does first stage of initialization
9488  * @pdev:               PCI device struct
9489  * @dev_id:             PCI device id struct
9490  *
9491  * Return value:
9492  *      0 on success / non-zero on failure
9493  **/
9494 static int ipr_probe_ioa(struct pci_dev *pdev,
9495                          const struct pci_device_id *dev_id)
9496 {
9497         struct ipr_ioa_cfg *ioa_cfg;
9498         struct Scsi_Host *host;
9499         unsigned long ipr_regs_pci;
9500         void __iomem *ipr_regs;
9501         int rc = PCIBIOS_SUCCESSFUL;
9502         volatile u32 mask, uproc, interrupts;
9503         unsigned long lock_flags, driver_lock_flags;
9504
9505         ENTER;
9506
9507         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9508         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9509
9510         if (!host) {
9511                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9512                 rc = -ENOMEM;
9513                 goto out;
9514         }
9515
9516         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9517         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9518         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9519
9520         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9521
9522         if (!ioa_cfg->ipr_chip) {
9523                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9524                         dev_id->vendor, dev_id->device);
9525                 rc = -ENODEV;
9525                 goto out_scsi_host_put;
9526         }
9527
9528         /* set SIS 32 or SIS 64 */
9529         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9530         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9531         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9532         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9533
9534         if (ipr_transop_timeout)
9535                 ioa_cfg->transop_timeout = ipr_transop_timeout;
9536         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9537                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9538         else
9539                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9540
9541         ioa_cfg->revid = pdev->revision;
9542
9543         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9544
9545         ipr_regs_pci = pci_resource_start(pdev, 0);
9546
9547         rc = pci_request_regions(pdev, IPR_NAME);
9548         if (rc < 0) {
9549                 dev_err(&pdev->dev,
9550                         "Couldn't register memory range of registers\n");
9551                 goto out_scsi_host_put;
9552         }
9553
9554         rc = pci_enable_device(pdev);
9555
9556         if (rc || pci_channel_offline(pdev)) {
9557                 if (pci_channel_offline(pdev)) {
9558                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9559                         rc = pci_enable_device(pdev);
9560                 }
9561
9562                 if (rc) {
9563                         dev_err(&pdev->dev, "Cannot enable adapter\n");
9564                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9565                         goto out_release_regions;
9566                 }
9567         }
9568
9569         ipr_regs = pci_ioremap_bar(pdev, 0);
9570
9571         if (!ipr_regs) {
9572                 dev_err(&pdev->dev,
9573                         "Couldn't map memory range of registers\n");
9574                 rc = -ENOMEM;
9575                 goto out_disable;
9576         }
9577
9578         ioa_cfg->hdw_dma_regs = ipr_regs;
9579         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9580         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9581
9582         ipr_init_regs(ioa_cfg);
9583
9584         if (ioa_cfg->sis64) {
9585                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9586                 if (rc < 0) {
9587                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9588                         rc = dma_set_mask_and_coherent(&pdev->dev,
9589                                                        DMA_BIT_MASK(32));
9590                 }
9591         } else
9592                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9593
9594         if (rc < 0) {
9595                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
9596                 goto cleanup_nomem;
9597         }
9598
9599         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9600                                    ioa_cfg->chip_cfg->cache_line_size);
9601
9602         if (rc != PCIBIOS_SUCCESSFUL) {
9603                 dev_err(&pdev->dev, "Write of cache line size failed\n");
9604                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9605                 rc = -EIO;
9606                 goto cleanup_nomem;
9607         }
9608
9609         /* Issue MMIO read to ensure card is not in EEH */
9610         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9611         ipr_wait_for_pci_err_recovery(ioa_cfg);
9612
9613         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9614                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9615                         IPR_MAX_MSIX_VECTORS);
9616                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9617         }
9618
9619         if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9620                         ipr_enable_msix(ioa_cfg) == 0)
9621                 ioa_cfg->intr_flag = IPR_USE_MSIX;
9622         else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9623                         ipr_enable_msi(ioa_cfg) == 0)
9624                 ioa_cfg->intr_flag = IPR_USE_MSI;
9625         else {
9626                 ioa_cfg->intr_flag = IPR_USE_LSI;
9627                 ioa_cfg->nvectors = 1;
9628                 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9629         }
9630
9631         pci_set_master(pdev);
9632
9633         if (pci_channel_offline(pdev)) {
9634                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9635                 pci_set_master(pdev);
9636                 if (pci_channel_offline(pdev)) {
9637                         rc = -EIO;
9638                         goto out_msi_disable;
9639                 }
9640         }
9641
9642         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9643             ioa_cfg->intr_flag == IPR_USE_MSIX) {
9644                 rc = ipr_test_msi(ioa_cfg, pdev);
9645                 if (rc == -EOPNOTSUPP) {
9646                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9647                         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9648                                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9649                                 pci_disable_msi(pdev);
9650                          } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9651                                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9652                                 pci_disable_msix(pdev);
9653                         }
9654
9655                         ioa_cfg->intr_flag = IPR_USE_LSI;
9656                         ioa_cfg->nvectors = 1;
9657                 }
9658                 else if (rc)
9659                         goto out_msi_disable;
9660                 else {
9661                         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9662                                 dev_info(&pdev->dev,
9663                                         "Request for %d MSIs succeeded with starting IRQ: %d\n",
9664                                         ioa_cfg->nvectors, pdev->irq);
9665                         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9666                                 dev_info(&pdev->dev,
9667                                         "Request for %d MSIXs succeeded.\n",
9668                                         ioa_cfg->nvectors);
9669                 }
9670         }
9671
9672         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9673                                 (unsigned int)num_online_cpus(),
9674                                 (unsigned int)IPR_MAX_HRRQ_NUM);
9675
9676         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9677                 goto out_msi_disable;
9678
9679         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9680                 goto out_msi_disable;
9681
9682         rc = ipr_alloc_mem(ioa_cfg);
9683         if (rc < 0) {
9684                 dev_err(&pdev->dev,
9685                         "Couldn't allocate enough memory for device driver!\n");
9686                 goto out_msi_disable;
9687         }
9688
9689         /* Save away PCI config space for use following IOA reset */
9690         rc = pci_save_state(pdev);
9691
9692         if (rc != PCIBIOS_SUCCESSFUL) {
9693                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9694                 rc = -EIO;
9695                 goto cleanup_nolog;
9696         }
9697
9698         /*
9699          * If HRRQ updated interrupt is not masked, or reset alert is set,
9700          * the card is in an unknown state and needs a hard reset
9701          */
9702         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9703         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9704         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9705         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9706                 ioa_cfg->needs_hard_reset = 1;
9707         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9708                 ioa_cfg->needs_hard_reset = 1;
9709         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9710                 ioa_cfg->ioa_unit_checked = 1;
9711
9712         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9713         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9714         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9715
9716         if (ioa_cfg->intr_flag == IPR_USE_MSI
9717                         || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9718                 name_msi_vectors(ioa_cfg);
9719                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9720                         0,
9721                         ioa_cfg->vectors_info[0].desc,
9722                         &ioa_cfg->hrrq[0]);
9723                 if (!rc)
9724                         rc = ipr_request_other_msi_irqs(ioa_cfg);
9725         } else {
9726                 rc = request_irq(pdev->irq, ipr_isr,
9727                          IRQF_SHARED,
9728                          IPR_NAME, &ioa_cfg->hrrq[0]);
9729         }
9730         if (rc) {
9731                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9732                         pdev->irq, rc);
9733                 goto cleanup_nolog;
9734         }
9735
9736         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9737             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9738                 ioa_cfg->needs_warm_reset = 1;
9739                 ioa_cfg->reset = ipr_reset_slot_reset;
9740         } else
9741                 ioa_cfg->reset = ipr_reset_start_bist;
9742
9743         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9744         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9745         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9746
9747         LEAVE;
9748 out:
9749         return rc;
9750
9751 cleanup_nolog:
9752         ipr_free_mem(ioa_cfg);
9753 out_msi_disable:
9754         ipr_wait_for_pci_err_recovery(ioa_cfg);
9755         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9756                 pci_disable_msi(pdev);
9757         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9758                 pci_disable_msix(pdev);
9759 cleanup_nomem:
9760         iounmap(ipr_regs);
9761 out_disable:
9762         pci_disable_device(pdev);
9763 out_release_regions:
9764         pci_release_regions(pdev);
9765 out_scsi_host_put:
9766         scsi_host_put(host);
9767         goto out;
9768 }
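
/*
 * Editor's sketch (standalone, mocked): the DMA-mask logic in the probe
 * routine above tries a 64-bit mask on SIS-64 adapters and falls back to
 * 32 bits if the platform refuses.  Here mock_set_dma_mask() pretends the
 * platform only supports 32-bit addressing.
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MASK64  0xffffffffffffffffULL   /* like DMA_BIT_MASK(64) */
#define MASK32  0x00000000ffffffffULL   /* like DMA_BIT_MASK(32) */

static int mock_set_dma_mask(uint64_t mask)
{
        return mask > MASK32 ? -EIO : 0;        /* platform is 32-bit only */
}

int main(void)
{
        int rc, sis64 = 1;

        if (sis64) {
                rc = mock_set_dma_mask(MASK64);         /* try 64-bit first */
                if (rc < 0)
                        rc = mock_set_dma_mask(MASK32); /* then fall back */
        } else {
                rc = mock_set_dma_mask(MASK32);
        }
        printf("dma mask %s\n", rc ? "failed" : "set");
        return 0;
}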
9769
9770 /**
9771  * ipr_scan_vsets - Scans for VSET devices
9772  * @ioa_cfg:    ioa config struct
9773  *
9774  * Description: Since the VSET resources do not follow SAM in that we can have
9775  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9776  *
9777  * Return value:
9778  *      none
9779  **/
9780 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9781 {
9782         int target, lun;
9783
9784         for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9785                 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9786                         scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9787 }
9788
9789 /**
9790  * ipr_initiate_ioa_bringdown - Bring down an adapter
9791  * @ioa_cfg:            ioa config struct
9792  * @shutdown_type:      shutdown type
9793  *
9794  * Description: This function will initiate bringing down the adapter.
9795  * This consists of issuing an IOA shutdown to the adapter
9796  * to flush the cache, and running BIST.
9797  * If the caller needs to wait on the completion of the reset,
9798  * the caller must sleep on the reset_wait_q.
9799  *
9800  * Return value:
9801  *      none
9802  **/
9803 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9804                                        enum ipr_shutdown_type shutdown_type)
9805 {
9806         ENTER;
9807         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9808                 ioa_cfg->sdt_state = ABORT_DUMP;
9809         ioa_cfg->reset_retries = 0;
9810         ioa_cfg->in_ioa_bringdown = 1;
9811         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9812         LEAVE;
9813 }

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
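	/*
	 * The barrier below is intended to make the removing_ioa stores
	 * above visible to the I/O submission paths that test the flag,
	 * before the bringdown is initiated.
	 */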
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id entry that matched
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
		}
	}

	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It issues
 * a shutdown command to the adapter to flush its write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static const struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
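
/*
 * The final field of each ipr_pci_table entry is driver_data, a bitmask
 * of per-device quirk flags.  ipr_probe_ioa() consults it through the
 * dev_id it is handed; a sketch of the idiom (condensed, not a verbatim
 * excerpt):
 *
 *	if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
 *		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
 *
 * IPR_USE_PCI_WARM_RESET is tested the same way when choosing
 * ipr_reset_slot_reset at the end of ipr_probe_ioa() above.
 */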

static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	system event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:	unused
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE if the event is not a shutdown event
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	int rc;

	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	rc = pci_register_driver(&ipr_driver);
	if (rc) {
		/* Don't leave the reboot notifier registered if
		 * driver registration failed.
		 */
		unregister_reboot_notifier(&ipr_notifier);
	}
	return rc;
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);