ipr: add support for async scanning to speed up boot
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

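/*
 * Illustrative usage (not part of the driver itself): the module
 * parameters above are supplied at load time, for example:
 *
 *	modprobe ipr max_speed=2 log_level=2 number_of_msix=4
 *
 * The values shown are examples only; max_speed indexes the
 * ipr_max_bus_speeds[] table above (0 = 80 MB/s, 1 = U160, 2 = U320).
 */
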
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "No ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:       trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        trace_entry = &ioa_cfg->trace[atomic_add_return
                        (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

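/*
 * A minimal sketch of the trace indexing used above, with the same ring
 * size: atomic_add_return() hands each caller a unique, monotonically
 * increasing count, and the modulo wraps it into the fixed-size ring, so
 * concurrent callers never claim the same slot until the ring wraps.
 *
 *	slot = atomic_add_return(1, &ioa_cfg->trace_index) %
 *			IPR_NUM_TRACE_ENTRIES;
 *	trace_entry = &ioa_cfg->trace[slot];
 */
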
/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue to allocate the command block from
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

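/*
 * Note on the sequence above (an editorial reading, not a hardware spec):
 * the trailing readl() of the sense interrupt register is the standard
 * read-back idiom that forces posted MMIO writes to reach the adapter
 * before the function returns. In outline:
 *
 *	writeq(~0, regs.set_interrupt_mask_reg);    // mask all interrupts
 *	writel(clr_ints, regs.clr_interrupt_reg32); // clear pending ones
 *	int_reg = readl(regs.sense_interrupt_reg);  // read-back flushes writes
 */
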
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * For sis64, it calculates the required IOARCB size and ORs the appropriate
 * size bits into the IOARCB address.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

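/*
 * Sketch of the sis64 encoding used above, as implied by the code: the
 * low-order bits of the IOARCB bus address tell the adapter how large an
 * IOARCB to fetch, so the address and the size hint travel in one write.
 *
 *	send_dma_addr |= 0x1;		// 256 byte IOARCB (the default)
 *	if (dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
 *		send_dma_addr |= 0x4;	// larger IOADL: 512 byte IOARCB
 *	writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
 */
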
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 * @done:               done function
 * @timeout_func:       timeout function
 * @timeout:            timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

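/*
 * ipr_do_req() is the common path for driver-initiated ops. For example,
 * ipr_send_blocking_cmd() below layers a sleeping wait on top of it:
 *
 *	init_completion(&ipr_cmd->completion);
 *	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
 *	wait_for_completion(&ipr_cmd->completion);
 *
 * Note that timeout_func runs from timer context, not from the caller's
 * context.
 */
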
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      none
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

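/*
 * For example, ipr_send_hcam() below uses this helper to describe the
 * single host RCB buffer the adapter will DMA into:
 *
 *	ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
 *		       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
 */
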
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * The caller must hold the host lock; it is dropped while this function
 * sleeps and reacquired before returning.
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

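/**
 * ipr_get_hrrq_index - Pick an HRR queue for a new command
 * @ioa_cfg:    ioa config struct
 *
 * Round-robins across the HRR queues. Queue 0 (IPR_INIT_HRRQ) is used
 * elsewhere in the driver for initialization and internal commands, so
 * with more than one queue the result is always in the range
 * 1 .. hrrq_num - 1.
 *
 * Return value:
 *      hrrq index
 **/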
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        if (ioa_cfg->hrrq_num == 1)
                return 0;
        else
                return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:               HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

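/*
 * The HCAM CDB built above, byte by byte (as set in the code):
 *
 *	cdb[0] = IPR_HOST_CONTROLLED_ASYNC;		// opcode
 *	cdb[1] = type;					// config change or error
 *	cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;	// buffer length, MSB
 *	cdb[8] = sizeof(hostrcb->hcam) & 0xff;		// buffer length, LSB
 */
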
/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
        int i;
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
        for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
                p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

        return buffer;
}

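/*
 * Example (illustrative values): a res_path of { 0x00, 0x0A, 0x01, 0xff }
 * formats as "00-0A-01"; the 0xff entry terminates the path.
 */
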
/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:    ioa config struct
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
                                 u8 *res_path, char *buffer, int len)
{
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
        /* pass only the space remaining after the "host_no/" prefix */
        __ipr_format_res_path(res_path, p, len - (p - buffer));
        return buffer;
}

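/*
 * Example (illustrative values): with host_no 2 and the res_path from the
 * previous example, this wrapper yields "2/00-0A-01", the format used in
 * messages such as the "Resource path: ..." sdev_printk() below.
 */
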
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
                                 struct ipr_config_table_entry_wrapper *cfgtew)
{
        char buffer[IPR_MAX_RES_PATH_LENGTH];
        unsigned int proto;
        int new_path = 0;

        if (res->ioa_cfg->sis64) {
                res->flags = cfgtew->u.cfgte64->flags;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
                        sizeof(struct ipr_std_inq_data));

                res->qmodel = IPR_QUEUEING_MODEL64(res);
                proto = cfgtew->u.cfgte64->proto;
                res->res_handle = cfgtew->u.cfgte64->res_handle;
                res->dev_id = cfgtew->u.cfgte64->dev_id;

                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));

                if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
                                        sizeof(res->res_path))) {
                        memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                                sizeof(res->res_path));
                        new_path = 1;
                }

                if (res->sdev && new_path)
                        sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
                                    ipr_format_res_path(res->ioa_cfg,
                                        res->res_path, buffer, sizeof(buffer)));
        } else {
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
                        sizeof(struct ipr_std_inq_data));

                res->qmodel = IPR_QUEUEING_MODEL(res);
                proto = cfgtew->u.cfgte->proto;
                res->res_handle = cfgtew->u.cfgte->res_handle;
        }

        ipr_update_ata_class(res, proto);
}

1344 /**
1345  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1346  *                        for the resource.
1347  * @res:        resource entry struct
1349  *
1350  * Return value:
1351  *      none
1352  **/
1353 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1354 {
1355         struct ipr_resource_entry *gscsi_res = NULL;
1356         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1357
1358         if (!ioa_cfg->sis64)
1359                 return;
1360
1361         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1362                 clear_bit(res->target, ioa_cfg->array_ids);
1363         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1364                 clear_bit(res->target, ioa_cfg->vset_ids);
1365         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1366                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1367                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1368                                 return;
1369                 clear_bit(res->target, ioa_cfg->target_ids);
1370
1371         } else if (res->bus == 0)
1372                 clear_bit(res->target, ioa_cfg->target_ids);
1373 }
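
The bookkeeping above relies on per-class ID bitmaps (target_ids, array_ids, vset_ids), and a generic SCSI target bit may only be released once no other resource entry shares its dev_id. A minimal userspace sketch of the allocate/release pattern follows; alloc_target() and release_target() are illustrative names, not driver API:

#include <stdio.h>

#define MAX_TARGETS 64
#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Illustrative only: a bitmap of in-use target numbers akin to
 * ioa_cfg->target_ids. */
static unsigned long ids[(MAX_TARGETS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static int alloc_target(void)
{
	int nr;

	for (nr = 0; nr < MAX_TARGETS; nr++) {
		unsigned long mask = 1UL << (nr % BITS_PER_LONG);

		if (!(ids[nr / BITS_PER_LONG] & mask)) {
			ids[nr / BITS_PER_LONG] |= mask;	/* like set_bit() */
			return nr;
		}
	}
	return -1;	/* no free target number */
}

static void release_target(int nr)
{
	ids[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));	/* like clear_bit() */
}

int main(void)
{
	int a = alloc_target();		/* 0 */
	int b = alloc_target();		/* 1 */

	release_target(a);
	printf("freed %d while %d stays busy; next alloc: %d\n",
	       a, b, alloc_target());	/* reuses 0 */
	return 0;
}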
1374
1375 /**
1376  * ipr_handle_config_change - Handle a config change from the adapter
1377  * @ioa_cfg:    ioa config struct
1378  * @hostrcb:    hostrcb
1379  *
1380  * Return value:
1381  *      none
1382  **/
1383 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1384                                      struct ipr_hostrcb *hostrcb)
1385 {
1386         struct ipr_resource_entry *res = NULL;
1387         struct ipr_config_table_entry_wrapper cfgtew;
1388         __be32 cc_res_handle;
1389
1390         u32 is_ndn = 1;
1391
1392         if (ioa_cfg->sis64) {
1393                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1394                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1395         } else {
1396                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1397                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1398         }
1399
1400         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1401                 if (res->res_handle == cc_res_handle) {
1402                         is_ndn = 0;
1403                         break;
1404                 }
1405         }
1406
1407         if (is_ndn) {
1408                 if (list_empty(&ioa_cfg->free_res_q)) {
1409                         ipr_send_hcam(ioa_cfg,
1410                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1411                                       hostrcb);
1412                         return;
1413                 }
1414
1415                 res = list_entry(ioa_cfg->free_res_q.next,
1416                                  struct ipr_resource_entry, queue);
1417
1418                 list_del(&res->queue);
1419                 ipr_init_res_entry(res, &cfgtew);
1420                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1421         }
1422
1423         ipr_update_res_entry(res, &cfgtew);
1424
1425         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1426                 if (res->sdev) {
1427                         res->del_from_ml = 1;
1428                         res->res_handle = IPR_INVALID_RES_HANDLE;
1429                         schedule_work(&ioa_cfg->work_q);
1430                 } else {
1431                         ipr_clear_res_target(res);
1432                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1433                 }
1434         } else if (!res->sdev || res->del_from_ml) {
1435                 res->add_to_ml = 1;
1436                 schedule_work(&ioa_cfg->work_q);
1437         }
1438
1439         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1440 }
1441
1442 /**
1443  * ipr_process_ccn - Op done function for a CCN.
1444  * @ipr_cmd:    ipr command struct
1445  *
1446  * This function is the op done function for a configuration
1447  * change notification HCAM (host controlled async message) from the adapter.
1448  *
1449  * Return value:
1450  *      none
1451  **/
1452 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1453 {
1454         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1455         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1456         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1457
1458         list_del(&hostrcb->queue);
1459         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1460
1461         if (ioasc) {
1462                 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1463                         dev_err(&ioa_cfg->pdev->dev,
1464                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1465
1466                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1467         } else {
1468                 ipr_handle_config_change(ioa_cfg, hostrcb);
1469         }
1470 }
1471
1472 /**
1473  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1474  * @i:          index into buffer
1475  * @buf:                string to modify
1476  *
1477  * This function will strip all trailing whitespace, pad the end
1478  * of the string with a single space, and NULL terminate the string.
1479  *
1480  * Return value:
1481  *      new length of string
1482  **/
1483 static int strip_and_pad_whitespace(int i, char *buf)
1484 {
1485         while (i && buf[i] == ' ')
1486                 i--;
1487         buf[i+1] = ' ';
1488         buf[i+2] = '\0';
1489         return i + 2;
1490 }
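
A standalone run shows the intended effect on a fixed-width, space-padded VPD field (sample input hypothetical):

#include <stdio.h>

/* Same logic as strip_and_pad_whitespace() above, compiled standalone. */
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}

int main(void)
{
	char buf[16] = "IBM     ";	/* 8-byte vendor field, space padded */
	int next = strip_and_pad_whitespace(7, buf);

	/* Prints [IBM ] next=4: one space kept as a separator, and 'next'
	 * is where the caller memcpy()s the following field. */
	printf("[%s] next=%d\n", buf, next);
	return 0;
}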
1491
1492 /**
1493  * ipr_log_vpd_compact - Log the passed VPD compactly.
1494  * @prefix:             string to print at start of printk
1495  * @hostrcb:    hostrcb pointer
1496  * @vpd:                vendor/product id/sn struct
1497  *
1498  * Return value:
1499  *      none
1500  **/
1501 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1502                                 struct ipr_vpd *vpd)
1503 {
1504         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1505         int i = 0;
1506
1507         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1508         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1509
1510         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1511         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1512
1513         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1514         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1515
1516         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1517 }
1518
1519 /**
1520  * ipr_log_vpd - Log the passed VPD to the error log.
1521  * @vpd:                vendor/product id/sn struct
1522  *
1523  * Return value:
1524  *      none
1525  **/
1526 static void ipr_log_vpd(struct ipr_vpd *vpd)
1527 {
1528         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1529                     + IPR_SERIAL_NUM_LEN];
1530
1531         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1532         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1533                IPR_PROD_ID_LEN);
1534         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1535         ipr_err("Vendor/Product ID: %s\n", buffer);
1536
1537         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1538         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1539         ipr_err("    Serial Number: %s\n", buffer);
1540 }
1541
1542 /**
1543  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1544  * @prefix:             string to print at start of printk
1545  * @hostrcb:    hostrcb pointer
1546  * @vpd:                vendor/product id/sn/wwn struct
1547  *
1548  * Return value:
1549  *      none
1550  **/
1551 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1552                                     struct ipr_ext_vpd *vpd)
1553 {
1554         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1555         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1556                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1557 }
1558
1559 /**
1560  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1561  * @vpd:                vendor/product id/sn/wwn struct
1562  *
1563  * Return value:
1564  *      none
1565  **/
1566 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1567 {
1568         ipr_log_vpd(&vpd->vpd);
1569         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1570                 be32_to_cpu(vpd->wwid[1]));
1571 }
1572
1573 /**
1574  * ipr_log_enhanced_cache_error - Log a cache error.
1575  * @ioa_cfg:    ioa config struct
1576  * @hostrcb:    hostrcb struct
1577  *
1578  * Return value:
1579  *      none
1580  **/
1581 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1582                                          struct ipr_hostrcb *hostrcb)
1583 {
1584         struct ipr_hostrcb_type_12_error *error;
1585
1586         if (ioa_cfg->sis64)
1587                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1588         else
1589                 error = &hostrcb->hcam.u.error.u.type_12_error;
1590
1591         ipr_err("-----Current Configuration-----\n");
1592         ipr_err("Cache Directory Card Information:\n");
1593         ipr_log_ext_vpd(&error->ioa_vpd);
1594         ipr_err("Adapter Card Information:\n");
1595         ipr_log_ext_vpd(&error->cfc_vpd);
1596
1597         ipr_err("-----Expected Configuration-----\n");
1598         ipr_err("Cache Directory Card Information:\n");
1599         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1600         ipr_err("Adapter Card Information:\n");
1601         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1602
1603         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1604                      be32_to_cpu(error->ioa_data[0]),
1605                      be32_to_cpu(error->ioa_data[1]),
1606                      be32_to_cpu(error->ioa_data[2]));
1607 }
1608
1609 /**
1610  * ipr_log_cache_error - Log a cache error.
1611  * @ioa_cfg:    ioa config struct
1612  * @hostrcb:    hostrcb struct
1613  *
1614  * Return value:
1615  *      none
1616  **/
1617 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1618                                 struct ipr_hostrcb *hostrcb)
1619 {
1620         struct ipr_hostrcb_type_02_error *error =
1621                 &hostrcb->hcam.u.error.u.type_02_error;
1622
1623         ipr_err("-----Current Configuration-----\n");
1624         ipr_err("Cache Directory Card Information:\n");
1625         ipr_log_vpd(&error->ioa_vpd);
1626         ipr_err("Adapter Card Information:\n");
1627         ipr_log_vpd(&error->cfc_vpd);
1628
1629         ipr_err("-----Expected Configuration-----\n");
1630         ipr_err("Cache Directory Card Information:\n");
1631         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1632         ipr_err("Adapter Card Information:\n");
1633         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1634
1635         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1636                      be32_to_cpu(error->ioa_data[0]),
1637                      be32_to_cpu(error->ioa_data[1]),
1638                      be32_to_cpu(error->ioa_data[2]));
1639 }
1640
1641 /**
1642  * ipr_log_enhanced_config_error - Log a configuration error.
1643  * @ioa_cfg:    ioa config struct
1644  * @hostrcb:    hostrcb struct
1645  *
1646  * Return value:
1647  *      none
1648  **/
1649 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1650                                           struct ipr_hostrcb *hostrcb)
1651 {
1652         int errors_logged, i;
1653         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1654         struct ipr_hostrcb_type_13_error *error;
1655
1656         error = &hostrcb->hcam.u.error.u.type_13_error;
1657         errors_logged = be32_to_cpu(error->errors_logged);
1658
1659         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1660                 be32_to_cpu(error->errors_detected), errors_logged);
1661
1662         dev_entry = error->dev;
1663
1664         for (i = 0; i < errors_logged; i++, dev_entry++) {
1665                 ipr_err_separator;
1666
1667                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1668                 ipr_log_ext_vpd(&dev_entry->vpd);
1669
1670                 ipr_err("-----New Device Information-----\n");
1671                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1672
1673                 ipr_err("Cache Directory Card Information:\n");
1674                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1675
1676                 ipr_err("Adapter Card Information:\n");
1677                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1678         }
1679 }
1680
1681 /**
1682  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1683  * @ioa_cfg:    ioa config struct
1684  * @hostrcb:    hostrcb struct
1685  *
1686  * Return value:
1687  *      none
1688  **/
1689 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1690                                        struct ipr_hostrcb *hostrcb)
1691 {
1692         int errors_logged, i;
1693         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1694         struct ipr_hostrcb_type_23_error *error;
1695         char buffer[IPR_MAX_RES_PATH_LENGTH];
1696
1697         error = &hostrcb->hcam.u.error64.u.type_23_error;
1698         errors_logged = be32_to_cpu(error->errors_logged);
1699
1700         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1701                 be32_to_cpu(error->errors_detected), errors_logged);
1702
1703         dev_entry = error->dev;
1704
1705         for (i = 0; i < errors_logged; i++, dev_entry++) {
1706                 ipr_err_separator;
1707
1708                 ipr_err("Device %d : %s", i + 1,
1709                         __ipr_format_res_path(dev_entry->res_path,
1710                                               buffer, sizeof(buffer)));
1711                 ipr_log_ext_vpd(&dev_entry->vpd);
1712
1713                 ipr_err("-----New Device Information-----\n");
1714                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1715
1716                 ipr_err("Cache Directory Card Information:\n");
1717                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1718
1719                 ipr_err("Adapter Card Information:\n");
1720                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1721         }
1722 }
1723
1724 /**
1725  * ipr_log_config_error - Log a configuration error.
1726  * @ioa_cfg:    ioa config struct
1727  * @hostrcb:    hostrcb struct
1728  *
1729  * Return value:
1730  *      none
1731  **/
1732 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1733                                  struct ipr_hostrcb *hostrcb)
1734 {
1735         int errors_logged, i;
1736         struct ipr_hostrcb_device_data_entry *dev_entry;
1737         struct ipr_hostrcb_type_03_error *error;
1738
1739         error = &hostrcb->hcam.u.error.u.type_03_error;
1740         errors_logged = be32_to_cpu(error->errors_logged);
1741
1742         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1743                 be32_to_cpu(error->errors_detected), errors_logged);
1744
1745         dev_entry = error->dev;
1746
1747         for (i = 0; i < errors_logged; i++, dev_entry++) {
1748                 ipr_err_separator;
1749
1750                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1751                 ipr_log_vpd(&dev_entry->vpd);
1752
1753                 ipr_err("-----New Device Information-----\n");
1754                 ipr_log_vpd(&dev_entry->new_vpd);
1755
1756                 ipr_err("Cache Directory Card Information:\n");
1757                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1758
1759                 ipr_err("Adapter Card Information:\n");
1760                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1761
1762                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1763                         be32_to_cpu(dev_entry->ioa_data[0]),
1764                         be32_to_cpu(dev_entry->ioa_data[1]),
1765                         be32_to_cpu(dev_entry->ioa_data[2]),
1766                         be32_to_cpu(dev_entry->ioa_data[3]),
1767                         be32_to_cpu(dev_entry->ioa_data[4]));
1768         }
1769 }
1770
1771 /**
1772  * ipr_log_enhanced_array_error - Log an array configuration error.
1773  * @ioa_cfg:    ioa config struct
1774  * @hostrcb:    hostrcb struct
1775  *
1776  * Return value:
1777  *      none
1778  **/
1779 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1780                                          struct ipr_hostrcb *hostrcb)
1781 {
1782         int i, num_entries;
1783         struct ipr_hostrcb_type_14_error *error;
1784         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1785         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1786
1787         error = &hostrcb->hcam.u.error.u.type_14_error;
1788
1789         ipr_err_separator;
1790
1791         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1792                 error->protection_level,
1793                 ioa_cfg->host->host_no,
1794                 error->last_func_vset_res_addr.bus,
1795                 error->last_func_vset_res_addr.target,
1796                 error->last_func_vset_res_addr.lun);
1797
1798         ipr_err_separator;
1799
1800         array_entry = error->array_member;
1801         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1802                             ARRAY_SIZE(error->array_member));
1803
1804         for (i = 0; i < num_entries; i++, array_entry++) {
1805                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1806                         continue;
1807
1808                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1809                         ipr_err("Exposed Array Member %d:\n", i);
1810                 else
1811                         ipr_err("Array Member %d:\n", i);
1812
1813                 ipr_log_ext_vpd(&array_entry->vpd);
1814                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1815                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1816                                  "Expected Location");
1817
1818                 ipr_err_separator;
1819         }
1820 }
1821
1822 /**
1823  * ipr_log_array_error - Log an array configuration error.
1824  * @ioa_cfg:    ioa config struct
1825  * @hostrcb:    hostrcb struct
1826  *
1827  * Return value:
1828  *      none
1829  **/
1830 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1831                                 struct ipr_hostrcb *hostrcb)
1832 {
1833         int i;
1834         struct ipr_hostrcb_type_04_error *error;
1835         struct ipr_hostrcb_array_data_entry *array_entry;
1836         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1837
1838         error = &hostrcb->hcam.u.error.u.type_04_error;
1839
1840         ipr_err_separator;
1841
1842         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1843                 error->protection_level,
1844                 ioa_cfg->host->host_no,
1845                 error->last_func_vset_res_addr.bus,
1846                 error->last_func_vset_res_addr.target,
1847                 error->last_func_vset_res_addr.lun);
1848
1849         ipr_err_separator;
1850
1851         array_entry = error->array_member;
1852
1853         for (i = 0; i < 18; i++) {
1854                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1855                         continue;
1856
1857                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1858                         ipr_err("Exposed Array Member %d:\n", i);
1859                 else
1860                         ipr_err("Array Member %d:\n", i);
1861
1862                 ipr_log_vpd(&array_entry->vpd);
1863
1864                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1865                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1866                                  "Expected Location");
1867
1868                 ipr_err_separator;
1869
1870                 if (i == 9)
1871                         array_entry = error->array_member2;
1872                 else
1873                         array_entry++;
1874         }
1875 }
1876
1877 /**
1878  * ipr_log_hex_data - Log additional hex IOA error data.
1879  * @ioa_cfg:    ioa config struct
1880  * @data:               IOA error data
1881  * @len:                data length
1882  *
1883  * Return value:
1884  *      none
1885  **/
1886 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1887 {
1888         int i;
1889
1890         if (len == 0)
1891                 return;
1892
1893         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1894                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1895
1896         for (i = 0; i < len / 4; i += 4) {
1897                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1898                         be32_to_cpu(data[i]),
1899                         be32_to_cpu(data[i+1]),
1900                         be32_to_cpu(data[i+2]),
1901                         be32_to_cpu(data[i+3]));
1902         }
1903 }
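
The indexing above is easy to misread: len is in bytes, i walks 32-bit word indices four at a time, and the left-hand column is the byte offset i*4. A hosted-C sketch of the same layout (the be32_to_cpu() swaps are omitted for brevity):

#include <stdio.h>

/* Same dump layout as ipr_log_hex_data(): four 32-bit words per line,
 * byte offset in the first column. Assumes 32-bit unsigned int. */
static void hex_dump(const unsigned *data, int len)
{
	int i;

	for (i = 0; i < len / 4; i += 4)
		printf("%08X: %08X %08X %08X %08X\n", i * 4,
		       data[i], data[i + 1], data[i + 2], data[i + 3]);
}

int main(void)
{
	unsigned buf[8] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444,
			    0x55555555, 0x66666666, 0x77777777, 0x88888888 };

	hex_dump(buf, sizeof(buf));	/* two lines, offsets 0x00 and 0x10 */
	return 0;
}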
1904
1905 /**
1906  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1907  * @ioa_cfg:    ioa config struct
1908  * @hostrcb:    hostrcb struct
1909  *
1910  * Return value:
1911  *      none
1912  **/
1913 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1914                                             struct ipr_hostrcb *hostrcb)
1915 {
1916         struct ipr_hostrcb_type_17_error *error;
1917
1918         if (ioa_cfg->sis64)
1919                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1920         else
1921                 error = &hostrcb->hcam.u.error.u.type_17_error;
1922
1923         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1924         strim(error->failure_reason);
1925
1926         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1927                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1928         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1929         ipr_log_hex_data(ioa_cfg, error->data,
1930                          be32_to_cpu(hostrcb->hcam.length) -
1931                          (offsetof(struct ipr_hostrcb_error, u) +
1932                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1933 }
1934
1935 /**
1936  * ipr_log_dual_ioa_error - Log a dual adapter error.
1937  * @ioa_cfg:    ioa config struct
1938  * @hostrcb:    hostrcb struct
1939  *
1940  * Return value:
1941  *      none
1942  **/
1943 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1944                                    struct ipr_hostrcb *hostrcb)
1945 {
1946         struct ipr_hostrcb_type_07_error *error;
1947
1948         error = &hostrcb->hcam.u.error.u.type_07_error;
1949         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1950         strim(error->failure_reason);
1951
1952         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1953                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1954         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1955         ipr_log_hex_data(ioa_cfg, error->data,
1956                          be32_to_cpu(hostrcb->hcam.length) -
1957                          (offsetof(struct ipr_hostrcb_error, u) +
1958                           offsetof(struct ipr_hostrcb_type_07_error, data)));
1959 }
1960
1961 static const struct {
1962         u8 active;
1963         char *desc;
1964 } path_active_desc[] = {
1965         { IPR_PATH_NO_INFO, "Path" },
1966         { IPR_PATH_ACTIVE, "Active path" },
1967         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1968 };
1969
1970 static const struct {
1971         u8 state;
1972         char *desc;
1973 } path_state_desc[] = {
1974         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1975         { IPR_PATH_HEALTHY, "is healthy" },
1976         { IPR_PATH_DEGRADED, "is degraded" },
1977         { IPR_PATH_FAILED, "is failed" }
1978 };
1979
1980 /**
1981  * ipr_log_fabric_path - Log a fabric path error
1982  * @hostrcb:    hostrcb struct
1983  * @fabric:             fabric descriptor
1984  *
1985  * Return value:
1986  *      none
1987  **/
1988 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1989                                 struct ipr_hostrcb_fabric_desc *fabric)
1990 {
1991         int i, j;
1992         u8 path_state = fabric->path_state;
1993         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1994         u8 state = path_state & IPR_PATH_STATE_MASK;
1995
1996         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1997                 if (path_active_desc[i].active != active)
1998                         continue;
1999
2000                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2001                         if (path_state_desc[j].state != state)
2002                                 continue;
2003
2004                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2005                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2006                                              path_active_desc[i].desc, path_state_desc[j].desc,
2007                                              fabric->ioa_port);
2008                         } else if (fabric->cascaded_expander == 0xff) {
2009                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2010                                              path_active_desc[i].desc, path_state_desc[j].desc,
2011                                              fabric->ioa_port, fabric->phy);
2012                         } else if (fabric->phy == 0xff) {
2013                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2014                                              path_active_desc[i].desc, path_state_desc[j].desc,
2015                                              fabric->ioa_port, fabric->cascaded_expander);
2016                         } else {
2017                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2018                                              path_active_desc[i].desc, path_state_desc[j].desc,
2019                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2020                         }
2021                         return;
2022                 }
2023         }
2024
2025         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2026                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2027 }
2028
2029 /**
2030  * ipr_log64_fabric_path - Log a fabric path error
2031  * @hostrcb:    hostrcb struct
2032  * @fabric:             fabric descriptor
2033  *
2034  * Return value:
2035  *      none
2036  **/
2037 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2038                                   struct ipr_hostrcb64_fabric_desc *fabric)
2039 {
2040         int i, j;
2041         u8 path_state = fabric->path_state;
2042         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2043         u8 state = path_state & IPR_PATH_STATE_MASK;
2044         char buffer[IPR_MAX_RES_PATH_LENGTH];
2045
2046         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2047                 if (path_active_desc[i].active != active)
2048                         continue;
2049
2050                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2051                         if (path_state_desc[j].state != state)
2052                                 continue;
2053
2054                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2055                                      path_active_desc[i].desc, path_state_desc[j].desc,
2056                                      ipr_format_res_path(hostrcb->ioa_cfg,
2057                                                 fabric->res_path,
2058                                                 buffer, sizeof(buffer)));
2059                         return;
2060                 }
2061         }
2062
2063         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2064                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2065                                     buffer, sizeof(buffer)));
2066 }
2067
2068 static const struct {
2069         u8 type;
2070         char *desc;
2071 } path_type_desc[] = {
2072         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2073         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2074         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2075         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2076 };
2077
2078 static const struct {
2079         u8 status;
2080         char *desc;
2081 } path_status_desc[] = {
2082         { IPR_PATH_CFG_NO_PROB, "Functional" },
2083         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2084         { IPR_PATH_CFG_FAILED, "Failed" },
2085         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2086         { IPR_PATH_NOT_DETECTED, "Missing" },
2087         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2088 };
2089
2090 static const char *link_rate[] = {
2091         "unknown",
2092         "disabled",
2093         "phy reset problem",
2094         "spinup hold",
2095         "port selector",
2096         "unknown",
2097         "unknown",
2098         "unknown",
2099         "1.5Gbps",
2100         "3.0Gbps",
2101         "unknown",
2102         "unknown",
2103         "unknown",
2104         "unknown",
2105         "unknown",
2106         "unknown"
2107 };
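
The phy's link_rate field selects one of these sixteen strings via its low nibble; the 0x0f mask in this sketch stands in for IPR_PHY_LINK_RATE_MASK (assumed value, defined in ipr.h), and the raw field value is hypothetical:

#include <stdio.h>

static const char *rate_name[16] = {
	"unknown", "disabled", "phy reset problem", "spinup hold",
	"port selector", "unknown", "unknown", "unknown",
	"1.5Gbps", "3.0Gbps", "unknown", "unknown",
	"unknown", "unknown", "unknown", "unknown"
};

int main(void)
{
	unsigned char link_rate = 0x29;	/* hypothetical raw field value */

	/* Bits 3:0 index the table: 0x29 & 0x0f == 9, so "3.0Gbps". */
	printf("%s\n", rate_name[link_rate & 0x0f]);
	return 0;
}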
2108
2109 /**
2110  * ipr_log_path_elem - Log a fabric path element.
2111  * @hostrcb:    hostrcb struct
2112  * @cfg:                fabric path element struct
2113  *
2114  * Return value:
2115  *      none
2116  **/
2117 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2118                               struct ipr_hostrcb_config_element *cfg)
2119 {
2120         int i, j;
2121         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2122         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2123
2124         if (type == IPR_PATH_CFG_NOT_EXIST)
2125                 return;
2126
2127         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2128                 if (path_type_desc[i].type != type)
2129                         continue;
2130
2131                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2132                         if (path_status_desc[j].status != status)
2133                                 continue;
2134
2135                         if (type == IPR_PATH_CFG_IOA_PORT) {
2136                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2137                                              path_status_desc[j].desc, path_type_desc[i].desc,
2138                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2139                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2140                         } else {
2141                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2142                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2143                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2144                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2145                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2146                                 } else if (cfg->cascaded_expander == 0xff) {
2147                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2148                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2149                                                      path_type_desc[i].desc, cfg->phy,
2150                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2151                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2152                                 } else if (cfg->phy == 0xff) {
2153                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2154                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2155                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2156                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2157                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2158                                 } else {
2159                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2160                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2161                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2162                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2163                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2164                                 }
2165                         }
2166                         return;
2167                 }
2168         }
2169
2170         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2171                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2172                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2173                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2174 }
2175
2176 /**
2177  * ipr_log64_path_elem - Log a fabric path element.
2178  * @hostrcb:    hostrcb struct
2179  * @cfg:                fabric path element struct
2180  *
2181  * Return value:
2182  *      none
2183  **/
2184 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2185                                 struct ipr_hostrcb64_config_element *cfg)
2186 {
2187         int i, j;
2188         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2189         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2190         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2191         char buffer[IPR_MAX_RES_PATH_LENGTH];
2192
2193         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2194                 return;
2195
2196         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2197                 if (path_type_desc[i].type != type)
2198                         continue;
2199
2200                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2201                         if (path_status_desc[j].status != status)
2202                                 continue;
2203
2204                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2205                                      path_status_desc[j].desc, path_type_desc[i].desc,
2206                                      ipr_format_res_path(hostrcb->ioa_cfg,
2207                                         cfg->res_path, buffer, sizeof(buffer)),
2208                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2209                                         be32_to_cpu(cfg->wwid[0]),
2210                                         be32_to_cpu(cfg->wwid[1]));
2211                         return;
2212                 }
2213         }
2214         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2215                      "WWN=%08X%08X\n", cfg->type_status,
2216                      ipr_format_res_path(hostrcb->ioa_cfg,
2217                         cfg->res_path, buffer, sizeof(buffer)),
2218                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2220 }
2221
2222 /**
2223  * ipr_log_fabric_error - Log a fabric error.
2224  * @ioa_cfg:    ioa config struct
2225  * @hostrcb:    hostrcb struct
2226  *
2227  * Return value:
2228  *      none
2229  **/
2230 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2231                                  struct ipr_hostrcb *hostrcb)
2232 {
2233         struct ipr_hostrcb_type_20_error *error;
2234         struct ipr_hostrcb_fabric_desc *fabric;
2235         struct ipr_hostrcb_config_element *cfg;
2236         int i, add_len;
2237
2238         error = &hostrcb->hcam.u.error.u.type_20_error;
2239         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2240         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2241
2242         add_len = be32_to_cpu(hostrcb->hcam.length) -
2243                 (offsetof(struct ipr_hostrcb_error, u) +
2244                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2245
2246         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2247                 ipr_log_fabric_path(hostrcb, fabric);
2248                 for_each_fabric_cfg(fabric, cfg)
2249                         ipr_log_path_elem(hostrcb, cfg);
2250
2251                 add_len -= be16_to_cpu(fabric->length);
2252                 fabric = (struct ipr_hostrcb_fabric_desc *)
2253                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2254         }
2255
2256         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2257 }
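
The walk above depends only on each fabric descriptor carrying its own big-endian length field; whatever bytes remain after the last descriptor get hex dumped. A self-contained sketch of that pattern, with a made-up two-byte-length record layout:

#include <stdio.h>

typedef unsigned char u8;

/* Walks 'num_entries' variable-length records, each starting with a
 * big-endian 16-bit total length, then reports the leftover byte count
 * (the add_len that ipr_log_fabric_error() passes to the hex dump). */
static void walk(const u8 *buf, int num_entries, int add_len)
{
	const u8 *p = buf;
	int i;

	for (i = 0; i < num_entries; i++) {
		int len = (p[0] << 8) | p[1];	/* be16_to_cpu() by hand */

		printf("entry %d: %d bytes\n", i, len);
		add_len -= len;
		p += len;
	}
	printf("%d trailing bytes left over\n", add_len);
}

int main(void)
{
	/* Two entries (4 and 6 bytes long), then 2 trailing bytes. */
	u8 buf[] = { 0x00, 0x04, 0xaa, 0xbb,
		     0x00, 0x06, 0x01, 0x02, 0x03, 0x04,
		     0xde, 0xad };

	walk(buf, 2, sizeof(buf));
	return 0;
}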
2258
2259 /**
2260  * ipr_log_sis64_array_error - Log a sis64 array error.
2261  * @ioa_cfg:    ioa config struct
2262  * @hostrcb:    hostrcb struct
2263  *
2264  * Return value:
2265  *      none
2266  **/
2267 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2268                                       struct ipr_hostrcb *hostrcb)
2269 {
2270         int i, num_entries;
2271         struct ipr_hostrcb_type_24_error *error;
2272         struct ipr_hostrcb64_array_data_entry *array_entry;
2273         char buffer[IPR_MAX_RES_PATH_LENGTH];
2274         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2275
2276         error = &hostrcb->hcam.u.error64.u.type_24_error;
2277
2278         ipr_err_separator;
2279
2280         ipr_err("RAID %s Array Configuration: %s\n",
2281                 error->protection_level,
2282                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2283                         buffer, sizeof(buffer)));
2284
2285         ipr_err_separator;
2286
2287         array_entry = error->array_member;
2288         num_entries = min_t(u32, error->num_entries,
2289                             ARRAY_SIZE(error->array_member));
2290
2291         for (i = 0; i < num_entries; i++, array_entry++) {
2292
2293                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2294                         continue;
2295
2296                 if (error->exposed_mode_adn == i)
2297                         ipr_err("Exposed Array Member %d:\n", i);
2298                 else
2299                         ipr_err("Array Member %d:\n", i);
2300
2302                 ipr_log_ext_vpd(&array_entry->vpd);
2303                 ipr_err("Current Location: %s\n",
2304                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2305                                 buffer, sizeof(buffer)));
2306                 ipr_err("Expected Location: %s\n",
2307                          ipr_format_res_path(ioa_cfg,
2308                                 array_entry->expected_res_path,
2309                                 buffer, sizeof(buffer)));
2310
2311                 ipr_err_separator;
2312         }
2313 }
2314
2315 /**
2316  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2317  * @ioa_cfg:    ioa config struct
2318  * @hostrcb:    hostrcb struct
2319  *
2320  * Return value:
2321  *      none
2322  **/
2323 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2324                                        struct ipr_hostrcb *hostrcb)
2325 {
2326         struct ipr_hostrcb_type_30_error *error;
2327         struct ipr_hostrcb64_fabric_desc *fabric;
2328         struct ipr_hostrcb64_config_element *cfg;
2329         int i, add_len;
2330
2331         error = &hostrcb->hcam.u.error64.u.type_30_error;
2332
2333         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2334         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2335
2336         add_len = be32_to_cpu(hostrcb->hcam.length) -
2337                 (offsetof(struct ipr_hostrcb64_error, u) +
2338                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2339
2340         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2341                 ipr_log64_fabric_path(hostrcb, fabric);
2342                 for_each_fabric_cfg(fabric, cfg)
2343                         ipr_log64_path_elem(hostrcb, cfg);
2344
2345                 add_len -= be16_to_cpu(fabric->length);
2346                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2347                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2348         }
2349
2350         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2351 }
2352
2353 /**
2354  * ipr_log_generic_error - Log an adapter error.
2355  * @ioa_cfg:    ioa config struct
2356  * @hostrcb:    hostrcb struct
2357  *
2358  * Return value:
2359  *      none
2360  **/
2361 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2362                                   struct ipr_hostrcb *hostrcb)
2363 {
2364         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2365                          be32_to_cpu(hostrcb->hcam.length));
2366 }
2367
2368 /**
2369  * ipr_log_sis64_device_error - Log a sis64 device error.
2370  * @ioa_cfg:    ioa config struct
2371  * @hostrcb:    hostrcb struct
2372  *
2373  * Return value:
2374  *      none
2375  **/
2376 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2377                                          struct ipr_hostrcb *hostrcb)
2378 {
2379         struct ipr_hostrcb_type_21_error *error;
2380         char buffer[IPR_MAX_RES_PATH_LENGTH];
2381
2382         error = &hostrcb->hcam.u.error64.u.type_21_error;
2383
2384         ipr_err("-----Failing Device Information-----\n");
2385         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2386                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2387                  be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2388         ipr_err("Device Resource Path: %s\n",
2389                 __ipr_format_res_path(error->res_path,
2390                                       buffer, sizeof(buffer)));
2391         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2392         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2393         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2394         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2395         ipr_err("SCSI Sense Data:\n");
2396         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2397         ipr_err("SCSI Command Descriptor Block: \n");
2398         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2399
2400         ipr_err("Additional IOA Data:\n");
2401         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2402 }
2403
2404 /**
2405  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2406  * @ioasc:      IOASC
2407  *
2408  * This function will return the index into the ipr_error_table
2409  * for the specified IOASC. If the IOASC is not in the table,
2410  * 0 will be returned, which points to the entry used for unknown errors.
2411  *
2412  * Return value:
2413  *      index into the ipr_error_table
2414  **/
2415 static u32 ipr_get_error(u32 ioasc)
2416 {
2417         int i;
2418
2419         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2420                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2421                         return i;
2422
2423         return 0;
2424 }
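
A miniature of this lookup-with-fallback idiom, where entry 0 doubles as the unknown-error entry; the mask value and table contents below are stand-ins, not the real IPR_IOASC_IOASC_MASK or ipr_error_table:

#include <stdio.h>

#define IOASC_MASK 0xfffffe00u	/* stand-in mask: drops per-command detail bits */

static const struct {
	unsigned ioasc;
	const char *text;
} err_table[] = {
	{ 0x00000000, "Unknown error" },	/* index 0: fallback */
	{ 0x01080000, "Device sector reassign successful" },
	{ 0x02040400, "Disk device format in progress" },
};

static unsigned get_error(unsigned ioasc)
{
	unsigned i;

	for (i = 0; i < sizeof(err_table) / sizeof(err_table[0]); i++)
		if (err_table[i].ioasc == (ioasc & IOASC_MASK))
			return i;

	return 0;	/* not found: index of the unknown-error entry */
}

int main(void)
{
	printf("%s\n", err_table[get_error(0x02040401)].text);	/* masked hit */
	printf("%s\n", err_table[get_error(0xdeadbe00)].text);	/* fallback */
	return 0;
}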
2425
2426 /**
2427  * ipr_handle_log_data - Log an adapter error.
2428  * @ioa_cfg:    ioa config struct
2429  * @hostrcb:    hostrcb struct
2430  *
2431  * This function logs an adapter error to the system.
2432  *
2433  * Return value:
2434  *      none
2435  **/
2436 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2437                                 struct ipr_hostrcb *hostrcb)
2438 {
2439         u32 ioasc;
2440         int error_index;
2441         struct ipr_hostrcb_type_21_error *error;
2442
2443         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2444                 return;
2445
2446         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2447                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2448
2449         if (ioa_cfg->sis64)
2450                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2451         else
2452                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2453
2454         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2455             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2456                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2457                 scsi_report_bus_reset(ioa_cfg->host,
2458                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2459         }
2460
2461         error_index = ipr_get_error(ioasc);
2462
2463         if (!ipr_error_table[error_index].log_hcam)
2464                 return;
2465
2466         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2467             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2468                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2469
2470                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2471                         ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2472                                 return;
2473         }
2474
2475         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2476
2477         /* Set indication we have logged an error */
2478         ioa_cfg->errors_logged++;
2479
2480         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2481                 return;
2482         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2483                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2484
2485         switch (hostrcb->hcam.overlay_id) {
2486         case IPR_HOST_RCB_OVERLAY_ID_2:
2487                 ipr_log_cache_error(ioa_cfg, hostrcb);
2488                 break;
2489         case IPR_HOST_RCB_OVERLAY_ID_3:
2490                 ipr_log_config_error(ioa_cfg, hostrcb);
2491                 break;
2492         case IPR_HOST_RCB_OVERLAY_ID_4:
2493         case IPR_HOST_RCB_OVERLAY_ID_6:
2494                 ipr_log_array_error(ioa_cfg, hostrcb);
2495                 break;
2496         case IPR_HOST_RCB_OVERLAY_ID_7:
2497                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2498                 break;
2499         case IPR_HOST_RCB_OVERLAY_ID_12:
2500                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2501                 break;
2502         case IPR_HOST_RCB_OVERLAY_ID_13:
2503                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2504                 break;
2505         case IPR_HOST_RCB_OVERLAY_ID_14:
2506         case IPR_HOST_RCB_OVERLAY_ID_16:
2507                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2508                 break;
2509         case IPR_HOST_RCB_OVERLAY_ID_17:
2510                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2511                 break;
2512         case IPR_HOST_RCB_OVERLAY_ID_20:
2513                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2514                 break;
2515         case IPR_HOST_RCB_OVERLAY_ID_21:
2516                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2517                 break;
2518         case IPR_HOST_RCB_OVERLAY_ID_23:
2519                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2520                 break;
2521         case IPR_HOST_RCB_OVERLAY_ID_24:
2522         case IPR_HOST_RCB_OVERLAY_ID_26:
2523                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2524                 break;
2525         case IPR_HOST_RCB_OVERLAY_ID_30:
2526                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2527                 break;
2528         case IPR_HOST_RCB_OVERLAY_ID_1:
2529         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2530         default:
2531                 ipr_log_generic_error(ioa_cfg, hostrcb);
2532                 break;
2533         }
2534 }
2535
2536 /**
2537  * ipr_process_error - Op done function for an adapter error log.
2538  * @ipr_cmd:    ipr command struct
2539  *
2540  * This function is the op done function for an error log HCAM
2541  * (host controlled async message) from the adapter. It will log the error and
2542  * send the HCAM back to the adapter.
2543  *
2544  * Return value:
2545  *      none
2546  **/
2547 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2548 {
2549         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2550         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2551         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2552         u32 fd_ioasc;
2553
2554         if (ioa_cfg->sis64)
2555                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2556         else
2557                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2558
2559         list_del(&hostrcb->queue);
2560         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2561
2562         if (!ioasc) {
2563                 ipr_handle_log_data(ioa_cfg, hostrcb);
2564                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2565                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2566         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2567                 dev_err(&ioa_cfg->pdev->dev,
2568                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2569         }
2570
2571         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2572 }
2573
2574 /**
2575  * ipr_timeout -  An internally generated op has timed out.
2576  * @ipr_cmd:    ipr command struct
2577  *
2578  * This function blocks host requests and initiates an
2579  * adapter reset.
2580  *
2581  * Return value:
2582  *      none
2583  **/
2584 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2585 {
2586         unsigned long lock_flags = 0;
2587         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2588
2589         ENTER;
2590         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2591
2592         ioa_cfg->errors_logged++;
2593         dev_err(&ioa_cfg->pdev->dev,
2594                 "Adapter being reset due to command timeout.\n");
2595
2596         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2597                 ioa_cfg->sdt_state = GET_DUMP;
2598
2599         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2600                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2601
2602         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2603         LEAVE;
2604 }
2605
2606 /**
2607  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2608  * @ipr_cmd:    ipr command struct
2609  *
2610  * This function blocks host requests and initiates an
2611  * adapter reset.
2612  *
2613  * Return value:
2614  *      none
2615  **/
2616 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2617 {
2618         unsigned long lock_flags = 0;
2619         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2620
2621         ENTER;
2622         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2623
2624         ioa_cfg->errors_logged++;
2625         dev_err(&ioa_cfg->pdev->dev,
2626                 "Adapter timed out transitioning to operational.\n");
2627
2628         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2629                 ioa_cfg->sdt_state = GET_DUMP;
2630
2631         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2632                 if (ipr_fastfail)
2633                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2634                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2635         }
2636
2637         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2638         LEAVE;
2639 }
2640
2641 /**
2642  * ipr_find_ses_entry - Find matching SES in SES table
2643  * @res:        resource entry struct of SES
2644  *
2645  * Return value:
2646  *      pointer to SES table entry / NULL on failure
2647  **/
2648 static const struct ipr_ses_table_entry *
2649 ipr_find_ses_entry(struct ipr_resource_entry *res)
2650 {
2651         int i, j, matches;
2652         struct ipr_std_inq_vpids *vpids;
2653         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2654
2655         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2656                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2657                         if (ste->compare_product_id_byte[j] == 'X') {
2658                                 vpids = &res->std_inq_data.vpids;
2659                                 if (vpids->product_id[j] == ste->product_id[j])
2660                                         matches++;
2661                                 else
2662                                         break;
2663                         } else
2664                                 matches++;
2665                 }
2666
2667                 if (matches == IPR_PROD_ID_LEN)
2668                         return ste;
2669         }
2670
2671         return NULL;
2672 }
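
/*
 * Illustrative sketch, not used by the driver: the wildcard rule the
 * loop above implements, pulled out into a standalone helper. A byte
 * of compare_product_id_byte equal to 'X' marks a significant
 * position whose product ID bytes must match; any other value means
 * "don't care". The helper name is hypothetical.
 */
static int __maybe_unused
ipr_ses_prod_ids_match(const char *compare_mask, const char *table_id,
                       const char *inquiry_id)
{
        int j;

        for (j = 0; j < IPR_PROD_ID_LEN; j++) {
                /* 'X' = significant byte: table and inquiry must agree */
                if (compare_mask[j] == 'X' && table_id[j] != inquiry_id[j])
                        return 0;
        }

        return 1;
}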
2673
2674 /**
2675  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2676  * @ioa_cfg:    ioa config struct
2677  * @bus:                SCSI bus
2678  * @bus_width:  bus width
2679  *
2680  * Return value:
2681  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *      On a 2-byte (wide) SCSI bus, the maximum data rate in MB/sec
 *      is twice the clock rate in MHz (e.g. a wide-enabled bus clocked
 *      at a maximum of 160MHz transfers at most 320MB/sec).
2685  **/
2686 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2687 {
2688         struct ipr_resource_entry *res;
2689         const struct ipr_ses_table_entry *ste;
2690         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2691
2692         /* Loop through each config table entry in the config table buffer */
2693         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2694                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2695                         continue;
2696
2697                 if (bus != res->bus)
2698                         continue;
2699
2700                 if (!(ste = ipr_find_ses_entry(res)))
2701                         continue;
2702
2703                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2704         }
2705
2706         return max_xfer_rate;
2707 }
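
/*
 * Worked example (illustrative): the arithmetic above implies that
 * ste->max_bus_speed_limit is expressed in MB/sec. For an SES entry
 * limiting a 16-bit wide bus to 320 MB/sec:
 *
 *      (320 * 10) / (16 / 8) = 1600
 *
 * i.e. 1600 units of 100KHz = 160 MHz, the Ultra320 case described in
 * the comment above.
 */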
2708
2709 /**
2710  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2711  * @ioa_cfg:            ioa config struct
2712  * @max_delay:          max delay in micro-seconds to wait
2713  *
2714  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2715  *
2716  * Return value:
2717  *      0 on success / other on failure
2718  **/
2719 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2720 {
2721         volatile u32 pcii_reg;
2722         int delay = 1;
2723
2724         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2725         while (delay < max_delay) {
2726                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2727
2728                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2729                         return 0;
2730
2731                 /* udelay cannot be used if delay is more than a few milliseconds */
2732                 if ((delay / 1000) > MAX_UDELAY_MS)
2733                         mdelay(delay / 1000);
2734                 else
2735                         udelay(delay);
2736
2737                 delay += delay;
2738         }
2739         return -EIO;
2740 }
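
/*
 * Note on the backoff above: the per-pass delay doubles (1us, 2us,
 * 4us, ...), so the total time busy-waited before giving up with -EIO
 * is bounded by roughly 2 * max_delay microseconds. Once a single wait
 * crosses MAX_UDELAY_MS milliseconds the loop switches from udelay()
 * to mdelay(), since udelay() must not be used for long delays.
 */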
2741
2742 /**
2743  * ipr_get_sis64_dump_data_section - Dump IOA memory
2744  * @ioa_cfg:                    ioa config struct
2745  * @start_addr:                 adapter address to dump
2746  * @dest:                       destination kernel buffer
2747  * @length_in_words:            length to dump in 4 byte words
2748  *
2749  * Return value:
2750  *      0 on success
2751  **/
2752 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2753                                            u32 start_addr,
2754                                            __be32 *dest, u32 length_in_words)
2755 {
2756         int i;
2757
2758         for (i = 0; i < length_in_words; i++) {
2759                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2760                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2761                 dest++;
2762         }
2763
2764         return 0;
2765 }
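
/*
 * Usage sketch (illustrative only): each pass of the loop above selects
 * one adapter word by writing its address to the dump address register,
 * then fetches it through the dump data register. Dumping one page of
 * IOA memory from a hypothetical adapter address "addr" would look like:
 *
 *      __be32 *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *
 *      if (buf)
 *              ipr_get_sis64_dump_data_section(ioa_cfg, addr, buf,
 *                                              PAGE_SIZE / sizeof(__be32));
 */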
2766
2767 /**
2768  * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:            ioa config struct
 * @start_addr:         adapter address to dump
 * @dest:               destination kernel buffer
 * @length_in_words:    length to dump in 4 byte words
2773  *
2774  * Return value:
2775  *      0 on success / -EIO on failure
2776  **/
2777 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2778                                       u32 start_addr,
2779                                       __be32 *dest, u32 length_in_words)
2780 {
2781         volatile u32 temp_pcii_reg;
2782         int i, delay = 0;
2783
2784         if (ioa_cfg->sis64)
2785                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2786                                                        dest, length_in_words);
2787
2788         /* Write IOA interrupt reg starting LDUMP state  */
2789         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2790                ioa_cfg->regs.set_uproc_interrupt_reg32);
2791
2792         /* Wait for IO debug acknowledge */
2793         if (ipr_wait_iodbg_ack(ioa_cfg,
2794                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2795                 dev_err(&ioa_cfg->pdev->dev,
2796                         "IOA dump long data transfer timeout\n");
2797                 return -EIO;
2798         }
2799
2800         /* Signal LDUMP interlocked - clear IO debug ack */
2801         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2802                ioa_cfg->regs.clr_interrupt_reg);
2803
2804         /* Write Mailbox with starting address */
2805         writel(start_addr, ioa_cfg->ioa_mailbox);
2806
2807         /* Signal address valid - clear IOA Reset alert */
2808         writel(IPR_UPROCI_RESET_ALERT,
2809                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2810
2811         for (i = 0; i < length_in_words; i++) {
2812                 /* Wait for IO debug acknowledge */
2813                 if (ipr_wait_iodbg_ack(ioa_cfg,
2814                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2815                         dev_err(&ioa_cfg->pdev->dev,
2816                                 "IOA dump short data transfer timeout\n");
2817                         return -EIO;
2818                 }
2819
2820                 /* Read data from mailbox and increment destination pointer */
2821                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2822                 dest++;
2823
2824                 /* For all but the last word of data, signal data received */
2825                 if (i < (length_in_words - 1)) {
2826                         /* Signal dump data received - Clear IO debug Ack */
2827                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2828                                ioa_cfg->regs.clr_interrupt_reg);
2829                 }
2830         }
2831
2832         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2833         writel(IPR_UPROCI_RESET_ALERT,
2834                ioa_cfg->regs.set_uproc_interrupt_reg32);
2835
2836         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2837                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2838
2839         /* Signal dump data received - Clear IO debug Ack */
2840         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2841                ioa_cfg->regs.clr_interrupt_reg);
2842
2843         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2844         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2845                 temp_pcii_reg =
2846                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2847
2848                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2849                         return 0;
2850
2851                 udelay(10);
2852                 delay += 10;
2853         }
2854
2855         return 0;
2856 }
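
/*
 * Summary of the pre-SIS-64 LDUMP handshake driven above, as read from
 * the register accesses:
 *
 * 1. Raise RESET_ALERT and IO_DEBUG_ALERT to start LDUMP state.
 * 2. Wait for IO_DEBUG_ACKNOWLEDGE, then clear it to interlock.
 * 3. Write the start address to the mailbox and clear RESET_ALERT to
 *    signal the address is valid.
 * 4. Per word: wait for the ack, read the word from the mailbox, and
 *    clear the ack to request the next word (skipped for the last one).
 * 5. Raise RESET_ALERT, clear IO_DEBUG_ALERT and the final ack, then
 *    poll for RESET_ALERT to drop, which signals LDUMP exit.
 */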
2857
2858 #ifdef CONFIG_SCSI_IPR_DUMP
2859 /**
2860  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:            ioa config struct
 * @pci_address:        adapter address
 * @length:             length of data to copy
2864  *
2865  * Copy data from PCI adapter to kernel buffer.
2866  * Note: length MUST be a 4 byte multiple
2867  * Return value:
2868  *      0 on success / other on failure
2869  **/
2870 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2871                         unsigned long pci_address, u32 length)
2872 {
2873         int bytes_copied = 0;
2874         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2875         __be32 *page;
2876         unsigned long lock_flags = 0;
2877         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2878
2879         if (ioa_cfg->sis64)
2880                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2881         else
2882                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2883
2884         while (bytes_copied < length &&
2885                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2886                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2887                     ioa_dump->page_offset == 0) {
2888                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2889
2890                         if (!page) {
2891                                 ipr_trace;
2892                                 return bytes_copied;
2893                         }
2894
2895                         ioa_dump->page_offset = 0;
2896                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2897                         ioa_dump->next_page_index++;
2898                 } else
2899                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2900
2901                 rem_len = length - bytes_copied;
2902                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2903                 cur_len = min(rem_len, rem_page_len);
2904
2905                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2906                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2907                         rc = -EIO;
2908                 } else {
2909                         rc = ipr_get_ldump_data_section(ioa_cfg,
2910                                                         pci_address + bytes_copied,
2911                                                         &page[ioa_dump->page_offset / 4],
2912                                                         (cur_len / sizeof(u32)));
2913                 }
2914                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2915
2916                 if (!rc) {
2917                         ioa_dump->page_offset += cur_len;
2918                         bytes_copied += cur_len;
2919                 } else {
2920                         ipr_trace;
2921                         break;
2922                 }
2923                 schedule();
2924         }
2925
2926         return bytes_copied;
2927 }
2928
2929 /**
2930  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2931  * @hdr:        dump entry header struct
2932  *
2933  * Return value:
2934  *      nothing
2935  **/
2936 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2937 {
2938         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2939         hdr->num_elems = 1;
2940         hdr->offset = sizeof(*hdr);
2941         hdr->status = IPR_DUMP_STATUS_SUCCESS;
2942 }
2943
2944 /**
2945  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2946  * @ioa_cfg:    ioa config struct
2947  * @driver_dump:        driver dump struct
2948  *
2949  * Return value:
2950  *      nothing
2951  **/
2952 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2953                                    struct ipr_driver_dump *driver_dump)
2954 {
2955         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2956
2957         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2958         driver_dump->ioa_type_entry.hdr.len =
2959                 sizeof(struct ipr_dump_ioa_type_entry) -
2960                 sizeof(struct ipr_dump_entry_header);
2961         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2962         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2963         driver_dump->ioa_type_entry.type = ioa_cfg->type;
2964         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2965                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2966                 ucode_vpd->minor_release[1];
2967         driver_dump->hdr.num_entries++;
2968 }
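
/*
 * Worked example (illustrative): the fw_version packing above stores
 * one byte per field, most significant first. For major_release 0x02,
 * card_type 0x04 and minor_release { 0x03, 0x01 }:
 *
 *      (0x02 << 24) | (0x04 << 16) | (0x03 << 8) | 0x01 = 0x02040301
 *
 * which is the same byte order ipr_show_fw_version() prints below via
 * "%02X%02X%02X%02X".
 */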
2969
2970 /**
2971  * ipr_dump_version_data - Fill in the driver version in the dump.
2972  * @ioa_cfg:    ioa config struct
2973  * @driver_dump:        driver dump struct
2974  *
2975  * Return value:
2976  *      nothing
2977  **/
2978 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2979                                   struct ipr_driver_dump *driver_dump)
2980 {
2981         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2982         driver_dump->version_entry.hdr.len =
2983                 sizeof(struct ipr_dump_version_entry) -
2984                 sizeof(struct ipr_dump_entry_header);
2985         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2986         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2987         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2988         driver_dump->hdr.num_entries++;
2989 }
2990
2991 /**
2992  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2993  * @ioa_cfg:    ioa config struct
2994  * @driver_dump:        driver dump struct
2995  *
2996  * Return value:
2997  *      nothing
2998  **/
2999 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3000                                    struct ipr_driver_dump *driver_dump)
3001 {
3002         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3003         driver_dump->trace_entry.hdr.len =
3004                 sizeof(struct ipr_dump_trace_entry) -
3005                 sizeof(struct ipr_dump_entry_header);
3006         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3007         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3008         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3009         driver_dump->hdr.num_entries++;
3010 }
3011
3012 /**
3013  * ipr_dump_location_data - Fill in the IOA location in the dump.
3014  * @ioa_cfg:    ioa config struct
3015  * @driver_dump:        driver dump struct
3016  *
3017  * Return value:
3018  *      nothing
3019  **/
3020 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3021                                    struct ipr_driver_dump *driver_dump)
3022 {
3023         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3024         driver_dump->location_entry.hdr.len =
3025                 sizeof(struct ipr_dump_location_entry) -
3026                 sizeof(struct ipr_dump_entry_header);
3027         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3028         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3029         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3030         driver_dump->hdr.num_entries++;
3031 }
3032
3033 /**
3034  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3035  * @ioa_cfg:    ioa config struct
3036  * @dump:               dump struct
3037  *
3038  * Return value:
3039  *      nothing
3040  **/
3041 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3042 {
3043         unsigned long start_addr, sdt_word;
3044         unsigned long lock_flags = 0;
3045         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3046         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3047         u32 num_entries, max_num_entries, start_off, end_off;
3048         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3049         struct ipr_sdt *sdt;
3050         int valid = 1;
3051         int i;
3052
3053         ENTER;
3054
3055         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3056
3057         if (ioa_cfg->sdt_state != READ_DUMP) {
3058                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3059                 return;
3060         }
3061
3062         if (ioa_cfg->sis64) {
3063                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3064                 ssleep(IPR_DUMP_DELAY_SECONDS);
3065                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3066         }
3067
3068         start_addr = readl(ioa_cfg->ioa_mailbox);
3069
3070         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3071                 dev_err(&ioa_cfg->pdev->dev,
3072                         "Invalid dump table format: %lx\n", start_addr);
3073                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3074                 return;
3075         }
3076
3077         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3078
3079         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3080
3081         /* Initialize the overall dump header */
3082         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3083         driver_dump->hdr.num_entries = 1;
3084         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3085         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3086         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3087         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3088
3089         ipr_dump_version_data(ioa_cfg, driver_dump);
3090         ipr_dump_location_data(ioa_cfg, driver_dump);
3091         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3092         ipr_dump_trace_data(ioa_cfg, driver_dump);
3093
3094         /* Update dump_header */
3095         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3096
3097         /* IOA Dump entry */
3098         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3099         ioa_dump->hdr.len = 0;
3100         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3101         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3102
        /* First entries in sdt are actually a list of dump addresses and
         * lengths to gather the real dump data.  sdt represents the pointer
         * to the ioa generated dump table.  Dump data will be extracted based
         * on entries in this table */
3107         sdt = &ioa_dump->sdt;
3108
3109         if (ioa_cfg->sis64) {
3110                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3111                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3112         } else {
3113                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3114                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3115         }
3116
3117         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3118                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3119         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3120                                         bytes_to_copy / sizeof(__be32));
3121
3122         /* Smart Dump table is ready to use and the first entry is valid */
3123         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3124             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3125                 dev_err(&ioa_cfg->pdev->dev,
3126                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3127                         rc, be32_to_cpu(sdt->hdr.state));
3128                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3129                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3130                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3131                 return;
3132         }
3133
3134         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3135
3136         if (num_entries > max_num_entries)
3137                 num_entries = max_num_entries;
3138
3139         /* Update dump length to the actual data to be copied */
3140         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3141         if (ioa_cfg->sis64)
3142                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3143         else
3144                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3145
3146         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3147
3148         for (i = 0; i < num_entries; i++) {
3149                 if (ioa_dump->hdr.len > max_dump_size) {
3150                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3151                         break;
3152                 }
3153
3154                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3155                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3156                         if (ioa_cfg->sis64)
3157                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3158                         else {
3159                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3160                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3161
3162                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3163                                         bytes_to_copy = end_off - start_off;
3164                                 else
3165                                         valid = 0;
3166                         }
3167                         if (valid) {
3168                                 if (bytes_to_copy > max_dump_size) {
3169                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3170                                         continue;
3171                                 }
3172
3173                                 /* Copy data from adapter to driver buffers */
3174                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3175                                                             bytes_to_copy);
3176
3177                                 ioa_dump->hdr.len += bytes_copied;
3178
3179                                 if (bytes_copied != bytes_to_copy) {
3180                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3181                                         break;
3182                                 }
3183                         }
3184                 }
3185         }
3186
3187         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3188
3189         /* Update dump_header */
3190         driver_dump->hdr.len += ioa_dump->hdr.len;
3191         wmb();
3192         ioa_cfg->sdt_state = DUMP_OBTAINED;
3193         LEAVE;
3194 }
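
/*
 * Resulting dump layout, as assembled above: the overall dump header,
 * then the driver version, location, adapter type and trace entries,
 * then the IOA dump entry whose payload is fetched from the adapter
 * one valid Smart Dump Table entry at a time via ipr_sdt_copy().
 */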
3195
3196 #else
3197 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3198 #endif
3199
3200 /**
3201  * ipr_release_dump - Free adapter dump memory
3202  * @kref:       kref struct
3203  *
3204  * Return value:
3205  *      nothing
3206  **/
3207 static void ipr_release_dump(struct kref *kref)
3208 {
3209         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3210         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3211         unsigned long lock_flags = 0;
3212         int i;
3213
3214         ENTER;
3215         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3216         ioa_cfg->dump = NULL;
3217         ioa_cfg->sdt_state = INACTIVE;
3218         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3219
3220         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3221                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3222
3223         vfree(dump->ioa_dump.ioa_data);
3224         kfree(dump);
3225         LEAVE;
3226 }
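
/*
 * Lifecycle sketch (illustrative): ipr_release_dump() is only invoked
 * through the dump's kref. A user takes a reference under the host
 * lock and drops it when finished:
 *
 *      kref_get(&dump->kref);
 *      ...
 *      kref_put(&dump->kref, ipr_release_dump);
 *
 * The final put frees the dump pages and the dump itself, as done by
 * ipr_worker_thread() and ipr_read_dump() elsewhere in this file.
 */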
3227
3228 /**
3229  * ipr_worker_thread - Worker thread
 * @work:               work struct embedded in the ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 * changes are detected by the adapter.
3235  *
3236  * Return value:
3237  *      nothing
3238  **/
3239 static void ipr_worker_thread(struct work_struct *work)
3240 {
3241         unsigned long lock_flags;
3242         struct ipr_resource_entry *res;
3243         struct scsi_device *sdev;
3244         struct ipr_dump *dump;
3245         struct ipr_ioa_cfg *ioa_cfg =
3246                 container_of(work, struct ipr_ioa_cfg, work_q);
3247         u8 bus, target, lun;
3248         int did_work;
3249
3250         ENTER;
3251         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3252
3253         if (ioa_cfg->sdt_state == READ_DUMP) {
3254                 dump = ioa_cfg->dump;
3255                 if (!dump) {
3256                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3257                         return;
3258                 }
3259                 kref_get(&dump->kref);
3260                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3261                 ipr_get_ioa_dump(ioa_cfg, dump);
3262                 kref_put(&dump->kref, ipr_release_dump);
3263
3264                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3265                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3266                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3267                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3268                 return;
3269         }
3270
3271 restart:
3272         do {
3273                 did_work = 0;
3274                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3275                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3276                         return;
3277                 }
3278
3279                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3280                         if (res->del_from_ml && res->sdev) {
3281                                 did_work = 1;
3282                                 sdev = res->sdev;
3283                                 if (!scsi_device_get(sdev)) {
3284                                         if (!res->add_to_ml)
3285                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3286                                         else
3287                                                 res->del_from_ml = 0;
3288                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3289                                         scsi_remove_device(sdev);
3290                                         scsi_device_put(sdev);
3291                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3292                                 }
3293                                 break;
3294                         }
3295                 }
3296         } while (did_work);
3297
3298         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3299                 if (res->add_to_ml) {
3300                         bus = res->bus;
3301                         target = res->target;
3302                         lun = res->lun;
3303                         res->add_to_ml = 0;
3304                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3305                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3306                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3307                         goto restart;
3308                 }
3309         }
3310
3311         ioa_cfg->scan_done = 1;
3312         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3313         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3314         LEAVE;
3315 }
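
/*
 * Illustrative sketch, not used by the driver: the lock-drop pattern
 * ipr_worker_thread() relies on. scsi_add_device() and
 * scsi_remove_device() can sleep, so the host lock is released around
 * them; since the resource list may change while unlocked, the walk
 * restarts from the head after every drop. The function name is
 * hypothetical.
 */
static void __maybe_unused ipr_example_add_devices(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_resource_entry *res;
        unsigned long flags;

restart:
        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
                if (res->add_to_ml) {
                        u8 bus = res->bus, target = res->target, lun = res->lun;

                        res->add_to_ml = 0;
                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
                        scsi_add_device(ioa_cfg->host, bus, target, lun);
                        goto restart;   /* list may have changed */
                }
        }
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}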
3316
3317 #ifdef CONFIG_SCSI_IPR_TRACE
3318 /**
3319  * ipr_read_trace - Dump the adapter trace
3320  * @filp:               open sysfs file
3321  * @kobj:               kobject struct
3322  * @bin_attr:           bin_attribute struct
3323  * @buf:                buffer
3324  * @off:                offset
3325  * @count:              buffer size
3326  *
3327  * Return value:
3328  *      number of bytes printed to buffer
3329  **/
3330 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3331                               struct bin_attribute *bin_attr,
3332                               char *buf, loff_t off, size_t count)
3333 {
3334         struct device *dev = container_of(kobj, struct device, kobj);
3335         struct Scsi_Host *shost = class_to_shost(dev);
3336         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3337         unsigned long lock_flags = 0;
3338         ssize_t ret;
3339
3340         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3341         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3342                                 IPR_TRACE_SIZE);
3343         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3344
3345         return ret;
3346 }
3347
3348 static struct bin_attribute ipr_trace_attr = {
3349         .attr = {
3350                 .name = "trace",
3351                 .mode = S_IRUGO,
3352         },
3353         .size = 0,
3354         .read = ipr_read_trace,
3355 };
3356 #endif
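
/*
 * Userspace note: with CONFIG_SCSI_IPR_TRACE enabled, the attribute
 * above exposes the adapter trace buffer read-only, typically as
 * /sys/class/scsi_host/host<N>/trace (path assuming the standard
 * scsi_host class device layout).
 */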
3357
3358 /**
3359  * ipr_show_fw_version - Show the firmware version
3360  * @dev:        class device struct
3361  * @buf:        buffer
3362  *
3363  * Return value:
3364  *      number of bytes printed to buffer
3365  **/
3366 static ssize_t ipr_show_fw_version(struct device *dev,
3367                                    struct device_attribute *attr, char *buf)
3368 {
3369         struct Scsi_Host *shost = class_to_shost(dev);
3370         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3371         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3372         unsigned long lock_flags = 0;
3373         int len;
3374
3375         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3376         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3377                        ucode_vpd->major_release, ucode_vpd->card_type,
3378                        ucode_vpd->minor_release[0],
3379                        ucode_vpd->minor_release[1]);
3380         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3381         return len;
3382 }
3383
3384 static struct device_attribute ipr_fw_version_attr = {
3385         .attr = {
3386                 .name =         "fw_version",
3387                 .mode =         S_IRUGO,
3388         },
3389         .show = ipr_show_fw_version,
3390 };
3391
3392 /**
3393  * ipr_show_log_level - Show the adapter's error logging level
3394  * @dev:        class device struct
3395  * @buf:        buffer
3396  *
3397  * Return value:
3398  *      number of bytes printed to buffer
3399  **/
3400 static ssize_t ipr_show_log_level(struct device *dev,
3401                                    struct device_attribute *attr, char *buf)
3402 {
3403         struct Scsi_Host *shost = class_to_shost(dev);
3404         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3405         unsigned long lock_flags = 0;
3406         int len;
3407
3408         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3409         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3410         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3411         return len;
3412 }
3413
3414 /**
3415  * ipr_store_log_level - Change the adapter's error logging level
3416  * @dev:        class device struct
 * @buf:        buffer
 * @count:      buffer size
 *
 * Return value:
 *      number of bytes consumed from the buffer
3421  **/
3422 static ssize_t ipr_store_log_level(struct device *dev,
3423                                    struct device_attribute *attr,
3424                                    const char *buf, size_t count)
3425 {
3426         struct Scsi_Host *shost = class_to_shost(dev);
3427         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3428         unsigned long lock_flags = 0;
3429
3430         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3431         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3432         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3433         return strlen(buf);
3434 }
3435
3436 static struct device_attribute ipr_log_level_attr = {
3437         .attr = {
3438                 .name =         "log_level",
3439                 .mode =         S_IRUGO | S_IWUSR,
3440         },
3441         .show = ipr_show_log_level,
3442         .store = ipr_store_log_level
3443 };
3444
3445 /**
3446  * ipr_store_diagnostics - IOA Diagnostics interface
3447  * @dev:        device struct
3448  * @buf:        buffer
3449  * @count:      buffer size
3450  *
3451  * This function will reset the adapter and wait a reasonable
3452  * amount of time for any errors that the adapter might log.
3453  *
3454  * Return value:
3455  *      count on success / other on failure
3456  **/
3457 static ssize_t ipr_store_diagnostics(struct device *dev,
3458                                      struct device_attribute *attr,
3459                                      const char *buf, size_t count)
3460 {
3461         struct Scsi_Host *shost = class_to_shost(dev);
3462         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3463         unsigned long lock_flags = 0;
3464         int rc = count;
3465
3466         if (!capable(CAP_SYS_ADMIN))
3467                 return -EACCES;
3468
3469         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3470         while (ioa_cfg->in_reset_reload) {
3471                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3472                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3473                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3474         }
3475
3476         ioa_cfg->errors_logged = 0;
3477         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3478
3479         if (ioa_cfg->in_reset_reload) {
3480                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3481                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3482
3483                 /* Wait for a second for any errors to be logged */
3484                 msleep(1000);
3485         } else {
3486                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3487                 return -EIO;
3488         }
3489
3490         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3491         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3492                 rc = -EIO;
3493         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3494
3495         return rc;
3496 }
3497
3498 static struct device_attribute ipr_diagnostics_attr = {
3499         .attr = {
3500                 .name =         "run_diagnostics",
3501                 .mode =         S_IWUSR,
3502         },
3503         .store = ipr_store_diagnostics
3504 };
3505
3506 /**
3507  * ipr_show_adapter_state - Show the adapter's state
 * @dev:        device struct
3509  * @buf:        buffer
3510  *
3511  * Return value:
3512  *      number of bytes printed to buffer
3513  **/
3514 static ssize_t ipr_show_adapter_state(struct device *dev,
3515                                       struct device_attribute *attr, char *buf)
3516 {
3517         struct Scsi_Host *shost = class_to_shost(dev);
3518         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3519         unsigned long lock_flags = 0;
3520         int len;
3521
3522         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3523         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3524                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3525         else
3526                 len = snprintf(buf, PAGE_SIZE, "online\n");
3527         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3528         return len;
3529 }
3530
3531 /**
3532  * ipr_store_adapter_state - Change adapter state
3533  * @dev:        device struct
3534  * @buf:        buffer
3535  * @count:      buffer size
3536  *
3537  * This function will change the adapter's state.
3538  *
3539  * Return value:
3540  *      count on success / other on failure
3541  **/
3542 static ssize_t ipr_store_adapter_state(struct device *dev,
3543                                        struct device_attribute *attr,
3544                                        const char *buf, size_t count)
3545 {
3546         struct Scsi_Host *shost = class_to_shost(dev);
3547         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3548         unsigned long lock_flags;
3549         int result = count, i;
3550
3551         if (!capable(CAP_SYS_ADMIN))
3552                 return -EACCES;
3553
3554         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3555         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3556             !strncmp(buf, "online", 6)) {
3557                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3558                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3559                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3560                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3561                 }
3562                 wmb();
3563                 ioa_cfg->reset_retries = 0;
3564                 ioa_cfg->in_ioa_bringdown = 0;
3565                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3566         }
3567         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3568         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3569
3570         return result;
3571 }
3572
3573 static struct device_attribute ipr_ioa_state_attr = {
3574         .attr = {
3575                 .name =         "online_state",
3576                 .mode =         S_IRUGO | S_IWUSR,
3577         },
3578         .show = ipr_show_adapter_state,
3579         .store = ipr_store_adapter_state
3580 };
3581
3582 /**
3583  * ipr_store_reset_adapter - Reset the adapter
3584  * @dev:        device struct
3585  * @buf:        buffer
3586  * @count:      buffer size
3587  *
3588  * This function will reset the adapter.
3589  *
3590  * Return value:
3591  *      count on success / other on failure
3592  **/
3593 static ssize_t ipr_store_reset_adapter(struct device *dev,
3594                                        struct device_attribute *attr,
3595                                        const char *buf, size_t count)
3596 {
3597         struct Scsi_Host *shost = class_to_shost(dev);
3598         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3599         unsigned long lock_flags;
3600         int result = count;
3601
3602         if (!capable(CAP_SYS_ADMIN))
3603                 return -EACCES;
3604
3605         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3606         if (!ioa_cfg->in_reset_reload)
3607                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3608         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3609         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3610
3611         return result;
3612 }
3613
3614 static struct device_attribute ipr_ioa_reset_attr = {
3615         .attr = {
3616                 .name =         "reset_host",
3617                 .mode =         S_IWUSR,
3618         },
3619         .store = ipr_store_reset_adapter
3620 };
3621
3622 static int ipr_iopoll(struct blk_iopoll *iop, int budget);

/**
3624  * ipr_show_iopoll_weight - Show ipr polling mode
3625  * @dev:        class device struct
3626  * @buf:        buffer
3627  *
3628  * Return value:
3629  *      number of bytes printed to buffer
3630  **/
3631 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3632                                    struct device_attribute *attr, char *buf)
3633 {
3634         struct Scsi_Host *shost = class_to_shost(dev);
3635         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3636         unsigned long lock_flags = 0;
3637         int len;
3638
3639         spin_lock_irqsave(shost->host_lock, lock_flags);
3640         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3641         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3642
3643         return len;
3644 }
3645
3646 /**
3647  * ipr_store_iopoll_weight - Change the adapter's polling mode
3648  * @dev:        class device struct
 * @buf:        buffer
 * @count:      buffer size
 *
 * Return value:
 *      number of bytes consumed from the buffer
3653  **/
3654 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3655                                         struct device_attribute *attr,
3656                                         const char *buf, size_t count)
3657 {
3658         struct Scsi_Host *shost = class_to_shost(dev);
3659         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3660         unsigned long user_iopoll_weight;
3661         unsigned long lock_flags = 0;
3662         int i;
3663
3664         if (!ioa_cfg->sis64) {
3665                 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3666                 return -EINVAL;
3667         }
3668         if (kstrtoul(buf, 10, &user_iopoll_weight))
3669                 return -EINVAL;
3670
3671         if (user_iopoll_weight > 256) {
                dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It may not exceed 256\n");
3673                 return -EINVAL;
3674         }
3675
3676         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
                dev_info(&ioa_cfg->pdev->dev, "Specified blk-iopoll weight matches the current weight\n");
3678                 return strlen(buf);
3679         }
3680
3681         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3682                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3683                         blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3684         }
3685
3686         spin_lock_irqsave(shost->host_lock, lock_flags);
3687         ioa_cfg->iopoll_weight = user_iopoll_weight;
3688         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3689                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3690                         blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3691                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3692                         blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3693                 }
3694         }
3695         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3696
3697         return strlen(buf);
3698 }
3699
3700 static struct device_attribute ipr_iopoll_weight_attr = {
3701         .attr = {
3702                 .name =         "iopoll_weight",
3703                 .mode =         S_IRUGO | S_IWUSR,
3704         },
3705         .show = ipr_show_iopoll_weight,
3706         .store = ipr_store_iopoll_weight
3707 };
3708
3709 /**
3710  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3711  * @buf_len:            buffer length
3712  *
3713  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3714  * list to use for microcode download
3715  *
3716  * Return value:
3717  *      pointer to sglist / NULL on failure
3718  **/
3719 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3720 {
3721         int sg_size, order, bsize_elem, num_elem, i, j;
3722         struct ipr_sglist *sglist;
3723         struct scatterlist *scatterlist;
3724         struct page *page;
3725
3726         /* Get the minimum size per scatter/gather element */
3727         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3728
3729         /* Get the actual size per element */
3730         order = get_order(sg_size);
3731
3732         /* Determine the actual number of bytes per element */
3733         bsize_elem = PAGE_SIZE * (1 << order);
3734
3735         /* Determine the actual number of sg entries needed */
3736         if (buf_len % bsize_elem)
3737                 num_elem = (buf_len / bsize_elem) + 1;
3738         else
3739                 num_elem = buf_len / bsize_elem;
3740
3741         /* Allocate a scatter/gather list for the DMA */
3742         sglist = kzalloc(sizeof(struct ipr_sglist) +
3743                          (sizeof(struct scatterlist) * (num_elem - 1)),
3744                          GFP_KERNEL);
3745
3746         if (sglist == NULL) {
3747                 ipr_trace;
3748                 return NULL;
3749         }
3750
3751         scatterlist = sglist->scatterlist;
3752         sg_init_table(scatterlist, num_elem);
3753
3754         sglist->order = order;
3755         sglist->num_sg = num_elem;
3756
3757         /* Allocate a bunch of sg elements */
3758         for (i = 0; i < num_elem; i++) {
3759                 page = alloc_pages(GFP_KERNEL, order);
3760                 if (!page) {
3761                         ipr_trace;
3762
3763                         /* Free up what we already allocated */
3764                         for (j = i - 1; j >= 0; j--)
3765                                 __free_pages(sg_page(&scatterlist[j]), order);
3766                         kfree(sglist);
3767                         return NULL;
3768                 }
3769
3770                 sg_set_page(&scatterlist[i], page, 0, 0);
3771         }
3772
3773         return sglist;
3774 }
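
/*
 * Worked example (illustrative; assumes 4KB pages and, for the sake
 * of the numbers, IPR_MAX_SGLIST == 64): for a 600KB microcode image,
 * sg_size = 614400 / 63 = 9752 bytes, get_order() rounds that up to
 * order 2 (16KB per element), and num_elem = DIV_ROUND_UP(614400,
 * 16384) = 38 scatter/gather elements get allocated.
 */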
3775
3776 /**
3777  * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:             scatter/gather list pointer
3779  *
3780  * Free a DMA'able ucode download buffer previously allocated with
3781  * ipr_alloc_ucode_buffer
3782  *
3783  * Return value:
3784  *      nothing
3785  **/
3786 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3787 {
3788         int i;
3789
3790         for (i = 0; i < sglist->num_sg; i++)
3791                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3792
3793         kfree(sglist);
3794 }
3795
3796 /**
3797  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3798  * @sglist:             scatter/gather list pointer
3799  * @buffer:             buffer pointer
3800  * @len:                buffer length
3801  *
3802  * Copy a microcode image from a user buffer into a buffer allocated by
3803  * ipr_alloc_ucode_buffer
3804  *
3805  * Return value:
3806  *      0 on success / other on failure
3807  **/
3808 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3809                                  u8 *buffer, u32 len)
3810 {
3811         int bsize_elem, i, result = 0;
3812         struct scatterlist *scatterlist;
3813         void *kaddr;
3814
3815         /* Determine the actual number of bytes per element */
3816         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3817
3818         scatterlist = sglist->scatterlist;
3819
3820         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3821                 struct page *page = sg_page(&scatterlist[i]);
3822
3823                 kaddr = kmap(page);
3824                 memcpy(kaddr, buffer, bsize_elem);
3825                 kunmap(page);
3826
                scatterlist[i].length = bsize_elem;
3833         }
3834
3835         if (len % bsize_elem) {
3836                 struct page *page = sg_page(&scatterlist[i]);
3837
3838                 kaddr = kmap(page);
3839                 memcpy(kaddr, buffer, len % bsize_elem);
3840                 kunmap(page);
3841
3842                 scatterlist[i].length = len % bsize_elem;
3843         }
3844
3845         sglist->buffer_len = len;
3846         return result;
3847 }
3848
3849 /**
3850  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3851  * @ipr_cmd:            ipr command struct
3852  * @sglist:             scatter/gather list
3853  *
3854  * Builds a microcode download IOA data list (IOADL).
3855  *
3856  **/
3857 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3858                                     struct ipr_sglist *sglist)
3859 {
3860         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3861         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3862         struct scatterlist *scatterlist = sglist->scatterlist;
3863         int i;
3864
3865         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3866         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3867         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3868
3869         ioarcb->ioadl_len =
3870                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3871         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3872                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3873                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3874                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3875         }
3876
3877         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3878 }
3879
3880 /**
3881  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3882  * @ipr_cmd:    ipr command struct
3883  * @sglist:             scatter/gather list
3884  *
3885  * Builds a microcode download IOA data list (IOADL).
3886  *
3887  **/
3888 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3889                                   struct ipr_sglist *sglist)
3890 {
3891         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3892         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3893         struct scatterlist *scatterlist = sglist->scatterlist;
3894         int i;
3895
3896         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3897         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3898         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3899
3900         ioarcb->ioadl_len =
3901                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3902
3903         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3904                 ioadl[i].flags_and_data_len =
3905                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3906                 ioadl[i].address =
3907                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3908         }
3909
3910         ioadl[i-1].flags_and_data_len |=
3911                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3912 }
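
/*
 * Note: both builders above emit one descriptor per mapped
 * scatter/gather element and OR IPR_IOADL_FLAGS_LAST into the final
 * descriptor so the adapter knows where the list ends. The 64-bit
 * form keeps flags, length and address in separate fields; the 32-bit
 * form packs flags and length into a single big-endian word.
 */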
3913
3914 /**
3915  * ipr_update_ioa_ucode - Update IOA's microcode
3916  * @ioa_cfg:    ioa config struct
3917  * @sglist:             scatter/gather list
3918  *
3919  * Initiate an adapter reset to update the IOA's microcode
3920  *
3921  * Return value:
3922  *      0 on success / -EIO on failure
3923  **/
3924 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3925                                 struct ipr_sglist *sglist)
3926 {
3927         unsigned long lock_flags;
3928
3929         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3930         while (ioa_cfg->in_reset_reload) {
3931                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3932                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3933                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3934         }
3935
3936         if (ioa_cfg->ucode_sglist) {
3937                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3938                 dev_err(&ioa_cfg->pdev->dev,
3939                         "Microcode download already in progress\n");
3940                 return -EIO;
3941         }
3942
3943         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3944                                         sglist->scatterlist, sglist->num_sg,
3945                                         DMA_TO_DEVICE);
3946
3947         if (!sglist->num_dma_sg) {
3948                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3949                 dev_err(&ioa_cfg->pdev->dev,
3950                         "Failed to map microcode download buffer!\n");
3951                 return -EIO;
3952         }
3953
3954         ioa_cfg->ucode_sglist = sglist;
3955         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3956         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3957         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3958
3959         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3960         ioa_cfg->ucode_sglist = NULL;
3961         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3962         return 0;
3963 }
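
/*
 * Flow summary for the download above: wait out any reset already in
 * progress, refuse concurrent downloads, DMA-map the staged image and
 * publish it via ioa_cfg->ucode_sglist, then trigger a normal-shutdown
 * adapter reset; the reset sequence (elsewhere in this driver) sends
 * the image to the IOA, and the pointer is cleared once the reset
 * completes.
 */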
3964
3965 /**
3966  * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:        device struct
3968  * @buf:        buffer
3969  * @count:      buffer size
3970  *
3971  * This function will update the firmware on the adapter.
3972  *
3973  * Return value:
3974  *      count on success / other on failure
3975  **/
3976 static ssize_t ipr_store_update_fw(struct device *dev,
3977                                    struct device_attribute *attr,
3978                                    const char *buf, size_t count)
3979 {
3980         struct Scsi_Host *shost = class_to_shost(dev);
3981         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3982         struct ipr_ucode_image_header *image_hdr;
3983         const struct firmware *fw_entry;
3984         struct ipr_sglist *sglist;
3985         char fname[100];
3986         char *src;
3987         int len, result, dnld_size;
3988
3989         if (!capable(CAP_SYS_ADMIN))
3990                 return -EACCES;
3991
        snprintf(fname, sizeof(fname), "%s", buf);
        len = strlen(fname);
        if (len && fname[len - 1] == '\n')
                fname[len - 1] = '\0';
3994
3995         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3996                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3997                 return -EIO;
3998         }
3999
4000         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4001
4002         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4003         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4004         sglist = ipr_alloc_ucode_buffer(dnld_size);
4005
4006         if (!sglist) {
4007                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4008                 release_firmware(fw_entry);
4009                 return -ENOMEM;
4010         }
4011
4012         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4013
4014         if (result) {
4015                 dev_err(&ioa_cfg->pdev->dev,
4016                         "Microcode buffer copy to DMA buffer failed\n");
4017                 goto out;
4018         }
4019
4020         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4021
4022         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4023
4024         if (!result)
4025                 result = count;
4026 out:
4027         ipr_free_ucode_buffer(sglist);
4028         release_firmware(fw_entry);
4029         return result;
4030 }
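
/*
 * Userspace note: a firmware update is driven by writing the image
 * file name (as resolved by request_firmware(), i.e. relative to the
 * firmware search path) to the attribute below, typically
 * /sys/class/scsi_host/host<N>/update_fw; a trailing newline from
 * "echo" is stripped above.
 */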
4031
4032 static struct device_attribute ipr_update_fw_attr = {
4033         .attr = {
4034                 .name =         "update_fw",
4035                 .mode =         S_IWUSR,
4036         },
4037         .store = ipr_store_update_fw
4038 };
4039
4040 /**
4041  * ipr_show_fw_type - Show the adapter's firmware type.
4042  * @dev:        class device struct
4043  * @buf:        buffer
4044  *
4045  * Return value:
4046  *      number of bytes printed to buffer
4047  **/
4048 static ssize_t ipr_show_fw_type(struct device *dev,
4049                                 struct device_attribute *attr, char *buf)
4050 {
4051         struct Scsi_Host *shost = class_to_shost(dev);
4052         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4053         unsigned long lock_flags = 0;
4054         int len;
4055
4056         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4057         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4058         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4059         return len;
4060 }
4061
4062 static struct device_attribute ipr_ioa_fw_type_attr = {
4063         .attr = {
4064                 .name =         "fw_type",
4065                 .mode =         S_IRUGO,
4066         },
4067         .show = ipr_show_fw_type
4068 };
4069
4070 static struct device_attribute *ipr_ioa_attrs[] = {
4071         &ipr_fw_version_attr,
4072         &ipr_log_level_attr,
4073         &ipr_diagnostics_attr,
4074         &ipr_ioa_state_attr,
4075         &ipr_ioa_reset_attr,
4076         &ipr_update_fw_attr,
4077         &ipr_ioa_fw_type_attr,
4078         &ipr_iopoll_weight_attr,
4079         NULL,
4080 };
4081
4082 #ifdef CONFIG_SCSI_IPR_DUMP
4083 /**
4084  * ipr_read_dump - Dump the adapter
4085  * @filp:               open sysfs file
4086  * @kobj:               kobject struct
4087  * @bin_attr:           bin_attribute struct
4088  * @buf:                buffer
4089  * @off:                offset
4090  * @count:              buffer size
4091  *
4092  * Return value:
4093  *      number of bytes read on success (0 if no dump is available) / negative errno on failure
4094  **/
4095 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4096                              struct bin_attribute *bin_attr,
4097                              char *buf, loff_t off, size_t count)
4098 {
4099         struct device *cdev = container_of(kobj, struct device, kobj);
4100         struct Scsi_Host *shost = class_to_shost(cdev);
4101         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4102         struct ipr_dump *dump;
4103         unsigned long lock_flags = 0;
4104         char *src;
4105         int len, sdt_end;
4106         size_t rc = count;
4107
4108         if (!capable(CAP_SYS_ADMIN))
4109                 return -EACCES;
4110
4111         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4112         dump = ioa_cfg->dump;
4113
4114         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4115                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4116                 return 0;
4117         }
4118         kref_get(&dump->kref);
4119         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4120
4121         if (off > dump->driver_dump.hdr.len) {
4122                 kref_put(&dump->kref, ipr_release_dump);
4123                 return 0;
4124         }
4125
4126         if (off + count > dump->driver_dump.hdr.len) {
4127                 count = dump->driver_dump.hdr.len - off;
4128                 rc = count;
4129         }
4130
4131         if (count && off < sizeof(dump->driver_dump)) {
4132                 if (off + count > sizeof(dump->driver_dump))
4133                         len = sizeof(dump->driver_dump) - off;
4134                 else
4135                         len = count;
4136                 src = (u8 *)&dump->driver_dump + off;
4137                 memcpy(buf, src, len);
4138                 buf += len;
4139                 off += len;
4140                 count -= len;
4141         }
4142
4143         off -= sizeof(dump->driver_dump);
4144
4145         if (ioa_cfg->sis64)
4146                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4147                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4148                            sizeof(struct ipr_sdt_entry));
4149         else
4150                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4151                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4152
4153         if (count && off < sdt_end) {
4154                 if (off + count > sdt_end)
4155                         len = sdt_end - off;
4156                 else
4157                         len = count;
4158                 src = (u8 *)&dump->ioa_dump + off;
4159                 memcpy(buf, src, len);
4160                 buf += len;
4161                 off += len;
4162                 count -= len;
4163         }
4164
4165         off -= sdt_end;
4166
4167         while (count) {
4168                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4169                         len = PAGE_ALIGN(off) - off;
4170                 else
4171                         len = count;
4172                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4173                 src += off & ~PAGE_MASK;
4174                 memcpy(buf, src, len);
4175                 buf += len;
4176                 off += len;
4177                 count -= len;
4178         }
4179
4180         kref_put(&dump->kref, ipr_release_dump);
4181         return rc;
4182 }
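/*
 * A sketch of the dump layout walked above. After each region the read
 * offset is rebased (off -= region size) so the next region is indexed
 * from zero:
 *
 *      [0, sizeof(driver_dump))  driver dump header
 *      [0, sdt_end)              SDT table; the entry count comes from the
 *                                SDT header on SIS-64, or is the fixed
 *                                IPR_FMT2_NUM_SDT_ENTRIES otherwise
 *      [0, ...)                  IOA data, one page per ioa_data[] slot,
 *                                copied in at most page-sized chunks
 */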
4183
4184 /**
4185  * ipr_alloc_dump - Prepare for adapter dump
4186  * @ioa_cfg:    ioa config struct
4187  *
4188  * Return value:
4189  *      0 on success / other on failure
4190  **/
4191 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4192 {
4193         struct ipr_dump *dump;
4194         __be32 **ioa_data;
4195         unsigned long lock_flags = 0;
4196
4197         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4198
4199         if (!dump) {
4200                 ipr_err("Dump memory allocation failed\n");
4201                 return -ENOMEM;
4202         }
4203
4204         if (ioa_cfg->sis64)
4205                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4206         else
4207                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4208
4209         if (!ioa_data) {
4210                 ipr_err("Dump memory allocation failed\n");
4211                 kfree(dump);
4212                 return -ENOMEM;
4213         }
4214
4215         dump->ioa_dump.ioa_data = ioa_data;
4216
4217         kref_init(&dump->kref);
4218         dump->ioa_cfg = ioa_cfg;
4219
4220         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4221
4222         if (INACTIVE != ioa_cfg->sdt_state) {
4223                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4224                 vfree(dump->ioa_dump.ioa_data);
4225                 kfree(dump);
4226                 return 0;
4227         }
4228
4229         ioa_cfg->dump = dump;
4230         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4231         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4232                 ioa_cfg->dump_taken = 1;
4233                 schedule_work(&ioa_cfg->work_q);
4234         }
4235         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4236
4237         return 0;
4238 }
4239
4240 /**
4241  * ipr_free_dump - Free adapter dump memory
4242  * @ioa_cfg:    ioa config struct
4243  *
4244  * Return value:
4245  *      0 on success / other on failure
4246  **/
4247 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4248 {
4249         struct ipr_dump *dump;
4250         unsigned long lock_flags = 0;
4251
4252         ENTER;
4253
4254         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4255         dump = ioa_cfg->dump;
4256         if (!dump) {
4257                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4258                 return 0;
4259         }
4260
4261         ioa_cfg->dump = NULL;
4262         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4263
4264         kref_put(&dump->kref, ipr_release_dump);
4265
4266         LEAVE;
4267         return 0;
4268 }
4269
4270 /**
4271  * ipr_write_dump - Setup dump state of adapter
4272  * @filp:               open sysfs file
4273  * @kobj:               kobject struct
4274  * @bin_attr:           bin_attribute struct
4275  * @buf:                buffer
4276  * @off:                offset
4277  * @count:              buffer size
4278  *
4279  * Return value:
4280  *      count on success / negative errno on failure
4281  **/
4282 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4283                               struct bin_attribute *bin_attr,
4284                               char *buf, loff_t off, size_t count)
4285 {
4286         struct device *cdev = container_of(kobj, struct device, kobj);
4287         struct Scsi_Host *shost = class_to_shost(cdev);
4288         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4289         int rc;
4290
4291         if (!capable(CAP_SYS_ADMIN))
4292                 return -EACCES;
4293
4294         if (buf[0] == '1')
4295                 rc = ipr_alloc_dump(ioa_cfg);
4296         else if (buf[0] == '0')
4297                 rc = ipr_free_dump(ioa_cfg);
4298         else
4299                 return -EINVAL;
4300
4301         if (rc)
4302                 return rc;
4303         else
4304                 return count;
4305 }
4306
4307 static struct bin_attribute ipr_dump_attr = {
4308         .attr = {
4309                 .name = "dump",
4310                 .mode = S_IRUSR | S_IWUSR,
4311         },
4312         .size = 0,
4313         .read = ipr_read_dump,
4314         .write = ipr_write_dump
4315 };
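/*
 * Example usage; the paths and host number are assumptions: write '1' to
 * arm dump collection, read the binary dump back once it has been
 * obtained, then write '0' to free the dump memory:
 *
 *      # echo 1 > /sys/class/scsi_host/host0/dump
 *      # dd if=/sys/class/scsi_host/host0/dump of=/tmp/ipr.dump bs=4k
 *      # echo 0 > /sys/class/scsi_host/host0/dump
 */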
4316 #else
4317 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4318 #endif
4319
4320 /**
4321  * ipr_change_queue_depth - Change the device's queue depth
4322  * @sdev:       scsi device struct
4323  * @qdepth:     depth to set
4325  *
4326  * Return value:
4327  *      actual depth set
4328  **/
4329 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4330 {
4331         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4332         struct ipr_resource_entry *res;
4333         unsigned long lock_flags = 0;
4334
4335         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4336         res = (struct ipr_resource_entry *)sdev->hostdata;
4337
4338         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4339                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4340         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4341
4342         scsi_change_queue_depth(sdev, qdepth);
4343         return sdev->queue_depth;
4344 }
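/*
 * Example (device address assumed): a requested depth of 64 on a SATA
 * (GATA) resource is clamped to IPR_MAX_CMD_PER_ATA_LUN before being
 * handed to scsi_change_queue_depth(), e.g. when set through sysfs:
 *
 *      # echo 64 > /sys/bus/scsi/devices/0:0:1:0/queue_depth
 */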
4345
4346 /**
4347  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4348  * @dev:        device struct
4349  * @attr:       device attribute structure
4350  * @buf:        buffer
4351  *
4352  * Return value:
4353  *      number of bytes printed to buffer
4354  **/
4355 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4356 {
4357         struct scsi_device *sdev = to_scsi_device(dev);
4358         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4359         struct ipr_resource_entry *res;
4360         unsigned long lock_flags = 0;
4361         ssize_t len = -ENXIO;
4362
4363         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4364         res = (struct ipr_resource_entry *)sdev->hostdata;
4365         if (res)
4366                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4367         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4368         return len;
4369 }
4370
4371 static struct device_attribute ipr_adapter_handle_attr = {
4372         .attr = {
4373                 .name =         "adapter_handle",
4374                 .mode =         S_IRUSR,
4375         },
4376         .show = ipr_show_adapter_handle
4377 };
4378
4379 /**
4380  * ipr_show_resource_path - Show the resource path or the resource address for
4381  *                          this device.
4382  * @dev:        device struct
4383  * @attr:       device attribute structure
4384  * @buf:        buffer
4385  *
4386  * Return value:
4387  *      number of bytes printed to buffer
4388  **/
4389 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4390 {
4391         struct scsi_device *sdev = to_scsi_device(dev);
4392         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4393         struct ipr_resource_entry *res;
4394         unsigned long lock_flags = 0;
4395         ssize_t len = -ENXIO;
4396         char buffer[IPR_MAX_RES_PATH_LENGTH];
4397
4398         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4399         res = (struct ipr_resource_entry *)sdev->hostdata;
4400         if (res && ioa_cfg->sis64)
4401                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4402                                __ipr_format_res_path(res->res_path, buffer,
4403                                                      sizeof(buffer)));
4404         else if (res)
4405                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4406                                res->bus, res->target, res->lun);
4407
4408         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4409         return len;
4410 }
4411
4412 static struct device_attribute ipr_resource_path_attr = {
4413         .attr = {
4414                 .name =         "resource_path",
4415                 .mode =         S_IRUGO,
4416         },
4417         .show = ipr_show_resource_path
4418 };
4419
4420 /**
4421  * ipr_show_device_id - Show the device_id for this device.
4422  * @dev:        device struct
4423  * @attr:       device attribute structure
4424  * @buf:        buffer
4425  *
4426  * Return value:
4427  *      number of bytes printed to buffer
4428  **/
4429 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4430 {
4431         struct scsi_device *sdev = to_scsi_device(dev);
4432         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4433         struct ipr_resource_entry *res;
4434         unsigned long lock_flags = 0;
4435         ssize_t len = -ENXIO;
4436
4437         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4438         res = (struct ipr_resource_entry *)sdev->hostdata;
4439         if (res && ioa_cfg->sis64)
4440                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4441         else if (res)
4442                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4443
4444         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4445         return len;
4446 }
4447
4448 static struct device_attribute ipr_device_id_attr = {
4449         .attr = {
4450                 .name =         "device_id",
4451                 .mode =         S_IRUGO,
4452         },
4453         .show = ipr_show_device_id
4454 };
4455
4456 /**
4457  * ipr_show_resource_type - Show the resource type for this device.
4458  * @dev:        device struct
4459  * @attr:       device attribute structure
4460  * @buf:        buffer
4461  *
4462  * Return value:
4463  *      number of bytes printed to buffer
4464  **/
4465 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4466 {
4467         struct scsi_device *sdev = to_scsi_device(dev);
4468         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4469         struct ipr_resource_entry *res;
4470         unsigned long lock_flags = 0;
4471         ssize_t len = -ENXIO;
4472
4473         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4474         res = (struct ipr_resource_entry *)sdev->hostdata;
4475
4476         if (res)
4477                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4478
4479         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4480         return len;
4481 }
4482
4483 static struct device_attribute ipr_resource_type_attr = {
4484         .attr = {
4485                 .name =         "resource_type",
4486                 .mode =         S_IRUGO,
4487         },
4488         .show = ipr_show_resource_type
4489 };
4490
4491 static struct device_attribute *ipr_dev_attrs[] = {
4492         &ipr_adapter_handle_attr,
4493         &ipr_resource_path_attr,
4494         &ipr_device_id_attr,
4495         &ipr_resource_type_attr,
4496         NULL,
4497 };
4498
4499 /**
4500  * ipr_biosparam - Return the HSC mapping
4501  * @sdev:                       scsi device struct
4502  * @block_device:       block device pointer
4503  * @capacity:           capacity of the device
4504  * @parm:                       Array containing returned HSC values.
4505  *
4506  * This function generates the HSC parms that fdisk uses.
4507  * We want to make sure we return something that places partitions
4508  * on 4k boundaries for best performance with the IOA.
4509  *
4510  * Return value:
4511  *      0 on success
4512  **/
4513 static int ipr_biosparam(struct scsi_device *sdev,
4514                          struct block_device *block_device,
4515                          sector_t capacity, int *parm)
4516 {
4517         int heads, sectors;
4518         sector_t cylinders;
4519
4520         heads = 128;
4521         sectors = 32;
4522
4523         cylinders = capacity;
4524         sector_div(cylinders, (128 * 32));
4525
4526         /* return result */
4527         parm[0] = heads;
4528         parm[1] = sectors;
4529         parm[2] = cylinders;
4530
4531         return 0;
4532 }
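/*
 * Worked example: each cylinder spans 128 heads * 32 sectors = 4096
 * sectors, i.e. 2 MiB at 512 bytes per sector, so a 2097152-sector
 * (1 GiB) disk reports 2097152 / 4096 = 512 cylinders. Since 4096 is a
 * multiple of 8 sectors, cylinder-aligned partitions always land on 4k
 * boundaries, which is the alignment the IOA prefers.
 */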
4533
4534 /**
4535  * ipr_find_starget - Find target based on bus/target.
4536  * @starget:    scsi target struct
4537  *
4538  * Return value:
4539  *      resource entry pointer if found / NULL if not found
4540  **/
4541 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4542 {
4543         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4544         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4545         struct ipr_resource_entry *res;
4546
4547         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4548                 if ((res->bus == starget->channel) &&
4549                     (res->target == starget->id)) {
4550                         return res;
4551                 }
4552         }
4553
4554         return NULL;
4555 }
4556
4557 static struct ata_port_info sata_port_info;
4558
4559 /**
4560  * ipr_target_alloc - Prepare for commands to a SCSI target
4561  * @starget:    scsi target struct
4562  *
4563  * If the device is a SATA device, this function allocates an
4564  * ATA port with libata, else it does nothing.
4565  *
4566  * Return value:
4567  *      0 on success / non-0 on failure
4568  **/
4569 static int ipr_target_alloc(struct scsi_target *starget)
4570 {
4571         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4572         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4573         struct ipr_sata_port *sata_port;
4574         struct ata_port *ap;
4575         struct ipr_resource_entry *res;
4576         unsigned long lock_flags;
4577
4578         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4579         res = ipr_find_starget(starget);
4580         starget->hostdata = NULL;
4581
4582         if (res && ipr_is_gata(res)) {
4583                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4584                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4585                 if (!sata_port)
4586                         return -ENOMEM;
4587
4588                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4589                 if (ap) {
4590                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4591                         sata_port->ioa_cfg = ioa_cfg;
4592                         sata_port->ap = ap;
4593                         sata_port->res = res;
4594
4595                         res->sata_port = sata_port;
4596                         ap->private_data = sata_port;
4597                         starget->hostdata = sata_port;
4598                 } else {
4599                         kfree(sata_port);
4600                         return -ENOMEM;
4601                 }
4602         }
4603         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4604
4605         return 0;
4606 }
4607
4608 /**
4609  * ipr_target_destroy - Destroy a SCSI target
4610  * @starget:    scsi target struct
4611  *
4612  * If the device was a SATA device, this function frees the libata
4613  * ATA port, else it does nothing.
4614  *
4615  **/
4616 static void ipr_target_destroy(struct scsi_target *starget)
4617 {
4618         struct ipr_sata_port *sata_port = starget->hostdata;
4619         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4620         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4621
4622         if (ioa_cfg->sis64) {
4623                 if (!ipr_find_starget(starget)) {
4624                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4625                                 clear_bit(starget->id, ioa_cfg->array_ids);
4626                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4627                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4628                         else if (starget->channel == 0)
4629                                 clear_bit(starget->id, ioa_cfg->target_ids);
4630                 }
4631         }
4632
4633         if (sata_port) {
4634                 starget->hostdata = NULL;
4635                 ata_sas_port_destroy(sata_port->ap);
4636                 kfree(sata_port);
4637         }
4638 }
4639
4640 /**
4641  * ipr_find_sdev - Find device based on bus/target/lun.
4642  * @sdev:       scsi device struct
4643  *
4644  * Return value:
4645  *      resource entry pointer if found / NULL if not found
4646  **/
4647 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4648 {
4649         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4650         struct ipr_resource_entry *res;
4651
4652         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4653                 if ((res->bus == sdev->channel) &&
4654                     (res->target == sdev->id) &&
4655                     (res->lun == sdev->lun))
4656                         return res;
4657         }
4658
4659         return NULL;
4660 }
4661
4662 /**
4663  * ipr_slave_destroy - Unconfigure a SCSI device
4664  * @sdev:       scsi device struct
4665  *
4666  * Return value:
4667  *      nothing
4668  **/
4669 static void ipr_slave_destroy(struct scsi_device *sdev)
4670 {
4671         struct ipr_resource_entry *res;
4672         struct ipr_ioa_cfg *ioa_cfg;
4673         unsigned long lock_flags = 0;
4674
4675         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4676
4677         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4678         res = (struct ipr_resource_entry *) sdev->hostdata;
4679         if (res) {
4680                 if (res->sata_port)
4681                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4682                 sdev->hostdata = NULL;
4683                 res->sdev = NULL;
4684                 res->sata_port = NULL;
4685         }
4686         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4687 }
4688
4689 /**
4690  * ipr_slave_configure - Configure a SCSI device
4691  * @sdev:       scsi device struct
4692  *
4693  * This function configures the specified scsi device.
4694  *
4695  * Return value:
4696  *      0 on success
4697  **/
4698 static int ipr_slave_configure(struct scsi_device *sdev)
4699 {
4700         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4701         struct ipr_resource_entry *res;
4702         struct ata_port *ap = NULL;
4703         unsigned long lock_flags = 0;
4704         char buffer[IPR_MAX_RES_PATH_LENGTH];
4705
4706         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4707         res = sdev->hostdata;
4708         if (res) {
4709                 if (ipr_is_af_dasd_device(res))
4710                         sdev->type = TYPE_RAID;
4711                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4712                         sdev->scsi_level = 4;
4713                         sdev->no_uld_attach = 1;
4714                 }
4715                 if (ipr_is_vset_device(res)) {
4716                         blk_queue_rq_timeout(sdev->request_queue,
4717                                              IPR_VSET_RW_TIMEOUT);
4718                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4719                 }
4720                 if (ipr_is_gata(res) && res->sata_port)
4721                         ap = res->sata_port->ap;
4722                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4723
4724                 if (ap) {
4725                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4726                         ata_sas_slave_configure(sdev, ap);
4727                 }
4728
4729                 if (ioa_cfg->sis64)
4730                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4731                                     ipr_format_res_path(ioa_cfg,
4732                                                         res->res_path, buffer, sizeof(buffer)));
4733                 return 0;
4734         }
4735         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4736         return 0;
4737 }
4738
4739 /**
4740  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4741  * @sdev:       scsi device struct
4742  *
4743  * This function initializes an ATA port so that future commands
4744  * sent through queuecommand will work.
4745  *
4746  * Return value:
4747  *      0 on success
4748  **/
4749 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4750 {
4751         struct ipr_sata_port *sata_port = NULL;
4752         int rc = -ENXIO;
4753
4754         ENTER;
4755         if (sdev->sdev_target)
4756                 sata_port = sdev->sdev_target->hostdata;
4757         if (sata_port) {
4758                 rc = ata_sas_port_init(sata_port->ap);
4759                 if (rc == 0)
4760                         rc = ata_sas_sync_probe(sata_port->ap);
4761         }
4762
4763         if (rc)
4764                 ipr_slave_destroy(sdev);
4765
4766         LEAVE;
4767         return rc;
4768 }
4769
4770 /**
4771  * ipr_slave_alloc - Prepare for commands to a device.
4772  * @sdev:       scsi device struct
4773  *
4774  * This function saves a pointer to the resource entry
4775  * in the scsi device struct if the device exists. We
4776  * can then use this pointer in ipr_queuecommand when
4777  * handling new commands.
4778  *
4779  * Return value:
4780  *      0 on success / -ENXIO if device does not exist
4781  **/
4782 static int ipr_slave_alloc(struct scsi_device *sdev)
4783 {
4784         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4785         struct ipr_resource_entry *res;
4786         unsigned long lock_flags;
4787         int rc = -ENXIO;
4788
4789         sdev->hostdata = NULL;
4790
4791         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4792
4793         res = ipr_find_sdev(sdev);
4794         if (res) {
4795                 res->sdev = sdev;
4796                 res->add_to_ml = 0;
4797                 res->in_erp = 0;
4798                 sdev->hostdata = res;
4799                 if (!ipr_is_naca_model(res))
4800                         res->needs_sync_complete = 1;
4801                 rc = 0;
4802                 if (ipr_is_gata(res)) {
4803                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4804                         return ipr_ata_slave_alloc(sdev);
4805                 }
4806         }
4807
4808         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4809
4810         return rc;
4811 }
4812
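/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/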
4813 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4814 {
4815         struct ipr_ioa_cfg *ioa_cfg;
4816         unsigned long lock_flags = 0;
4817         int rc = SUCCESS;
4818
4819         ENTER;
4820         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4821         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4822
4823         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4824                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4825                 dev_err(&ioa_cfg->pdev->dev,
4826                         "Adapter being reset as a result of error recovery.\n");
4827
4828                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4829                         ioa_cfg->sdt_state = GET_DUMP;
4830         }
4831
4832         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4833         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4834         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4835
4836         /* If we got hit with a host reset while we were already resetting
4837          * the adapter, and that reset failed, fail the host reset as well. */
4838         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4839                 ipr_trace;
4840                 rc = FAILED;
4841         }
4842
4843         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4844         LEAVE;
4845         return rc;
4846 }
4847
4848 /**
4849  * ipr_device_reset - Reset the device
4850  * @ioa_cfg:    ioa config struct
4851  * @res:                resource entry struct
4852  *
4853  * This function issues a device reset to the affected device.
4854  * If the device is a SCSI device, a LUN reset will be sent
4855  * to the device first. If that does not work, a target reset
4856  * will be sent. If the device is a SATA device, a PHY reset will
4857  * be sent.
4858  *
4859  * Return value:
4860  *      0 on success / non-zero on failure
4861  **/
4862 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4863                             struct ipr_resource_entry *res)
4864 {
4865         struct ipr_cmnd *ipr_cmd;
4866         struct ipr_ioarcb *ioarcb;
4867         struct ipr_cmd_pkt *cmd_pkt;
4868         struct ipr_ioarcb_ata_regs *regs;
4869         u32 ioasc;
4870
4871         ENTER;
4872         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4873         ioarcb = &ipr_cmd->ioarcb;
4874         cmd_pkt = &ioarcb->cmd_pkt;
4875
4876         if (ipr_cmd->ioa_cfg->sis64) {
4877                 regs = &ipr_cmd->i.ata_ioadl.regs;
4878                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4879         } else
4880                 regs = &ioarcb->u.add_data.u.regs;
4881
4882         ioarcb->res_handle = res->res_handle;
4883         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4884         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4885         if (ipr_is_gata(res)) {
4886                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4887                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4888                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4889         }
4890
4891         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4892         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4893         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4894         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4895                 if (ipr_cmd->ioa_cfg->sis64)
4896                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4897                                sizeof(struct ipr_ioasa_gata));
4898                 else
4899                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4900                                sizeof(struct ipr_ioasa_gata));
4901         }
4902
4903         LEAVE;
4904         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4905 }
4906
4907 /**
4908  * ipr_sata_reset - Reset the SATA port
4909  * @link:       SATA link to reset
4910  * @classes:    class of the attached device
4911  *
4912  * This function issues a SATA phy reset to the affected ATA link.
4913  *
4914  * Return value:
4915  *      0 on success / non-zero on failure
4916  **/
4917 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4918                                 unsigned long deadline)
4919 {
4920         struct ipr_sata_port *sata_port = link->ap->private_data;
4921         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4922         struct ipr_resource_entry *res;
4923         unsigned long lock_flags = 0;
4924         int rc = -ENXIO;
4925
4926         ENTER;
4927         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4928         while (ioa_cfg->in_reset_reload) {
4929                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4930                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4931                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4932         }
4933
4934         res = sata_port->res;
4935         if (res) {
4936                 rc = ipr_device_reset(ioa_cfg, res);
4937                 *classes = res->ata_class;
4938         }
4939
4940         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4941         LEAVE;
4942         return rc;
4943 }
4944
4945 /**
4946  * ipr_eh_dev_reset - Reset the device
4947  * @scsi_cmd:   scsi command struct
4948  *
4949  * This function issues a device reset to the affected device.
4950  * A LUN reset will be sent to the device first. If that does
4951  * not work, a target reset will be sent.
4952  *
4953  * Return value:
4954  *      SUCCESS / FAILED
4955  **/
4956 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4957 {
4958         struct ipr_cmnd *ipr_cmd;
4959         struct ipr_ioa_cfg *ioa_cfg;
4960         struct ipr_resource_entry *res;
4961         struct ata_port *ap;
4962         int rc = 0;
4963         struct ipr_hrr_queue *hrrq;
4964
4965         ENTER;
4966         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4967         res = scsi_cmd->device->hostdata;
4968
4969         if (!res)
4970                 return FAILED;
4971
4972         /*
4973          * If we are currently going through reset/reload, return failed. This will force the
4974          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4975          * reset to complete
4976          */
4977         if (ioa_cfg->in_reset_reload)
4978                 return FAILED;
4979         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
4980                 return FAILED;
4981
4982         for_each_hrrq(hrrq, ioa_cfg) {
4983                 spin_lock(&hrrq->_lock);
4984                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4985                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4986                                 if (ipr_cmd->scsi_cmd)
4987                                         ipr_cmd->done = ipr_scsi_eh_done;
4988                                 if (ipr_cmd->qc)
4989                                         ipr_cmd->done = ipr_sata_eh_done;
4990                                 if (ipr_cmd->qc &&
4991                                     !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4992                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4993                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4994                                 }
4995                         }
4996                 }
4997                 spin_unlock(&hrrq->_lock);
4998         }
4999         res->resetting_device = 1;
5000         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5001
5002         if (ipr_is_gata(res) && res->sata_port) {
5003                 ap = res->sata_port->ap;
5004                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5005                 ata_std_error_handler(ap);
5006                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5007
5008                 for_each_hrrq(hrrq, ioa_cfg) {
5009                         spin_lock(&hrrq->_lock);
5010                         list_for_each_entry(ipr_cmd,
5011                                             &hrrq->hrrq_pending_q, queue) {
5012                                 if (ipr_cmd->ioarcb.res_handle ==
5013                                     res->res_handle) {
5014                                         rc = -EIO;
5015                                         break;
5016                                 }
5017                         }
5018                         spin_unlock(&hrrq->_lock);
5019                 }
5020         } else
5021                 rc = ipr_device_reset(ioa_cfg, res);
5022         res->resetting_device = 0;
5023         res->reset_occurred = 1;
5024
5025         LEAVE;
5026         return rc ? FAILED : SUCCESS;
5027 }
5028
5029 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5030 {
5031         int rc;
5032
5033         spin_lock_irq(cmd->device->host->host_lock);
5034         rc = __ipr_eh_dev_reset(cmd);
5035         spin_unlock_irq(cmd->device->host->host_lock);
5036
5037         return rc;
5038 }
5039
5040 /**
5041  * ipr_bus_reset_done - Op done function for bus reset.
5042  * @ipr_cmd:    ipr command struct
5043  *
5044  * This function is the op done function for a bus reset
5045  *
5046  * Return value:
5047  *      none
5048  **/
5049 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5050 {
5051         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5052         struct ipr_resource_entry *res;
5053
5054         ENTER;
5055         if (!ioa_cfg->sis64)
5056                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5057                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5058                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5059                                 break;
5060                         }
5061                 }
5062
5063         /*
5064          * If the abort has not completed, indicate that the reset has; else
5065          * call the abort's done function to wake the sleeping eh thread.
5066          */
5067         if (ipr_cmd->sibling->sibling)
5068                 ipr_cmd->sibling->sibling = NULL;
5069         else
5070                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5071
5072         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5073         LEAVE;
5074 }
5075
5076 /**
5077  * ipr_abort_timeout - An abort task has timed out
5078  * @ipr_cmd:    ipr command struct
5079  *
5080  * This function handles when an abort task times out. If this
5081  * happens we issue a bus reset since we have resources tied
5082  * up that must be freed before returning to the midlayer.
5083  *
5084  * Return value:
5085  *      none
5086  **/
5087 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5088 {
5089         struct ipr_cmnd *reset_cmd;
5090         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5091         struct ipr_cmd_pkt *cmd_pkt;
5092         unsigned long lock_flags = 0;
5093
5094         ENTER;
5095         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5096         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5097                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5098                 return;
5099         }
5100
5101         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5102         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5103         ipr_cmd->sibling = reset_cmd;
5104         reset_cmd->sibling = ipr_cmd;
5105         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5106         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5107         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5108         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5109         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5110
5111         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5112         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5113         LEAVE;
5114 }
5115
5116 /**
5117  * ipr_cancel_op - Cancel specified op
5118  * @scsi_cmd:   scsi command struct
5119  *
5120  * This function cancels the specified op.
5121  *
5122  * Return value:
5123  *      SUCCESS / FAILED
5124  **/
5125 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5126 {
5127         struct ipr_cmnd *ipr_cmd;
5128         struct ipr_ioa_cfg *ioa_cfg;
5129         struct ipr_resource_entry *res;
5130         struct ipr_cmd_pkt *cmd_pkt;
5131         u32 ioasc, int_reg;
5132         int op_found = 0;
5133         struct ipr_hrr_queue *hrrq;
5134
5135         ENTER;
5136         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5137         res = scsi_cmd->device->hostdata;
5138
5139         /* If we are currently going through reset/reload, return failed.
5140          * This will force the mid-layer to call ipr_eh_host_reset,
5141          * which will then go to sleep and wait for the reset to complete
5142          */
5143         if (ioa_cfg->in_reset_reload ||
5144             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5145                 return FAILED;
5146         if (!res)
5147                 return FAILED;
5148
5149         /*
5150          * If we are aborting a timed out op, chances are that the timeout was caused
5151          * by a still not detected EEH error. In such cases, reading a register will
5152          * trigger the EEH recovery infrastructure.
5153          */
5154         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5155
5156         if (!ipr_is_gscsi(res))
5157                 return FAILED;
5158
5159         for_each_hrrq(hrrq, ioa_cfg) {
5160                 spin_lock(&hrrq->_lock);
5161                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5162                         if (ipr_cmd->scsi_cmd == scsi_cmd) {
5163                                 ipr_cmd->done = ipr_scsi_eh_done;
5164                                 op_found = 1;
5165                                 break;
5166                         }
5167                 }
5168                 spin_unlock(&hrrq->_lock);
5169         }
5170
5171         if (!op_found)
5172                 return SUCCESS;
5173
5174         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5175         ipr_cmd->ioarcb.res_handle = res->res_handle;
5176         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5177         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5178         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5179         ipr_cmd->u.sdev = scsi_cmd->device;
5180
5181         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5182                     scsi_cmd->cmnd[0]);
5183         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5184         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5185
5186         /*
5187          * If the abort task timed out and we sent a bus reset, we will get
5188          * one of the following responses to the abort
5189          */
5190         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5191                 ioasc = 0;
5192                 ipr_trace;
5193         }
5194
5195         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5196         if (!ipr_is_naca_model(res))
5197                 res->needs_sync_complete = 1;
5198
5199         LEAVE;
5200         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5201 }
5202
5203 /**
5204  * ipr_scan_finished - Report whether the initial device scan is done
5205  * @shost:      scsi host struct
 * @elapsed_time:       elapsed scan time in jiffies
5206  *
5207  * Return value:
5208  *      0 if scan in progress / 1 if scan is complete
5209  **/
5210 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5211 {
5212         unsigned long lock_flags;
5213         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5214         int rc = 0;
5215
5216         spin_lock_irqsave(shost->host_lock, lock_flags);
5217         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5218                 rc = 1;
5219         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5220                 rc = 1;
5221         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5222         return rc;
5223 }
5224
5225 /**
5226  * ipr_eh_abort - Abort a single op
5227  * @scsi_cmd:   scsi command struct
5228  *
5229  * Return value:
5230  *      SUCCESS / FAILED
5231  **/
5232 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5233 {
5234         unsigned long flags;
5235         int rc;
5236
5237         ENTER;
5238
5239         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5240         rc = ipr_cancel_op(scsi_cmd);
5241         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5242
5243         LEAVE;
5244         return rc;
5245 }
5246
5247 /**
5248  * ipr_handle_other_interrupt - Handle "other" interrupts
5249  * @ioa_cfg:    ioa config struct
5250  * @int_reg:    interrupt register
5251  *
5252  * Return value:
5253  *      IRQ_NONE / IRQ_HANDLED
5254  **/
5255 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5256                                               u32 int_reg)
5257 {
5258         irqreturn_t rc = IRQ_HANDLED;
5259         u32 int_mask_reg;
5260
5261         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5262         int_reg &= ~int_mask_reg;
5263
5264         /* If an interrupt on the adapter did not occur, ignore it.
5265          * Or in the case of SIS 64, check for a stage change interrupt.
5266          */
5267         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5268                 if (ioa_cfg->sis64) {
5269                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5270                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5271                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5272
5273                                 /* clear stage change */
5274                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5275                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5276                                 list_del(&ioa_cfg->reset_cmd->queue);
5277                                 del_timer(&ioa_cfg->reset_cmd->timer);
5278                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5279                                 return IRQ_HANDLED;
5280                         }
5281                 }
5282
5283                 return IRQ_NONE;
5284         }
5285
5286         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5287                 /* Mask the interrupt */
5288                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5289
5290                 /* Clear the interrupt */
5291                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5292                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5293
5294                 list_del(&ioa_cfg->reset_cmd->queue);
5295                 del_timer(&ioa_cfg->reset_cmd->timer);
5296                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5297         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5298                 if (ioa_cfg->clear_isr) {
5299                         if (ipr_debug && printk_ratelimit())
5300                                 dev_err(&ioa_cfg->pdev->dev,
5301                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5302                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5303                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5304                         return IRQ_NONE;
5305                 }
5306         } else {
5307                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5308                         ioa_cfg->ioa_unit_checked = 1;
5309                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5310                         dev_err(&ioa_cfg->pdev->dev,
5311                                 "No Host RRQ. 0x%08X\n", int_reg);
5312                 else
5313                         dev_err(&ioa_cfg->pdev->dev,
5314                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5315
5316                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5317                         ioa_cfg->sdt_state = GET_DUMP;
5318
5319                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5320                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5321         }
5322
5323         return rc;
5324 }
5325
5326 /**
5327  * ipr_isr_eh - Interrupt service routine error handler
5328  * @ioa_cfg:    ioa config struct
5329  * @msg:        message to log
 * @number:     value logged along with the message
5330  *
5331  * Return value:
5332  *      none
5333  **/
5334 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5335 {
5336         ioa_cfg->errors_logged++;
5337         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5338
5339         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5340                 ioa_cfg->sdt_state = GET_DUMP;
5341
5342         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5343 }
5344
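/**
 * ipr_process_hrrq - Process completed responses from an HRRQ
 * @hrr_queue:  host request response queue
 * @budget:     maximum number of responses to process, -1 for no limit
 * @doneq:      list to move completed commands to
 *
 * Return value:
 *      number of responses processed
 **/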
5345 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5346                                                 struct list_head *doneq)
5347 {
5348         u32 ioasc;
5349         u16 cmd_index;
5350         struct ipr_cmnd *ipr_cmd;
5351         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5352         int num_hrrq = 0;
5353
5354         /* If interrupts are disabled, ignore the interrupt */
5355         if (!hrr_queue->allow_interrupts)
5356                 return 0;
5357
5358         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5359                hrr_queue->toggle_bit) {
5360
5361                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5362                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5363                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5364
5365                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5366                              cmd_index < hrr_queue->min_cmd_id)) {
5367                         ipr_isr_eh(ioa_cfg,
5368                                 "Invalid response handle from IOA: ",
5369                                 cmd_index);
5370                         break;
5371                 }
5372
5373                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5374                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5375
5376                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5377
5378                 list_move_tail(&ipr_cmd->queue, doneq);
5379
5380                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5381                         hrr_queue->hrrq_curr++;
5382                 } else {
5383                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5384                         hrr_queue->toggle_bit ^= 1u;
5385                 }
5386                 num_hrrq++;
5387                 if (budget > 0 && num_hrrq >= budget)
5388                         break;
5389         }
5390
5391         return num_hrrq;
5392 }
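/*
 * A sketch of the toggle-bit protocol used above: the HRRQ is a circular
 * queue of __be32 response handles. The IOA inverts the sense of
 * IPR_HRRQ_TOGGLE_BIT each time it wraps the queue, so an entry is valid
 * only while (entry & IPR_HRRQ_TOGGLE_BIT) == hrr_queue->toggle_bit.
 * When the host wraps hrrq_curr back to hrrq_start it flips its own
 * toggle_bit to match, and leftover entries from the previous pass stop
 * matching, which is what terminates the while loop.
 */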
5393
5394 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5395 {
5396         struct ipr_ioa_cfg *ioa_cfg;
5397         struct ipr_hrr_queue *hrrq;
5398         struct ipr_cmnd *ipr_cmd, *temp;
5399         unsigned long hrrq_flags;
5400         int completed_ops;
5401         LIST_HEAD(doneq);
5402
5403         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5404         ioa_cfg = hrrq->ioa_cfg;
5405
5406         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5407         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5408
5409         if (completed_ops < budget)
5410                 blk_iopoll_complete(iop);
5411         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5412
5413         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5414                 list_del(&ipr_cmd->queue);
5415                 del_timer(&ipr_cmd->timer);
5416                 ipr_cmd->fast_done(ipr_cmd);
5417         }
5418
5419         return completed_ops;
5420 }
5421
5422 /**
5423  * ipr_isr - Interrupt service routine
5424  * @irq:        irq number
5425  * @devp:       pointer to ioa config struct
5426  *
5427  * Return value:
5428  *      IRQ_NONE / IRQ_HANDLED
5429  **/
5430 static irqreturn_t ipr_isr(int irq, void *devp)
5431 {
5432         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5433         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5434         unsigned long hrrq_flags = 0;
5435         u32 int_reg = 0;
5436         int num_hrrq = 0;
5437         int irq_none = 0;
5438         struct ipr_cmnd *ipr_cmd, *temp;
5439         irqreturn_t rc = IRQ_NONE;
5440         LIST_HEAD(doneq);
5441
5442         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5443         /* If interrupts are disabled, ignore the interrupt */
5444         if (!hrrq->allow_interrupts) {
5445                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5446                 return IRQ_NONE;
5447         }
5448
5449         while (1) {
5450                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5451                         rc =  IRQ_HANDLED;
5452
5453                         if (!ioa_cfg->clear_isr)
5454                                 break;
5455
5456                         /* Clear the PCI interrupt */
5457                         num_hrrq = 0;
5458                         do {
5459                                 writel(IPR_PCII_HRRQ_UPDATED,
5460                                      ioa_cfg->regs.clr_interrupt_reg32);
5461                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5462                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5463                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5464
5465                 } else if (rc == IRQ_NONE && irq_none == 0) {
5466                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5467                         irq_none++;
5468                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5469                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5470                         ipr_isr_eh(ioa_cfg,
5471                                 "Error clearing HRRQ: ", num_hrrq);
5472                         rc = IRQ_HANDLED;
5473                         break;
5474                 } else
5475                         break;
5476         }
5477
5478         if (unlikely(rc == IRQ_NONE))
5479                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5480
5481         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5482         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5483                 list_del(&ipr_cmd->queue);
5484                 del_timer(&ipr_cmd->timer);
5485                 ipr_cmd->fast_done(ipr_cmd);
5486         }
5487         return rc;
5488 }
5489
5490 /**
5491  * ipr_isr_mhrrq - Interrupt service routine
5492  * @irq:        irq number
5493  * @devp:       pointer to ioa config struct
5494  *
5495  * Return value:
5496  *      IRQ_NONE / IRQ_HANDLED
5497  **/
5498 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5499 {
5500         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5501         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5502         unsigned long hrrq_flags = 0;
5503         struct ipr_cmnd *ipr_cmd, *temp;
5504         irqreturn_t rc = IRQ_NONE;
5505         LIST_HEAD(doneq);
5506
5507         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5508
5509         /* If interrupts are disabled, ignore the interrupt */
5510         if (!hrrq->allow_interrupts) {
5511                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5512                 return IRQ_NONE;
5513         }
5514
5515         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5516                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5517                        hrrq->toggle_bit) {
5518                         if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5519                                 blk_iopoll_sched(&hrrq->iopoll);
5520                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5521                         return IRQ_HANDLED;
5522                 }
5523         } else {
5524                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5525                         hrrq->toggle_bit)
5526
5527                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5528                                 rc =  IRQ_HANDLED;
5529         }
5530
5531         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5532
5533         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5534                 list_del(&ipr_cmd->queue);
5535                 del_timer(&ipr_cmd->timer);
5536                 ipr_cmd->fast_done(ipr_cmd);
5537         }
5538         return rc;
5539 }
5540
5541 /**
5542  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5543  * @ioa_cfg:    ioa config struct
5544  * @ipr_cmd:    ipr command struct
5545  *
5546  * Return value:
5547  *      0 on success / -1 on failure
5548  **/
5549 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5550                              struct ipr_cmnd *ipr_cmd)
5551 {
5552         int i, nseg;
5553         struct scatterlist *sg;
5554         u32 length;
5555         u32 ioadl_flags = 0;
5556         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5557         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5558         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5559
5560         length = scsi_bufflen(scsi_cmd);
5561         if (!length)
5562                 return 0;
5563
5564         nseg = scsi_dma_map(scsi_cmd);
5565         if (nseg < 0) {
5566                 if (printk_ratelimit())
5567                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5568                 return -1;
5569         }
5570
5571         ipr_cmd->dma_use_sg = nseg;
5572
5573         ioarcb->data_transfer_length = cpu_to_be32(length);
5574         ioarcb->ioadl_len =
5575                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5576
5577         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5578                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5579                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5580         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5581                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5582
5583         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5584                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5585                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5586                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5587         }
5588
5589         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5590         return 0;
5591 }
5592
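/*
 * Illustrative example (hypothetical values): for a two-segment 8KB
 * write, the loop above would build descriptors along these lines, with
 * the addresses supplied by the DMA mapping:
 *
 *	ioadl64[0].flags    = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
 *	ioadl64[0].data_len = cpu_to_be32(4096);
 *	ioadl64[0].address  = cpu_to_be64(first_seg_dma_addr);
 *	ioadl64[1].flags    = cpu_to_be32(IPR_IOADL_FLAGS_WRITE |
 *					  IPR_IOADL_FLAGS_LAST);
 *	ioadl64[1].data_len = cpu_to_be32(4096);
 *	ioadl64[1].address  = cpu_to_be64(second_seg_dma_addr);
 *
 * IPR_IOADL_FLAGS_LAST marks the final descriptor in the list.
 */
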
5593 /**
5594  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5595  * @ioa_cfg:    ioa config struct
5596  * @ipr_cmd:    ipr command struct
5597  *
5598  * Return value:
5599  *      0 on success / -1 on failure
5600  **/
5601 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5602                            struct ipr_cmnd *ipr_cmd)
5603 {
5604         int i, nseg;
5605         struct scatterlist *sg;
5606         u32 length;
5607         u32 ioadl_flags = 0;
5608         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5609         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5610         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5611
5612         length = scsi_bufflen(scsi_cmd);
5613         if (!length)
5614                 return 0;
5615
5616         nseg = scsi_dma_map(scsi_cmd);
5617         if (nseg < 0) {
5618                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5619                 return -1;
5620         }
5621
5622         ipr_cmd->dma_use_sg = nseg;
5623
5624         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5625                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5626                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5627                 ioarcb->data_transfer_length = cpu_to_be32(length);
5628                 ioarcb->ioadl_len =
5629                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5630         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5631                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5632                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5633                 ioarcb->read_ioadl_len =
5634                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5635         }
5636
5637         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5638                 ioadl = ioarcb->u.add_data.u.ioadl;
5639                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5640                                     offsetof(struct ipr_ioarcb, u.add_data));
5641                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5642         }
5643
5644         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5645                 ioadl[i].flags_and_data_len =
5646                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5647                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5648         }
5649
5650         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5651         return 0;
5652 }
5653
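/*
 * Sketch (editorial): in the 32-bit format, the flags and the segment
 * length share one big-endian word, e.g. for a hypothetical 4KB read
 * segment:
 *
 *	ioadl[0].flags_and_data_len =
 *		cpu_to_be32(IPR_IOADL_FLAGS_READ | 4096);
 *	ioadl[0].address = cpu_to_be32(seg_dma_addr);
 *
 * When the whole list fits in the IOARCB (the ARRAY_SIZE() test above),
 * the descriptors are stored inline in the request block itself, saving
 * the adapter a separate DMA fetch of the scatter/gather list.
 */
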
5654 /**
5655  * ipr_erp_done - Process completion of ERP for a device
5656  * @ipr_cmd:            ipr command struct
5657  *
5658  * This function copies the sense buffer into the scsi_cmd
5659  * struct and pushes the scsi_done function.
5660  *
5661  * Return value:
5662  *      nothing
5663  **/
5664 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5665 {
5666         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5667         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5668         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5669
5670         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5671                 scsi_cmd->result |= (DID_ERROR << 16);
5672                 scmd_printk(KERN_ERR, scsi_cmd,
5673                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5674         } else {
5675                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5676                        SCSI_SENSE_BUFFERSIZE);
5677         }
5678
5679         if (res) {
5680                 if (!ipr_is_naca_model(res))
5681                         res->needs_sync_complete = 1;
5682                 res->in_erp = 0;
5683         }
5684         scsi_dma_unmap(ipr_cmd->scsi_cmd);
5685         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5686         scsi_cmd->scsi_done(scsi_cmd);
5687 }
5688
5689 /**
5690  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5691  * @ipr_cmd:    ipr command struct
5692  *
5693  * Return value:
5694  *      none
5695  **/
5696 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5697 {
5698         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5699         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5700         dma_addr_t dma_addr = ipr_cmd->dma_addr;
5701
5702         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5703         ioarcb->data_transfer_length = 0;
5704         ioarcb->read_data_transfer_length = 0;
5705         ioarcb->ioadl_len = 0;
5706         ioarcb->read_ioadl_len = 0;
5707         ioasa->hdr.ioasc = 0;
5708         ioasa->hdr.residual_data_len = 0;
5709
5710         if (ipr_cmd->ioa_cfg->sis64)
5711                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5712                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5713         else {
5714                 ioarcb->write_ioadl_addr =
5715                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5716                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5717         }
5718 }
5719
5720 /**
5721  * ipr_erp_request_sense - Send request sense to a device
5722  * @ipr_cmd:    ipr command struct
5723  *
5724  * This function sends a request sense to a device as a result
5725  * of a check condition.
5726  *
5727  * Return value:
5728  *      nothing
5729  **/
5730 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5731 {
5732         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5733         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5734
5735         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5736                 ipr_erp_done(ipr_cmd);
5737                 return;
5738         }
5739
5740         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5741
5742         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5743         cmd_pkt->cdb[0] = REQUEST_SENSE;
5744         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5745         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5746         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5747         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5748
5749         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5750                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5751
5752         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5753                    IPR_REQUEST_SENSE_TIMEOUT * 2);
5754 }
5755
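/*
 * The resulting CDB is a standard 6-byte REQUEST SENSE (illustrative):
 *
 *	cdb[0] = 0x03;			REQUEST_SENSE opcode
 *	cdb[4] = SCSI_SENSE_BUFFERSIZE;	allocation length
 *
 * with the DMA sense buffer described by a single read IOADL entry and
 * ipr_erp_done() run on completion.
 */
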
5756 /**
5757  * ipr_erp_cancel_all - Send cancel all to a device
5758  * @ipr_cmd:    ipr command struct
5759  *
5760  * This function sends a cancel all to a device to clear the
5761  * queue. If we are running TCQ on the device, QERR is set to 1,
5762  * which means all outstanding ops have been dropped on the floor.
5763  * Cancel all will return them to us.
5764  *
5765  * Return value:
5766  *      nothing
5767  **/
5768 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5769 {
5770         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5771         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5772         struct ipr_cmd_pkt *cmd_pkt;
5773
5774         res->in_erp = 1;
5775
5776         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5777
5778         if (!scsi_cmd->device->simple_tags) {
5779                 ipr_erp_request_sense(ipr_cmd);
5780                 return;
5781         }
5782
5783         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5784         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5785         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5786
5787         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5788                    IPR_CANCEL_ALL_TIMEOUT);
5789 }
5790
5791 /**
5792  * ipr_dump_ioasa - Dump contents of IOASA
5793  * @ioa_cfg:    ioa config struct
5794  * @ipr_cmd:    ipr command struct
5795  * @res:                resource entry struct
5796  *
5797  * This function is invoked by the interrupt handler when ops
5798  * fail. It will log the IOASA if appropriate. Only called
5799  * for GPDD ops.
5800  *
5801  * Return value:
5802  *      none
5803  **/
5804 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5805                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5806 {
5807         int i;
5808         u16 data_len;
5809         u32 ioasc, fd_ioasc;
5810         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5811         __be32 *ioasa_data = (__be32 *)ioasa;
5812         int error_index;
5813
5814         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5815         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5816
5817         if (0 == ioasc)
5818                 return;
5819
5820         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5821                 return;
5822
5823         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5824                 error_index = ipr_get_error(fd_ioasc);
5825         else
5826                 error_index = ipr_get_error(ioasc);
5827
5828         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5829                 /* Don't log an error if the IOA already logged one */
5830                 if (ioasa->hdr.ilid != 0)
5831                         return;
5832
5833                 if (!ipr_is_gscsi(res))
5834                         return;
5835
5836                 if (ipr_error_table[error_index].log_ioasa == 0)
5837                         return;
5838         }
5839
5840         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5841
5842         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5843         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5844                 data_len = sizeof(struct ipr_ioasa64);
5845         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5846                 data_len = sizeof(struct ipr_ioasa);
5847
5848         ipr_err("IOASA Dump:\n");
5849
5850         for (i = 0; i < data_len / 4; i += 4) {
5851                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5852                         be32_to_cpu(ioasa_data[i]),
5853                         be32_to_cpu(ioasa_data[i+1]),
5854                         be32_to_cpu(ioasa_data[i+2]),
5855                         be32_to_cpu(ioasa_data[i+3]));
5856         }
5857 }
5858
5859 /**
5860  * ipr_gen_sense - Generate SCSI sense data from an IOASA
5861  * @ipr_cmd:    ipr command struct
5863  *
5864  * Return value:
5865  *      none
5866  **/
5867 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5868 {
5869         u32 failing_lba;
5870         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5871         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5872         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5873         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5874
5875         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5876
5877         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5878                 return;
5879
5880         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5881
5882         if (ipr_is_vset_device(res) &&
5883             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5884             ioasa->u.vset.failing_lba_hi != 0) {
5885                 sense_buf[0] = 0x72;
5886                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5887                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5888                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5889
5890                 sense_buf[7] = 12;
5891                 sense_buf[8] = 0;
5892                 sense_buf[9] = 0x0A;
5893                 sense_buf[10] = 0x80;
5894
5895                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5896
5897                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5898                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5899                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5900                 sense_buf[15] = failing_lba & 0x000000ff;
5901
5902                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5903
5904                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5905                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5906                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5907                 sense_buf[19] = failing_lba & 0x000000ff;
5908         } else {
5909                 sense_buf[0] = 0x70;
5910                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5911                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5912                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5913
5914                 /* Illegal request */
5915                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5916                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5917                         sense_buf[7] = 10;      /* additional length */
5918
5919                         /* IOARCB was in error */
5920                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5921                                 sense_buf[15] = 0xC0;
5922                         else    /* Parameter data was invalid */
5923                                 sense_buf[15] = 0x80;
5924
5925                         sense_buf[16] =
5926                             ((IPR_FIELD_POINTER_MASK &
5927                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5928                         sense_buf[17] =
5929                             (IPR_FIELD_POINTER_MASK &
5930                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5931                 } else {
5932                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5933                                 if (ipr_is_vset_device(res))
5934                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5935                                 else
5936                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5937
5938                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
5939                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5940                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5941                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5942                                 sense_buf[6] = failing_lba & 0x000000ff;
5943                         }
5944
5945                         sense_buf[7] = 6;       /* additional length */
5946                 }
5947         }
5948 }
5949
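/*
 * Example layout (hypothetical failing LBA 0x1_00000003 on a vset): the
 * descriptor-format branch above produces
 *
 *	sense_buf[0]  = 0x72;		descriptor-format response code
 *	sense_buf[8]  = 0x00;		information descriptor type
 *	sense_buf[9]  = 0x0A;		descriptor length
 *	sense_buf[10] = 0x80;		VALID bit
 *	sense_buf[12..15]		failing_lba_hi (0x00000001)
 *	sense_buf[16..19]		failing_lba_lo (0x00000003)
 *
 * while the fixed-format (0x70) path encodes a 32-bit failing LBA in
 * bytes 3-6 for the do-not-reallocate case, ORing the Valid bit into
 * byte 0.
 */
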
5950 /**
5951  * ipr_get_autosense - Copy autosense data to sense buffer
5952  * @ipr_cmd:    ipr command struct
5953  *
5954  * This function copies the autosense buffer to the buffer
5955  * in the scsi_cmd, if there is autosense available.
5956  *
5957  * Return value:
5958  *      1 if autosense was available / 0 if not
5959  **/
5960 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5961 {
5962         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5963         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5964
5965         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5966                 return 0;
5967
5968         if (ipr_cmd->ioa_cfg->sis64)
5969                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5970                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5971                            SCSI_SENSE_BUFFERSIZE));
5972         else
5973                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5974                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5975                            SCSI_SENSE_BUFFERSIZE));
5976         return 1;
5977 }
5978
5979 /**
5980  * ipr_erp_start - Process an error response for a SCSI op
5981  * @ioa_cfg:    ioa config struct
5982  * @ipr_cmd:    ipr command struct
5983  *
5984  * This function determines whether or not to initiate ERP
5985  * on the affected device.
5986  *
5987  * Return value:
5988  *      nothing
5989  **/
5990 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5991                               struct ipr_cmnd *ipr_cmd)
5992 {
5993         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5994         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5995         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5996         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5997
5998         if (!res) {
5999                 ipr_scsi_eh_done(ipr_cmd);
6000                 return;
6001         }
6002
6003         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6004                 ipr_gen_sense(ipr_cmd);
6005
6006         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6007
6008         switch (masked_ioasc) {
6009         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6010                 if (ipr_is_naca_model(res))
6011                         scsi_cmd->result |= (DID_ABORT << 16);
6012                 else
6013                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6014                 break;
6015         case IPR_IOASC_IR_RESOURCE_HANDLE:
6016         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6017                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6018                 break;
6019         case IPR_IOASC_HW_SEL_TIMEOUT:
6020                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6021                 if (!ipr_is_naca_model(res))
6022                         res->needs_sync_complete = 1;
6023                 break;
6024         case IPR_IOASC_SYNC_REQUIRED:
6025                 if (!res->in_erp)
6026                         res->needs_sync_complete = 1;
6027                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6028                 break;
6029         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6030         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6031                 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6032                 break;
6033         case IPR_IOASC_BUS_WAS_RESET:
6034         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6035                 /*
6036                  * Report the bus reset and ask for a retry. The device
6037                  * will return CC/UA on the next command.
6038                  */
6039                 if (!res->resetting_device)
6040                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6041                 scsi_cmd->result |= (DID_ERROR << 16);
6042                 if (!ipr_is_naca_model(res))
6043                         res->needs_sync_complete = 1;
6044                 break;
6045         case IPR_IOASC_HW_DEV_BUS_STATUS:
6046                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6047                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6048                         if (!ipr_get_autosense(ipr_cmd)) {
6049                                 if (!ipr_is_naca_model(res)) {
6050                                         ipr_erp_cancel_all(ipr_cmd);
6051                                         return;
6052                                 }
6053                         }
6054                 }
6055                 if (!ipr_is_naca_model(res))
6056                         res->needs_sync_complete = 1;
6057                 break;
6058         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6059                 break;
6060         default:
6061                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6062                         scsi_cmd->result |= (DID_ERROR << 16);
6063                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6064                         res->needs_sync_complete = 1;
6065                 break;
6066         }
6067
6068         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6069         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6070         scsi_cmd->scsi_done(scsi_cmd);
6071 }
6072
6073 /**
6074  * ipr_scsi_done - mid-layer done function
6075  * @ipr_cmd:    ipr command struct
6076  *
6077  * This function is invoked by the interrupt handler for
6078  * ops generated by the SCSI mid-layer
6079  *
6080  * Return value:
6081  *      none
6082  **/
6083 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6084 {
6085         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6086         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6087         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6088         unsigned long hrrq_flags;
6089
6090         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6091
6092         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6093                 scsi_dma_unmap(scsi_cmd);
6094
6095                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6096                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6097                 scsi_cmd->scsi_done(scsi_cmd);
6098                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6099         } else {
6100                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6101                 ipr_erp_start(ioa_cfg, ipr_cmd);
6102                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6103         }
6104 }
6105
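/*
 * Editorial note: this runs as the fast_done handler from the ISR done
 * queues, after the HRRQ lock has been dropped, which is why both the
 * free-list add and the ERP path above re-take hrrq->lock here.
 */
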
6106 /**
6107  * ipr_queuecommand - Queue a mid-layer request
6108  * @shost:              scsi host struct
6109  * @scsi_cmd:   scsi command struct
6110  *
6111  * This function queues a request generated by the mid-layer.
6112  *
6113  * Return value:
6114  *      0 on success
6115  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6116  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6117  **/
6118 static int ipr_queuecommand(struct Scsi_Host *shost,
6119                             struct scsi_cmnd *scsi_cmd)
6120 {
6121         struct ipr_ioa_cfg *ioa_cfg;
6122         struct ipr_resource_entry *res;
6123         struct ipr_ioarcb *ioarcb;
6124         struct ipr_cmnd *ipr_cmd;
6125         unsigned long hrrq_flags, lock_flags;
6126         int rc;
6127         struct ipr_hrr_queue *hrrq;
6128         int hrrq_id;
6129
6130         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6131
6132         scsi_cmd->result = (DID_OK << 16);
6133         res = scsi_cmd->device->hostdata;
6134
6135         if (ipr_is_gata(res) && res->sata_port) {
6136                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6137                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6138                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6139                 return rc;
6140         }
6141
6142         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6143         hrrq = &ioa_cfg->hrrq[hrrq_id];
6144
6145         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6146         /*
6147          * We are currently blocking all devices due to a host reset.
6148          * We have told the host to stop giving us new requests, but
6149          * ERP ops don't count. FIXME
6150          */
6151         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6152                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6153                 return SCSI_MLQUEUE_HOST_BUSY;
6154         }
6155
6156         /*
6157          * FIXME - Create scsi_set_host_offline interface
6158          *  and the ioa_is_dead check can be removed
6159          */
6160         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6161                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6162                 goto err_nodev;
6163         }
6164
6165         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6166         if (ipr_cmd == NULL) {
6167                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6168                 return SCSI_MLQUEUE_HOST_BUSY;
6169         }
6170         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6171
6172         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6173         ioarcb = &ipr_cmd->ioarcb;
6174
6175         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6176         ipr_cmd->scsi_cmd = scsi_cmd;
6177         ipr_cmd->done = ipr_scsi_eh_done;
6178
6179         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6180                 if (scsi_cmd->underflow == 0)
6181                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6182
6183                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6184                 if (ipr_is_gscsi(res) && res->reset_occurred) {
6185                         res->reset_occurred = 0;
6186                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6187                 }
6188                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6189                 if (scsi_cmd->flags & SCMD_TAGGED)
6190                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6191                 else
6192                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6193         }
6194
6195         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6196             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6197                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6198         }
6199
6200         if (ioa_cfg->sis64)
6201                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6202         else
6203                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6204
6205         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6206         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6207                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6208                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6209                 if (!rc)
6210                         scsi_dma_unmap(scsi_cmd);
6211                 return SCSI_MLQUEUE_HOST_BUSY;
6212         }
6213
6214         if (unlikely(hrrq->ioa_is_dead)) {
6215                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6216                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6217                 scsi_dma_unmap(scsi_cmd);
6218                 goto err_nodev;
6219         }
6220
6221         ioarcb->res_handle = res->res_handle;
6222         if (res->needs_sync_complete) {
6223                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6224                 res->needs_sync_complete = 0;
6225         }
6226         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6227         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6228         ipr_send_command(ipr_cmd);
6229         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6230         return 0;
6231
6232 err_nodev:
6233         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6234         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6235         scsi_cmd->result = (DID_NO_CONNECT << 16);
6236         scsi_cmd->scsi_done(scsi_cmd);
6237         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6238         return 0;
6239 }
6240
6241 /**
6242  * ipr_ioctl - IOCTL handler
6243  * @sdev:       scsi device struct
6244  * @cmd:        IOCTL cmd
6245  * @arg:        IOCTL arg
6246  *
6247  * Return value:
6248  *      0 on success / other on failure
6249  **/
6250 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6251 {
6252         struct ipr_resource_entry *res;
6253
6254         res = (struct ipr_resource_entry *)sdev->hostdata;
6255         if (res && ipr_is_gata(res)) {
6256                 if (cmd == HDIO_GET_IDENTITY)
6257                         return -ENOTTY;
6258                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6259         }
6260
6261         return -EINVAL;
6262 }
6263
6264 /**
6265  * ipr_ioa_info - Get information about the card/driver
6266  * @host:       scsi host struct
6267  *
6268  * Return value:
6269  *      pointer to buffer with description string
6270  **/
6271 static const char *ipr_ioa_info(struct Scsi_Host *host)
6272 {
6273         static char buffer[512];
6274         struct ipr_ioa_cfg *ioa_cfg;
6275         unsigned long lock_flags = 0;
6276
6277         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6278
6279         spin_lock_irqsave(host->host_lock, lock_flags);
6280         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6281         spin_unlock_irqrestore(host->host_lock, lock_flags);
6282
6283         return buffer;
6284 }
6285
6286 static struct scsi_host_template driver_template = {
6287         .module = THIS_MODULE,
6288         .name = "IPR",
6289         .info = ipr_ioa_info,
6290         .ioctl = ipr_ioctl,
6291         .queuecommand = ipr_queuecommand,
6292         .eh_abort_handler = ipr_eh_abort,
6293         .eh_device_reset_handler = ipr_eh_dev_reset,
6294         .eh_host_reset_handler = ipr_eh_host_reset,
6295         .slave_alloc = ipr_slave_alloc,
6296         .slave_configure = ipr_slave_configure,
6297         .slave_destroy = ipr_slave_destroy,
6298         .scan_finished = ipr_scan_finished,
6299         .target_alloc = ipr_target_alloc,
6300         .target_destroy = ipr_target_destroy,
6301         .change_queue_depth = ipr_change_queue_depth,
6302         .bios_param = ipr_biosparam,
6303         .can_queue = IPR_MAX_COMMANDS,
6304         .this_id = -1,
6305         .sg_tablesize = IPR_MAX_SGLIST,
6306         .max_sectors = IPR_IOA_MAX_SECTORS,
6307         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6308         .use_clustering = ENABLE_CLUSTERING,
6309         .shost_attrs = ipr_ioa_attrs,
6310         .sdev_attrs = ipr_dev_attrs,
6311         .proc_name = IPR_NAME,
6312         .no_write_same = 1,
6313         .use_blk_tags = 1,
6314 };
6315
6316 /**
6317  * ipr_ata_phy_reset - libata phy_reset handler
6318  * @ap:         ata port to reset
6319  *
6320  **/
6321 static void ipr_ata_phy_reset(struct ata_port *ap)
6322 {
6323         unsigned long flags;
6324         struct ipr_sata_port *sata_port = ap->private_data;
6325         struct ipr_resource_entry *res = sata_port->res;
6326         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6327         int rc;
6328
6329         ENTER;
6330         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6331         while (ioa_cfg->in_reset_reload) {
6332                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6333                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6334                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6335         }
6336
6337         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6338                 goto out_unlock;
6339
6340         rc = ipr_device_reset(ioa_cfg, res);
6341
6342         if (rc) {
6343                 ap->link.device[0].class = ATA_DEV_NONE;
6344                 goto out_unlock;
6345         }
6346
6347         ap->link.device[0].class = res->ata_class;
6348         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6349                 ap->link.device[0].class = ATA_DEV_NONE;
6350
6351 out_unlock:
6352         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6353         LEAVE;
6354 }
6355
6356 /**
6357  * ipr_ata_post_internal - Cleanup after an internal command
6358  * @qc: ATA queued command
6359  *
6360  * Return value:
6361  *      none
6362  **/
6363 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6364 {
6365         struct ipr_sata_port *sata_port = qc->ap->private_data;
6366         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6367         struct ipr_cmnd *ipr_cmd;
6368         struct ipr_hrr_queue *hrrq;
6369         unsigned long flags;
6370
6371         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6372         while (ioa_cfg->in_reset_reload) {
6373                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6374                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6375                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6376         }
6377
6378         for_each_hrrq(hrrq, ioa_cfg) {
6379                 spin_lock(&hrrq->_lock);
6380                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6381                         if (ipr_cmd->qc == qc) {
6382                                 ipr_device_reset(ioa_cfg, sata_port->res);
6383                                 break;
6384                         }
6385                 }
6386                 spin_unlock(&hrrq->_lock);
6387         }
6388         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6389 }
6390
6391 /**
6392  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6393  * @regs:       destination
6394  * @tf: source ATA taskfile
6395  *
6396  * Return value:
6397  *      none
6398  **/
6399 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6400                              struct ata_taskfile *tf)
6401 {
6402         regs->feature = tf->feature;
6403         regs->nsect = tf->nsect;
6404         regs->lbal = tf->lbal;
6405         regs->lbam = tf->lbam;
6406         regs->lbah = tf->lbah;
6407         regs->device = tf->device;
6408         regs->command = tf->command;
6409         regs->hob_feature = tf->hob_feature;
6410         regs->hob_nsect = tf->hob_nsect;
6411         regs->hob_lbal = tf->hob_lbal;
6412         regs->hob_lbam = tf->hob_lbam;
6413         regs->hob_lbah = tf->hob_lbah;
6414         regs->ctl = tf->ctl;
6415 }
6416
6417 /**
6418  * ipr_sata_done - done function for SATA commands
6419  * @ipr_cmd:    ipr command struct
6420  *
6421  * This function is invoked by the interrupt handler for
6422  * ops generated by the SCSI mid-layer to SATA devices
6423  *
6424  * Return value:
6425  *      none
6426  **/
6427 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6428 {
6429         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6430         struct ata_queued_cmd *qc = ipr_cmd->qc;
6431         struct ipr_sata_port *sata_port = qc->ap->private_data;
6432         struct ipr_resource_entry *res = sata_port->res;
6433         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6434
6435         spin_lock(&ipr_cmd->hrrq->_lock);
6436         if (ipr_cmd->ioa_cfg->sis64)
6437                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6438                        sizeof(struct ipr_ioasa_gata));
6439         else
6440                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6441                        sizeof(struct ipr_ioasa_gata));
6442         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6443
6444         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6445                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6446
6447         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6448                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6449         else
6450                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6451         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6452         spin_unlock(&ipr_cmd->hrrq->_lock);
6453         ata_qc_complete(qc);
6454 }
6455
6456 /**
6457  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6458  * @ipr_cmd:    ipr command struct
6459  * @qc:         ATA queued command
6460  *
6461  **/
6462 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6463                                   struct ata_queued_cmd *qc)
6464 {
6465         u32 ioadl_flags = 0;
6466         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6467         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6468         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6469         int len = qc->nbytes;
6470         struct scatterlist *sg;
6471         unsigned int si;
6472         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6473
6474         if (len == 0)
6475                 return;
6476
6477         if (qc->dma_dir == DMA_TO_DEVICE) {
6478                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6479                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6480         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6481                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6482
6483         ioarcb->data_transfer_length = cpu_to_be32(len);
6484         ioarcb->ioadl_len =
6485                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6486         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6487                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6488
6489         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6490                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6491                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6492                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6493
6494                 last_ioadl64 = ioadl64;
6495                 ioadl64++;
6496         }
6497
6498         if (likely(last_ioadl64))
6499                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6500 }
6501
6502 /**
6503  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6504  * @ipr_cmd:    ipr command struct
6505  * @qc:         ATA queued command
6506  *
6507  **/
6508 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6509                                 struct ata_queued_cmd *qc)
6510 {
6511         u32 ioadl_flags = 0;
6512         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6513         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6514         struct ipr_ioadl_desc *last_ioadl = NULL;
6515         int len = qc->nbytes;
6516         struct scatterlist *sg;
6517         unsigned int si;
6518
6519         if (len == 0)
6520                 return;
6521
6522         if (qc->dma_dir == DMA_TO_DEVICE) {
6523                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6524                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6525                 ioarcb->data_transfer_length = cpu_to_be32(len);
6526                 ioarcb->ioadl_len =
6527                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6528         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6529                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6530                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6531                 ioarcb->read_ioadl_len =
6532                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6533         }
6534
6535         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6536                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6537                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6538
6539                 last_ioadl = ioadl;
6540                 ioadl++;
6541         }
6542
6543         if (likely(last_ioadl))
6544                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6545 }
6546
6547 /**
6548  * ipr_qc_defer - Get a free ipr_cmd
6549  * @qc: queued command
6550  *
6551  * Return value:
6552  *      0 on success / ATA_DEFER_LINK if no command block is available
6553  **/
6554 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6555 {
6556         struct ata_port *ap = qc->ap;
6557         struct ipr_sata_port *sata_port = ap->private_data;
6558         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6559         struct ipr_cmnd *ipr_cmd;
6560         struct ipr_hrr_queue *hrrq;
6561         int hrrq_id;
6562
6563         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6564         hrrq = &ioa_cfg->hrrq[hrrq_id];
6565
6566         qc->lldd_task = NULL;
6567         spin_lock(&hrrq->_lock);
6568         if (unlikely(hrrq->ioa_is_dead)) {
6569                 spin_unlock(&hrrq->_lock);
6570                 return 0;
6571         }
6572
6573         if (unlikely(!hrrq->allow_cmds)) {
6574                 spin_unlock(&hrrq->_lock);
6575                 return ATA_DEFER_LINK;
6576         }
6577
6578         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6579         if (ipr_cmd == NULL) {
6580                 spin_unlock(&hrrq->_lock);
6581                 return ATA_DEFER_LINK;
6582         }
6583
6584         qc->lldd_task = ipr_cmd;
6585         spin_unlock(&hrrq->_lock);
6586         return 0;
6587 }
6588
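/*
 * Editorial note: ipr_qc_defer() and ipr_qc_issue() form a two-step
 * reservation. Defer stashes a free command block in qc->lldd_task (or
 * asks libata to retry with ATA_DEFER_LINK); issue then consumes it,
 * retrying the reservation inline if libata did not call defer first.
 */
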
6589 /**
6590  * ipr_qc_issue - Issue a SATA qc to a device
6591  * @qc: queued command
6592  *
6593  * Return value:
6594  *      0 on success / AC_ERR_* on failure
6595  **/
6596 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6597 {
6598         struct ata_port *ap = qc->ap;
6599         struct ipr_sata_port *sata_port = ap->private_data;
6600         struct ipr_resource_entry *res = sata_port->res;
6601         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6602         struct ipr_cmnd *ipr_cmd;
6603         struct ipr_ioarcb *ioarcb;
6604         struct ipr_ioarcb_ata_regs *regs;
6605
6606         if (qc->lldd_task == NULL)
6607                 ipr_qc_defer(qc);
6608
6609         ipr_cmd = qc->lldd_task;
6610         if (ipr_cmd == NULL)
6611                 return AC_ERR_SYSTEM;
6612
6613         qc->lldd_task = NULL;
6614         spin_lock(&ipr_cmd->hrrq->_lock);
6615         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6616                         ipr_cmd->hrrq->ioa_is_dead)) {
6617                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6618                 spin_unlock(&ipr_cmd->hrrq->_lock);
6619                 return AC_ERR_SYSTEM;
6620         }
6621
6622         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6623         ioarcb = &ipr_cmd->ioarcb;
6624
6625         if (ioa_cfg->sis64) {
6626                 regs = &ipr_cmd->i.ata_ioadl.regs;
6627                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6628         } else
6629                 regs = &ioarcb->u.add_data.u.regs;
6630
6631         memset(regs, 0, sizeof(*regs));
6632         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6633
6634         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6635         ipr_cmd->qc = qc;
6636         ipr_cmd->done = ipr_sata_done;
6637         ipr_cmd->ioarcb.res_handle = res->res_handle;
6638         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6639         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6640         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6641         ipr_cmd->dma_use_sg = qc->n_elem;
6642
6643         if (ioa_cfg->sis64)
6644                 ipr_build_ata_ioadl64(ipr_cmd, qc);
6645         else
6646                 ipr_build_ata_ioadl(ipr_cmd, qc);
6647
6648         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6649         ipr_copy_sata_tf(regs, &qc->tf);
6650         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6651         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6652
6653         switch (qc->tf.protocol) {
6654         case ATA_PROT_NODATA:
6655         case ATA_PROT_PIO:
6656                 break;
6657
6658         case ATA_PROT_DMA:
6659                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6660                 break;
6661
6662         case ATAPI_PROT_PIO:
6663         case ATAPI_PROT_NODATA:
6664                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6665                 break;
6666
6667         case ATAPI_PROT_DMA:
6668                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6669                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6670                 break;
6671
6672         default:
6673                 WARN_ON(1);
6674                 spin_unlock(&ipr_cmd->hrrq->_lock);
6675                 return AC_ERR_INVALID;
6676         }
6677
6678         ipr_send_command(ipr_cmd);
6679         spin_unlock(&ipr_cmd->hrrq->_lock);
6680
6681         return 0;
6682 }
6683
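/*
 * Summary of the protocol-to-flag mapping used in the switch above:
 *
 *	ATA_PROT_NODATA/PIO	no extra flags
 *	ATA_PROT_DMA		IPR_ATA_FLAG_XFER_TYPE_DMA
 *	ATAPI_PROT_PIO/NODATA	IPR_ATA_FLAG_PACKET_CMD
 *	ATAPI_PROT_DMA		IPR_ATA_FLAG_PACKET_CMD |
 *				IPR_ATA_FLAG_XFER_TYPE_DMA
 */
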
6684 /**
6685  * ipr_qc_fill_rtf - Read result TF
6686  * @qc: ATA queued command
6687  *
6688  * Return value:
6689  *      true
6690  **/
6691 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6692 {
6693         struct ipr_sata_port *sata_port = qc->ap->private_data;
6694         struct ipr_ioasa_gata *g = &sata_port->ioasa;
6695         struct ata_taskfile *tf = &qc->result_tf;
6696
6697         tf->feature = g->error;
6698         tf->nsect = g->nsect;
6699         tf->lbal = g->lbal;
6700         tf->lbam = g->lbam;
6701         tf->lbah = g->lbah;
6702         tf->device = g->device;
6703         tf->command = g->status;
6704         tf->hob_nsect = g->hob_nsect;
6705         tf->hob_lbal = g->hob_lbal;
6706         tf->hob_lbam = g->hob_lbam;
6707         tf->hob_lbah = g->hob_lbah;
6708
6709         return true;
6710 }
6711
6712 static struct ata_port_operations ipr_sata_ops = {
6713         .phy_reset = ipr_ata_phy_reset,
6714         .hardreset = ipr_sata_reset,
6715         .post_internal_cmd = ipr_ata_post_internal,
6716         .qc_prep = ata_noop_qc_prep,
6717         .qc_defer = ipr_qc_defer,
6718         .qc_issue = ipr_qc_issue,
6719         .qc_fill_rtf = ipr_qc_fill_rtf,
6720         .port_start = ata_sas_port_start,
6721         .port_stop = ata_sas_port_stop
6722 };
6723
6724 static struct ata_port_info sata_port_info = {
6725         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6726         .pio_mask       = ATA_PIO4_ONLY,
6727         .mwdma_mask     = ATA_MWDMA2,
6728         .udma_mask      = ATA_UDMA6,
6729         .port_ops       = &ipr_sata_ops
6730 };
6731
6732 #ifdef CONFIG_PPC_PSERIES
6733 static const u16 ipr_blocked_processors[] = {
6734         PVR_NORTHSTAR,
6735         PVR_PULSAR,
6736         PVR_POWER4,
6737         PVR_ICESTAR,
6738         PVR_SSTAR,
6739         PVR_POWER4p,
6740         PVR_630,
6741         PVR_630p
6742 };
6743
6744 /**
6745  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6746  * @ioa_cfg:    ioa cfg struct
6747  *
6748  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6749  * certain pSeries hardware. This function determines if the given
6750  * adapter is in one of these configurations or not.
6751  *
6752  * Return value:
6753  *      1 if adapter is not supported / 0 if adapter is supported
6754  **/
6755 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6756 {
6757         int i;
6758
6759         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6760                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6761                         if (pvr_version_is(ipr_blocked_processors[i]))
6762                                 return 1;
6763                 }
6764         }
6765         return 0;
6766 }
6767 #else
6768 #define ipr_invalid_adapter(ioa_cfg) 0
6769 #endif
6770
6771 /**
6772  * ipr_ioa_bringdown_done - IOA bring down completion.
6773  * @ipr_cmd:    ipr command struct
6774  *
6775  * This function processes the completion of an adapter bring down.
6776  * It wakes any reset sleepers.
6777  *
6778  * Return value:
6779  *      IPR_RC_JOB_RETURN
6780  **/
6781 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6782 {
6783         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6784         int i;
6785
6786         ENTER;
6787         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6788                 ipr_trace;
6789                 spin_unlock_irq(ioa_cfg->host->host_lock);
6790                 scsi_unblock_requests(ioa_cfg->host);
6791                 spin_lock_irq(ioa_cfg->host->host_lock);
6792         }
6793
6794         ioa_cfg->in_reset_reload = 0;
6795         ioa_cfg->reset_retries = 0;
6796         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6797                 spin_lock(&ioa_cfg->hrrq[i]._lock);
6798                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6799                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6800         }
6801         wmb();
6802
6803         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6804         wake_up_all(&ioa_cfg->reset_wait_q);
6805         LEAVE;
6806
6807         return IPR_RC_JOB_RETURN;
6808 }
6809
6810 /**
6811  * ipr_ioa_reset_done - IOA reset completion.
6812  * @ipr_cmd:    ipr command struct
6813  *
6814  * This function processes the completion of an adapter reset.
6815  * It schedules any necessary mid-layer add/removes and
6816  * wakes any reset sleepers.
6817  *
6818  * Return value:
6819  *      IPR_RC_JOB_RETURN
6820  **/
6821 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6822 {
6823         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6824         struct ipr_resource_entry *res;
6825         struct ipr_hostrcb *hostrcb, *temp;
6826         int i = 0, j;
6827
6828         ENTER;
6829         ioa_cfg->in_reset_reload = 0;
6830         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6831                 spin_lock(&ioa_cfg->hrrq[j]._lock);
6832                 ioa_cfg->hrrq[j].allow_cmds = 1;
6833                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6834         }
6835         wmb();
6836         ioa_cfg->reset_cmd = NULL;
6837         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6838
6839         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6840                 if (res->add_to_ml || res->del_from_ml) {
6841                         ipr_trace;
6842                         break;
6843                 }
6844         }
6845         schedule_work(&ioa_cfg->work_q);
6846
6847         list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6848                 list_del(&hostrcb->queue);
6849                 if (i++ < IPR_NUM_LOG_HCAMS)
6850                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6851                 else
6852                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6853         }
6854
6855         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6856         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6857
6858         ioa_cfg->reset_retries = 0;
6859         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6860         wake_up_all(&ioa_cfg->reset_wait_q);
6861
6862         spin_unlock(ioa_cfg->host->host_lock);
6863         scsi_unblock_requests(ioa_cfg->host);
6864         spin_lock(ioa_cfg->host->host_lock);
6865
6866         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6867                 scsi_block_requests(ioa_cfg->host);
6868
6869         schedule_work(&ioa_cfg->work_q);
6870         LEAVE;
6871         return IPR_RC_JOB_RETURN;
6872 }
6873
6874 /**
6875  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6876  * @supported_dev:      supported device struct
6877  * @vpids:                      vendor product id struct
6878  *
6879  * Return value:
6880  *      none
6881  **/
6882 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6883                                  struct ipr_std_inq_vpids *vpids)
6884 {
6885         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6886         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6887         supported_dev->num_records = 1;
6888         supported_dev->data_length =
6889                 cpu_to_be16(sizeof(struct ipr_supported_device));
6890         supported_dev->reserved = 0;
6891 }
6892
6893 /**
6894  * ipr_set_supported_devs - Send Set Supported Devices for a device
6895  * @ipr_cmd:    ipr command struct
6896  *
6897  * This function sends a Set Supported Devices to the adapter
6898  *
6899  * Return value:
6900  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6901  **/
6902 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6903 {
6904         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6905         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6906         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6907         struct ipr_resource_entry *res = ipr_cmd->u.res;
6908
6909         ipr_cmd->job_step = ipr_ioa_reset_done;
6910
6911         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6912                 if (!ipr_is_scsi_disk(res))
6913                         continue;
6914
6915                 ipr_cmd->u.res = res;
6916                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6917
6918                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6919                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6920                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6921
6922                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6923                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6924                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6925                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6926
6927                 ipr_init_ioadl(ipr_cmd,
6928                                ioa_cfg->vpd_cbs_dma +
6929                                  offsetof(struct ipr_misc_cbs, supp_dev),
6930                                sizeof(struct ipr_supported_device),
6931                                IPR_IOADL_FLAGS_WRITE_LAST);
6932
6933                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6934                            IPR_SET_SUP_DEVICE_TIMEOUT);
6935
6936                 if (!ioa_cfg->sis64)
6937                         ipr_cmd->job_step = ipr_set_supported_devs;
6938                 LEAVE;
6939                 return IPR_RC_JOB_RETURN;
6940         }
6941
6942         LEAVE;
6943         return IPR_RC_JOB_CONTINUE;
6944 }
6945
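/*
 * Editorial note on the reset job convention used here and in the other
 * reset job steps: a step either returns IPR_RC_JOB_CONTINUE, so the
 * caller advances straight to the next job_step, or it starts an async
 * op with ipr_do_req() and returns IPR_RC_JOB_RETURN, so the job
 * resumes from that op's completion. The list_for_each_entry_continue()
 * above makes each completion pick up the device scan at the resource
 * following ipr_cmd->u.res.
 */
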
6946 /**
6947  * ipr_get_mode_page - Locate specified mode page
6948  * @mode_pages: mode page buffer
6949  * @page_code:  page code to find
6950  * @len:                minimum required length for mode page
6951  *
6952  * Return value:
6953  *      pointer to mode page / NULL on failure
6954  **/
6955 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6956                                u32 page_code, u32 len)
6957 {
6958         struct ipr_mode_page_hdr *mode_hdr;
6959         u32 page_length;
6960         u32 length;
6961
6962         if (!mode_pages || (mode_pages->hdr.length == 0))
6963                 return NULL;
6964
6965         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6966         mode_hdr = (struct ipr_mode_page_hdr *)
6967                 (mode_pages->data + mode_pages->hdr.block_desc_len);
6968
6969         while (length) {
6970                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6971                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6972                                 return mode_hdr;
6973                         break;
6974                 } else {
6975                         page_length = (sizeof(struct ipr_mode_page_hdr) +
6976                                        mode_hdr->page_length);
6977                         length -= page_length;
6978                         mode_hdr = (struct ipr_mode_page_hdr *)
6979                                 ((unsigned long)mode_hdr + page_length);
6980                 }
6981         }
6982         return NULL;
6983 }
6984
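/*
 * Usage sketch (hypothetical caller): request at least the full page
 * structure and check the result, since the lookup returns NULL when
 * the page is absent or shorter than @len:
 *
 *	struct ipr_mode_page28 *page;
 *
 *	page = ipr_get_mode_page(mode_pages, 0x28, sizeof(*page));
 *	if (!page)
 *		return;
 */
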
6985 /**
6986  * ipr_check_term_power - Check for term power errors
6987  * @ioa_cfg:    ioa config struct
6988  * @mode_pages: IOAFP mode pages buffer
6989  *
6990  * Check the IOAFP's mode page 28 for term power errors
6991  *
6992  * Return value:
6993  *      nothing
6994  **/
6995 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6996                                  struct ipr_mode_pages *mode_pages)
6997 {
6998         int i;
6999         int entry_length;
7000         struct ipr_dev_bus_entry *bus;
7001         struct ipr_mode_page28 *mode_page;
7002
7003         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7004                                       sizeof(struct ipr_mode_page28));
7005
7006         entry_length = mode_page->entry_length;
7007
7008         bus = mode_page->bus;
7009
7010         for (i = 0; i < mode_page->num_entries; i++) {
7011                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7012                         dev_err(&ioa_cfg->pdev->dev,
7013                                 "Term power is absent on scsi bus %d\n",
7014                                 bus->res_addr.bus);
7015                 }
7016
7017                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7018         }
7019 }
7020
7021 /**
7022  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7023  * @ioa_cfg:    ioa config struct
7024  *
7025  * Looks through the config table checking for SES devices. If
7026  * an SES device is found in the SES table with a maximum SCSI
7027  * bus speed, that bus's speed is limited accordingly.
7028  *
7029  * Return value:
7030  *      none
7031  **/
7032 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7033 {
7034         u32 max_xfer_rate;
7035         int i;
7036
7037         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7038                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7039                                                        ioa_cfg->bus_attr[i].bus_width);
7040
7041                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7042                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7043         }
7044 }
7045
7046 /**
7047  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7048  * @ioa_cfg:    ioa config struct
7049  * @mode_pages: mode page 28 buffer
7050  *
7051  * Updates mode page 28 based on driver configuration
7052  *
7053  * Return value:
7054  *      none
7055  **/
7056 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7057                                           struct ipr_mode_pages *mode_pages)
7058 {
7059         int i, entry_length;
7060         struct ipr_dev_bus_entry *bus;
7061         struct ipr_bus_attributes *bus_attr;
7062         struct ipr_mode_page28 *mode_page;
7063
7064         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7065                                       sizeof(struct ipr_mode_page28));
7066
7067         entry_length = mode_page->entry_length;
7068
7069         /* Loop for each device bus entry */
7070         for (i = 0, bus = mode_page->bus;
7071              i < mode_page->num_entries;
7072              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7073                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7074                         dev_err(&ioa_cfg->pdev->dev,
7075                                 "Invalid resource address reported: 0x%08X\n",
7076                                 IPR_GET_PHYS_LOC(bus->res_addr));
7077                         continue;
7078                 }
7079
7080                 bus_attr = &ioa_cfg->bus_attr[i];
7081                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7082                 bus->bus_width = bus_attr->bus_width;
7083                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7084                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7085                 if (bus_attr->qas_enabled)
7086                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7087                 else
7088                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7089         }
7090 }
7091
7092 /**
7093  * ipr_build_mode_select - Build a mode select command
7094  * @ipr_cmd:    ipr command struct
7095  * @res_handle: resource handle to send command to
7096  * @parm:               Byte 2 of the Mode Select command
7097  * @dma_addr:   DMA buffer address
7098  * @xfer_len:   data transfer length
7099  *
7100  * Return value:
7101  *      none
7102  **/
7103 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7104                                   __be32 res_handle, u8 parm,
7105                                   dma_addr_t dma_addr, u8 xfer_len)
7106 {
7107         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7108
7109         ioarcb->res_handle = res_handle;
7110         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7111         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7112         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7113         ioarcb->cmd_pkt.cdb[1] = parm;
7114         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7115
7116         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7117 }
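/*
 * parm lands in CDB byte 1 of the MODE SELECT. The callers below pass
 * 0x11 which, in standard SCSI terms, sets the PF (page format, 0x10)
 * and SP (save pages, 0x01) bits.
 */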
7118
7119 /**
7120  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7121  * @ipr_cmd:    ipr command struct
7122  *
7123  * This function sets up the SCSI bus attributes and sends
7124  * a Mode Select for Page 28 to activate them.
7125  *
7126  * Return value:
7127  *      IPR_RC_JOB_RETURN
7128  **/
7129 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7130 {
7131         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7132         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7133         int length;
7134
7135         ENTER;
7136         ipr_scsi_bus_speed_limit(ioa_cfg);
7137         ipr_check_term_power(ioa_cfg, mode_pages);
7138         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7139         length = mode_pages->hdr.length + 1;
7140         mode_pages->hdr.length = 0;
7141
7142         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7143                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7144                               length);
7145
7146         ipr_cmd->job_step = ipr_set_supported_devs;
7147         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7148                                     struct ipr_resource_entry, queue);
7149         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7150
7151         LEAVE;
7152         return IPR_RC_JOB_RETURN;
7153 }
7154
7155 /**
7156  * ipr_build_mode_sense - Builds a mode sense command
7157  * @ipr_cmd:    ipr command struct
7158  * @res_handle:  resource handle to send command to
7159  * @parm:               Byte 2 of mode sense command
7160  * @dma_addr:   DMA address of mode sense buffer
7161  * @xfer_len:   Size of DMA buffer
7162  *
7163  * Return value:
7164  *      none
7165  **/
7166 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7167                                  __be32 res_handle,
7168                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7169 {
7170         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7171
7172         ioarcb->res_handle = res_handle;
7173         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7174         ioarcb->cmd_pkt.cdb[2] = parm;
7175         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7176         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7177
7178         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7179 }
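/*
 * Contrast with ipr_build_mode_select() above: here parm is the page
 * code and lands in CDB byte 2, and the IOADL is set up for a read
 * (adapter -> host); the select builder puts parm in CDB byte 1 and
 * transfers in the write direction (host -> adapter).
 */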
7180
7181 /**
7182  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7183  * @ipr_cmd:    ipr command struct
7184  *
7185  * This function handles the failure of an IOA bringup command.
7186  *
7187  * Return value:
7188  *      IPR_RC_JOB_RETURN
7189  **/
7190 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7191 {
7192         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7193         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7194
7195         dev_err(&ioa_cfg->pdev->dev,
7196                 "0x%02X failed with IOASC: 0x%08X\n",
7197                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7198
7199         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7200         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7201         return IPR_RC_JOB_RETURN;
7202 }
7203
7204 /**
7205  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7206  * @ipr_cmd:    ipr command struct
7207  *
7208  * This function handles the failure of a Mode Sense to the IOAFP.
7209  * Some adapters do not handle all mode pages.
7210  *
7211  * Return value:
7212  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7213  **/
7214 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7215 {
7216         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7217         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7218
7219         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7220                 ipr_cmd->job_step = ipr_set_supported_devs;
7221                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7222                                             struct ipr_resource_entry, queue);
7223                 return IPR_RC_JOB_CONTINUE;
7224         }
7225
7226         return ipr_reset_cmd_failed(ipr_cmd);
7227 }
7228
7229 /**
7230  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7231  * @ipr_cmd:    ipr command struct
7232  *
7233  * This function sends a Page 28 mode sense to the IOA to
7234  * retrieve SCSI bus attributes.
7235  *
7236  * Return value:
7237  *      IPR_RC_JOB_RETURN
7238  **/
7239 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7240 {
7241         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7242
7243         ENTER;
7244         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7245                              0x28, ioa_cfg->vpd_cbs_dma +
7246                              offsetof(struct ipr_misc_cbs, mode_pages),
7247                              sizeof(struct ipr_mode_pages));
7248
7249         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7250         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7251
7252         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7253
7254         LEAVE;
7255         return IPR_RC_JOB_RETURN;
7256 }
7257
7258 /**
7259  * ipr_ioafp_mode_select_page24 - Issue Mode Select Page 24 to IOA
7260  * @ipr_cmd:    ipr command struct
7261  *
7262  * This function enables dual IOA RAID support if possible.
7263  *
7264  * Return value:
7265  *      IPR_RC_JOB_RETURN
7266  **/
7267 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7268 {
7269         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7270         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7271         struct ipr_mode_page24 *mode_page;
7272         int length;
7273
7274         ENTER;
7275         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7276                                       sizeof(struct ipr_mode_page24));
7277
7278         if (mode_page)
7279                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7280
7281         length = mode_pages->hdr.length + 1;
7282         mode_pages->hdr.length = 0;
7283
7284         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7285                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7286                               length);
7287
7288         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7289         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7290
7291         LEAVE;
7292         return IPR_RC_JOB_RETURN;
7293 }
7294
7295 /**
7296  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7297  * @ipr_cmd:    ipr command struct
7298  *
7299  * This function handles the failure of a Mode Sense to the IOAFP.
7300  * Some adapters do not handle all mode pages.
7301  *
7302  * Return value:
7303  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7304  **/
7305 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7306 {
7307         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7308
7309         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7310                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7311                 return IPR_RC_JOB_CONTINUE;
7312         }
7313
7314         return ipr_reset_cmd_failed(ipr_cmd);
7315 }
7316
7317 /**
7318  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7319  * @ipr_cmd:    ipr command struct
7320  *
7321  * This function sends a mode sense to the IOA to retrieve
7322  * the IOA Advanced Function Control mode page.
7323  *
7324  * Return value:
7325  *      IPR_RC_JOB_RETURN
7326  **/
7327 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7328 {
7329         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7330
7331         ENTER;
7332         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7333                              0x24, ioa_cfg->vpd_cbs_dma +
7334                              offsetof(struct ipr_misc_cbs, mode_pages),
7335                              sizeof(struct ipr_mode_pages));
7336
7337         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7338         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7339
7340         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7341
7342         LEAVE;
7343         return IPR_RC_JOB_RETURN;
7344 }
7345
7346 /**
7347  * ipr_init_res_table - Initialize the resource table
7348  * @ipr_cmd:    ipr command struct
7349  *
7350  * This function looks through the existing resource table, comparing
7351  * it with the config table. This function will take care of old/new
7352  * devices and schedule adding/removing them from the mid-layer
7353  * as appropriate.
7354  *
7355  * Return value:
7356  *      IPR_RC_JOB_CONTINUE
7357  **/
7358 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7359 {
7360         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7361         struct ipr_resource_entry *res, *temp;
7362         struct ipr_config_table_entry_wrapper cfgtew;
7363         int entries, found, flag, i;
7364         LIST_HEAD(old_res);
7365
7366         ENTER;
7367         if (ioa_cfg->sis64)
7368                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7369         else
7370                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7371
7372         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7373                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7374
7375         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7376                 list_move_tail(&res->queue, &old_res);
7377
7378         if (ioa_cfg->sis64)
7379                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7380         else
7381                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7382
7383         for (i = 0; i < entries; i++) {
7384                 if (ioa_cfg->sis64)
7385                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7386                 else
7387                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7388                 found = 0;
7389
7390                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7391                         if (ipr_is_same_device(res, &cfgtew)) {
7392                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7393                                 found = 1;
7394                                 break;
7395                         }
7396                 }
7397
7398                 if (!found) {
7399                         if (list_empty(&ioa_cfg->free_res_q)) {
7400                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7401                                 break;
7402                         }
7403
7404                         found = 1;
7405                         res = list_entry(ioa_cfg->free_res_q.next,
7406                                          struct ipr_resource_entry, queue);
7407                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7408                         ipr_init_res_entry(res, &cfgtew);
7409                         res->add_to_ml = 1;
7410                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7411                         res->sdev->allow_restart = 1;
7412
7413                 if (found)
7414                         ipr_update_res_entry(res, &cfgtew);
7415         }
7416
7417         list_for_each_entry_safe(res, temp, &old_res, queue) {
7418                 if (res->sdev) {
7419                         res->del_from_ml = 1;
7420                         res->res_handle = IPR_INVALID_RES_HANDLE;
7421                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7422                 }
7423         }
7424
7425         list_for_each_entry_safe(res, temp, &old_res, queue) {
7426                 ipr_clear_res_target(res);
7427                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7428         }
7429
7430         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7431                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7432         else
7433                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7434
7435         LEAVE;
7436         return IPR_RC_JOB_CONTINUE;
7437 }
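/*
 * Summary of the reconciliation above: (1) every in-use resource is
 * parked on old_res; (2) each config table entry either reclaims its
 * matching resource from old_res or allocates a fresh one flagged
 * add_to_ml; (3) leftovers on old_res that still have a scsi_device
 * are flagged del_from_ml and kept, while the rest are cleared and
 * returned to the free list.
 */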
7438
7439 /**
7440  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7441  * @ipr_cmd:    ipr command struct
7442  *
7443  * This function sends a Query IOA Configuration command
7444  * to the adapter to retrieve the IOA configuration table.
7445  *
7446  * Return value:
7447  *      IPR_RC_JOB_RETURN
7448  **/
7449 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7450 {
7451         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7452         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7453         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7454         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7455
7456         ENTER;
7457         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7458                 ioa_cfg->dual_raid = 1;
7459         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7460                  ucode_vpd->major_release, ucode_vpd->card_type,
7461                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7462         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7463         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7464
7465         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7466         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7467         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7468         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7469
7470         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7471                        IPR_IOADL_FLAGS_READ_LAST);
7472
7473         ipr_cmd->job_step = ipr_init_res_table;
7474
7475         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7476
7477         LEAVE;
7478         return IPR_RC_JOB_RETURN;
7479 }
7480
7481 /**
7482  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7483  * @ipr_cmd:    ipr command struct
 * @flags:      CDB byte 1 flags (e.g. the EVPD bit for VPD pages)
 * @page:       inquiry page code
 * @dma_addr:   DMA address of the inquiry buffer
 * @xfer_len:   size of the inquiry buffer
7484  *
7485  * This utility function sends an inquiry to the adapter.
7486  *
7487  * Return value:
7488  *      none
7489  **/
7490 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7491                               dma_addr_t dma_addr, u8 xfer_len)
7492 {
7493         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7494
7495         ENTER;
7496         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7497         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7498
7499         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7500         ioarcb->cmd_pkt.cdb[1] = flags;
7501         ioarcb->cmd_pkt.cdb[2] = page;
7502         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7503
7504         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7505
7506         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7507         LEAVE;
7508 }
7509
7510 /**
7511  * ipr_inquiry_page_supported - Is the given inquiry page supported
7512  * @page0:              inquiry page 0 buffer
7513  * @page:               page code
7514  *
7515  * This function determines if the specified inquiry page is supported.
7516  *
7517  * Return value:
7518  *      1 if page is supported / 0 if not
7519  **/
7520 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7521 {
7522         int i;
7523
7524         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7525                 if (page0->page[i] == page)
7526                         return 1;
7527
7528         return 0;
7529 }
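/*
 * page0->len is supplied by the adapter; the min_t() above caps the
 * walk at IPR_INQUIRY_PAGE0_ENTRIES so a malformed length cannot run
 * past the end of the fixed-size page0->page[] array.
 */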
7530
7531 /**
7532  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7533  * @ipr_cmd:    ipr command struct
7534  *
7535  * This function sends a Page 0xD0 inquiry to the adapter
7536  * to retrieve adapter capabilities.
7537  *
7538  * Return value:
7539  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7540  **/
7541 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7542 {
7543         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7544         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7545         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7546
7547         ENTER;
7548         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7549         memset(cap, 0, sizeof(*cap));
7550
7551         if (ipr_inquiry_page_supported(page0, 0xD0)) {
7552                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7553                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7554                                   sizeof(struct ipr_inquiry_cap));
7555                 return IPR_RC_JOB_RETURN;
7556         }
7557
7558         LEAVE;
7559         return IPR_RC_JOB_CONTINUE;
7560 }
7561
7562 /**
7563  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7564  * @ipr_cmd:    ipr command struct
7565  *
7566  * This function sends a Page 3 inquiry to the adapter
7567  * to retrieve software VPD information.
7568  *
7569  * Return value:
7570  *      IPR_RC_JOB_RETURN
7571  **/
7572 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7573 {
7574         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7575
7576         ENTER;
7577
7578         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7579
7580         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7581                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7582                           sizeof(struct ipr_inquiry_page3));
7583
7584         LEAVE;
7585         return IPR_RC_JOB_RETURN;
7586 }
7587
7588 /**
7589  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7590  * @ipr_cmd:    ipr command struct
7591  *
7592  * This function sends a Page 0 inquiry to the adapter
7593  * to retrieve supported inquiry pages.
7594  *
7595  * Return value:
7596  *      IPR_RC_JOB_RETURN
7597  **/
7598 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7599 {
7600         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7601         char type[5];
7602
7603         ENTER;
7604
7605         /* Grab the type out of the VPD and store it away */
7606         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7607         type[4] = '\0';
7608         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7609
7610         if (ipr_invalid_adapter(ioa_cfg)) {
7611                 dev_err(&ioa_cfg->pdev->dev,
7612                         "Adapter not supported in this hardware configuration.\n");
7613
7614                 if (!ipr_testmode) {
7615                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7616                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7617                         list_add_tail(&ipr_cmd->queue,
7618                                         &ioa_cfg->hrrq->hrrq_free_q);
7619                         return IPR_RC_JOB_RETURN;
7620                 }
7621         }
7622
7623         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7624
7625         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7626                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7627                           sizeof(struct ipr_inquiry_page0));
7628
7629         LEAVE;
7630         return IPR_RC_JOB_RETURN;
7631 }
7632
7633 /**
7634  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7635  * @ipr_cmd:    ipr command struct
7636  *
7637  * This function sends a standard inquiry to the adapter.
7638  *
7639  * Return value:
7640  *      IPR_RC_JOB_RETURN
7641  **/
7642 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7643 {
7644         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7645
7646         ENTER;
7647         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7648
7649         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7650                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7651                           sizeof(struct ipr_ioa_vpd));
7652
7653         LEAVE;
7654         return IPR_RC_JOB_RETURN;
7655 }
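/*
 * Bringup sequence map, as chained through the job_step assignments
 * in the surrounding functions: identify HRRQ(s) -> standard inquiry
 * -> page 0 inquiry (supported pages) -> page 3 inquiry (firmware
 * VPD) -> page 0xD0 inquiry (capabilities, if supported) -> query
 * IOA config -> init resource table -> mode sense/select page 0x24
 * (dual IOA, if supported) -> mode sense/select page 0x28 -> set
 * supported devices.
 */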
7656
7657 /**
7658  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7659  * @ipr_cmd:    ipr command struct
7660  *
7661  * This function sends an Identify Host Request Response Queue
7662  * command to establish the HRRQ with the adapter.
7663  *
7664  * Return value:
7665  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7666  **/
7667 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7668 {
7669         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7670         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7671         struct ipr_hrr_queue *hrrq;
7672
7673         ENTER;
7674         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7675         dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7676
7677         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7678                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7679
7680                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7681                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7682
7683                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7684                 if (ioa_cfg->sis64)
7685                         ioarcb->cmd_pkt.cdb[1] = 0x1;
7686
7687                 if (ioa_cfg->nvectors == 1)
7688                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7689                 else
7690                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7691
7692                 ioarcb->cmd_pkt.cdb[2] =
7693                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7694                 ioarcb->cmd_pkt.cdb[3] =
7695                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7696                 ioarcb->cmd_pkt.cdb[4] =
7697                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7698                 ioarcb->cmd_pkt.cdb[5] =
7699                         ((u64) hrrq->host_rrq_dma) & 0xff;
7700                 ioarcb->cmd_pkt.cdb[7] =
7701                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7702                 ioarcb->cmd_pkt.cdb[8] =
7703                         (sizeof(u32) * hrrq->size) & 0xff;
7704
7705                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7706                         ioarcb->cmd_pkt.cdb[9] =
7707                                         ioa_cfg->identify_hrrq_index;
7708
7709                 if (ioa_cfg->sis64) {
7710                         ioarcb->cmd_pkt.cdb[10] =
7711                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7712                         ioarcb->cmd_pkt.cdb[11] =
7713                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7714                         ioarcb->cmd_pkt.cdb[12] =
7715                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7716                         ioarcb->cmd_pkt.cdb[13] =
7717                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7718                 }
7719
7720                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7721                         ioarcb->cmd_pkt.cdb[14] =
7722                                         ioa_cfg->identify_hrrq_index;
7723
7724                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7725                            IPR_INTERNAL_TIMEOUT);
7726
7727                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7728                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7729
7730                 LEAVE;
7731                 return IPR_RC_JOB_RETURN;
7732         }
7733
7734         LEAVE;
7735         return IPR_RC_JOB_CONTINUE;
7736 }
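/*
 * CDB layout used above for Identify Host RRQ, as encoded by the
 * code:
 *	cdb[2..5]   bits 31:0 of the host RRQ DMA address
 *	cdb[7..8]   queue size in bytes (sizeof(u32) * hrrq->size)
 *	cdb[9]      queue index, when multiple HRRQs are enabled
 *	cdb[10..13] bits 63:32 of the DMA address (SIS64 only)
 *	cdb[14]     queue index again, when multiple HRRQs are enabled
 */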
7737
7738 /**
7739  * ipr_reset_timer_done - Adapter reset timer function
7740  * @ipr_cmd:    ipr command struct
7741  *
7742  * Description: This function is used in adapter reset processing
7743  * for timing events. If the reset_cmd pointer in the IOA
7744  * config struct does not point to this command, we are doing nested
7745  * resets and fail_all_ops will take care of freeing the
7746  * command block.
7747  *
7748  * Return value:
7749  *      none
7750  **/
7751 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7752 {
7753         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7754         unsigned long lock_flags = 0;
7755
7756         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7757
7758         if (ioa_cfg->reset_cmd == ipr_cmd) {
7759                 list_del(&ipr_cmd->queue);
7760                 ipr_cmd->done(ipr_cmd);
7761         }
7762
7763         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7764 }
7765
7766 /**
7767  * ipr_reset_start_timer - Start a timer for adapter reset job
7768  * @ipr_cmd:    ipr command struct
7769  * @timeout:    timeout value
7770  *
7771  * Description: This function is used in adapter reset processing
7772  * for timing events. If the reset_cmd pointer in the IOA
7773  * config struct does not point to this command, we are doing nested
7774  * resets and fail_all_ops will take care of freeing the
7775  * command block.
7776  *
7777  * Return value:
7778  *      none
7779  **/
7780 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7781                                   unsigned long timeout)
7782 {
7783
7784         ENTER;
7785         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7786         ipr_cmd->done = ipr_reset_ioa_job;
7787
7788         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7789         ipr_cmd->timer.expires = jiffies + timeout;
7790         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7791         add_timer(&ipr_cmd->timer);
7792 }
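/*
 * Typical caller pattern (mirroring ipr_reset_start_bist() below):
 * pick the next job step, arm the timer, and yield to the engine:
 *
 *	ipr_cmd->job_step = ipr_reset_bist_done;
 *	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
 *	return IPR_RC_JOB_RETURN;
 */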
7793
7794 /**
7795  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7796  * @ioa_cfg:    ioa cfg struct
7797  *
7798  * Return value:
7799  *      nothing
7800  **/
7801 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7802 {
7803         struct ipr_hrr_queue *hrrq;
7804
7805         for_each_hrrq(hrrq, ioa_cfg) {
7806                 spin_lock(&hrrq->_lock);
7807                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7808
7809                 /* Initialize Host RRQ pointers */
7810                 hrrq->hrrq_start = hrrq->host_rrq;
7811                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7812                 hrrq->hrrq_curr = hrrq->hrrq_start;
7813                 hrrq->toggle_bit = 1;
7814                 spin_unlock(&hrrq->_lock);
7815         }
7816         wmb();
7817
7818         ioa_cfg->identify_hrrq_index = 0;
7819         if (ioa_cfg->hrrq_num == 1)
7820                 atomic_set(&ioa_cfg->hrrq_index, 0);
7821         else
7822                 atomic_set(&ioa_cfg->hrrq_index, 1);
7823
7824         /* Zero out config table */
7825         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7826 }
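/*
 * Note on toggle_bit: the adapter stamps each response it writes to
 * the host RRQ with a toggle bit that flips on every wrap of the
 * circular queue. Starting the driver's copy at 1 (with the queue
 * zeroed above) means only entries freshly written by the adapter
 * compare equal and are consumed; a stale zeroed slot never matches.
 */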
7827
7828 /**
7829  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7830  * @ipr_cmd:    ipr command struct
7831  *
7832  * Return value:
7833  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7834  **/
7835 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7836 {
7837         unsigned long stage, stage_time;
7838         u32 feedback;
7839         volatile u32 int_reg;
7840         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7841         u64 maskval = 0;
7842
7843         feedback = readl(ioa_cfg->regs.init_feedback_reg);
7844         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7845         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7846
7847         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7848
7849         /* sanity check the stage_time value */
7850         if (stage_time == 0)
7851                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7852         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7853                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7854         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7855                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7856
7857         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7858                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7859                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7860                 stage_time = ioa_cfg->transop_timeout;
7861                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7862         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7863                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7864                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7865                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7866                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
7867                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7868                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7869                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7870                         return IPR_RC_JOB_CONTINUE;
7871                 }
7872         }
7873
7874         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7875         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7876         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7877         ipr_cmd->done = ipr_reset_ioa_job;
7878         add_timer(&ipr_cmd->timer);
7879
7880         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7881
7882         return IPR_RC_JOB_RETURN;
7883 }
7884
7885 /**
7886  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7887  * @ipr_cmd:    ipr command struct
7888  *
7889  * This function reinitializes some control blocks and
7890  * enables destructive diagnostics on the adapter.
7891  *
7892  * Return value:
7893  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7894  **/
7895 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7896 {
7897         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7898         volatile u32 int_reg;
7899         volatile u64 maskval;
7900         int i;
7901
7902         ENTER;
7903         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7904         ipr_init_ioa_mem(ioa_cfg);
7905
7906         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7907                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7908                 ioa_cfg->hrrq[i].allow_interrupts = 1;
7909                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7910         }
7911         wmb();
7912         if (ioa_cfg->sis64) {
7913                 /* Set the adapter to the correct endian mode. */
7914                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7915                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7916         }
7917
7918         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7919
7920         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7921                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7922                        ioa_cfg->regs.clr_interrupt_mask_reg32);
7923                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7924                 return IPR_RC_JOB_CONTINUE;
7925         }
7926
7927         /* Enable destructive diagnostics on IOA */
7928         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7929
7930         if (ioa_cfg->sis64) {
7931                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7932                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7933                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7934         } else
7935                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7936
7937         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7938
7939         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7940
7941         if (ioa_cfg->sis64) {
7942                 ipr_cmd->job_step = ipr_reset_next_stage;
7943                 return IPR_RC_JOB_CONTINUE;
7944         }
7945
7946         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7947         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7948         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7949         ipr_cmd->done = ipr_reset_ioa_job;
7950         add_timer(&ipr_cmd->timer);
7951         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7952
7953         LEAVE;
7954         return IPR_RC_JOB_RETURN;
7955 }
7956
7957 /**
7958  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7959  * @ipr_cmd:    ipr command struct
7960  *
7961  * This function is invoked when an adapter dump has run out
7962  * of processing time.
7963  *
7964  * Return value:
7965  *      IPR_RC_JOB_CONTINUE
7966  **/
7967 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7968 {
7969         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7970
7971         if (ioa_cfg->sdt_state == GET_DUMP)
7972                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7973         else if (ioa_cfg->sdt_state == READ_DUMP)
7974                 ioa_cfg->sdt_state = ABORT_DUMP;
7975
7976         ioa_cfg->dump_timeout = 1;
7977         ipr_cmd->job_step = ipr_reset_alert;
7978
7979         return IPR_RC_JOB_CONTINUE;
7980 }
7981
7982 /**
7983  * ipr_unit_check_no_data - Log a unit check/no data error log
7984  * @ioa_cfg:            ioa config struct
7985  *
7986  * Logs an error indicating the adapter unit checked, but for some
7987  * reason, we were unable to fetch the unit check buffer.
7988  *
7989  * Return value:
7990  *      nothing
7991  **/
7992 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7993 {
7994         ioa_cfg->errors_logged++;
7995         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7996 }
7997
7998 /**
7999  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8000  * @ioa_cfg:            ioa config struct
8001  *
8002  * Fetches the unit check buffer from the adapter by clocking the data
8003  * through the mailbox register.
8004  *
8005  * Return value:
8006  *      nothing
8007  **/
8008 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8009 {
8010         unsigned long mailbox;
8011         struct ipr_hostrcb *hostrcb;
8012         struct ipr_uc_sdt sdt;
8013         int rc, length;
8014         u32 ioasc;
8015
8016         mailbox = readl(ioa_cfg->ioa_mailbox);
8017
8018         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8019                 ipr_unit_check_no_data(ioa_cfg);
8020                 return;
8021         }
8022
8023         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8024         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8025                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8026
8027         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8028             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8029             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8030                 ipr_unit_check_no_data(ioa_cfg);
8031                 return;
8032         }
8033
8034         /* Find length of the first sdt entry (UC buffer) */
8035         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8036                 length = be32_to_cpu(sdt.entry[0].end_token);
8037         else
8038                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8039                           be32_to_cpu(sdt.entry[0].start_token)) &
8040                           IPR_FMT2_MBX_ADDR_MASK;
8041
8042         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8043                              struct ipr_hostrcb, queue);
8044         list_del(&hostrcb->queue);
8045         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8046
8047         rc = ipr_get_ldump_data_section(ioa_cfg,
8048                                         be32_to_cpu(sdt.entry[0].start_token),
8049                                         (__be32 *)&hostrcb->hcam,
8050                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8051
8052         if (!rc) {
8053                 ipr_handle_log_data(ioa_cfg, hostrcb);
8054                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8055                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8056                     ioa_cfg->sdt_state == GET_DUMP)
8057                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8058         } else
8059                 ipr_unit_check_no_data(ioa_cfg);
8060
8061         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8062 }
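/*
 * Flow of the fetch above: the mailbox register points at a small
 * dump table (SDT); the first entry's start/end tokens bound the
 * unit check buffer, which is then clocked into a free hostrcb and
 * run through the normal error log handler.
 */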
8063
8064 /**
8065  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8066  * @ipr_cmd:    ipr command struct
8067  *
8068  * Description: This function fetches the unit check buffer via
 * ipr_get_unit_check_buffer().
8069  *
8070  * Return value:
8071  *      IPR_RC_JOB_RETURN
8072  **/
8073 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8074 {
8075         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8076
8077         ENTER;
8078         ioa_cfg->ioa_unit_checked = 0;
8079         ipr_get_unit_check_buffer(ioa_cfg);
8080         ipr_cmd->job_step = ipr_reset_alert;
8081         ipr_reset_start_timer(ipr_cmd, 0);
8082
8083         LEAVE;
8084         return IPR_RC_JOB_RETURN;
8085 }
8086
8087 /**
8088  * ipr_reset_restore_cfg_space - Restore PCI config space.
8089  * @ipr_cmd:    ipr command struct
8090  *
8091  * Description: This function restores the saved PCI config space of
8092  * the adapter, fails all outstanding ops back to the callers, and
8093  * fetches the dump/unit check if applicable to this reset.
8094  *
8095  * Return value:
8096  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8097  **/
8098 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8099 {
8100         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8101         u32 int_reg;
8102
8103         ENTER;
8104         ioa_cfg->pdev->state_saved = true;
8105         pci_restore_state(ioa_cfg->pdev);
8106
8107         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8108                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8109                 return IPR_RC_JOB_CONTINUE;
8110         }
8111
8112         ipr_fail_all_ops(ioa_cfg);
8113
8114         if (ioa_cfg->sis64) {
8115                 /* Set the adapter to the correct endian mode. */
8116                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8117                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8118         }
8119
8120         if (ioa_cfg->ioa_unit_checked) {
8121                 if (ioa_cfg->sis64) {
8122                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8123                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8124                         return IPR_RC_JOB_RETURN;
8125                 } else {
8126                         ioa_cfg->ioa_unit_checked = 0;
8127                         ipr_get_unit_check_buffer(ioa_cfg);
8128                         ipr_cmd->job_step = ipr_reset_alert;
8129                         ipr_reset_start_timer(ipr_cmd, 0);
8130                         return IPR_RC_JOB_RETURN;
8131                 }
8132         }
8133
8134         if (ioa_cfg->in_ioa_bringdown) {
8135                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8136         } else {
8137                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8138
8139                 if (GET_DUMP == ioa_cfg->sdt_state) {
8140                         ioa_cfg->sdt_state = READ_DUMP;
8141                         ioa_cfg->dump_timeout = 0;
8142                         if (ioa_cfg->sis64)
8143                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8144                         else
8145                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8146                         ipr_cmd->job_step = ipr_reset_wait_for_dump;
8147                         schedule_work(&ioa_cfg->work_q);
8148                         return IPR_RC_JOB_RETURN;
8149                 }
8150         }
8151
8152         LEAVE;
8153         return IPR_RC_JOB_CONTINUE;
8154 }
8155
8156 /**
8157  * ipr_reset_bist_done - BIST has completed on the adapter.
8158  * @ipr_cmd:    ipr command struct
8159  *
8160  * Description: Unblock config space and resume the reset process.
8161  *
8162  * Return value:
8163  *      IPR_RC_JOB_CONTINUE
8164  **/
8165 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8166 {
8167         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8168
8169         ENTER;
8170         if (ioa_cfg->cfg_locked)
8171                 pci_cfg_access_unlock(ioa_cfg->pdev);
8172         ioa_cfg->cfg_locked = 0;
8173         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8174         LEAVE;
8175         return IPR_RC_JOB_CONTINUE;
8176 }
8177
8178 /**
8179  * ipr_reset_start_bist - Run BIST on the adapter.
8180  * @ipr_cmd:    ipr command struct
8181  *
8182  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8183  *
8184  * Return value:
8185  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8186  **/
8187 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8188 {
8189         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8190         int rc = PCIBIOS_SUCCESSFUL;
8191
8192         ENTER;
8193         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8194                 writel(IPR_UPROCI_SIS64_START_BIST,
8195                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8196         else
8197                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8198
8199         if (rc == PCIBIOS_SUCCESSFUL) {
8200                 ipr_cmd->job_step = ipr_reset_bist_done;
8201                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8202                 rc = IPR_RC_JOB_RETURN;
8203         } else {
8204                 if (ioa_cfg->cfg_locked)
8205                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8206                 ioa_cfg->cfg_locked = 0;
8207                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8208                 rc = IPR_RC_JOB_CONTINUE;
8209         }
8210
8211         LEAVE;
8212         return rc;
8213 }
8214
8215 /**
8216  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8217  * @ipr_cmd:    ipr command struct
8218  *
8219  * Description: This clears PCI reset to the adapter and delays two seconds.
8220  *
8221  * Return value:
8222  *      IPR_RC_JOB_RETURN
8223  **/
8224 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8225 {
8226         ENTER;
8227         pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8228         ipr_cmd->job_step = ipr_reset_bist_done;
8229         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8230         LEAVE;
8231         return IPR_RC_JOB_RETURN;
8232 }
8233
8234 /**
8235  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8236  * @ipr_cmd:    ipr command struct
8237  *
8238  * Description: This asserts PCI reset to the adapter.
8239  *
8240  * Return value:
8241  *      IPR_RC_JOB_RETURN
8242  **/
8243 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8244 {
8245         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8246         struct pci_dev *pdev = ioa_cfg->pdev;
8247
8248         ENTER;
8249         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8250         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8251         ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8252         LEAVE;
8253         return IPR_RC_JOB_RETURN;
8254 }
8255
8256 /**
8257  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8258  * @ipr_cmd:    ipr command struct
8259  *
8260  * Description: This attempts to block config access to the IOA.
8261  *
8262  * Return value:
8263  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8264  **/
8265 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8266 {
8267         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8268         int rc = IPR_RC_JOB_CONTINUE;
8269
8270         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8271                 ioa_cfg->cfg_locked = 1;
8272                 ipr_cmd->job_step = ioa_cfg->reset;
8273         } else {
8274                 if (ipr_cmd->u.time_left) {
8275                         rc = IPR_RC_JOB_RETURN;
8276                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8277                         ipr_reset_start_timer(ipr_cmd,
8278                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8279                 } else {
8280                         ipr_cmd->job_step = ioa_cfg->reset;
8281                         dev_err(&ioa_cfg->pdev->dev,
8282                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8283                 }
8284         }
8285
8286         return rc;
8287 }
8288
8289 /**
8290  * ipr_reset_block_config_access - Block config access to the IOA
8291  * @ipr_cmd:    ipr command struct
8292  *
8293  * Description: This attempts to block config access to the IOA.
8294  *
8295  * Return value:
8296  *      IPR_RC_JOB_CONTINUE
8297  **/
8298 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8299 {
8300         ipr_cmd->ioa_cfg->cfg_locked = 0;
8301         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8302         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8303         return IPR_RC_JOB_CONTINUE;
8304 }
8305
8306 /**
8307  * ipr_reset_allowed - Query whether or not IOA can be reset
8308  * @ioa_cfg:    ioa config struct
8309  *
8310  * Return value:
8311  *      0 if reset not allowed / non-zero if reset is allowed
8312  **/
8313 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8314 {
8315         volatile u32 temp_reg;
8316
8317         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8318         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8319 }
8320
8321 /**
8322  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8323  * @ipr_cmd:    ipr command struct
8324  *
8325  * Description: This function waits for adapter permission to run BIST,
8326  * then runs BIST. If the adapter does not give permission after a
8327  * reasonable time, we will reset the adapter anyway. Resetting the
8328  * adapter without warning it carries the risk of losing the
8329  * persistent error log on the adapter. If the adapter is reset
8330  * while it is writing to its flash, the affected flash segment
8331  * will have bad ECC and be zeroed.
8332  *
8333  * Return value:
8334  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8335  **/
8336 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8337 {
8338         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8339         int rc = IPR_RC_JOB_RETURN;
8340
8341         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8342                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8343                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8344         } else {
8345                 ipr_cmd->job_step = ipr_reset_block_config_access;
8346                 rc = IPR_RC_JOB_CONTINUE;
8347         }
8348
8349         return rc;
8350 }
8351
8352 /**
8353  * ipr_reset_alert - Alert the adapter of a pending reset
8354  * @ipr_cmd:    ipr command struct
8355  *
8356  * Description: This function alerts the adapter that it will be reset.
8357  * If memory space is not currently enabled, proceed directly
8358  * to running BIST on the adapter. The timer must always be started
8359  * so we guarantee we do not run BIST from ipr_isr.
8360  *
8361  * Return value:
8362  *      IPR_RC_JOB_RETURN
8363  **/
8364 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8365 {
8366         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8367         u16 cmd_reg;
8368         int rc;
8369
8370         ENTER;
8371         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8372
8373         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8374                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8375                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8376                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8377         } else {
8378                 ipr_cmd->job_step = ipr_reset_block_config_access;
8379         }
8380
8381         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8382         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8383
8384         LEAVE;
8385         return IPR_RC_JOB_RETURN;
8386 }
8387
8388 /**
8389  * ipr_reset_ucode_download_done - Microcode download completion
8390  * @ipr_cmd:    ipr command struct
8391  *
8392  * Description: This function unmaps the microcode download buffer.
8393  *
8394  * Return value:
8395  *      IPR_RC_JOB_CONTINUE
8396  **/
8397 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8398 {
8399         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8400         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8401
8402         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8403                      sglist->num_sg, DMA_TO_DEVICE);
8404
8405         ipr_cmd->job_step = ipr_reset_alert;
8406         return IPR_RC_JOB_CONTINUE;
8407 }
8408
8409 /**
8410  * ipr_reset_ucode_download - Download microcode to the adapter
8411  * @ipr_cmd:    ipr command struct
8412  *
8413  * Description: This function checks to see if there is microcode
8414  * to download to the adapter. If there is, a download is performed.
8415  *
8416  * Return value:
8417  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8418  **/
8419 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8420 {
8421         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8422         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8423
8424         ENTER;
8425         ipr_cmd->job_step = ipr_reset_alert;
8426
8427         if (!sglist)
8428                 return IPR_RC_JOB_CONTINUE;
8429
8430         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8431         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8432         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8433         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8434         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8435         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8436         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8437
8438         if (ioa_cfg->sis64)
8439                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8440         else
8441                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8442         ipr_cmd->job_step = ipr_reset_ucode_download_done;
8443
8444         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8445                    IPR_WRITE_BUFFER_TIMEOUT);
8446
8447         LEAVE;
8448         return IPR_RC_JOB_RETURN;
8449 }
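/*
 * The CDB built above follows the standard SCSI WRITE BUFFER layout:
 * IPR_WR_BUF_DOWNLOAD_AND_SAVE in byte 1 selects what SCSI calls the
 * "download microcode and save" mode (assumed here to be the usual
 * mode 0x05), and cdb[6..8] carry the 24-bit parameter list length.
 */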
8450
8451 /**
8452  * ipr_reset_shutdown_ioa - Shutdown the adapter
8453  * @ipr_cmd:    ipr command struct
8454  *
8455  * Description: This function issues an adapter shutdown of the
8456  * specified type to the specified adapter as part of the
8457  * adapter reset job.
8458  *
8459  * Return value:
8460  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8461  **/
8462 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8463 {
8464         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8465         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8466         unsigned long timeout;
8467         int rc = IPR_RC_JOB_CONTINUE;
8468
8469         ENTER;
8470         if (shutdown_type != IPR_SHUTDOWN_NONE &&
8471                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8472                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8473                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8474                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8475                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8476
8477                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8478                         timeout = IPR_SHUTDOWN_TIMEOUT;
8479                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8480                         timeout = IPR_INTERNAL_TIMEOUT;
8481                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8482                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8483                 else
8484                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8485
8486                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8487
8488                 rc = IPR_RC_JOB_RETURN;
8489                 ipr_cmd->job_step = ipr_reset_ucode_download;
8490         } else
8491                 ipr_cmd->job_step = ipr_reset_alert;
8492
8493         LEAVE;
8494         return rc;
8495 }
8496
8497 /**
8498  * ipr_reset_ioa_job - Adapter reset job
8499  * @ipr_cmd:    ipr command struct
8500  *
8501  * Description: This function is the job router for the adapter reset job.
8502  *
8503  * Return value:
8504  *      none
8505  **/
8506 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8507 {
8508         u32 rc, ioasc;
8509         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8510
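        /*
         * Run the reset state machine: each job step executes synchronously
         * while it returns IPR_RC_JOB_CONTINUE. A step that must wait for an
         * asynchronous event returns IPR_RC_JOB_RETURN and arranges for this
         * routine to be re-entered when that event completes.
         */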
8511         do {
8512                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8513
8514                 if (ioa_cfg->reset_cmd != ipr_cmd) {
8515                         /*
8516                          * We are doing nested adapter resets and this is
8517                          * not the current reset job.
8518                          */
8519                         list_add_tail(&ipr_cmd->queue,
8520                                         &ipr_cmd->hrrq->hrrq_free_q);
8521                         return;
8522                 }
8523
8524                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8525                         rc = ipr_cmd->job_step_failed(ipr_cmd);
8526                         if (rc == IPR_RC_JOB_RETURN)
8527                                 return;
8528                 }
8529
8530                 ipr_reinit_ipr_cmnd(ipr_cmd);
8531                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8532                 rc = ipr_cmd->job_step(ipr_cmd);
8533         } while (rc == IPR_RC_JOB_CONTINUE);
8534 }
8535
8536 /**
8537  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8538  * @ioa_cfg:            ioa config struct
8539  * @job_step:           first job step of reset job
8540  * @shutdown_type:      shutdown type
8541  *
8542  * Description: This function will initiate the reset of the given adapter
8543  * starting at the selected job step.
8544  * If the caller needs to wait on the completion of the reset,
8545  * the caller must sleep on the reset_wait_q.
8546  *
8547  * Return value:
8548  *      none
8549  **/
8550 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8551                                     int (*job_step) (struct ipr_cmnd *),
8552                                     enum ipr_shutdown_type shutdown_type)
8553 {
8554         struct ipr_cmnd *ipr_cmd;
8555         int i;
8556
8557         ioa_cfg->in_reset_reload = 1;
8558         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8559                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8560                 ioa_cfg->hrrq[i].allow_cmds = 0;
8561                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8562         }
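        /*
         * Make the cleared allow_cmds flags visible to all CPUs before
         * the reset proceeds.
         */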
8563         wmb();
8564         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8565                 scsi_block_requests(ioa_cfg->host);
8566
8567         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8568         ioa_cfg->reset_cmd = ipr_cmd;
8569         ipr_cmd->job_step = job_step;
8570         ipr_cmd->u.shutdown_type = shutdown_type;
8571
8572         ipr_reset_ioa_job(ipr_cmd);
8573 }
8574
8575 /**
8576  * ipr_initiate_ioa_reset - Initiate an adapter reset
8577  * @ioa_cfg:            ioa config struct
8578  * @shutdown_type:      shutdown type
8579  *
8580  * Description: This function will initiate the reset of the given adapter.
8581  * If the caller needs to wait on the completion of the reset,
8582  * the caller must sleep on the reset_wait_q.
8583  *
8584  * Return value:
8585  *      none
8586  **/
8587 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8588                                    enum ipr_shutdown_type shutdown_type)
8589 {
8590         int i;
8591
8592         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8593                 return;
8594
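        /* If an adapter dump is being set up or read, defer or abort it */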
8595         if (ioa_cfg->in_reset_reload) {
8596                 if (ioa_cfg->sdt_state == GET_DUMP)
8597                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8598                 else if (ioa_cfg->sdt_state == READ_DUMP)
8599                         ioa_cfg->sdt_state = ABORT_DUMP;
8600         }
8601
8602         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8603                 dev_err(&ioa_cfg->pdev->dev,
8604                         "IOA taken offline - error recovery failed\n");
8605
8606                 ioa_cfg->reset_retries = 0;
8607                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8608                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8609                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
8610                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8611                 }
8612                 wmb();
8613
8614                 if (ioa_cfg->in_ioa_bringdown) {
8615                         ioa_cfg->reset_cmd = NULL;
8616                         ioa_cfg->in_reset_reload = 0;
8617                         ipr_fail_all_ops(ioa_cfg);
8618                         wake_up_all(&ioa_cfg->reset_wait_q);
8619
8620                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8621                                 spin_unlock_irq(ioa_cfg->host->host_lock);
8622                                 scsi_unblock_requests(ioa_cfg->host);
8623                                 spin_lock_irq(ioa_cfg->host->host_lock);
8624                         }
8625                         return;
8626                 } else {
8627                         ioa_cfg->in_ioa_bringdown = 1;
8628                         shutdown_type = IPR_SHUTDOWN_NONE;
8629                 }
8630         }
8631
8632         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8633                                 shutdown_type);
8634 }
8635
8636 /**
8637  * ipr_reset_freeze - Hold off all I/O activity
8638  * @ipr_cmd:    ipr command struct
8639  *
8640  * Description: If the PCI slot is frozen, hold off all I/O
8641  * activity; then, as soon as the slot is available again,
8642  * initiate an adapter reset.
8643  *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/
8644 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8645 {
8646         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8647         int i;
8648
8649         /* Disallow new interrupts, avoid loop */
8650         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8651                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8652                 ioa_cfg->hrrq[i].allow_interrupts = 0;
8653                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8654         }
8655         wmb();
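        /*
         * Park this command on the pending queue with ipr_reset_ioa_job as
         * its done routine. When the command is eventually completed during
         * slot recovery, the reset job resumes; if a newer reset owns
         * reset_cmd by then, the command is simply returned to the free
         * queue instead.
         */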
8656         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8657         ipr_cmd->done = ipr_reset_ioa_job;
8658         return IPR_RC_JOB_RETURN;
8659 }
8660
8661 /**
8662  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8663  * @pdev:       PCI device struct
8664  *
8665  * Description: This routine is called to tell us that the MMIO
8666  * access to the IOA has been restored.
8667  *
 * Return value:
 *      PCI_ERS_RESULT_NEED_RESET
 **/
8668 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8669 {
8670         unsigned long flags = 0;
8671         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8672
8673         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8674         if (!ioa_cfg->probe_done)
8675                 pci_save_state(pdev);
8676         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8677         return PCI_ERS_RESULT_NEED_RESET;
8678 }
8679
8680 /**
8681  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8682  * @pdev:       PCI device struct
8683  *
8684  * Description: This routine is called to tell us that the PCI bus
8685  * is down. Can't do anything here, except put the device driver
8686  * into a holding pattern, waiting for the PCI bus to come back.
8687  */
8688 static void ipr_pci_frozen(struct pci_dev *pdev)
8689 {
8690         unsigned long flags = 0;
8691         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8692
8693         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8694         if (ioa_cfg->probe_done)
8695                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8696         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8697 }
8698
8699 /**
8700  * ipr_pci_slot_reset - Called when PCI slot has been reset.
8701  * @pdev:       PCI device struct
8702  *
8703  * Description: This routine is called by the pci error recovery
8704  * code after the PCI slot has been reset, just before we
8705  * should resume normal operations.
8706  *
 * Return value:
 *      PCI_ERS_RESULT_RECOVERED
 **/
8707 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8708 {
8709         unsigned long flags = 0;
8710         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8711
8712         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8713         if (ioa_cfg->probe_done) {
8714                 if (ioa_cfg->needs_warm_reset)
8715                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8716                 else
8717                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8718                                                 IPR_SHUTDOWN_NONE);
8719         } else
8720                 wake_up_all(&ioa_cfg->eeh_wait_q);
8721         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8722         return PCI_ERS_RESULT_RECOVERED;
8723 }
8724
8725 /**
8726  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8727  * @pdev:       PCI device struct
8728  *
8729  * Description: This routine is called when the PCI bus has
8730  * permanently failed.
8731  */
8732 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8733 {
8734         unsigned long flags = 0;
8735         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8736         int i;
8737
8738         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8739         if (ioa_cfg->probe_done) {
8740                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8741                         ioa_cfg->sdt_state = ABORT_DUMP;
8742                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8743                 ioa_cfg->in_ioa_bringdown = 1;
8744                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8745                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8746                         ioa_cfg->hrrq[i].allow_cmds = 0;
8747                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8748                 }
8749                 wmb();
8750                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8751         } else
8752                 wake_up_all(&ioa_cfg->eeh_wait_q);
8753         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8754 }
8755
8756 /**
8757  * ipr_pci_error_detected - Called when a PCI error is detected.
8758  * @pdev:       PCI device struct
8759  * @state:      PCI channel state
8760  *
8761  * Description: Called when a PCI error is detected.
8762  *
8763  * Return value:
8764  *      PCI_ERS_RESULT_CAN_RECOVER / PCI_ERS_RESULT_NEED_RESET / PCI_ERS_RESULT_DISCONNECT
8765  */
8766 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8767                                                pci_channel_state_t state)
8768 {
8769         switch (state) {
8770         case pci_channel_io_frozen:
8771                 ipr_pci_frozen(pdev);
8772                 return PCI_ERS_RESULT_CAN_RECOVER;
8773         case pci_channel_io_perm_failure:
8774                 ipr_pci_perm_failure(pdev);
8775                 return PCI_ERS_RESULT_DISCONNECT;
8777         default:
8778                 break;
8779         }
8780         return PCI_ERS_RESULT_NEED_RESET;
8781 }
8782
8783 /**
8784  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8785  * @ioa_cfg:    ioa cfg struct
8786  *
8787  * Description: This is the second phase of adapter initialization.
8788  * This function takes care of initializing the adapter to the point
8789  * where it can accept new commands.
8790  *
8791  * Return value:
8792  *      0 on success
8793  **/
8794 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8795 {
8796         int rc = 0;
8797         unsigned long host_lock_flags = 0;
8798
8799         ENTER;
8800         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8801         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8802         ioa_cfg->probe_done = 1;
8803         if (ioa_cfg->needs_hard_reset) {
8804                 ioa_cfg->needs_hard_reset = 0;
8805                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8806         } else
8807                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8808                                         IPR_SHUTDOWN_NONE);
8809         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8810
8811         LEAVE;
8812         return rc;
8813 }
8814
8815 /**
8816  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8817  * @ioa_cfg:    ioa config struct
8818  *
8819  * Return value:
8820  *      none
8821  **/
8822 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8823 {
8824         int i;
8825
8826         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8827                 if (ioa_cfg->ipr_cmnd_list[i])
8828                         dma_pool_free(ioa_cfg->ipr_cmd_pool,
8829                                       ioa_cfg->ipr_cmnd_list[i],
8830                                       ioa_cfg->ipr_cmnd_list_dma[i]);
8831
8832                 ioa_cfg->ipr_cmnd_list[i] = NULL;
8833         }
8834
8835         if (ioa_cfg->ipr_cmd_pool)
8836                 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
8837
8838         kfree(ioa_cfg->ipr_cmnd_list);
8839         kfree(ioa_cfg->ipr_cmnd_list_dma);
8840         ioa_cfg->ipr_cmnd_list = NULL;
8841         ioa_cfg->ipr_cmnd_list_dma = NULL;
8842         ioa_cfg->ipr_cmd_pool = NULL;
8843 }
8844
8845 /**
8846  * ipr_free_mem - Frees memory allocated for an adapter
8847  * @ioa_cfg:    ioa cfg struct
8848  *
8849  * Return value:
8850  *      nothing
8851  **/
8852 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8853 {
8854         int i;
8855
8856         kfree(ioa_cfg->res_entries);
8857         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
8858                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8859         ipr_free_cmd_blks(ioa_cfg);
8860
8861         for (i = 0; i < ioa_cfg->hrrq_num; i++)
8862                 dma_free_coherent(&ioa_cfg->pdev->dev,
8863                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
8864                                   ioa_cfg->hrrq[i].host_rrq,
8865                                   ioa_cfg->hrrq[i].host_rrq_dma);
8866
8867         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
8868                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
8869
8870         for (i = 0; i < IPR_NUM_HCAMS; i++) {
8871                 dma_free_coherent(&ioa_cfg->pdev->dev,
8872                                   sizeof(struct ipr_hostrcb),
8873                                   ioa_cfg->hostrcb[i],
8874                                   ioa_cfg->hostrcb_dma[i]);
8875         }
8876
8877         ipr_free_dump(ioa_cfg);
8878         kfree(ioa_cfg->trace);
8879 }
8880
8881 /**
8882  * ipr_free_all_resources - Free all allocated resources for an adapter.
8883  * @ioa_cfg:    ioa config struct
8884  *
8885  * This function frees all allocated resources for the
8886  * specified adapter.
8887  *
8888  * Return value:
8889  *      none
8890  **/
8891 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8892 {
8893         struct pci_dev *pdev = ioa_cfg->pdev;
8894
8895         ENTER;
8896         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8897             ioa_cfg->intr_flag == IPR_USE_MSIX) {
8898                 int i;
8899                 for (i = 0; i < ioa_cfg->nvectors; i++)
8900                         free_irq(ioa_cfg->vectors_info[i].vec,
8901                                 &ioa_cfg->hrrq[i]);
8902         } else
8903                 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8904
8905         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
8906                 pci_disable_msi(pdev);
8907                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8908         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
8909                 pci_disable_msix(pdev);
8910                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8911         }
8912
8913         iounmap(ioa_cfg->hdw_dma_regs);
8914         pci_release_regions(pdev);
8915         ipr_free_mem(ioa_cfg);
8916         scsi_host_put(ioa_cfg->host);
8917         pci_disable_device(pdev);
8918         LEAVE;
8919 }
8920
8921 /**
8922  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8923  * @ioa_cfg:    ioa config struct
8924  *
8925  * Return value:
8926  *      0 on success / -ENOMEM on allocation failure
8927  **/
8928 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8929 {
8930         struct ipr_cmnd *ipr_cmd;
8931         struct ipr_ioarcb *ioarcb;
8932         dma_addr_t dma_addr;
8933         int i, entries_each_hrrq, hrrq_id = 0;
8934
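        /*
         * Carve all command blocks from a single DMA pool. 512 byte
         * alignment is requested so that each embedded IOARCB sits on a
         * boundary suitable for the adapter to DMA it directly.
         */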
8935         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
8936                                                 sizeof(struct ipr_cmnd), 512, 0);
8937
8938         if (!ioa_cfg->ipr_cmd_pool)
8939                 return -ENOMEM;
8940
8941         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8942         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8943
8944         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8945                 ipr_free_cmd_blks(ioa_cfg);
8946                 return -ENOMEM;
8947         }
8948
8949         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8950                 if (ioa_cfg->hrrq_num > 1) {
8951                         if (i == 0) {
8952                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8953                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
8954                                 ioa_cfg->hrrq[i].max_cmd_id =
8955                                         (entries_each_hrrq - 1);
8956                         } else {
8957                                 entries_each_hrrq =
8958                                         IPR_NUM_BASE_CMD_BLKS/
8959                                         (ioa_cfg->hrrq_num - 1);
8960                                 ioa_cfg->hrrq[i].min_cmd_id =
8961                                         IPR_NUM_INTERNAL_CMD_BLKS +
8962                                         (i - 1) * entries_each_hrrq;
8963                                 ioa_cfg->hrrq[i].max_cmd_id =
8964                                         (IPR_NUM_INTERNAL_CMD_BLKS +
8965                                         i * entries_each_hrrq - 1);
8966                         }
8967                 } else {
8968                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
8969                         ioa_cfg->hrrq[i].min_cmd_id = 0;
8970                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8971                 }
8972                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8973         }
8974
8975         BUG_ON(ioa_cfg->hrrq_num == 0);
8976
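        /*
         * The integer division above can strand a few command blocks;
         * fold any remainder into the last HRRQ so that all
         * IPR_NUM_CMD_BLKS blocks are assigned to some queue.
         */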
8977         i = IPR_NUM_CMD_BLKS -
8978                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8979         if (i > 0) {
8980                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8981                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8982         }
8983
8984         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8985                 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8986
8987                 if (!ipr_cmd) {
8988                         ipr_free_cmd_blks(ioa_cfg);
8989                         return -ENOMEM;
8990                 }
8991
8992                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8993                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8994                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8995
8996                 ioarcb = &ipr_cmd->ioarcb;
8997                 ipr_cmd->dma_addr = dma_addr;
8998                 if (ioa_cfg->sis64)
8999                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9000                 else
9001                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9002
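                /*
                 * The response handle comes back in the host RRQ when the
                 * command completes. The index is pre-shifted left by two
                 * because the low bits of each RRQ entry are used for the
                 * toggle/status bits.
                 */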
9003                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9004                 if (ioa_cfg->sis64) {
9005                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9006                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9007                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9008                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9009                 } else {
9010                         ioarcb->write_ioadl_addr =
9011                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9012                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9013                         ioarcb->ioasa_host_pci_addr =
9014                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9015                 }
9016                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9017                 ipr_cmd->cmd_index = i;
9018                 ipr_cmd->ioa_cfg = ioa_cfg;
9019                 ipr_cmd->sense_buffer_dma = dma_addr +
9020                         offsetof(struct ipr_cmnd, sense_buffer);
9021
9022                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9023                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9024                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9025                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9026                         hrrq_id++;
9027         }
9028
9029         return 0;
9030 }
9031
9032 /**
9033  * ipr_alloc_mem - Allocate memory for an adapter
9034  * @ioa_cfg:    ioa config struct
9035  *
9036  * Return value:
9037  *      0 on success / non-zero for error
9038  **/
9039 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9040 {
9041         struct pci_dev *pdev = ioa_cfg->pdev;
9042         int i, rc = -ENOMEM;
9043
9044         ENTER;
9045         ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9046                                        ioa_cfg->max_devs_supported, GFP_KERNEL);
9047
9048         if (!ioa_cfg->res_entries)
9049                 goto out;
9050
9051         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9052                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9053                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9054         }
9055
9056         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9057                                               sizeof(struct ipr_misc_cbs),
9058                                               &ioa_cfg->vpd_cbs_dma,
9059                                               GFP_KERNEL);
9060
9061         if (!ioa_cfg->vpd_cbs)
9062                 goto out_free_res_entries;
9063
9064         if (ipr_alloc_cmd_blks(ioa_cfg))
9065                 goto out_free_vpd_cbs;
9066
9067         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9068                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9069                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9070                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9071                                         GFP_KERNEL);
9072
9073                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9074                         while (--i >= 0)
9075                                 dma_free_coherent(&pdev->dev,
9076                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9077                                         ioa_cfg->hrrq[i].host_rrq,
9078                                         ioa_cfg->hrrq[i].host_rrq_dma);
9079                         goto out_ipr_free_cmd_blocks;
9080                 }
9081                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9082         }
9083
9084         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9085                                                   ioa_cfg->cfg_table_size,
9086                                                   &ioa_cfg->cfg_table_dma,
9087                                                   GFP_KERNEL);
9088
9089         if (!ioa_cfg->u.cfg_table)
9090                 goto out_free_host_rrq;
9091
9092         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9093                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9094                                                          sizeof(struct ipr_hostrcb),
9095                                                          &ioa_cfg->hostrcb_dma[i],
9096                                                          GFP_KERNEL);
9097
9098                 if (!ioa_cfg->hostrcb[i])
9099                         goto out_free_hostrcb_dma;
9100
9101                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9102                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9103                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9104                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9105         }
9106
9107         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9108                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9109
9110         if (!ioa_cfg->trace)
9111                 goto out_free_hostrcb_dma;
9112
9113         rc = 0;
9114 out:
9115         LEAVE;
9116         return rc;
9117
9118 out_free_hostrcb_dma:
9119         while (i-- > 0) {
9120                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9121                                   ioa_cfg->hostrcb[i],
9122                                   ioa_cfg->hostrcb_dma[i]);
9123         }
9124         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9125                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9126 out_free_host_rrq:
9127         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9128                 dma_free_coherent(&pdev->dev,
9129                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9130                                   ioa_cfg->hrrq[i].host_rrq,
9131                                   ioa_cfg->hrrq[i].host_rrq_dma);
9132         }
9133 out_ipr_free_cmd_blocks:
9134         ipr_free_cmd_blks(ioa_cfg);
9135 out_free_vpd_cbs:
9136         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9137                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9138 out_free_res_entries:
9139         kfree(ioa_cfg->res_entries);
9140         goto out;
9141 }
9142
9143 /**
9144  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9145  * @ioa_cfg:    ioa config struct
9146  *
9147  * Return value:
9148  *      none
9149  **/
9150 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9151 {
9152         int i;
9153
9154         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9155                 ioa_cfg->bus_attr[i].bus = i;
9156                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9157                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9158                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9159                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9160                 else
9161                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9162         }
9163 }
9164
9165 /**
9166  * ipr_init_regs - Initialize IOA registers
9167  * @ioa_cfg:    ioa config struct
9168  *
9169  * Return value:
9170  *      none
9171  **/
9172 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9173 {
9174         const struct ipr_interrupt_offsets *p;
9175         struct ipr_interrupts *t;
9176         void __iomem *base;
9177
9178         p = &ioa_cfg->chip_cfg->regs;
9179         t = &ioa_cfg->regs;
9180         base = ioa_cfg->hdw_dma_regs;
9181
9182         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9183         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9184         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9185         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9186         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9187         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9188         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9189         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9190         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9191         t->ioarrin_reg = base + p->ioarrin_reg;
9192         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9193         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9194         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9195         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9196         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9197         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9198
9199         if (ioa_cfg->sis64) {
9200                 t->init_feedback_reg = base + p->init_feedback_reg;
9201                 t->dump_addr_reg = base + p->dump_addr_reg;
9202                 t->dump_data_reg = base + p->dump_data_reg;
9203                 t->endian_swap_reg = base + p->endian_swap_reg;
9204         }
9205 }
9206
9207 /**
9208  * ipr_init_ioa_cfg - Initialize IOA config struct
9209  * @ioa_cfg:    ioa config struct
9210  * @host:               scsi host struct
9211  * @pdev:               PCI dev struct
9212  *
9213  * Return value:
9214  *      none
9215  **/
9216 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9217                              struct Scsi_Host *host, struct pci_dev *pdev)
9218 {
9219         int i;
9220
9221         ioa_cfg->host = host;
9222         ioa_cfg->pdev = pdev;
9223         ioa_cfg->log_level = ipr_log_level;
9224         ioa_cfg->doorbell = IPR_DOORBELL;
9225         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9226         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9227         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9228         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9229         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9230         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9231
9232         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9233         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9234         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9235         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9236         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9237         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9238         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9239         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9240         ioa_cfg->sdt_state = INACTIVE;
9241
9242         ipr_initialize_bus_attr(ioa_cfg);
9243         ioa_cfg->max_devs_supported = ipr_max_devs;
9244
9245         if (ioa_cfg->sis64) {
9246                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9247                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9248                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9249                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9250                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9251                                            + ((sizeof(struct ipr_config_table_entry64)
9252                                                * ioa_cfg->max_devs_supported)));
9253         } else {
9254                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9255                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9256                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9257                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9258                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9259                                            + ((sizeof(struct ipr_config_table_entry)
9260                                                * ioa_cfg->max_devs_supported)));
9261         }
9262
9263         host->max_channel = IPR_VSET_BUS;
9264         host->unique_id = host->host_no;
9265         host->max_cmd_len = IPR_MAX_CDB_LEN;
9266         host->can_queue = ioa_cfg->max_cmds;
9267         pci_set_drvdata(pdev, ioa_cfg);
9268
9269         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9270                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9271                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9272                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9273                 if (i == 0)
9274                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9275                 else
9276                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9277         }
9278 }
9279
9280 /**
9281  * ipr_get_chip_info - Find adapter chip information
9282  * @dev_id:             PCI device id struct
9283  *
9284  * Return value:
9285  *      ptr to chip information on success / NULL on failure
9286  **/
9287 static const struct ipr_chip_t *
9288 ipr_get_chip_info(const struct pci_device_id *dev_id)
9289 {
9290         int i;
9291
9292         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9293                 if (ipr_chip[i].vendor == dev_id->vendor &&
9294                     ipr_chip[i].device == dev_id->device)
9295                         return &ipr_chip[i];
9296         return NULL;
9297 }
9298
9299 /**
9300  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9301  *                                              during probe time
9302  * @ioa_cfg:    ioa config struct
9303  *
9304  * Return value:
9305  *      None
9306  **/
9307 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9308 {
9309         struct pci_dev *pdev = ioa_cfg->pdev;
9310
9311         if (pci_channel_offline(pdev)) {
9312                 wait_event_timeout(ioa_cfg->eeh_wait_q,
9313                                    !pci_channel_offline(pdev),
9314                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9315                 pci_restore_state(pdev);
9316         }
9317 }
9318
9319 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9320 {
9321         struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9322         int i, vectors;
9323
9324         for (i = 0; i < ARRAY_SIZE(entries); ++i)
9325                 entries[i].entry = i;
9326
9327         vectors = pci_enable_msix_range(ioa_cfg->pdev,
9328                                         entries, 1, ipr_number_of_msix);
9329         if (vectors < 0) {
9330                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9331                 return vectors;
9332         }
9333
9334         for (i = 0; i < vectors; i++)
9335                 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9336         ioa_cfg->nvectors = vectors;
9337
9338         return 0;
9339 }
9340
9341 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9342 {
9343         int i, vectors;
9344
9345         vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9346         if (vectors < 0) {
9347                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9348                 return vectors;
9349         }
9350
9351         for (i = 0; i < vectors; i++)
9352                 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9353         ioa_cfg->nvectors = vectors;
9354
9355         return 0;
9356 }
9357
9358 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9359 {
9360         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9361
9362         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9363                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9364                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9365                 ioa_cfg->vectors_info[vec_idx].
9366                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9367         }
9368 }
9369
9370 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9371 {
9372         int i, rc;
9373
9374         for (i = 1; i < ioa_cfg->nvectors; i++) {
9375                 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9376                         ipr_isr_mhrrq,
9377                         0,
9378                         ioa_cfg->vectors_info[i].desc,
9379                         &ioa_cfg->hrrq[i]);
9380                 if (rc) {
9381                         while (--i >= 0)
9382                                 free_irq(ioa_cfg->vectors_info[i].vec,
9383                                         &ioa_cfg->hrrq[i]);
9384                         return rc;
9385                 }
9386         }
9387         return 0;
9388 }
9389
9390 /**
9391  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9392  * @irq:                interrupt number
 * @devp:               pointer to ioa config struct
9393  *
9394  * Description: Simply set the msi_received flag to 1 indicating that
9395  * Message Signaled Interrupts are supported.
9396  *
9397  * Return value:
9398  *      IRQ_HANDLED
9399  **/
9400 static irqreturn_t ipr_test_intr(int irq, void *devp)
9401 {
9402         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9403         unsigned long lock_flags = 0;
9404         irqreturn_t rc = IRQ_HANDLED;
9405
9406         dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9407         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9408
9409         ioa_cfg->msi_received = 1;
9410         wake_up(&ioa_cfg->msi_wait_q);
9411
9412         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9413         return rc;
9414 }
9415
9416 /**
9417  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9418  * @ioa_cfg:            ioa config struct
 * @pdev:               PCI device struct
9419  *
9420  * Description: The return value from pci_enable_msi_range() cannot always be
9421  * trusted.  This routine sets up and initiates a test interrupt to determine
9422  * if the interrupt is received via the ipr_test_intr() service routine.
9423  * If the test fails, the driver will fall back to LSI.
9424  *
9425  * Return value:
9426  *      0 on success / non-zero on failure
9427  **/
9428 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9429 {
9430         int rc;
9431         volatile u32 int_reg;
9432         unsigned long lock_flags = 0;
9433
9434         ENTER;
9435
9436         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9437         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9438         ioa_cfg->msi_received = 0;
9439         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9440         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9441         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9442         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9443
9444         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9445                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9446         else
9447                 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9448         if (rc) {
9449                 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9450                 return rc;
9451         } else if (ipr_debug)
9452                 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9453
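        /*
         * Trigger a test interrupt via the I/O debug acknowledge bit and
         * give ipr_test_intr() up to a second (HZ) to set msi_received.
         */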
9454         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9455         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9456         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9457         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9458         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9459
9460         if (!ioa_cfg->msi_received) {
9461                 /* MSI test failed */
9462                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9463                 rc = -EOPNOTSUPP;
9464         } else if (ipr_debug)
9465                 dev_info(&pdev->dev, "MSI test succeeded.\n");
9466
9467         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9468
9469         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9470                 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9471         else
9472                 free_irq(pdev->irq, ioa_cfg);
9473
9474         LEAVE;
9475
9476         return rc;
9477 }
9478
9479 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9480  * @pdev:               PCI device struct
9481  * @dev_id:             PCI device id struct
9482  *
9483  * Return value:
9484  *      0 on success / non-zero on failure
9485  **/
9486 static int ipr_probe_ioa(struct pci_dev *pdev,
9487                          const struct pci_device_id *dev_id)
9488 {
9489         struct ipr_ioa_cfg *ioa_cfg;
9490         struct Scsi_Host *host;
9491         unsigned long ipr_regs_pci;
9492         void __iomem *ipr_regs;
9493         int rc = PCIBIOS_SUCCESSFUL;
9494         volatile u32 mask, uproc, interrupts;
9495         unsigned long lock_flags, driver_lock_flags;
9496
9497         ENTER;
9498
9499         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9500         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9501
9502         if (!host) {
9503                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9504                 rc = -ENOMEM;
9505                 goto out;
9506         }
9507
9508         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9509         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9510         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9511
9512         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9513
9514         if (!ioa_cfg->ipr_chip) {
9515                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9516                         dev_id->vendor, dev_id->device);
                rc = -EINVAL;
9517                 goto out_scsi_host_put;
9518         }
9519
9520         /* set SIS 32 or SIS 64 */
9521         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9522         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9523         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9524         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9525
9526         if (ipr_transop_timeout)
9527                 ioa_cfg->transop_timeout = ipr_transop_timeout;
9528         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9529                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9530         else
9531                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9532
9533         ioa_cfg->revid = pdev->revision;
9534
9535         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9536
9537         ipr_regs_pci = pci_resource_start(pdev, 0);
9538
9539         rc = pci_request_regions(pdev, IPR_NAME);
9540         if (rc < 0) {
9541                 dev_err(&pdev->dev,
9542                         "Couldn't register memory range of registers\n");
9543                 goto out_scsi_host_put;
9544         }
9545
9546         rc = pci_enable_device(pdev);
9547
9548         if (rc || pci_channel_offline(pdev)) {
9549                 if (pci_channel_offline(pdev)) {
9550                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9551                         rc = pci_enable_device(pdev);
9552                 }
9553
9554                 if (rc) {
9555                         dev_err(&pdev->dev, "Cannot enable adapter\n");
9556                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9557                         goto out_release_regions;
9558                 }
9559         }
9560
9561         ipr_regs = pci_ioremap_bar(pdev, 0);
9562
9563         if (!ipr_regs) {
9564                 dev_err(&pdev->dev,
9565                         "Couldn't map memory range of registers\n");
9566                 rc = -ENOMEM;
9567                 goto out_disable;
9568         }
9569
9570         ioa_cfg->hdw_dma_regs = ipr_regs;
9571         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9572         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9573
9574         ipr_init_regs(ioa_cfg);
9575
9576         if (ioa_cfg->sis64) {
9577                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9578                 if (rc < 0) {
9579                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9580                         rc = dma_set_mask_and_coherent(&pdev->dev,
9581                                                        DMA_BIT_MASK(32));
9582                 }
9583         } else
9584                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9585
9586         if (rc < 0) {
9587                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
9588                 goto cleanup_nomem;
9589         }
9590
9591         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9592                                    ioa_cfg->chip_cfg->cache_line_size);
9593
9594         if (rc != PCIBIOS_SUCCESSFUL) {
9595                 dev_err(&pdev->dev, "Write of cache line size failed\n");
9596                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9597                 rc = -EIO;
9598                 goto cleanup_nomem;
9599         }
9600
9601         /* Issue MMIO read to ensure card is not in EEH */
9602         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9603         ipr_wait_for_pci_err_recovery(ioa_cfg);
9604
9605         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9606                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9607                         IPR_MAX_MSIX_VECTORS);
9608                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9609         }
9610
9611         if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9612                         ipr_enable_msix(ioa_cfg) == 0)
9613                 ioa_cfg->intr_flag = IPR_USE_MSIX;
9614         else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9615                         ipr_enable_msi(ioa_cfg) == 0)
9616                 ioa_cfg->intr_flag = IPR_USE_MSI;
9617         else {
9618                 ioa_cfg->intr_flag = IPR_USE_LSI;
9619                 ioa_cfg->nvectors = 1;
9620                 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9621         }
9622
9623         pci_set_master(pdev);
9624
9625         if (pci_channel_offline(pdev)) {
9626                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9627                 pci_set_master(pdev);
9628                 if (pci_channel_offline(pdev)) {
9629                         rc = -EIO;
9630                         goto out_msi_disable;
9631                 }
9632         }
9633
9634         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9635             ioa_cfg->intr_flag == IPR_USE_MSIX) {
9636                 rc = ipr_test_msi(ioa_cfg, pdev);
9637                 if (rc == -EOPNOTSUPP) {
9638                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9639                         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9640                                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9641                                 pci_disable_msi(pdev);
9642                          } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9643                                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9644                                 pci_disable_msix(pdev);
9645                         }
9646
9647                         ioa_cfg->intr_flag = IPR_USE_LSI;
9648                         ioa_cfg->nvectors = 1;
9649                 }
9650                 else if (rc)
9651                         goto out_msi_disable;
9652                 else {
9653                         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9654                                 dev_info(&pdev->dev,
9655                                         "Request for %d MSIs succeeded with starting IRQ: %d\n",
9656                                         ioa_cfg->nvectors, pdev->irq);
9657                         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9658                                 dev_info(&pdev->dev,
9659                                         "Request for %d MSIXs succeeded.\n",
9660                                         ioa_cfg->nvectors);
9661                 }
9662         }
9663
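        /*
         * Use one HRRQ per allocated interrupt vector, capped by the
         * number of online CPUs and the adapter maximum.
         */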
9664         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9665                                 (unsigned int)num_online_cpus(),
9666                                 (unsigned int)IPR_MAX_HRRQ_NUM);
9667
9668         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9669                 goto out_msi_disable;
9670
9671         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9672                 goto out_msi_disable;
9673
9674         rc = ipr_alloc_mem(ioa_cfg);
9675         if (rc < 0) {
9676                 dev_err(&pdev->dev,
9677                         "Couldn't allocate enough memory for device driver!\n");
9678                 goto out_msi_disable;
9679         }
9680
9681         /* Save away PCI config space for use following IOA reset */
9682         rc = pci_save_state(pdev);
9683
9684         if (rc != PCIBIOS_SUCCESSFUL) {
9685                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9686                 rc = -EIO;
9687                 goto cleanup_nolog;
9688         }
9689
9690         /*
9691          * If HRRQ updated interrupt is not masked, or reset alert is set,
9692          * the card is in an unknown state and needs a hard reset
9693          */
9694         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9695         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9696         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9697         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9698                 ioa_cfg->needs_hard_reset = 1;
9699         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9700                 ioa_cfg->needs_hard_reset = 1;
9701         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9702                 ioa_cfg->ioa_unit_checked = 1;
9703
9704         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9705         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9706         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9707
9708         if (ioa_cfg->intr_flag == IPR_USE_MSI
9709                         || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9710                 name_msi_vectors(ioa_cfg);
9711                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9712                         0,
9713                         ioa_cfg->vectors_info[0].desc,
9714                         &ioa_cfg->hrrq[0]);
9715                 if (!rc)
9716                         rc = ipr_request_other_msi_irqs(ioa_cfg);
9717         } else {
9718                 rc = request_irq(pdev->irq, ipr_isr,
9719                          IRQF_SHARED,
9720                          IPR_NAME, &ioa_cfg->hrrq[0]);
9721         }
9722         if (rc) {
9723                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9724                         pdev->irq, rc);
9725                 goto cleanup_nolog;
9726         }
9727
9728         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9729             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9730                 ioa_cfg->needs_warm_reset = 1;
9731                 ioa_cfg->reset = ipr_reset_slot_reset;
9732         } else
9733                 ioa_cfg->reset = ipr_reset_start_bist;
9734
9735         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9736         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9737         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9738
9739         LEAVE;
9740 out:
9741         return rc;
9742
9743 cleanup_nolog:
9744         ipr_free_mem(ioa_cfg);
9745 out_msi_disable:
9746         ipr_wait_for_pci_err_recovery(ioa_cfg);
9747         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9748                 pci_disable_msi(pdev);
9749         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9750                 pci_disable_msix(pdev);
9751 cleanup_nomem:
9752         iounmap(ipr_regs);
9753 out_disable:
9754         pci_disable_device(pdev);
9755 out_release_regions:
9756         pci_release_regions(pdev);
9757 out_scsi_host_put:
9758         scsi_host_put(host);
9759         goto out;
9760 }
9761
9762 /**
9763  * ipr_initiate_ioa_bringdown - Bring down an adapter
9764  * @ioa_cfg:            ioa config struct
9765  * @shutdown_type:      shutdown type
9766  *
9767  * Description: This function will initiate bringing down the adapter.
9768  * This consists of issuing an IOA shutdown to the adapter
9769  * to flush the cache, and running BIST.
9770  * If the caller needs to wait on the completion of the reset,
9771  * the caller must sleep on the reset_wait_q.
9772  *
9773  * Return value:
9774  *      none
9775  **/
9776 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9777                                        enum ipr_shutdown_type shutdown_type)
9778 {
9779         ENTER;
9780         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9781                 ioa_cfg->sdt_state = ABORT_DUMP;
9782         ioa_cfg->reset_retries = 0;
9783         ioa_cfg->in_ioa_bringdown = 1;
9784         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9785         LEAVE;
9786 }
9787
9788 /**
9789  * __ipr_remove - Remove a single adapter
9790  * @pdev:       pci device struct
9791  *
9792  * Adapter hot plug remove entry point.
9793  *
9794  * Return value:
9795  *      none
9796  **/
9797 static void __ipr_remove(struct pci_dev *pdev)
9798 {
9799         unsigned long host_lock_flags = 0;
9800         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9801         int i;
9802         unsigned long driver_lock_flags;
9803         ENTER;
9804
9805         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9806         while (ioa_cfg->in_reset_reload) {
9807                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9808                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9809                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9810         }
9811
9812         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9813                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9814                 ioa_cfg->hrrq[i].removing_ioa = 1;
9815                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9816         }
9817         wmb();
9818         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9819
9820         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9821         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9822         flush_work(&ioa_cfg->work_q);
9823         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9824         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9825
9826         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9827         list_del(&ioa_cfg->queue);
9828         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9829
9830         if (ioa_cfg->sdt_state == ABORT_DUMP)
9831                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9832         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9833
9834         ipr_free_all_resources(ioa_cfg);
9835
9836         LEAVE;
9837 }

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:       pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *      none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

        ENTER;

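        /* Tear down in the reverse order of ipr_probe()'s setup. */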
        ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
                              &ipr_trace_attr);
        ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
                             &ipr_dump_attr);
        scsi_remove_host(ioa_cfg->host);

        __ipr_remove(pdev);

        LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:       pci device struct
 * @dev_id:     pci device id struct
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
        struct ipr_ioa_cfg *ioa_cfg;
        int rc, i;

        rc = ipr_probe_ioa(pdev, dev_id);

        if (rc)
                return rc;

        ioa_cfg = pci_get_drvdata(pdev);
        rc = ipr_probe_ioa_part2(ioa_cfg);

        if (rc) {
                __ipr_remove(pdev);
                return rc;
        }

        rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

        if (rc) {
                __ipr_remove(pdev);
                return rc;
        }

        rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
                                   &ipr_trace_attr);

        if (rc) {
                scsi_remove_host(ioa_cfg->host);
                __ipr_remove(pdev);
                return rc;
        }

        rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
                                  &ipr_dump_attr);

        if (rc) {
                ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
                                      &ipr_trace_attr);
                scsi_remove_host(ioa_cfg->host);
                __ipr_remove(pdev);
                return rc;
        }

        scsi_scan_host(ioa_cfg->host);
        ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
                for (i = 1; i < ioa_cfg->hrrq_num; i++) {
                        blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
                                        ioa_cfg->iopoll_weight, ipr_iopoll);
                        blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
                }
        }

        schedule_work(&ioa_cfg->work_q);
        return 0;
}
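
/*
 * Note the unwind ladder in ipr_probe() above: each failure path undoes,
 * in reverse order, everything set up before it (trace file, SCSI host,
 * then __ipr_remove() for the IOA itself), so a failed probe leaves no
 * partial state behind.
 */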

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:       pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It issues
 * a normal shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *      none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
        unsigned long lock_flags = 0;
        int i;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
                ioa_cfg->iopoll_weight = 0;
                for (i = 1; i < ioa_cfg->hrrq_num; i++)
                        blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
        }

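        /* As in __ipr_remove(), wait out any reset already in progress. */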
        while (ioa_cfg->in_reset_reload) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        }

        ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static const struct pci_device_id ipr_pci_table[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
        { }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
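
/*
 * Each entry above follows struct pci_device_id's field order:
 * { vendor, device, subvendor, subdevice, class, class_mask, driver_data }.
 * class/class_mask are unused here; driver_data carries per-adapter
 * IPR_USE_* behavior flags (e.g. IPR_USE_LONG_TRANSOP_TIMEOUT) that are
 * examined during probe via the matched id entry passed to ipr_probe().
 */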

static const struct pci_error_handlers ipr_err_handler = {
        .error_detected = ipr_pci_error_detected,
        .mmio_enabled = ipr_pci_mmio_enabled,
        .slot_reset = ipr_pci_slot_reset,
};
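
/*
 * Standard PCI error recovery hooks: the PCI core invokes error_detected
 * when a bus error is reported, mmio_enabled once MMIO access has been
 * restored during recovery, and slot_reset after the slot has been
 * reset, allowing the driver to bring the adapter back.
 */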

static struct pci_driver ipr_driver = {
        .name = IPR_NAME,
        .id_table = ipr_pci_table,
        .probe = ipr_probe,
        .remove = ipr_remove,
        .shutdown = ipr_shutdown,
        .err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:         notifier block
 * @event:      reboot notifier event
 * @buf:        notifier data (unused)
 *
 * Return value:
 *      NOTIFY_OK on success / NOTIFY_DONE if the event is not handled
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioa_cfg *ioa_cfg;
        unsigned long flags = 0, driver_lock_flags;

        if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
                return NOTIFY_DONE;

        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

        list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
                if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
                        continue;
                }

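                /*
                 * Build a "shutdown prepare" IOA command; its completion
                 * handler, ipr_halt_done(), simply returns the command
                 * block to the free queue.
                 */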
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
                ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
                ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

                ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
        }
        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

        return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
        .notifier_call = ipr_halt,
};
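
/*
 * Registered at module init so that ipr_halt() runs on halt, reboot, or
 * power-off and can ask each adapter to flush its write cache before the
 * system goes down.
 */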

/**
 * ipr_init - Module entry point
 *
 * Return value:
 *      0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
        int rc;

        ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
                 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

        register_reboot_notifier(&ipr_notifier);
        rc = pci_register_driver(&ipr_driver);

        /* Don't leave the reboot notifier registered if the driver
         * failed to load. */
        if (rc)
                unregister_reboot_notifier(&ipr_notifier);

        return rc;
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *      none
 **/
static void __exit ipr_exit(void)
{
        unregister_reboot_notifier(&ipr_notifier);
        pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);