1 /*
2  * ipr.c -- driver for IBM Power Linux RAID adapters
3  *
4  * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) 2003, 2004 IBM Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23
24 /*
25  * Notes:
26  *
27  * This driver is used to control the following SCSI adapters:
28  *
29  * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30  *
31  * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32  *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33  *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34  *              Embedded SCSI adapter on p615 and p655 systems
35  *
36  * Supported Hardware Features:
37  *      - Ultra 320 SCSI controller
38  *      - PCI-X host interface
39  *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40  *      - Non-Volatile Write Cache
41  *      - Supports attachment of non-RAID disks, tape, and optical devices
42  *      - RAID Levels 0, 5, 10
43  *      - Hot spare
44  *      - Background Parity Checking
45  *      - Background Data Scrubbing
46  *      - Ability to increase the capacity of an existing RAID 5 disk array
47  *              by adding disks
48  *
49  * Driver Features:
50  *      - Tagged command queuing
51  *      - Adapter microcode download
52  *      - PCI hot plug
53  *      - SCSI device hot plug
54  *
55  */
56
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
79 #include <asm/io.h>
80 #include <asm/irq.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
87 #include "ipr.h"
88
89 /*
90  *   Global Data
91  */
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static unsigned int ipr_number_of_msix = 2;
102 static DEFINE_SPINLOCK(ipr_driver_lock);
103
104 /* This table describes the differences between DMA controller chips */
105 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
106         { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
107                 .mailbox = 0x0042C,
108                 .max_cmds = 100,
109                 .cache_line_size = 0x20,
110                 .clear_isr = 1,
111                 .iopoll_weight = 0,
112                 {
113                         .set_interrupt_mask_reg = 0x0022C,
114                         .clr_interrupt_mask_reg = 0x00230,
115                         .clr_interrupt_mask_reg32 = 0x00230,
116                         .sense_interrupt_mask_reg = 0x0022C,
117                         .sense_interrupt_mask_reg32 = 0x0022C,
118                         .clr_interrupt_reg = 0x00228,
119                         .clr_interrupt_reg32 = 0x00228,
120                         .sense_interrupt_reg = 0x00224,
121                         .sense_interrupt_reg32 = 0x00224,
122                         .ioarrin_reg = 0x00404,
123                         .sense_uproc_interrupt_reg = 0x00214,
124                         .sense_uproc_interrupt_reg32 = 0x00214,
125                         .set_uproc_interrupt_reg = 0x00214,
126                         .set_uproc_interrupt_reg32 = 0x00214,
127                         .clr_uproc_interrupt_reg = 0x00218,
128                         .clr_uproc_interrupt_reg32 = 0x00218
129                 }
130         },
131         { /* Snipe and Scamp */
132                 .mailbox = 0x0052C,
133                 .max_cmds = 100,
134                 .cache_line_size = 0x20,
135                 .clear_isr = 1,
136                 .iopoll_weight = 0,
137                 {
138                         .set_interrupt_mask_reg = 0x00288,
139                         .clr_interrupt_mask_reg = 0x0028C,
140                         .clr_interrupt_mask_reg32 = 0x0028C,
141                         .sense_interrupt_mask_reg = 0x00288,
142                         .sense_interrupt_mask_reg32 = 0x00288,
143                         .clr_interrupt_reg = 0x00284,
144                         .clr_interrupt_reg32 = 0x00284,
145                         .sense_interrupt_reg = 0x00280,
146                         .sense_interrupt_reg32 = 0x00280,
147                         .ioarrin_reg = 0x00504,
148                         .sense_uproc_interrupt_reg = 0x00290,
149                         .sense_uproc_interrupt_reg32 = 0x00290,
150                         .set_uproc_interrupt_reg = 0x00290,
151                         .set_uproc_interrupt_reg32 = 0x00290,
152                         .clr_uproc_interrupt_reg = 0x00294,
153                         .clr_uproc_interrupt_reg32 = 0x00294
154                 }
155         },
156         { /* CRoC */
157                 .mailbox = 0x00044,
158                 .max_cmds = 1000,
159                 .cache_line_size = 0x20,
160                 .clear_isr = 0,
161                 .iopoll_weight = 64,
162                 {
163                         .set_interrupt_mask_reg = 0x00010,
164                         .clr_interrupt_mask_reg = 0x00018,
165                         .clr_interrupt_mask_reg32 = 0x0001C,
166                         .sense_interrupt_mask_reg = 0x00010,
167                         .sense_interrupt_mask_reg32 = 0x00014,
168                         .clr_interrupt_reg = 0x00008,
169                         .clr_interrupt_reg32 = 0x0000C,
170                         .sense_interrupt_reg = 0x00000,
171                         .sense_interrupt_reg32 = 0x00004,
172                         .ioarrin_reg = 0x00070,
173                         .sense_uproc_interrupt_reg = 0x00020,
174                         .sense_uproc_interrupt_reg32 = 0x00024,
175                         .set_uproc_interrupt_reg = 0x00020,
176                         .set_uproc_interrupt_reg32 = 0x00024,
177                         .clr_uproc_interrupt_reg = 0x00028,
178                         .clr_uproc_interrupt_reg32 = 0x0002C,
179                         .init_feedback_reg = 0x0005C,
180                         .dump_addr_reg = 0x00064,
181                         .dump_data_reg = 0x00068,
182                         .endian_swap_reg = 0x00084
183                 }
184         },
185 };
186
187 static const struct ipr_chip_t ipr_chip[] = {
188         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
189         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
194         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
196         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
197 };
198
199 static int ipr_max_bus_speeds[] = {
200         IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
201 };
202
203 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
204 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
205 module_param_named(max_speed, ipr_max_speed, uint, 0);
206 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
207 module_param_named(log_level, ipr_log_level, uint, 0);
208 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
209 module_param_named(testmode, ipr_testmode, int, 0);
210 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
211 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
212 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
213 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
214 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
215 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
216 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
217 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
218 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
219 module_param_named(max_devs, ipr_max_devs, int, 0);
220 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
221                  "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
222 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
223 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1-16). (default: 2)");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(IPR_DRIVER_VERSION);
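/*
 * Illustrative module load (hypothetical values; see the MODULE_PARM_DESC
 * strings above for the valid ranges of each parameter):
 *
 *   modprobe ipr max_speed=2 log_level=4 number_of_msix=4
 */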
226
227 /*  A constant array of IOASCs/URCs/Error Messages */
228 static const
229 struct ipr_error_table_t ipr_error_table[] = {
230         {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
231         "8155: An unknown error was received"},
232         {0x00330000, 0, 0,
233         "Soft underlength error"},
234         {0x005A0000, 0, 0,
235         "Command to be cancelled not found"},
236         {0x00808000, 0, 0,
237         "Qualified success"},
238         {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
239         "FFFE: Soft device bus error recovered by the IOA"},
240         {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
241         "4101: Soft device bus fabric error"},
242         {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
243         "FFFC: Logical block guard error recovered by the device"},
244         {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
245         "FFFC: Logical block reference tag error recovered by the device"},
246         {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
247         "4171: Recovered scatter list tag / sequence number error"},
248         {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
249         "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
250         {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
251         "4171: Recovered logical block sequence number error on IOA to Host transfer"},
252         {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
253         "FFFD: Recovered logical block reference tag error detected by the IOA"},
254         {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
255         "FFFD: Logical block guard error recovered by the IOA"},
256         {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
257         "FFF9: Device sector reassign successful"},
258         {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
259         "FFF7: Media error recovered by device rewrite procedures"},
260         {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
261         "7001: IOA sector reassignment successful"},
262         {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
263         "FFF9: Soft media error. Sector reassignment recommended"},
264         {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
265         "FFF7: Media error recovered by IOA rewrite procedures"},
266         {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
267         "FF3D: Soft PCI bus error recovered by the IOA"},
268         {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
269         "FFF6: Device hardware error recovered by the IOA"},
270         {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
271         "FFF6: Device hardware error recovered by the device"},
272         {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
273         "FF3D: Soft IOA error recovered by the IOA"},
274         {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
275         "FFFA: Undefined device response recovered by the IOA"},
276         {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
277         "FFF6: Device bus error, message or command phase"},
278         {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
279         "FFFE: Task Management Function failed"},
280         {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
281         "FFF6: Failure prediction threshold exceeded"},
282         {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
283         "8009: Impending cache battery pack failure"},
284         {0x02040100, 0, 0,
285         "Logical Unit in process of becoming ready"},
286         {0x02040200, 0, 0,
287         "Initializing command required"},
288         {0x02040400, 0, 0,
289         "34FF: Disk device format in progress"},
290         {0x02040C00, 0, 0,
291         "Logical unit not accessible, target port in unavailable state"},
292         {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
293         "9070: IOA requested reset"},
294         {0x023F0000, 0, 0,
295         "Synchronization required"},
296         {0x02408500, 0, 0,
297         "IOA microcode download required"},
298         {0x02408600, 0, 0,
299         "Device bus connection is prohibited by host"},
300         {0x024E0000, 0, 0,
301         "Not ready, IOA shutdown"},
302         {0x025A0000, 0, 0,
303         "Not ready, IOA has been shutdown"},
304         {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
305         "3020: Storage subsystem configuration error"},
306         {0x03110B00, 0, 0,
307         "FFF5: Medium error, data unreadable, recommend reassign"},
308         {0x03110C00, 0, 0,
309         "7000: Medium error, data unreadable, do not reassign"},
310         {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
311         "FFF3: Disk media format bad"},
312         {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
313         "3002: Addressed device failed to respond to selection"},
314         {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
315         "3100: Device bus error"},
316         {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
317         "3109: IOA timed out a device command"},
318         {0x04088000, 0, 0,
319         "3120: SCSI bus is not operational"},
320         {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
321         "4100: Hard device bus fabric error"},
322         {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
323         "310C: Logical block guard error detected by the device"},
324         {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
325         "310C: Logical block reference tag error detected by the device"},
326         {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
327         "4170: Scatter list tag / sequence number error"},
328         {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
329         "8150: Logical block CRC error on IOA to Host transfer"},
330         {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
331         "4170: Logical block sequence number error on IOA to Host transfer"},
332         {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
333         "310D: Logical block reference tag error detected by the IOA"},
334         {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
335         "310D: Logical block guard error detected by the IOA"},
336         {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
337         "9000: IOA reserved area data check"},
338         {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
339         "9001: IOA reserved area invalid data pattern"},
340         {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
341         "9002: IOA reserved area LRC error"},
342         {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
343         "Hardware Error, IOA metadata access error"},
344         {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
345         "102E: Out of alternate sectors for disk storage"},
346         {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
347         "FFF4: Data transfer underlength error"},
348         {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
349         "FFF4: Data transfer overlength error"},
350         {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
351         "3400: Logical unit failure"},
352         {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
353         "FFF4: Device microcode is corrupt"},
354         {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
355         "8150: PCI bus error"},
356         {0x04430000, 1, 0,
357         "Unsupported device bus message received"},
358         {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
359         "FFF4: Disk device problem"},
360         {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
361         "8150: Permanent IOA failure"},
362         {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
363         "3010: Disk device returned wrong response to IOA"},
364         {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
365         "8151: IOA microcode error"},
366         {0x04448500, 0, 0,
367         "Device bus status error"},
368         {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
369         "8157: IOA error requiring IOA reset to recover"},
370         {0x04448700, 0, 0,
371         "ATA device status error"},
372         {0x04490000, 0, 0,
373         "Message reject received from the device"},
374         {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
375         "8008: A permanent cache battery pack failure occurred"},
376         {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
377         "9090: Disk unit has been modified after the last known status"},
378         {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
379         "9081: IOA detected device error"},
380         {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
381         "9082: IOA detected device error"},
382         {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
383         "3110: Device bus error, message or command phase"},
384         {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
385         "3110: SAS Command / Task Management Function failed"},
386         {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
387         "9091: Incorrect hardware configuration change has been detected"},
388         {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
389         "9073: Invalid multi-adapter configuration"},
390         {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
391         "4010: Incorrect connection between cascaded expanders"},
392         {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
393         "4020: Connections exceed IOA design limits"},
394         {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
395         "4030: Incorrect multipath connection"},
396         {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
397         "4110: Unsupported enclosure function"},
398         {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
399         "4120: SAS cable VPD cannot be read"},
400         {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
401         "FFF4: Command to logical unit failed"},
402         {0x05240000, 1, 0,
403         "Illegal request, invalid request type or request packet"},
404         {0x05250000, 0, 0,
405         "Illegal request, invalid resource handle"},
406         {0x05258000, 0, 0,
407         "Illegal request, commands not allowed to this device"},
408         {0x05258100, 0, 0,
409         "Illegal request, command not allowed to a secondary adapter"},
410         {0x05258200, 0, 0,
411         "Illegal request, command not allowed to a non-optimized resource"},
412         {0x05260000, 0, 0,
413         "Illegal request, invalid field in parameter list"},
414         {0x05260100, 0, 0,
415         "Illegal request, parameter not supported"},
416         {0x05260200, 0, 0,
417         "Illegal request, parameter value invalid"},
418         {0x052C0000, 0, 0,
419         "Illegal request, command sequence error"},
420         {0x052C8000, 1, 0,
421         "Illegal request, dual adapter support not enabled"},
422         {0x052C8100, 1, 0,
423         "Illegal request, another cable connector was physically disabled"},
424         {0x054E8000, 1, 0,
425         "Illegal request, inconsistent group id/group count"},
426         {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
427         "9031: Array protection temporarily suspended, protection resuming"},
428         {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
429         "9040: Array protection temporarily suspended, protection resuming"},
430         {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
431         "4080: IOA exceeded maximum operating temperature"},
432         {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
433         "4085: Service required"},
434         {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
435         "3140: Device bus not ready to ready transition"},
436         {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
437         "FFFB: SCSI bus was reset"},
438         {0x06290500, 0, 0,
439         "FFFE: SCSI bus transition to single ended"},
440         {0x06290600, 0, 0,
441         "FFFE: SCSI bus transition to LVD"},
442         {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
443         "FFFB: SCSI bus was reset by another initiator"},
444         {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
445         "3029: A device replacement has occurred"},
446         {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
447         "4102: Device bus fabric performance degradation"},
448         {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
449         "9051: IOA cache data exists for a missing or failed device"},
450         {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
451         "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
452         {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
453         "9025: Disk unit is not supported at its physical location"},
454         {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
455         "3020: IOA detected a SCSI bus configuration error"},
456         {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
457         "3150: SCSI bus configuration error"},
458         {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
459         "9074: Asymmetric advanced function disk configuration"},
460         {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
461         "4040: Incomplete multipath connection between IOA and enclosure"},
462         {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
463         "4041: Incomplete multipath connection between enclosure and device"},
464         {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
465         "9075: Incomplete multipath connection between IOA and remote IOA"},
466         {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
467         "9076: Configuration error, missing remote IOA"},
468         {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
469         "4050: Enclosure does not support a required multipath function"},
470         {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
471         "4121: Configuration error, required cable is missing"},
472         {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
473         "4122: Cable is not plugged into the correct location on remote IOA"},
474         {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
475         "4123: Configuration error, invalid cable vital product data"},
476         {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
477         "4124: Configuration error, both cable ends are plugged into the same IOA"},
478         {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
479         "4070: Logically bad block written on device"},
480         {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
481         "9041: Array protection temporarily suspended"},
482         {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
483         "9042: Corrupt array parity detected on specified device"},
484         {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
485         "9030: Array no longer protected due to missing or failed disk unit"},
486         {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
487         "9071: Link operational transition"},
488         {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
489         "9072: Link not operational transition"},
490         {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
491         "9032: Array exposed but still protected"},
492         {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
493         "70DD: Device forced failed by disrupt device command"},
494         {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
495         "4061: Multipath redundancy level got better"},
496         {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
497         "4060: Multipath redundancy level got worse"},
498         {0x07270000, 0, 0,
499         "Failure due to other device"},
500         {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
501         "9008: IOA does not support functions expected by devices"},
502         {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
503         "9010: Cache data associated with attached devices cannot be found"},
504         {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
505         "9011: Cache data belongs to devices other than those attached"},
506         {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
507         "9020: Array missing 2 or more devices with only 1 device present"},
508         {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
509         "9021: Array missing 2 or more devices with 2 or more devices present"},
510         {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
511         "9022: Exposed array is missing a required device"},
512         {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
513         "9023: Array member(s) not at required physical locations"},
514         {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
515         "9024: Array not functional due to present hardware configuration"},
516         {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
517         "9026: Array not functional due to present hardware configuration"},
518         {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
519         "9027: Array is missing a device and parity is out of sync"},
520         {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
521         "9028: Maximum number of arrays already exist"},
522         {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
523         "9050: Required cache data cannot be located for a disk unit"},
524         {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
525         "9052: Cache data exists for a device that has been modified"},
526         {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
527         "9054: IOA resources not available due to previous problems"},
528         {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
529         "9092: Disk unit requires initialization before use"},
530         {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
531         "9029: Incorrect hardware configuration change has been detected"},
532         {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
533         "9060: One or more disk pairs are missing from an array"},
534         {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
535         "9061: One or more disks are missing from an array"},
536         {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
537         "9062: One or more disks are missing from an array"},
538         {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
539         "9063: Maximum number of functional arrays has been exceeded"},
540         {0x07279A00, 0, 0,
541         "Data protect, other volume set problem"},
542         {0x0B260000, 0, 0,
543         "Aborted command, invalid descriptor"},
544         {0x0B3F9000, 0, 0,
545         "Target operating conditions have changed, dual adapter takeover"},
546         {0x0B530200, 0, 0,
547         "Aborted command, medium removal prevented"},
548         {0x0B5A0000, 0, 0,
549         "Command terminated by host"},
550         {0x0B5B8000, 0, 0,
551         "Aborted command, command terminated by host"}
552 };
553
554 static const struct ipr_ses_table_entry ipr_ses_table[] = {
555         { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
556         { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
557         { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
558         { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
559         { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
560         { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
561         { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
562         { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
563         { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
564         { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
565         { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
566         { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
567         { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
568 };
569
570 /*
571  *  Function Prototypes
572  */
573 static int ipr_reset_alert(struct ipr_cmnd *);
574 static void ipr_process_ccn(struct ipr_cmnd *);
575 static void ipr_process_error(struct ipr_cmnd *);
576 static void ipr_reset_ioa_job(struct ipr_cmnd *);
577 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
578                                    enum ipr_shutdown_type);
579
580 #ifdef CONFIG_SCSI_IPR_TRACE
581 /**
582  * ipr_trc_hook - Add a trace entry to the driver trace
583  * @ipr_cmd:    ipr command struct
584  * @type:               trace type
585  * @add_data:   additional data
586  *
587  * Return value:
588  *      none
589  **/
590 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
591                          u8 type, u32 add_data)
592 {
593         struct ipr_trace_entry *trace_entry;
594         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
595
596         trace_entry = &ioa_cfg->trace[atomic_add_return
597                         (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
598         trace_entry->time = jiffies;
599         trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
600         trace_entry->type = type;
601         if (ipr_cmd->ioa_cfg->sis64)
602                 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
603         else
604                 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
605         trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
606         trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
607         trace_entry->u.add_data = add_data;
608         wmb();
609 }
610 #else
611 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
612 #endif
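/*
 * Indexing sketch for the trace ring above: each caller atomically bumps
 * trace_index and reduces it modulo IPR_NUM_TRACE_ENTRIES, so concurrent
 * callers claim distinct slots and the ring overwrites its oldest entries
 * once it wraps. With a hypothetical ring size of 4:
 *
 *   slot = atomic_add_return(1, &idx) % 4;   // yields 1, 2, 3, 0, 1, ...
 */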
613
614 /**
615  * ipr_lock_and_done - Acquire lock and complete command
616  * @ipr_cmd:    ipr command struct
617  *
618  * Return value:
619  *      none
620  **/
621 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
622 {
623         unsigned long lock_flags;
624         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
625
626         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
627         ipr_cmd->done(ipr_cmd);
628         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
629 }
630
631 /**
632  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
633  * @ipr_cmd:    ipr command struct
634  *
635  * Return value:
636  *      none
637  **/
638 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
639 {
640         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
641         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
642         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
643         dma_addr_t dma_addr = ipr_cmd->dma_addr;
644         int hrrq_id;
645
646         hrrq_id = ioarcb->cmd_pkt.hrrq_id;
647         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
648         ioarcb->cmd_pkt.hrrq_id = hrrq_id;
649         ioarcb->data_transfer_length = 0;
650         ioarcb->read_data_transfer_length = 0;
651         ioarcb->ioadl_len = 0;
652         ioarcb->read_ioadl_len = 0;
653
654         if (ipr_cmd->ioa_cfg->sis64) {
655                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
656                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
657                 ioasa64->u.gata.status = 0;
658         } else {
659                 ioarcb->write_ioadl_addr =
660                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
661                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
662                 ioasa->u.gata.status = 0;
663         }
664
665         ioasa->hdr.ioasc = 0;
666         ioasa->hdr.residual_data_len = 0;
667         ipr_cmd->scsi_cmd = NULL;
668         ipr_cmd->qc = NULL;
669         ipr_cmd->sense_buffer[0] = 0;
670         ipr_cmd->dma_use_sg = 0;
671 }
672
673 /**
674  * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
675  * @ipr_cmd:    ipr command struct
 * @fast_done:  done callback invoked for fast-path command completion
676  *
677  * Return value:
678  *      none
679  **/
680 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
681                               void (*fast_done) (struct ipr_cmnd *))
682 {
683         ipr_reinit_ipr_cmnd(ipr_cmd);
684         ipr_cmd->u.scratch = 0;
685         ipr_cmd->sibling = NULL;
686         ipr_cmd->fast_done = fast_done;
687         init_timer(&ipr_cmd->timer);
688 }
689
690 /**
691  * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
692  * @hrrq:       hrr queue to take the command block from
693  *
694  * Return value:
695  *      pointer to ipr command struct, or NULL if the free queue is empty
696  **/
697 static
698 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
699 {
700         struct ipr_cmnd *ipr_cmd = NULL;
701
702         if (likely(!list_empty(&hrrq->hrrq_free_q))) {
703                 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
704                         struct ipr_cmnd, queue);
705                 list_del(&ipr_cmd->queue);
706         }
707
709         return ipr_cmd;
710 }
711
712 /**
713  * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
714  * @ioa_cfg:    ioa config struct
715  *
716  * Return value:
717  *      pointer to ipr command struct
718  **/
719 static
720 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
721 {
722         struct ipr_cmnd *ipr_cmd =
723                 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
724         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
725         return ipr_cmd;
726 }
727
728 /**
729  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
730  * @ioa_cfg:    ioa config struct
731  * @clr_ints:     interrupts to clear
732  *
733  * This function masks all interrupts on the adapter, then clears the
734  * interrupts specified in the mask
735  *
736  * Return value:
737  *      none
738  **/
739 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
740                                           u32 clr_ints)
741 {
742         volatile u32 int_reg;
743         int i;
744
745         /* Stop new interrupts */
746         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
747                 spin_lock(&ioa_cfg->hrrq[i]._lock);
748                 ioa_cfg->hrrq[i].allow_interrupts = 0;
749                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
750         }
751         wmb();
752
753         /* Set interrupt mask to stop all new interrupts */
754         if (ioa_cfg->sis64)
755                 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
756         else
757                 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
758
759         /* Clear any pending interrupts */
760         if (ioa_cfg->sis64)
761                 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
762         writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
763         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
764 }
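/*
 * The trailing readl() of sense_interrupt_reg above acts as a posted-write
 * flush: it forces the preceding mask and clear MMIO writes to reach the
 * adapter before this function returns.
 */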
765
766 /**
767  * ipr_save_pcix_cmd_reg - Save PCI-X command register
768  * @ioa_cfg:    ioa config struct
769  *
770  * Return value:
771  *      0 on success / -EIO on failure
772  **/
773 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
774 {
775         int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
776
777         if (pcix_cmd_reg == 0)
778                 return 0;
779
780         if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
781                                  &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
782                 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
783                 return -EIO;
784         }
785
786         ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
787         return 0;
788 }
789
790 /**
791  * ipr_set_pcix_cmd_reg - Setup PCI-X command register
792  * @ioa_cfg:    ioa config struct
793  *
794  * Return value:
795  *      0 on success / -EIO on failure
796  **/
797 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
798 {
799         int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
800
801         if (pcix_cmd_reg) {
802                 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
803                                           ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
804                         dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
805                         return -EIO;
806                 }
807         }
808
809         return 0;
810 }
811
812 /**
813  * ipr_sata_eh_done - done function for aborted SATA commands
814  * @ipr_cmd:    ipr command struct
815  *
816  * This function is invoked for ops generated to SATA
817  * devices which are being aborted.
818  *
819  * Return value:
820  *      none
821  **/
822 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
823 {
824         struct ata_queued_cmd *qc = ipr_cmd->qc;
825         struct ipr_sata_port *sata_port = qc->ap->private_data;
826
827         qc->err_mask |= AC_ERR_OTHER;
828         sata_port->ioasa.status |= ATA_BUSY;
829         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
830         ata_qc_complete(qc);
831 }
832
833 /**
834  * ipr_scsi_eh_done - mid-layer done function for aborted ops
835  * @ipr_cmd:    ipr command struct
836  *
837  * This function is invoked by the interrupt handler for
838  * ops generated by the SCSI mid-layer which are being aborted.
839  *
840  * Return value:
841  *      none
842  **/
843 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
844 {
845         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
846
847         scsi_cmd->result |= (DID_ERROR << 16);
848
849         scsi_dma_unmap(ipr_cmd->scsi_cmd);
850         scsi_cmd->scsi_done(scsi_cmd);
851         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
852 }
853
854 /**
855  * ipr_fail_all_ops - Fails all outstanding ops.
856  * @ioa_cfg:    ioa config struct
857  *
858  * This function fails all outstanding ops.
859  *
860  * Return value:
861  *      none
862  **/
863 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
864 {
865         struct ipr_cmnd *ipr_cmd, *temp;
866         struct ipr_hrr_queue *hrrq;
867
868         ENTER;
869         for_each_hrrq(hrrq, ioa_cfg) {
870                 spin_lock(&hrrq->_lock);
871                 list_for_each_entry_safe(ipr_cmd,
872                                         temp, &hrrq->hrrq_pending_q, queue) {
873                         list_del(&ipr_cmd->queue);
874
875                         ipr_cmd->s.ioasa.hdr.ioasc =
876                                 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
877                         ipr_cmd->s.ioasa.hdr.ilid =
878                                 cpu_to_be32(IPR_DRIVER_ILID);
879
880                         if (ipr_cmd->scsi_cmd)
881                                 ipr_cmd->done = ipr_scsi_eh_done;
882                         else if (ipr_cmd->qc)
883                                 ipr_cmd->done = ipr_sata_eh_done;
884
885                         ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
886                                      IPR_IOASC_IOA_WAS_RESET);
887                         del_timer(&ipr_cmd->timer);
888                         ipr_cmd->done(ipr_cmd);
889                 }
890                 spin_unlock(&hrrq->_lock);
891         }
892         LEAVE;
893 }
894
895 /**
896  * ipr_send_command -  Send driver initiated requests.
897  * @ipr_cmd:            ipr command struct
898  *
899  * This function sends a command to the adapter using the correct write call.
900  * In the case of sis64, it calculates the required ioarcb size and ORs
901  * the appropriate size bits into the IOARCB address.
902  *
903  * Return value:
904  *      none
905  **/
906 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
907 {
908         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
909         dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
910
911         if (ioa_cfg->sis64) {
912                 /* The default size is 256 bytes */
913                 send_dma_addr |= 0x1;
914
915                 /* If the number of ioadls * size of ioadl > 128 bytes,
916                    then use a 512 byte ioarcb */
917                 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
918                         send_dma_addr |= 0x4;
919                 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
920         } else
921                 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
922 }
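/*
 * SIS64 IOARRIN encoding, restating the logic above (the IOARCB is
 * aligned, leaving the low-order address bits free for flags):
 *
 *   addr | 0x1          - 256 byte IOARCB (the default)
 *   addr | 0x1 | 0x4    - 512 byte IOARCB, selected once the inline
 *                         scatter list exceeds 128 bytes
 */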
923
924 /**
925  * ipr_do_req -  Send driver initiated requests.
926  * @ipr_cmd:            ipr command struct
927  * @done:                       done function
928  * @timeout_func:       timeout function
929  * @timeout:            timeout value
930  *
931  * This function sends the specified command to the adapter with the
932  * timeout given. The done function is invoked on command completion.
933  *
934  * Return value:
935  *      none
936  **/
937 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
938                        void (*done) (struct ipr_cmnd *),
939                        void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
940 {
941         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
942
943         ipr_cmd->done = done;
944
945         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
946         ipr_cmd->timer.expires = jiffies + timeout;
947         ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
948
949         add_timer(&ipr_cmd->timer);
950
951         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
952
953         ipr_send_command(ipr_cmd);
954 }
955
956 /**
957  * ipr_internal_cmd_done - Op done function for an internally generated op.
958  * @ipr_cmd:    ipr command struct
959  *
960  * This function is the op done function for an internally generated,
961  * blocking op. It simply wakes the sleeping thread.
962  *
963  * Return value:
964  *      none
965  **/
966 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
967 {
968         if (ipr_cmd->sibling)
969                 ipr_cmd->sibling = NULL;
970         else
971                 complete(&ipr_cmd->completion);
972 }
973
974 /**
975  * ipr_init_ioadl - initialize the ioadl for the correct SIS type
976  * @ipr_cmd:    ipr command struct
977  * @dma_addr:   dma address
978  * @len:        transfer length
979  * @flags:      ioadl flag value
980  *
981  * This function initializes an ioadl in the case where there is only a single
982  * descriptor.
983  *
984  * Return value:
985  *      nothing
986  **/
987 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
988                            u32 len, int flags)
989 {
990         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
991         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
992
993         ipr_cmd->dma_use_sg = 1;
994
995         if (ipr_cmd->ioa_cfg->sis64) {
996                 ioadl64->flags = cpu_to_be32(flags);
997                 ioadl64->data_len = cpu_to_be32(len);
998                 ioadl64->address = cpu_to_be64(dma_addr);
999
1000                 ipr_cmd->ioarcb.ioadl_len =
1001                         cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1002                 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1003         } else {
1004                 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1005                 ioadl->address = cpu_to_be32(dma_addr);
1006
1007                 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1008                         ipr_cmd->ioarcb.read_ioadl_len =
1009                                 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1010                         ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1011                 } else {
1012                         ipr_cmd->ioarcb.ioadl_len =
1013                                 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1014                         ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1015                 }
1016         }
1017 }
1018
1019 /**
1020  * ipr_send_blocking_cmd - Send command and sleep on its completion.
1021  * @ipr_cmd:    ipr command struct
1022  * @timeout_func:       function to invoke if command times out
1023  * @timeout:    timeout
1024  *
1025  * Return value:
1026  *      none
1027  **/
1028 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1029                                   void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1030                                   u32 timeout)
1031 {
1032         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1033
1034         init_completion(&ipr_cmd->completion);
1035         ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1036
1037         spin_unlock_irq(ioa_cfg->host->host_lock);
1038         wait_for_completion(&ipr_cmd->completion);
1039         spin_lock_irq(ioa_cfg->host->host_lock);
1040 }
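/*
 * Locking note: callers hold host_lock. It is dropped across
 * wait_for_completion() so the interrupt path can take the lock to
 * complete the command, then re-acquired before returning.
 */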
1041
1042 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1043 {
1044         if (ioa_cfg->hrrq_num == 1)
1045                 return 0;
1046         else
1047                 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
1048 }
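/*
 * Selection sketch: HRRQ 0 (IPR_INIT_HRRQ) is kept for internal and
 * initialization commands (see ipr_get_free_ipr_cmnd), so with
 * hrrq_num == 4 the arithmetic above rotates I/O across queues 1-3.
 */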
1049
1050 /**
1051  * ipr_send_hcam - Send an HCAM to the adapter.
1052  * @ioa_cfg:    ioa config struct
1053  * @type:               HCAM type
1054  * @hostrcb:    hostrcb struct
1055  *
1056  * This function will send a Host Controlled Async command to the adapter.
1057  * If HCAMs are currently not allowed to be issued to the adapter, it will
1058  * place the hostrcb on the free queue.
1059  *
1060  * Return value:
1061  *      none
1062  **/
1063 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1064                           struct ipr_hostrcb *hostrcb)
1065 {
1066         struct ipr_cmnd *ipr_cmd;
1067         struct ipr_ioarcb *ioarcb;
1068
1069         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1070                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1071                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1072                 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1073
1074                 ipr_cmd->u.hostrcb = hostrcb;
1075                 ioarcb = &ipr_cmd->ioarcb;
1076
1077                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1078                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1079                 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1080                 ioarcb->cmd_pkt.cdb[1] = type;
1081                 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1082                 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1083
1084                 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1085                                sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1086
1087                 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1088                         ipr_cmd->done = ipr_process_ccn;
1089                 else
1090                         ipr_cmd->done = ipr_process_error;
1091
1092                 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1093
1094                 ipr_send_command(ipr_cmd);
1095         } else {
1096                 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1097         }
1098 }
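/*
 * Usage note: by design, an HCAM of each type is normally kept outstanding
 * while allow_cmds is set; when the adapter completes one to report an
 * event, the done routine (ipr_process_ccn or ipr_process_error) handles
 * it and sends the hostrcb back down.
 */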
1099
1100 /**
1101  * ipr_update_ata_class - Update the ata class in the resource entry
1102  * @res:        resource entry struct
1103  * @proto:      cfgte device bus protocol value
1104  *
1105  * Return value:
1106  *      none
1107  **/
1108 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1109 {
1110         switch (proto) {
1111         case IPR_PROTO_SATA:
1112         case IPR_PROTO_SAS_STP:
1113                 res->ata_class = ATA_DEV_ATA;
1114                 break;
1115         case IPR_PROTO_SATA_ATAPI:
1116         case IPR_PROTO_SAS_STP_ATAPI:
1117                 res->ata_class = ATA_DEV_ATAPI;
1118                 break;
1119         default:
1120                 res->ata_class = ATA_DEV_UNKNOWN;
1121                 break;
1122         }
1123 }
1124
1125 /**
1126  * ipr_init_res_entry - Initialize a resource entry struct.
1127  * @res:        resource entry struct
1128  * @cfgtew:     config table entry wrapper struct
1129  *
1130  * Return value:
1131  *      none
1132  **/
1133 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1134                                struct ipr_config_table_entry_wrapper *cfgtew)
1135 {
1136         int found = 0;
1137         unsigned int proto;
1138         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1139         struct ipr_resource_entry *gscsi_res = NULL;
1140
1141         res->needs_sync_complete = 0;
1142         res->in_erp = 0;
1143         res->add_to_ml = 0;
1144         res->del_from_ml = 0;
1145         res->resetting_device = 0;
1146         res->reset_occurred = 0;
1147         res->sdev = NULL;
1148         res->sata_port = NULL;
1149
1150         if (ioa_cfg->sis64) {
1151                 proto = cfgtew->u.cfgte64->proto;
1152                 res->res_flags = cfgtew->u.cfgte64->res_flags;
1153                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1154                 res->type = cfgtew->u.cfgte64->res_type;
1155
1156                 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1157                         sizeof(res->res_path));
1158
1159                 res->bus = 0;
1160                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1161                         sizeof(res->dev_lun.scsi_lun));
1162                 res->lun = scsilun_to_int(&res->dev_lun);
1163
1164                 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1165                         list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1166                                 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1167                                         found = 1;
1168                                         res->target = gscsi_res->target;
1169                                         break;
1170                                 }
1171                         }
1172                         if (!found) {
1173                                 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1174                                                                   ioa_cfg->max_devs_supported);
1175                                 set_bit(res->target, ioa_cfg->target_ids);
1176                         }
1177                 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1178                         res->bus = IPR_IOAFP_VIRTUAL_BUS;
1179                         res->target = 0;
1180                 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1181                         res->bus = IPR_ARRAY_VIRTUAL_BUS;
1182                         res->target = find_first_zero_bit(ioa_cfg->array_ids,
1183                                                           ioa_cfg->max_devs_supported);
1184                         set_bit(res->target, ioa_cfg->array_ids);
1185                 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1186                         res->bus = IPR_VSET_VIRTUAL_BUS;
1187                         res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1188                                                           ioa_cfg->max_devs_supported);
1189                         set_bit(res->target, ioa_cfg->vset_ids);
1190                 } else {
1191                         res->target = find_first_zero_bit(ioa_cfg->target_ids,
1192                                                           ioa_cfg->max_devs_supported);
1193                         set_bit(res->target, ioa_cfg->target_ids);
1194                 }
1195         } else {
1196                 proto = cfgtew->u.cfgte->proto;
1197                 res->qmodel = IPR_QUEUEING_MODEL(res);
1198                 res->flags = cfgtew->u.cfgte->flags;
1199                 if (res->flags & IPR_IS_IOA_RESOURCE)
1200                         res->type = IPR_RES_TYPE_IOAFP;
1201                 else
1202                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1203
1204                 res->bus = cfgtew->u.cfgte->res_addr.bus;
1205                 res->target = cfgtew->u.cfgte->res_addr.target;
1206                 res->lun = cfgtew->u.cfgte->res_addr.lun;
1207                 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1208         }
1209
1210         ipr_update_ata_class(res, proto);
1211 }
1212
1213 /**
1214  * ipr_is_same_device - Determine if two devices are the same.
1215  * @res:        resource entry struct
1216  * @cfgtew:     config table entry wrapper struct
1217  *
1218  * Return value:
1219  *      1 if the devices are the same / 0 otherwise
1220  **/
1221 static int ipr_is_same_device(struct ipr_resource_entry *res,
1222                               struct ipr_config_table_entry_wrapper *cfgtew)
1223 {
1224         if (res->ioa_cfg->sis64) {
1225                 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1226                                         sizeof(cfgtew->u.cfgte64->dev_id)) &&
1227                         !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1228                                         sizeof(cfgtew->u.cfgte64->lun))) {
1229                         return 1;
1230                 }
1231         } else {
1232                 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1233                     res->target == cfgtew->u.cfgte->res_addr.target &&
1234                     res->lun == cfgtew->u.cfgte->res_addr.lun)
1235                         return 1;
1236         }
1237
1238         return 0;
1239 }
1240
1241 /**
1242  * __ipr_format_res_path - Format the resource path for printing.
1243  * @res_path:   resource path
1244  * @buffer:     buffer
1245  * @len:        length of buffer provided
1246  *
1247  * Return value:
1248  *      pointer to buffer
1249  **/
1250 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1251 {
1252         int i;
1253         char *p = buffer;
1254
1255         *p = '\0';
1256         p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1257         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1258                 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1259
1260         return buffer;
1261 }
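/*
 * Example: a res_path of { 0x00, 0x01, 0x02, 0xff, ... } formats as
 * "00-01-02"; the 0xff byte terminates the path.
 */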
1262
1263 /**
1264  * ipr_format_res_path - Format the resource path for printing.
1265  * @ioa_cfg:    ioa config struct
1266  * @res_path:   resource path
1267  * @buffer:     buffer
1268  * @len:        length of buffer provided
1269  *
1270  * Return value:
1271  *      pointer to buffer
1272  **/
1273 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1274                                  u8 *res_path, char *buffer, int len)
1275 {
1276         char *p = buffer;
1277
1278         *p = '\0';
1279         p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1280         __ipr_format_res_path(res_path, p, len - (p - buffer));
1281         return buffer;
1282 }
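/*
 * Example: on SCSI host 2, the res_path shown above would render as
 * "2/00-01-02".
 */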
1283
1284 /**
1285  * ipr_update_res_entry - Update the resource entry.
1286  * @res:        resource entry struct
1287  * @cfgtew:     config table entry wrapper struct
1288  *
1289  * Return value:
1290  *      none
1291  **/
1292 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1293                                  struct ipr_config_table_entry_wrapper *cfgtew)
1294 {
1295         char buffer[IPR_MAX_RES_PATH_LENGTH];
1296         unsigned int proto;
1297         int new_path = 0;
1298
1299         if (res->ioa_cfg->sis64) {
1300                 res->flags = cfgtew->u.cfgte64->flags;
1301                 res->res_flags = cfgtew->u.cfgte64->res_flags;
1302                 res->type = cfgtew->u.cfgte64->res_type;
1303
1304                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1305                         sizeof(struct ipr_std_inq_data));
1306
1307                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1308                 proto = cfgtew->u.cfgte64->proto;
1309                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1310                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1311
1312                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1313                         sizeof(res->dev_lun.scsi_lun));
1314
1315                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1316                                         sizeof(res->res_path))) {
1317                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1318                                 sizeof(res->res_path));
1319                         new_path = 1;
1320                 }
1321
1322                 if (res->sdev && new_path)
1323                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1324                                     ipr_format_res_path(res->ioa_cfg,
1325                                         res->res_path, buffer, sizeof(buffer)));
1326         } else {
1327                 res->flags = cfgtew->u.cfgte->flags;
1328                 if (res->flags & IPR_IS_IOA_RESOURCE)
1329                         res->type = IPR_RES_TYPE_IOAFP;
1330                 else
1331                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1332
1333                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1334                         sizeof(struct ipr_std_inq_data));
1335
1336                 res->qmodel = IPR_QUEUEING_MODEL(res);
1337                 proto = cfgtew->u.cfgte->proto;
1338                 res->res_handle = cfgtew->u.cfgte->res_handle;
1339         }
1340
1341         ipr_update_ata_class(res, proto);
1342 }
1343
1344 /**
1345  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1346  *                        for the resource.
1347  * @res:        resource entry struct
1349  *
1350  * Return value:
1351  *      none
1352  **/
1353 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1354 {
1355         struct ipr_resource_entry *gscsi_res = NULL;
1356         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1357
1358         if (!ioa_cfg->sis64)
1359                 return;
1360
1361         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1362                 clear_bit(res->target, ioa_cfg->array_ids);
1363         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1364                 clear_bit(res->target, ioa_cfg->vset_ids);
1365         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1366                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1367                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1368                                 return;
1369                 clear_bit(res->target, ioa_cfg->target_ids);
1370
1371         } else if (res->bus == 0)
1372                 clear_bit(res->target, ioa_cfg->target_ids);
1373 }
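/*
 * On sis64 adapters, virtual target numbers are handed out from the
 * array_ids/vset_ids/target_ids bitmaps, so a target id must be released
 * here when its resource goes away.  A generic SCSI target id is only
 * cleared once no other resource on the used queue shares the same
 * dev_id, presumably because several resource entries (e.g. multiple
 * LUNs) can share one dev_id.
 */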
1374
1375 /**
1376  * ipr_handle_config_change - Handle a config change from the adapter
1377  * @ioa_cfg:    ioa config struct
1378  * @hostrcb:    hostrcb
1379  *
1380  * Return value:
1381  *      none
1382  **/
1383 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1384                                      struct ipr_hostrcb *hostrcb)
1385 {
1386         struct ipr_resource_entry *res = NULL;
1387         struct ipr_config_table_entry_wrapper cfgtew;
1388         __be32 cc_res_handle;
1390         u32 is_ndn = 1;
1391
1392         if (ioa_cfg->sis64) {
1393                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1394                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1395         } else {
1396                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1397                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1398         }
1399
1400         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1401                 if (res->res_handle == cc_res_handle) {
1402                         is_ndn = 0;
1403                         break;
1404                 }
1405         }
1406
1407         if (is_ndn) {
1408                 if (list_empty(&ioa_cfg->free_res_q)) {
1409                         ipr_send_hcam(ioa_cfg,
1410                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1411                                       hostrcb);
1412                         return;
1413                 }
1414
1415                 res = list_entry(ioa_cfg->free_res_q.next,
1416                                  struct ipr_resource_entry, queue);
1417
1418                 list_del(&res->queue);
1419                 ipr_init_res_entry(res, &cfgtew);
1420                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1421         }
1422
1423         ipr_update_res_entry(res, &cfgtew);
1424
1425         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1426                 if (res->sdev) {
1427                         res->del_from_ml = 1;
1428                         res->res_handle = IPR_INVALID_RES_HANDLE;
1429                         if (ioa_cfg->allow_ml_add_del)
1430                                 schedule_work(&ioa_cfg->work_q);
1431                 } else {
1432                         ipr_clear_res_target(res);
1433                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1434                 }
1435         } else if (!res->sdev || res->del_from_ml) {
1436                 res->add_to_ml = 1;
1437                 if (ioa_cfg->allow_ml_add_del)
1438                         schedule_work(&ioa_cfg->work_q);
1439         }
1440
1441         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1442 }
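/*
 * In short: an unknown handle (new device notification) consumes an
 * entry from free_res_q; a REM_ENTRY notification either schedules the
 * midlayer removal (when a scsi_device is attached) or returns the entry
 * to free_res_q; and the hostrcb is always handed back to the adapter so
 * the next config change can be reported.
 */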
1443
1444 /**
1445  * ipr_process_ccn - Op done function for a CCN.
1446  * @ipr_cmd:    ipr command struct
1447  *
1448  * This function is the op done function for a configuration
1449  * change notification host controlled async message (HCAM) from the adapter.
1450  *
1451  * Return value:
1452  *      none
1453  **/
1454 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1455 {
1456         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1457         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1458         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1459
1460         list_del(&hostrcb->queue);
1461         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1462
1463         if (ioasc) {
1464                 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1465                         dev_err(&ioa_cfg->pdev->dev,
1466                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1467
1468                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1469         } else {
1470                 ipr_handle_config_change(ioa_cfg, hostrcb);
1471         }
1472 }
1473
1474 /**
1475  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1476  * @i:          index into buffer
1477  * @buf:                string to modify
1478  *
1479  * This function will strip all trailing whitespace, pad the end
1480  * of the string with a single space, and NULL terminate the string.
1481  *
1482  * Return value:
1483  *      new length of string
1484  **/
1485 static int strip_and_pad_whitespace(int i, char *buf)
1486 {
1487         while (i && buf[i] == ' ')
1488                 i--;
1489         buf[i+1] = ' ';
1490         buf[i+2] = '\0';
1491         return i + 2;
1492 }
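/*
 * Worked example (illustrative): for buf = "IBM     " called with i = 7,
 * the trailing spaces are stripped back to the 'M', a single space and a
 * NUL are appended ("IBM "), and 4 is returned -- the offset at which the
 * caller copies in the next field, overwriting the NUL.
 */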
1493
1494 /**
1495  * ipr_log_vpd_compact - Log the passed VPD compactly.
1496  * @prefix:             string to print at start of printk
1497  * @hostrcb:    hostrcb pointer
1498  * @vpd:                vendor/product id/sn struct
1499  *
1500  * Return value:
1501  *      none
1502  **/
1503 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1504                                 struct ipr_vpd *vpd)
1505 {
1506         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1507         int i = 0;
1508
1509         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1510         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1511
1512         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1513         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1514
1515         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1516         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1517
1518         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1519 }
1520
1521 /**
1522  * ipr_log_vpd - Log the passed VPD to the error log.
1523  * @vpd:                vendor/product id/sn struct
1524  *
1525  * Return value:
1526  *      none
1527  **/
1528 static void ipr_log_vpd(struct ipr_vpd *vpd)
1529 {
1530         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1531                     + IPR_SERIAL_NUM_LEN];
1532
1533         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1534         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1535                IPR_PROD_ID_LEN);
1536         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1537         ipr_err("Vendor/Product ID: %s\n", buffer);
1538
1539         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1540         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1541         ipr_err("    Serial Number: %s\n", buffer);
1542 }
1543
1544 /**
1545  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1546  * @prefix:             string to print at start of printk
1547  * @hostrcb:    hostrcb pointer
1548  * @vpd:                vendor/product id/sn/wwn struct
1549  *
1550  * Return value:
1551  *      none
1552  **/
1553 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1554                                     struct ipr_ext_vpd *vpd)
1555 {
1556         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1557         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1558                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1559 }
1560
1561 /**
1562  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1563  * @vpd:                vendor/product id/sn/wwn struct
1564  *
1565  * Return value:
1566  *      none
1567  **/
1568 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1569 {
1570         ipr_log_vpd(&vpd->vpd);
1571         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1572                 be32_to_cpu(vpd->wwid[1]));
1573 }
1574
1575 /**
1576  * ipr_log_enhanced_cache_error - Log a cache error.
1577  * @ioa_cfg:    ioa config struct
1578  * @hostrcb:    hostrcb struct
1579  *
1580  * Return value:
1581  *      none
1582  **/
1583 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1584                                          struct ipr_hostrcb *hostrcb)
1585 {
1586         struct ipr_hostrcb_type_12_error *error;
1587
1588         if (ioa_cfg->sis64)
1589                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1590         else
1591                 error = &hostrcb->hcam.u.error.u.type_12_error;
1592
1593         ipr_err("-----Current Configuration-----\n");
1594         ipr_err("Cache Directory Card Information:\n");
1595         ipr_log_ext_vpd(&error->ioa_vpd);
1596         ipr_err("Adapter Card Information:\n");
1597         ipr_log_ext_vpd(&error->cfc_vpd);
1598
1599         ipr_err("-----Expected Configuration-----\n");
1600         ipr_err("Cache Directory Card Information:\n");
1601         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1602         ipr_err("Adapter Card Information:\n");
1603         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1604
1605         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1606                      be32_to_cpu(error->ioa_data[0]),
1607                      be32_to_cpu(error->ioa_data[1]),
1608                      be32_to_cpu(error->ioa_data[2]));
1609 }
1610
1611 /**
1612  * ipr_log_cache_error - Log a cache error.
1613  * @ioa_cfg:    ioa config struct
1614  * @hostrcb:    hostrcb struct
1615  *
1616  * Return value:
1617  *      none
1618  **/
1619 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1620                                 struct ipr_hostrcb *hostrcb)
1621 {
1622         struct ipr_hostrcb_type_02_error *error =
1623                 &hostrcb->hcam.u.error.u.type_02_error;
1624
1625         ipr_err("-----Current Configuration-----\n");
1626         ipr_err("Cache Directory Card Information:\n");
1627         ipr_log_vpd(&error->ioa_vpd);
1628         ipr_err("Adapter Card Information:\n");
1629         ipr_log_vpd(&error->cfc_vpd);
1630
1631         ipr_err("-----Expected Configuration-----\n");
1632         ipr_err("Cache Directory Card Information:\n");
1633         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1634         ipr_err("Adapter Card Information:\n");
1635         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1636
1637         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1638                      be32_to_cpu(error->ioa_data[0]),
1639                      be32_to_cpu(error->ioa_data[1]),
1640                      be32_to_cpu(error->ioa_data[2]));
1641 }
1642
1643 /**
1644  * ipr_log_enhanced_config_error - Log a configuration error.
1645  * @ioa_cfg:    ioa config struct
1646  * @hostrcb:    hostrcb struct
1647  *
1648  * Return value:
1649  *      none
1650  **/
1651 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1652                                           struct ipr_hostrcb *hostrcb)
1653 {
1654         int errors_logged, i;
1655         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1656         struct ipr_hostrcb_type_13_error *error;
1657
1658         error = &hostrcb->hcam.u.error.u.type_13_error;
1659         errors_logged = be32_to_cpu(error->errors_logged);
1660
1661         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1662                 be32_to_cpu(error->errors_detected), errors_logged);
1663
1664         dev_entry = error->dev;
1665
1666         for (i = 0; i < errors_logged; i++, dev_entry++) {
1667                 ipr_err_separator;
1668
1669                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1670                 ipr_log_ext_vpd(&dev_entry->vpd);
1671
1672                 ipr_err("-----New Device Information-----\n");
1673                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1674
1675                 ipr_err("Cache Directory Card Information:\n");
1676                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1677
1678                 ipr_err("Adapter Card Information:\n");
1679                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1680         }
1681 }
1682
1683 /**
1684  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1685  * @ioa_cfg:    ioa config struct
1686  * @hostrcb:    hostrcb struct
1687  *
1688  * Return value:
1689  *      none
1690  **/
1691 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1692                                        struct ipr_hostrcb *hostrcb)
1693 {
1694         int errors_logged, i;
1695         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1696         struct ipr_hostrcb_type_23_error *error;
1697         char buffer[IPR_MAX_RES_PATH_LENGTH];
1698
1699         error = &hostrcb->hcam.u.error64.u.type_23_error;
1700         errors_logged = be32_to_cpu(error->errors_logged);
1701
1702         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1703                 be32_to_cpu(error->errors_detected), errors_logged);
1704
1705         dev_entry = error->dev;
1706
1707         for (i = 0; i < errors_logged; i++, dev_entry++) {
1708                 ipr_err_separator;
1709
1710                 ipr_err("Device %d : %s", i + 1,
1711                         __ipr_format_res_path(dev_entry->res_path,
1712                                               buffer, sizeof(buffer)));
1713                 ipr_log_ext_vpd(&dev_entry->vpd);
1714
1715                 ipr_err("-----New Device Information-----\n");
1716                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1717
1718                 ipr_err("Cache Directory Card Information:\n");
1719                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1720
1721                 ipr_err("Adapter Card Information:\n");
1722                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1723         }
1724 }
1725
1726 /**
1727  * ipr_log_config_error - Log a configuration error.
1728  * @ioa_cfg:    ioa config struct
1729  * @hostrcb:    hostrcb struct
1730  *
1731  * Return value:
1732  *      none
1733  **/
1734 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1735                                  struct ipr_hostrcb *hostrcb)
1736 {
1737         int errors_logged, i;
1738         struct ipr_hostrcb_device_data_entry *dev_entry;
1739         struct ipr_hostrcb_type_03_error *error;
1740
1741         error = &hostrcb->hcam.u.error.u.type_03_error;
1742         errors_logged = be32_to_cpu(error->errors_logged);
1743
1744         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1745                 be32_to_cpu(error->errors_detected), errors_logged);
1746
1747         dev_entry = error->dev;
1748
1749         for (i = 0; i < errors_logged; i++, dev_entry++) {
1750                 ipr_err_separator;
1751
1752                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1753                 ipr_log_vpd(&dev_entry->vpd);
1754
1755                 ipr_err("-----New Device Information-----\n");
1756                 ipr_log_vpd(&dev_entry->new_vpd);
1757
1758                 ipr_err("Cache Directory Card Information:\n");
1759                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1760
1761                 ipr_err("Adapter Card Information:\n");
1762                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1763
1764                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1765                         be32_to_cpu(dev_entry->ioa_data[0]),
1766                         be32_to_cpu(dev_entry->ioa_data[1]),
1767                         be32_to_cpu(dev_entry->ioa_data[2]),
1768                         be32_to_cpu(dev_entry->ioa_data[3]),
1769                         be32_to_cpu(dev_entry->ioa_data[4]));
1770         }
1771 }
1772
1773 /**
1774  * ipr_log_enhanced_array_error - Log an array configuration error.
1775  * @ioa_cfg:    ioa config struct
1776  * @hostrcb:    hostrcb struct
1777  *
1778  * Return value:
1779  *      none
1780  **/
1781 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1782                                          struct ipr_hostrcb *hostrcb)
1783 {
1784         int i, num_entries;
1785         struct ipr_hostrcb_type_14_error *error;
1786         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1787         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1788
1789         error = &hostrcb->hcam.u.error.u.type_14_error;
1790
1791         ipr_err_separator;
1792
1793         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1794                 error->protection_level,
1795                 ioa_cfg->host->host_no,
1796                 error->last_func_vset_res_addr.bus,
1797                 error->last_func_vset_res_addr.target,
1798                 error->last_func_vset_res_addr.lun);
1799
1800         ipr_err_separator;
1801
1802         array_entry = error->array_member;
1803         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1804                             ARRAY_SIZE(error->array_member));
1805
1806         for (i = 0; i < num_entries; i++, array_entry++) {
1807                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1808                         continue;
1809
1810                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1811                         ipr_err("Exposed Array Member %d:\n", i);
1812                 else
1813                         ipr_err("Array Member %d:\n", i);
1814
1815                 ipr_log_ext_vpd(&array_entry->vpd);
1816                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1817                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1818                                  "Expected Location");
1819
1820                 ipr_err_separator;
1821         }
1822 }
1823
1824 /**
1825  * ipr_log_array_error - Log an array configuration error.
1826  * @ioa_cfg:    ioa config struct
1827  * @hostrcb:    hostrcb struct
1828  *
1829  * Return value:
1830  *      none
1831  **/
1832 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1833                                 struct ipr_hostrcb *hostrcb)
1834 {
1835         int i;
1836         struct ipr_hostrcb_type_04_error *error;
1837         struct ipr_hostrcb_array_data_entry *array_entry;
1838         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1839
1840         error = &hostrcb->hcam.u.error.u.type_04_error;
1841
1842         ipr_err_separator;
1843
1844         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1845                 error->protection_level,
1846                 ioa_cfg->host->host_no,
1847                 error->last_func_vset_res_addr.bus,
1848                 error->last_func_vset_res_addr.target,
1849                 error->last_func_vset_res_addr.lun);
1850
1851         ipr_err_separator;
1852
1853         array_entry = error->array_member;
1854
1855         for (i = 0; i < 18; i++) {
1856                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1857                         continue;
1858
1859                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1860                         ipr_err("Exposed Array Member %d:\n", i);
1861                 else
1862                         ipr_err("Array Member %d:\n", i);
1863
1864                 ipr_log_vpd(&array_entry->vpd);
1865
1866                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1867                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1868                                  "Expected Location");
1869
1870                 ipr_err_separator;
1871
1872                 if (i == 9)
1873                         array_entry = error->array_member2;
1874                 else
1875                         array_entry++;
1876         }
1877 }
1878
1879 /**
1880  * ipr_log_hex_data - Log additional hex IOA error data.
1881  * @ioa_cfg:    ioa config struct
1882  * @data:               IOA error data
1883  * @len:                data length
1884  *
1885  * Return value:
1886  *      none
1887  **/
1888 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1889 {
1890         int i;
1891
1892         if (len == 0)
1893                 return;
1894
1895         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1896                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1897
1898         for (i = 0; i < len / 4; i += 4) {
1899                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1900                         be32_to_cpu(data[i]),
1901                         be32_to_cpu(data[i+1]),
1902                         be32_to_cpu(data[i+2]),
1903                         be32_to_cpu(data[i+3]));
1904         }
1905 }
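/*
 * Example output (illustrative), 16 bytes per line with the byte offset
 * of the first word up front:
 *
 *      00000000: 04080000 00000003 00000000 00000000
 *      00000010: ...
 *
 * i counts words and steps by 4, so i*4 is the byte offset of each line.
 */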
1906
1907 /**
1908  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1909  * @ioa_cfg:    ioa config struct
1910  * @hostrcb:    hostrcb struct
1911  *
1912  * Return value:
1913  *      none
1914  **/
1915 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1916                                             struct ipr_hostrcb *hostrcb)
1917 {
1918         struct ipr_hostrcb_type_17_error *error;
1919
1920         if (ioa_cfg->sis64)
1921                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1922         else
1923                 error = &hostrcb->hcam.u.error.u.type_17_error;
1924
1925         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1926         strim(error->failure_reason);
1927
1928         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1929                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1930         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1931         ipr_log_hex_data(ioa_cfg, error->data,
1932                          be32_to_cpu(hostrcb->hcam.length) -
1933                          (offsetof(struct ipr_hostrcb_error, u) +
1934                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1935 }
1936
1937 /**
1938  * ipr_log_dual_ioa_error - Log a dual adapter error.
1939  * @ioa_cfg:    ioa config struct
1940  * @hostrcb:    hostrcb struct
1941  *
1942  * Return value:
1943  *      none
1944  **/
1945 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1946                                    struct ipr_hostrcb *hostrcb)
1947 {
1948         struct ipr_hostrcb_type_07_error *error;
1949
1950         error = &hostrcb->hcam.u.error.u.type_07_error;
1951         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1952         strim(error->failure_reason);
1953
1954         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1955                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1956         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1957         ipr_log_hex_data(ioa_cfg, error->data,
1958                          be32_to_cpu(hostrcb->hcam.length) -
1959                          (offsetof(struct ipr_hostrcb_error, u) +
1960                           offsetof(struct ipr_hostrcb_type_07_error, data)));
1961 }
1962
1963 static const struct {
1964         u8 active;
1965         char *desc;
1966 } path_active_desc[] = {
1967         { IPR_PATH_NO_INFO, "Path" },
1968         { IPR_PATH_ACTIVE, "Active path" },
1969         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1970 };
1971
1972 static const struct {
1973         u8 state;
1974         char *desc;
1975 } path_state_desc[] = {
1976         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1977         { IPR_PATH_HEALTHY, "is healthy" },
1978         { IPR_PATH_DEGRADED, "is degraded" },
1979         { IPR_PATH_FAILED, "is failed" }
1980 };
1981
1982 /**
1983  * ipr_log_fabric_path - Log a fabric path error
1984  * @hostrcb:    hostrcb struct
1985  * @fabric:             fabric descriptor
1986  *
1987  * Return value:
1988  *      none
1989  **/
1990 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1991                                 struct ipr_hostrcb_fabric_desc *fabric)
1992 {
1993         int i, j;
1994         u8 path_state = fabric->path_state;
1995         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1996         u8 state = path_state & IPR_PATH_STATE_MASK;
1997
1998         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1999                 if (path_active_desc[i].active != active)
2000                         continue;
2001
2002                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2003                         if (path_state_desc[j].state != state)
2004                                 continue;
2005
2006                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2007                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2008                                              path_active_desc[i].desc, path_state_desc[j].desc,
2009                                              fabric->ioa_port);
2010                         } else if (fabric->cascaded_expander == 0xff) {
2011                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2012                                              path_active_desc[i].desc, path_state_desc[j].desc,
2013                                              fabric->ioa_port, fabric->phy);
2014                         } else if (fabric->phy == 0xff) {
2015                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2016                                              path_active_desc[i].desc, path_state_desc[j].desc,
2017                                              fabric->ioa_port, fabric->cascaded_expander);
2018                         } else {
2019                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2020                                              path_active_desc[i].desc, path_state_desc[j].desc,
2021                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2022                         }
2023                         return;
2024                 }
2025         }
2026
2027         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2028                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2029 }
2030
2031 /**
2032  * ipr_log64_fabric_path - Log a fabric path error
2033  * @hostrcb:    hostrcb struct
2034  * @fabric:             fabric descriptor
2035  *
2036  * Return value:
2037  *      none
2038  **/
2039 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2040                                   struct ipr_hostrcb64_fabric_desc *fabric)
2041 {
2042         int i, j;
2043         u8 path_state = fabric->path_state;
2044         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2045         u8 state = path_state & IPR_PATH_STATE_MASK;
2046         char buffer[IPR_MAX_RES_PATH_LENGTH];
2047
2048         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2049                 if (path_active_desc[i].active != active)
2050                         continue;
2051
2052                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2053                         if (path_state_desc[j].state != state)
2054                                 continue;
2055
2056                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2057                                      path_active_desc[i].desc, path_state_desc[j].desc,
2058                                      ipr_format_res_path(hostrcb->ioa_cfg,
2059                                                 fabric->res_path,
2060                                                 buffer, sizeof(buffer)));
2061                         return;
2062                 }
2063         }
2064
2065         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2066                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2067                                     buffer, sizeof(buffer)));
2068 }
2069
2070 static const struct {
2071         u8 type;
2072         char *desc;
2073 } path_type_desc[] = {
2074         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2075         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2076         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2077         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2078 };
2079
2080 static const struct {
2081         u8 status;
2082         char *desc;
2083 } path_status_desc[] = {
2084         { IPR_PATH_CFG_NO_PROB, "Functional" },
2085         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2086         { IPR_PATH_CFG_FAILED, "Failed" },
2087         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2088         { IPR_PATH_NOT_DETECTED, "Missing" },
2089         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2090 };
2091
2092 static const char *link_rate[] = {
2093         "unknown",
2094         "disabled",
2095         "phy reset problem",
2096         "spinup hold",
2097         "port selector",
2098         "unknown",
2099         "unknown",
2100         "unknown",
2101         "1.5Gbps",
2102         "3.0Gbps",
2103         "unknown",
2104         "unknown",
2105         "unknown",
2106         "unknown",
2107         "unknown",
2108         "unknown"
2109 };
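/*
 * link_rate[] is indexed by the low four bits of cfg->link_rate
 * (IPR_PHY_LINK_RATE_MASK); codes 0x8 and 0x9 are the SAS 1.5Gbps and
 * 3.0Gbps negotiated rates, and the low codes are the standard "no rate"
 * reasons (disabled, phy reset problem, spinup hold, port selector).
 */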
2110
2111 /**
2112  * ipr_log_path_elem - Log a fabric path element.
2113  * @hostrcb:    hostrcb struct
2114  * @cfg:                fabric path element struct
2115  *
2116  * Return value:
2117  *      none
2118  **/
2119 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2120                               struct ipr_hostrcb_config_element *cfg)
2121 {
2122         int i, j;
2123         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2124         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2125
2126         if (type == IPR_PATH_CFG_NOT_EXIST)
2127                 return;
2128
2129         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2130                 if (path_type_desc[i].type != type)
2131                         continue;
2132
2133                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2134                         if (path_status_desc[j].status != status)
2135                                 continue;
2136
2137                         if (type == IPR_PATH_CFG_IOA_PORT) {
2138                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2139                                              path_status_desc[j].desc, path_type_desc[i].desc,
2140                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2141                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2142                         } else {
2143                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2144                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2145                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2146                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2147                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2148                                 } else if (cfg->cascaded_expander == 0xff) {
2149                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2150                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2151                                                      path_type_desc[i].desc, cfg->phy,
2152                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2153                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2154                                 } else if (cfg->phy == 0xff) {
2155                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2156                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2157                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2158                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2159                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2160                                 } else {
2161                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s, "
2162                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2163                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2164                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2165                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2166                                 }
2167                         }
2168                         return;
2169                 }
2170         }
2171
2172         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2173                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2174                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2175                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2176 }
2177
2178 /**
2179  * ipr_log64_path_elem - Log a fabric path element.
2180  * @hostrcb:    hostrcb struct
2181  * @cfg:                fabric path element struct
2182  *
2183  * Return value:
2184  *      none
2185  **/
2186 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2187                                 struct ipr_hostrcb64_config_element *cfg)
2188 {
2189         int i, j;
2190         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2191         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2192         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2193         char buffer[IPR_MAX_RES_PATH_LENGTH];
2194
2195         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2196                 return;
2197
2198         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2199                 if (path_type_desc[i].type != type)
2200                         continue;
2201
2202                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2203                         if (path_status_desc[j].status != status)
2204                                 continue;
2205
2206                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2207                                      path_status_desc[j].desc, path_type_desc[i].desc,
2208                                      ipr_format_res_path(hostrcb->ioa_cfg,
2209                                         cfg->res_path, buffer, sizeof(buffer)),
2210                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2211                                         be32_to_cpu(cfg->wwid[0]),
2212                                         be32_to_cpu(cfg->wwid[1]));
2213                         return;
2214                 }
2215         }
2216         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s, "
2217                      "WWN=%08X%08X\n", cfg->type_status,
2218                      ipr_format_res_path(hostrcb->ioa_cfg,
2219                         cfg->res_path, buffer, sizeof(buffer)),
2220                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2221                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2222 }
2223
2224 /**
2225  * ipr_log_fabric_error - Log a fabric error.
2226  * @ioa_cfg:    ioa config struct
2227  * @hostrcb:    hostrcb struct
2228  *
2229  * Return value:
2230  *      none
2231  **/
2232 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2233                                  struct ipr_hostrcb *hostrcb)
2234 {
2235         struct ipr_hostrcb_type_20_error *error;
2236         struct ipr_hostrcb_fabric_desc *fabric;
2237         struct ipr_hostrcb_config_element *cfg;
2238         int i, add_len;
2239
2240         error = &hostrcb->hcam.u.error.u.type_20_error;
2241         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2242         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2243
2244         add_len = be32_to_cpu(hostrcb->hcam.length) -
2245                 (offsetof(struct ipr_hostrcb_error, u) +
2246                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2247
2248         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2249                 ipr_log_fabric_path(hostrcb, fabric);
2250                 for_each_fabric_cfg(fabric, cfg)
2251                         ipr_log_path_elem(hostrcb, cfg);
2252
2253                 add_len -= be16_to_cpu(fabric->length);
2254                 fabric = (struct ipr_hostrcb_fabric_desc *)
2255                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2256         }
2257
2258         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2259 }
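/*
 * The fabric descriptors are variable length, so the walk above advances
 * by fabric->length each time; whatever bytes remain after the last
 * descriptor are dumped raw as hex for analysis.
 */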
2260
2261 /**
2262  * ipr_log_sis64_array_error - Log a sis64 array error.
2263  * @ioa_cfg:    ioa config struct
2264  * @hostrcb:    hostrcb struct
2265  *
2266  * Return value:
2267  *      none
2268  **/
2269 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2270                                       struct ipr_hostrcb *hostrcb)
2271 {
2272         int i, num_entries;
2273         struct ipr_hostrcb_type_24_error *error;
2274         struct ipr_hostrcb64_array_data_entry *array_entry;
2275         char buffer[IPR_MAX_RES_PATH_LENGTH];
2276         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2277
2278         error = &hostrcb->hcam.u.error64.u.type_24_error;
2279
2280         ipr_err_separator;
2281
2282         ipr_err("RAID %s Array Configuration: %s\n",
2283                 error->protection_level,
2284                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2285                         buffer, sizeof(buffer)));
2286
2287         ipr_err_separator;
2288
2289         array_entry = error->array_member;
2290         num_entries = min_t(u32, error->num_entries,
2291                             ARRAY_SIZE(error->array_member));
2292
2293         for (i = 0; i < num_entries; i++, array_entry++) {
2294
2295                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2296                         continue;
2297
2298                 if (error->exposed_mode_adn == i)
2299                         ipr_err("Exposed Array Member %d:\n", i);
2300                 else
2301                         ipr_err("Array Member %d:\n", i);
2302
2304                 ipr_log_ext_vpd(&array_entry->vpd);
2305                 ipr_err("Current Location: %s\n",
2306                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2307                                 buffer, sizeof(buffer)));
2308                 ipr_err("Expected Location: %s\n",
2309                          ipr_format_res_path(ioa_cfg,
2310                                 array_entry->expected_res_path,
2311                                 buffer, sizeof(buffer)));
2312
2313                 ipr_err_separator;
2314         }
2315 }
2316
2317 /**
2318  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2319  * @ioa_cfg:    ioa config struct
2320  * @hostrcb:    hostrcb struct
2321  *
2322  * Return value:
2323  *      none
2324  **/
2325 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2326                                        struct ipr_hostrcb *hostrcb)
2327 {
2328         struct ipr_hostrcb_type_30_error *error;
2329         struct ipr_hostrcb64_fabric_desc *fabric;
2330         struct ipr_hostrcb64_config_element *cfg;
2331         int i, add_len;
2332
2333         error = &hostrcb->hcam.u.error64.u.type_30_error;
2334
2335         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2336         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2337
2338         add_len = be32_to_cpu(hostrcb->hcam.length) -
2339                 (offsetof(struct ipr_hostrcb64_error, u) +
2340                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2341
2342         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2343                 ipr_log64_fabric_path(hostrcb, fabric);
2344                 for_each_fabric_cfg(fabric, cfg)
2345                         ipr_log64_path_elem(hostrcb, cfg);
2346
2347                 add_len -= be16_to_cpu(fabric->length);
2348                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2349                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2350         }
2351
2352         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2353 }
2354
2355 /**
2356  * ipr_log_generic_error - Log an adapter error.
2357  * @ioa_cfg:    ioa config struct
2358  * @hostrcb:    hostrcb struct
2359  *
2360  * Return value:
2361  *      none
2362  **/
2363 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2364                                   struct ipr_hostrcb *hostrcb)
2365 {
2366         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2367                          be32_to_cpu(hostrcb->hcam.length));
2368 }
2369
2370 /**
2371  * ipr_log_sis64_device_error - Log a sis64 device error.
2372  * @ioa_cfg:    ioa config struct
2373  * @hostrcb:    hostrcb struct
2374  *
2375  * Return value:
2376  *      none
2377  **/
2378 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2379                                          struct ipr_hostrcb *hostrcb)
2380 {
2381         struct ipr_hostrcb_type_21_error *error;
2382         char buffer[IPR_MAX_RES_PATH_LENGTH];
2383
2384         error = &hostrcb->hcam.u.error64.u.type_21_error;
2385
2386         ipr_err("-----Failing Device Information-----\n");
2387         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2388                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2389                  be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2390         ipr_err("Device Resource Path: %s\n",
2391                 __ipr_format_res_path(error->res_path,
2392                                       buffer, sizeof(buffer)));
2393         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2394         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2395         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2396         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2397         ipr_err("SCSI Sense Data:\n");
2398         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2399         ipr_err("SCSI Command Descriptor Block: \n");
2400         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2401
2402         ipr_err("Additional IOA Data:\n");
2403         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2404 }
2405
2406 /**
2407  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2408  * @ioasc:      IOASC
2409  *
2410  * This function will return the index into the ipr_error_table
2411  * for the specified IOASC. If the IOASC is not in the table,
2412  * 0 will be returned, which points to the entry used for unknown errors.
2413  *
2414  * Return value:
2415  *      index into the ipr_error_table
2416  **/
2417 static u32 ipr_get_error(u32 ioasc)
2418 {
2419         int i;
2420
2421         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2422                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2423                         return i;
2424
2425         return 0;
2426 }
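/*
 * Entry 0 of ipr_error_table is the catch-all "unknown error" entry, so
 * the returned index is always safe to use, e.g.:
 *
 *      error_index = ipr_get_error(ioasc);
 *      ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 */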
2427
2428 /**
2429  * ipr_handle_log_data - Log an adapter error.
2430  * @ioa_cfg:    ioa config struct
2431  * @hostrcb:    hostrcb struct
2432  *
2433  * This function logs an adapter error to the system.
2434  *
2435  * Return value:
2436  *      none
2437  **/
2438 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2439                                 struct ipr_hostrcb *hostrcb)
2440 {
2441         u32 ioasc;
2442         int error_index;
2443         struct ipr_hostrcb_type_21_error *error;
2444
2445         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2446                 return;
2447
2448         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2449                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2450
2451         if (ioa_cfg->sis64)
2452                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2453         else
2454                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2455
2456         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2457             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2458                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2459                 scsi_report_bus_reset(ioa_cfg->host,
2460                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2461         }
2462
2463         error_index = ipr_get_error(ioasc);
2464
2465         if (!ipr_error_table[error_index].log_hcam)
2466                 return;
2467
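        /*
         * For overlay 21 hardware command failures, peek at the sense key
         * (byte 2 of the fixed-format sense data, i.e. bits 8-15 of the
         * first big-endian word) and suppress ILLEGAL_REQUEST noise at
         * the default log level.
         */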
2468         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2469             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2470                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2471
2472                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2473                         ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2474                                 return;
2475         }
2476
2477         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2478
2479         /* Set indication we have logged an error */
2480         ioa_cfg->errors_logged++;
2481
2482         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2483                 return;
2484         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2485                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2486
2487         switch (hostrcb->hcam.overlay_id) {
2488         case IPR_HOST_RCB_OVERLAY_ID_2:
2489                 ipr_log_cache_error(ioa_cfg, hostrcb);
2490                 break;
2491         case IPR_HOST_RCB_OVERLAY_ID_3:
2492                 ipr_log_config_error(ioa_cfg, hostrcb);
2493                 break;
2494         case IPR_HOST_RCB_OVERLAY_ID_4:
2495         case IPR_HOST_RCB_OVERLAY_ID_6:
2496                 ipr_log_array_error(ioa_cfg, hostrcb);
2497                 break;
2498         case IPR_HOST_RCB_OVERLAY_ID_7:
2499                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2500                 break;
2501         case IPR_HOST_RCB_OVERLAY_ID_12:
2502                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2503                 break;
2504         case IPR_HOST_RCB_OVERLAY_ID_13:
2505                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2506                 break;
2507         case IPR_HOST_RCB_OVERLAY_ID_14:
2508         case IPR_HOST_RCB_OVERLAY_ID_16:
2509                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2510                 break;
2511         case IPR_HOST_RCB_OVERLAY_ID_17:
2512                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2513                 break;
2514         case IPR_HOST_RCB_OVERLAY_ID_20:
2515                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2516                 break;
2517         case IPR_HOST_RCB_OVERLAY_ID_21:
2518                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2519                 break;
2520         case IPR_HOST_RCB_OVERLAY_ID_23:
2521                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2522                 break;
2523         case IPR_HOST_RCB_OVERLAY_ID_24:
2524         case IPR_HOST_RCB_OVERLAY_ID_26:
2525                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2526                 break;
2527         case IPR_HOST_RCB_OVERLAY_ID_30:
2528                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2529                 break;
2530         case IPR_HOST_RCB_OVERLAY_ID_1:
2531         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2532         default:
2533                 ipr_log_generic_error(ioa_cfg, hostrcb);
2534                 break;
2535         }
2536 }
2537
2538 /**
2539  * ipr_process_error - Op done function for an adapter error log.
2540  * @ipr_cmd:    ipr command struct
2541  *
2542  * This function is the op done function for an error log host
2543  * controlled async message (HCAM) from the adapter. It will log the error and
2544  * send the HCAM back to the adapter.
2545  *
2546  * Return value:
2547  *      none
2548  **/
2549 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2550 {
2551         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2552         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2553         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2554         u32 fd_ioasc;
2555
2556         if (ioa_cfg->sis64)
2557                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2558         else
2559                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2560
2561         list_del(&hostrcb->queue);
2562         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2563
2564         if (!ioasc) {
2565                 ipr_handle_log_data(ioa_cfg, hostrcb);
2566                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2567                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2568         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2569                 dev_err(&ioa_cfg->pdev->dev,
2570                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2571         }
2572
2573         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2574 }
2575
2576 /**
2577  * ipr_timeout -  An internally generated op has timed out.
2578  * @ipr_cmd:    ipr command struct
2579  *
2580  * This function blocks host requests and initiates an
2581  * adapter reset.
2582  *
2583  * Return value:
2584  *      none
2585  **/
2586 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2587 {
2588         unsigned long lock_flags = 0;
2589         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2590
2591         ENTER;
2592         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2593
2594         ioa_cfg->errors_logged++;
2595         dev_err(&ioa_cfg->pdev->dev,
2596                 "Adapter being reset due to command timeout.\n");
2597
2598         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2599                 ioa_cfg->sdt_state = GET_DUMP;
2600
2601         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2602                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2603
2604         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2605         LEAVE;
2606 }
2607
2608 /**
2609  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2610  * @ipr_cmd:    ipr command struct
2611  *
2612  * This function blocks host requests and initiates an
2613  * adapter reset.
2614  *
2615  * Return value:
2616  *      none
2617  **/
2618 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2619 {
2620         unsigned long lock_flags = 0;
2621         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2622
2623         ENTER;
2624         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2625
2626         ioa_cfg->errors_logged++;
2627         dev_err(&ioa_cfg->pdev->dev,
2628                 "Adapter timed out transitioning to operational.\n");
2629
2630         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2631                 ioa_cfg->sdt_state = GET_DUMP;
2632
2633         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2634                 if (ipr_fastfail)
2635                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2636                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2637         }
2638
2639         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2640         LEAVE;
2641 }
2642
2643 /**
2644  * ipr_find_ses_entry - Find matching SES in SES table
2645  * @res:        resource entry struct of SES
2646  *
2647  * Return value:
2648  *      pointer to SES table entry / NULL on failure
2649  **/
2650 static const struct ipr_ses_table_entry *
2651 ipr_find_ses_entry(struct ipr_resource_entry *res)
2652 {
2653         int i, j, matches;
2654         struct ipr_std_inq_vpids *vpids;
2655         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2656
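        /*
         * compare_product_id_byte[] acts as a per-byte mask: an 'X' means
         * "this product ID byte must match the device's inquiry data",
         * any other character is a wildcard that always matches.
         */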
2657         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2658                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2659                         if (ste->compare_product_id_byte[j] == 'X') {
2660                                 vpids = &res->std_inq_data.vpids;
2661                                 if (vpids->product_id[j] == ste->product_id[j])
2662                                         matches++;
2663                                 else
2664                                         break;
2665                         } else
2666                                 matches++;
2667                 }
2668
2669                 if (matches == IPR_PROD_ID_LEN)
2670                         return ste;
2671         }
2672
2673         return NULL;
2674 }
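/*
 * Illustration only (hypothetical table entry, for clarity of the 'X'
 * semantics above): given an entry such as
 *
 *	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 }
 *
 * every compare byte is 'X', so all IPR_PROD_ID_LEN bytes of the
 * inquiry product ID must equal the table's product ID for a match.
 * A compare byte other than 'X' acts as a wildcard and always counts
 * as a match for that position.
 */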
2675
2676 /**
2677  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2678  * @ioa_cfg:    ioa config struct
2679  * @bus:                SCSI bus
2680  * @bus_width:  bus width
2681  *
2682  * Return value:
2683  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2684  *      For a 2-byte wide SCSI bus, the maximum data rate in MB/sec
2685  *      is twice the bus clock rate in MHz (e.g. for a wide enabled
2686  *      bus, a max 160 MHz clock gives max 320 MB/sec).
2687  **/
2688 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2689 {
2690         struct ipr_resource_entry *res;
2691         const struct ipr_ses_table_entry *ste;
2692         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2693
2694         /* Loop through each config table entry in the config table buffer */
2695         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2696                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2697                         continue;
2698
2699                 if (bus != res->bus)
2700                         continue;
2701
2702                 if (!(ste = ipr_find_ses_entry(res)))
2703                         continue;
2704
2705                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2706         }
2707
2708         return max_xfer_rate;
2709 }
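/*
 * Worked example (illustrative values): for a 16-bit wide bus and a
 * matching SES entry with max_bus_speed_limit == 80, the loop yields
 * (80 * 10) / (16 / 8) == 400, i.e. 40 MHz in the 100 kHz units
 * described above.  With no matching SES entry, the default
 * IPR_MAX_SCSI_RATE(bus_width) limit is returned unchanged.
 */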
2710
2711 /**
2712  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2713  * @ioa_cfg:            ioa config struct
2714  * @max_delay:          max delay in micro-seconds to wait
2715  *
2716  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2717  *
2718  * Return value:
2719  *      0 on success / other on failure
2720  **/
2721 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2722 {
2723         volatile u32 pcii_reg;
2724         int delay = 1;
2725
2726         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2727         while (delay < max_delay) {
2728                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2729
2730                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2731                         return 0;
2732
2733                 /* udelay cannot be used if delay is more than a few milliseconds */
2734                 if ((delay / 1000) > MAX_UDELAY_MS)
2735                         mdelay(delay / 1000);
2736                 else
2737                         udelay(delay);
2738
2739                 delay += delay;
2740         }
2741         return -EIO;
2742 }
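/*
 * Note on the wait loop above: the delay doubles on each iteration
 * (1, 2, 4, ... microseconds), so the total busy-wait before giving
 * up with -EIO is bounded by roughly 2 * max_delay microseconds.
 */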
2743
2744 /**
2745  * ipr_get_sis64_dump_data_section - Dump IOA memory
2746  * @ioa_cfg:                    ioa config struct
2747  * @start_addr:                 adapter address to dump
2748  * @dest:                       destination kernel buffer
2749  * @length_in_words:            length to dump in 4 byte words
2750  *
2751  * Return value:
2752  *      0 on success
2753  **/
2754 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2755                                            u32 start_addr,
2756                                            __be32 *dest, u32 length_in_words)
2757 {
2758         int i;
2759
2760         for (i = 0; i < length_in_words; i++) {
2761                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2762                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2763                 dest++;
2764         }
2765
2766         return 0;
2767 }
2768
2769 /**
2770  * ipr_get_ldump_data_section - Dump IOA memory
2771  * @ioa_cfg:                    ioa config struct
2772  * @start_addr:                 adapter address to dump
2773  * @dest:                               destination kernel buffer
2774  * @length_in_words:    length to dump in 4 byte words
2775  *
2776  * Return value:
2777  *      0 on success / -EIO on failure
2778  **/
2779 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2780                                       u32 start_addr,
2781                                       __be32 *dest, u32 length_in_words)
2782 {
2783         volatile u32 temp_pcii_reg;
2784         int i, delay = 0;
2785
2786         if (ioa_cfg->sis64)
2787                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2788                                                        dest, length_in_words);
2789
2790         /* Write IOA interrupt reg starting LDUMP state  */
2791         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2792                ioa_cfg->regs.set_uproc_interrupt_reg32);
2793
2794         /* Wait for IO debug acknowledge */
2795         if (ipr_wait_iodbg_ack(ioa_cfg,
2796                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2797                 dev_err(&ioa_cfg->pdev->dev,
2798                         "IOA dump long data transfer timeout\n");
2799                 return -EIO;
2800         }
2801
2802         /* Signal LDUMP interlocked - clear IO debug ack */
2803         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2804                ioa_cfg->regs.clr_interrupt_reg);
2805
2806         /* Write Mailbox with starting address */
2807         writel(start_addr, ioa_cfg->ioa_mailbox);
2808
2809         /* Signal address valid - clear IOA Reset alert */
2810         writel(IPR_UPROCI_RESET_ALERT,
2811                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2812
2813         for (i = 0; i < length_in_words; i++) {
2814                 /* Wait for IO debug acknowledge */
2815                 if (ipr_wait_iodbg_ack(ioa_cfg,
2816                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2817                         dev_err(&ioa_cfg->pdev->dev,
2818                                 "IOA dump short data transfer timeout\n");
2819                         return -EIO;
2820                 }
2821
2822                 /* Read data from mailbox and increment destination pointer */
2823                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2824                 dest++;
2825
2826                 /* For all but the last word of data, signal data received */
2827                 if (i < (length_in_words - 1)) {
2828                         /* Signal dump data received - Clear IO debug Ack */
2829                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2830                                ioa_cfg->regs.clr_interrupt_reg);
2831                 }
2832         }
2833
2834         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2835         writel(IPR_UPROCI_RESET_ALERT,
2836                ioa_cfg->regs.set_uproc_interrupt_reg32);
2837
2838         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2839                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2840
2841         /* Signal dump data received - Clear IO debug Ack */
2842         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2843                ioa_cfg->regs.clr_interrupt_reg);
2844
2845         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2846         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2847                 temp_pcii_reg =
2848                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2849
2850                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2851                         return 0;
2852
2853                 udelay(10);
2854                 delay += 10;
2855         }
2856
2857         return 0;
2858 }
2859
2860 #ifdef CONFIG_SCSI_IPR_DUMP
2861 /**
2862  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2863  * @ioa_cfg:            ioa config struct
2864  * @pci_address:        adapter address
2865  * @length:                     length of data to copy
2866  *
2867  * Copy data from PCI adapter to kernel buffer.
2868  * Note: length MUST be a 4 byte multiple
2869  * Return value:
2870  *      0 on success / other on failure
2871  **/
2872 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2873                         unsigned long pci_address, u32 length)
2874 {
2875         int bytes_copied = 0;
2876         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2877         __be32 *page;
2878         unsigned long lock_flags = 0;
2879         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2880
2881         if (ioa_cfg->sis64)
2882                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2883         else
2884                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2885
2886         while (bytes_copied < length &&
2887                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2888                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2889                     ioa_dump->page_offset == 0) {
2890                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2891
2892                         if (!page) {
2893                                 ipr_trace;
2894                                 return bytes_copied;
2895                         }
2896
2897                         ioa_dump->page_offset = 0;
2898                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2899                         ioa_dump->next_page_index++;
2900                 } else
2901                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2902
2903                 rem_len = length - bytes_copied;
2904                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2905                 cur_len = min(rem_len, rem_page_len);
2906
2907                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2908                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2909                         rc = -EIO;
2910                 } else {
2911                         rc = ipr_get_ldump_data_section(ioa_cfg,
2912                                                         pci_address + bytes_copied,
2913                                                         &page[ioa_dump->page_offset / 4],
2914                                                         (cur_len / sizeof(u32)));
2915                 }
2916                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2917
2918                 if (!rc) {
2919                         ioa_dump->page_offset += cur_len;
2920                         bytes_copied += cur_len;
2921                 } else {
2922                         ipr_trace;
2923                         break;
2924                 }
2925                 schedule();
2926         }
2927
2928         return bytes_copied;
2929 }
2930
2931 /**
2932  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2933  * @hdr:        dump entry header struct
2934  *
2935  * Return value:
2936  *      nothing
2937  **/
2938 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2939 {
2940         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2941         hdr->num_elems = 1;
2942         hdr->offset = sizeof(*hdr);
2943         hdr->status = IPR_DUMP_STATUS_SUCCESS;
2944 }
2945
2946 /**
2947  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2948  * @ioa_cfg:    ioa config struct
2949  * @driver_dump:        driver dump struct
2950  *
2951  * Return value:
2952  *      nothing
2953  **/
2954 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2955                                    struct ipr_driver_dump *driver_dump)
2956 {
2957         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2958
2959         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2960         driver_dump->ioa_type_entry.hdr.len =
2961                 sizeof(struct ipr_dump_ioa_type_entry) -
2962                 sizeof(struct ipr_dump_entry_header);
2963         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2964         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2965         driver_dump->ioa_type_entry.type = ioa_cfg->type;
2966         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2967                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2968                 ucode_vpd->minor_release[1];
2969         driver_dump->hdr.num_entries++;
2970 }
2971
2972 /**
2973  * ipr_dump_version_data - Fill in the driver version in the dump.
2974  * @ioa_cfg:    ioa config struct
2975  * @driver_dump:        driver dump struct
2976  *
2977  * Return value:
2978  *      nothing
2979  **/
2980 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2981                                   struct ipr_driver_dump *driver_dump)
2982 {
2983         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2984         driver_dump->version_entry.hdr.len =
2985                 sizeof(struct ipr_dump_version_entry) -
2986                 sizeof(struct ipr_dump_entry_header);
2987         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2988         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2989         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2990         driver_dump->hdr.num_entries++;
2991 }
2992
2993 /**
2994  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2995  * @ioa_cfg:    ioa config struct
2996  * @driver_dump:        driver dump struct
2997  *
2998  * Return value:
2999  *      nothing
3000  **/
3001 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3002                                    struct ipr_driver_dump *driver_dump)
3003 {
3004         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3005         driver_dump->trace_entry.hdr.len =
3006                 sizeof(struct ipr_dump_trace_entry) -
3007                 sizeof(struct ipr_dump_entry_header);
3008         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3009         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3010         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3011         driver_dump->hdr.num_entries++;
3012 }
3013
3014 /**
3015  * ipr_dump_location_data - Fill in the IOA location in the dump.
3016  * @ioa_cfg:    ioa config struct
3017  * @driver_dump:        driver dump struct
3018  *
3019  * Return value:
3020  *      nothing
3021  **/
3022 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3023                                    struct ipr_driver_dump *driver_dump)
3024 {
3025         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3026         driver_dump->location_entry.hdr.len =
3027                 sizeof(struct ipr_dump_location_entry) -
3028                 sizeof(struct ipr_dump_entry_header);
3029         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3030         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3031         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3032         driver_dump->hdr.num_entries++;
3033 }
3034
3035 /**
3036  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3037  * @ioa_cfg:    ioa config struct
3038  * @dump:               dump struct
3039  *
3040  * Return value:
3041  *      nothing
3042  **/
3043 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3044 {
3045         unsigned long start_addr, sdt_word;
3046         unsigned long lock_flags = 0;
3047         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3048         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3049         u32 num_entries, max_num_entries, start_off, end_off;
3050         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3051         struct ipr_sdt *sdt;
3052         int valid = 1;
3053         int i;
3054
3055         ENTER;
3056
3057         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3058
3059         if (ioa_cfg->sdt_state != READ_DUMP) {
3060                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3061                 return;
3062         }
3063
3064         if (ioa_cfg->sis64) {
3065                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3066                 ssleep(IPR_DUMP_DELAY_SECONDS);
3067                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3068         }
3069
3070         start_addr = readl(ioa_cfg->ioa_mailbox);
3071
3072         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3073                 dev_err(&ioa_cfg->pdev->dev,
3074                         "Invalid dump table format: %lx\n", start_addr);
3075                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3076                 return;
3077         }
3078
3079         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3080
3081         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3082
3083         /* Initialize the overall dump header */
3084         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3085         driver_dump->hdr.num_entries = 1;
3086         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3087         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3088         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3089         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3090
3091         ipr_dump_version_data(ioa_cfg, driver_dump);
3092         ipr_dump_location_data(ioa_cfg, driver_dump);
3093         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3094         ipr_dump_trace_data(ioa_cfg, driver_dump);
3095
3096         /* Update dump_header */
3097         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3098
3099         /* IOA Dump entry */
3100         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3101         ioa_dump->hdr.len = 0;
3102         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3103         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3104
3105         /* First entries in sdt are actually a list of dump addresses and
3106          * lengths to gather the real dump data.  sdt represents the pointer
3107          * to the ioa generated dump table.  Dump data will be extracted based
3108          * on entries in this table */
3109         sdt = &ioa_dump->sdt;
3110
3111         if (ioa_cfg->sis64) {
3112                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3113                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3114         } else {
3115                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3116                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3117         }
3118
3119         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3120                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3121         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3122                                         bytes_to_copy / sizeof(__be32));
3123
3124         /* Smart Dump table is ready to use and the first entry is valid */
3125         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3126             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3127                 dev_err(&ioa_cfg->pdev->dev,
3128                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3129                         rc, be32_to_cpu(sdt->hdr.state));
3130                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3131                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3132                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3133                 return;
3134         }
3135
3136         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3137
3138         if (num_entries > max_num_entries)
3139                 num_entries = max_num_entries;
3140
3141         /* Update dump length to the actual data to be copied */
3142         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3143         if (ioa_cfg->sis64)
3144                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3145         else
3146                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3147
3148         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3149
3150         for (i = 0; i < num_entries; i++) {
3151                 if (ioa_dump->hdr.len > max_dump_size) {
3152                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3153                         break;
3154                 }
3155
3156                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3157                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3158                         if (ioa_cfg->sis64)
3159                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3160                         else {
3161                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3162                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3163
3164                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3165                                         bytes_to_copy = end_off - start_off;
3166                                 else
3167                                         valid = 0;
3168                         }
3169                         if (valid) {
3170                                 if (bytes_to_copy > max_dump_size) {
3171                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3172                                         continue;
3173                                 }
3174
3175                                 /* Copy data from adapter to driver buffers */
3176                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3177                                                             bytes_to_copy);
3178
3179                                 ioa_dump->hdr.len += bytes_copied;
3180
3181                                 if (bytes_copied != bytes_to_copy) {
3182                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3183                                         break;
3184                                 }
3185                         }
3186                 }
3187         }
3188
3189         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3190
3191         /* Update dump_header */
3192         driver_dump->hdr.len += ioa_dump->hdr.len;
3193         wmb();
3194         ioa_cfg->sdt_state = DUMP_OBTAINED;
3195         LEAVE;
3196 }
3197
3198 #else
3199 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3200 #endif
3201
3202 /**
3203  * ipr_release_dump - Free adapter dump memory
3204  * @kref:       kref struct
3205  *
3206  * Return value:
3207  *      nothing
3208  **/
3209 static void ipr_release_dump(struct kref *kref)
3210 {
3211         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3212         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3213         unsigned long lock_flags = 0;
3214         int i;
3215
3216         ENTER;
3217         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3218         ioa_cfg->dump = NULL;
3219         ioa_cfg->sdt_state = INACTIVE;
3220         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3221
3222         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3223                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3224
3225         vfree(dump->ioa_dump.ioa_data);
3226         kfree(dump);
3227         LEAVE;
3228 }
3229
3230 /**
3231  * ipr_worker_thread - Worker thread
3232  * @work:               work struct (embedded in the ioa config struct)
3233  *
3234  * Called at task level from a work thread. This function takes care
3235  * of adding and removing devices from the mid-layer as configuration
3236  * changes are detected by the adapter.
3237  *
3238  * Return value:
3239  *      nothing
3240  **/
3241 static void ipr_worker_thread(struct work_struct *work)
3242 {
3243         unsigned long lock_flags;
3244         struct ipr_resource_entry *res;
3245         struct scsi_device *sdev;
3246         struct ipr_dump *dump;
3247         struct ipr_ioa_cfg *ioa_cfg =
3248                 container_of(work, struct ipr_ioa_cfg, work_q);
3249         u8 bus, target, lun;
3250         int did_work;
3251
3252         ENTER;
3253         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3254
3255         if (ioa_cfg->sdt_state == READ_DUMP) {
3256                 dump = ioa_cfg->dump;
3257                 if (!dump) {
3258                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3259                         return;
3260                 }
3261                 kref_get(&dump->kref);
3262                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3263                 ipr_get_ioa_dump(ioa_cfg, dump);
3264                 kref_put(&dump->kref, ipr_release_dump);
3265
3266                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3267                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3268                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3269                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3270                 return;
3271         }
3272
3273 restart:
3274         do {
3275                 did_work = 0;
3276                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3277                     !ioa_cfg->allow_ml_add_del) {
3278                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3279                         return;
3280                 }
3281
3282                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3283                         if (res->del_from_ml && res->sdev) {
3284                                 did_work = 1;
3285                                 sdev = res->sdev;
3286                                 if (!scsi_device_get(sdev)) {
3287                                         if (!res->add_to_ml)
3288                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3289                                         else
3290                                                 res->del_from_ml = 0;
3291                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3292                                         scsi_remove_device(sdev);
3293                                         scsi_device_put(sdev);
3294                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3295                                 }
3296                                 break;
3297                         }
3298                 }
3299         } while (did_work);
3300
3301         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3302                 if (res->add_to_ml) {
3303                         bus = res->bus;
3304                         target = res->target;
3305                         lun = res->lun;
3306                         res->add_to_ml = 0;
3307                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3308                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3309                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3310                         goto restart;
3311                 }
3312         }
3313
3314         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3315         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3316         LEAVE;
3317 }
3318
3319 #ifdef CONFIG_SCSI_IPR_TRACE
3320 /**
3321  * ipr_read_trace - Dump the adapter trace
3322  * @filp:               open sysfs file
3323  * @kobj:               kobject struct
3324  * @bin_attr:           bin_attribute struct
3325  * @buf:                buffer
3326  * @off:                offset
3327  * @count:              buffer size
3328  *
3329  * Return value:
3330  *      number of bytes printed to buffer
3331  **/
3332 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3333                               struct bin_attribute *bin_attr,
3334                               char *buf, loff_t off, size_t count)
3335 {
3336         struct device *dev = container_of(kobj, struct device, kobj);
3337         struct Scsi_Host *shost = class_to_shost(dev);
3338         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3339         unsigned long lock_flags = 0;
3340         ssize_t ret;
3341
3342         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3343         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3344                                 IPR_TRACE_SIZE);
3345         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3346
3347         return ret;
3348 }
3349
3350 static struct bin_attribute ipr_trace_attr = {
3351         .attr = {
3352                 .name = "trace",
3353                 .mode = S_IRUGO,
3354         },
3355         .size = 0,
3356         .read = ipr_read_trace,
3357 };
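/*
 * Example usage (hypothetical host number; assumes the attribute is
 * registered on the Scsi_Host class device):
 *
 *	# cat /sys/class/scsi_host/host0/trace > ipr_trace.bin
 *
 * The read is bounded by IPR_TRACE_SIZE and is serialized against the
 * adapter by taking the host lock.
 */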
3358 #endif
3359
3360 /**
3361  * ipr_show_fw_version - Show the firmware version
3362  * @dev:        class device struct
3363  * @buf:        buffer
3364  *
3365  * Return value:
3366  *      number of bytes printed to buffer
3367  **/
3368 static ssize_t ipr_show_fw_version(struct device *dev,
3369                                    struct device_attribute *attr, char *buf)
3370 {
3371         struct Scsi_Host *shost = class_to_shost(dev);
3372         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3373         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3374         unsigned long lock_flags = 0;
3375         int len;
3376
3377         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3378         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3379                        ucode_vpd->major_release, ucode_vpd->card_type,
3380                        ucode_vpd->minor_release[0],
3381                        ucode_vpd->minor_release[1]);
3382         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3383         return len;
3384 }
3385
3386 static struct device_attribute ipr_fw_version_attr = {
3387         .attr = {
3388                 .name =         "fw_version",
3389                 .mode =         S_IRUGO,
3390         },
3391         .show = ipr_show_fw_version,
3392 };
3393
3394 /**
3395  * ipr_show_log_level - Show the adapter's error logging level
3396  * @dev:        class device struct
3397  * @buf:        buffer
3398  *
3399  * Return value:
3400  *      number of bytes printed to buffer
3401  **/
3402 static ssize_t ipr_show_log_level(struct device *dev,
3403                                    struct device_attribute *attr, char *buf)
3404 {
3405         struct Scsi_Host *shost = class_to_shost(dev);
3406         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3407         unsigned long lock_flags = 0;
3408         int len;
3409
3410         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3411         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3412         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3413         return len;
3414 }
3415
3416 /**
3417  * ipr_store_log_level - Change the adapter's error logging level
3418  * @dev:        class device struct
3419  * @buf:        buffer
3420  *
3421  * Return value:
3422  *      number of bytes consumed from the buffer
3423  **/
3424 static ssize_t ipr_store_log_level(struct device *dev,
3425                                    struct device_attribute *attr,
3426                                    const char *buf, size_t count)
3427 {
3428         struct Scsi_Host *shost = class_to_shost(dev);
3429         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3430         unsigned long lock_flags = 0;
3431
3432         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3433         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3434         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3435         return strlen(buf);
3436 }
3437
3438 static struct device_attribute ipr_log_level_attr = {
3439         .attr = {
3440                 .name =         "log_level",
3441                 .mode =         S_IRUGO | S_IWUSR,
3442         },
3443         .show = ipr_show_log_level,
3444         .store = ipr_store_log_level
3445 };
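/*
 * Example usage (hypothetical host number):
 *
 *	# cat /sys/class/scsi_host/host0/log_level
 *	# echo 4 > /sys/class/scsi_host/host0/log_level
 *
 * The written value is parsed as a base-10 unsigned long and simply
 * replaces ioa_cfg->log_level under the host lock.
 */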
3446
3447 /**
3448  * ipr_store_diagnostics - IOA Diagnostics interface
3449  * @dev:        device struct
3450  * @buf:        buffer
3451  * @count:      buffer size
3452  *
3453  * This function will reset the adapter and wait a reasonable
3454  * amount of time for any errors that the adapter might log.
3455  *
3456  * Return value:
3457  *      count on success / other on failure
3458  **/
3459 static ssize_t ipr_store_diagnostics(struct device *dev,
3460                                      struct device_attribute *attr,
3461                                      const char *buf, size_t count)
3462 {
3463         struct Scsi_Host *shost = class_to_shost(dev);
3464         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3465         unsigned long lock_flags = 0;
3466         int rc = count;
3467
3468         if (!capable(CAP_SYS_ADMIN))
3469                 return -EACCES;
3470
3471         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3472         while (ioa_cfg->in_reset_reload) {
3473                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3474                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3475                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3476         }
3477
3478         ioa_cfg->errors_logged = 0;
3479         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3480
3481         if (ioa_cfg->in_reset_reload) {
3482                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3483                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3484
3485                 /* Wait for a second for any errors to be logged */
3486                 msleep(1000);
3487         } else {
3488                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3489                 return -EIO;
3490         }
3491
3492         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3493         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3494                 rc = -EIO;
3495         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3496
3497         return rc;
3498 }
3499
3500 static struct device_attribute ipr_diagnostics_attr = {
3501         .attr = {
3502                 .name =         "run_diagnostics",
3503                 .mode =         S_IWUSR,
3504         },
3505         .store = ipr_store_diagnostics
3506 };
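/*
 * Example usage (hypothetical host number; only the write itself
 * matters, the written value is ignored):
 *
 *	# echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 *
 * The write returns count when the reset completes without the
 * adapter logging errors, and -EIO otherwise.
 */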
3507
3508 /**
3509  * ipr_show_adapter_state - Show the adapter's state
3510  * @dev:        device struct
3511  * @buf:        buffer
3512  *
3513  * Return value:
3514  *      number of bytes printed to buffer
3515  **/
3516 static ssize_t ipr_show_adapter_state(struct device *dev,
3517                                       struct device_attribute *attr, char *buf)
3518 {
3519         struct Scsi_Host *shost = class_to_shost(dev);
3520         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3521         unsigned long lock_flags = 0;
3522         int len;
3523
3524         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3525         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3526                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3527         else
3528                 len = snprintf(buf, PAGE_SIZE, "online\n");
3529         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3530         return len;
3531 }
3532
3533 /**
3534  * ipr_store_adapter_state - Change adapter state
3535  * @dev:        device struct
3536  * @buf:        buffer
3537  * @count:      buffer size
3538  *
3539  * This function will change the adapter's state.
3540  *
3541  * Return value:
3542  *      count on success / other on failure
3543  **/
3544 static ssize_t ipr_store_adapter_state(struct device *dev,
3545                                        struct device_attribute *attr,
3546                                        const char *buf, size_t count)
3547 {
3548         struct Scsi_Host *shost = class_to_shost(dev);
3549         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3550         unsigned long lock_flags;
3551         int result = count, i;
3552
3553         if (!capable(CAP_SYS_ADMIN))
3554                 return -EACCES;
3555
3556         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3557         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3558             !strncmp(buf, "online", 6)) {
3559                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3560                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3561                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3562                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3563                 }
3564                 wmb();
3565                 ioa_cfg->reset_retries = 0;
3566                 ioa_cfg->in_ioa_bringdown = 0;
3567                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3568         }
3569         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3570         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3571
3572         return result;
3573 }
3574
3575 static struct device_attribute ipr_ioa_state_attr = {
3576         .attr = {
3577                 .name =         "online_state",
3578                 .mode =         S_IRUGO | S_IWUSR,
3579         },
3580         .show = ipr_show_adapter_state,
3581         .store = ipr_store_adapter_state
3582 };
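/*
 * Example usage (hypothetical host number): bringing a dead adapter
 * back online.  Writes other than "online" to a dead adapter, or any
 * write to a live one, are accepted but have no effect.
 *
 *	# cat /sys/class/scsi_host/host0/online_state
 *	offline
 *	# echo online > /sys/class/scsi_host/host0/online_state
 */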
3583
3584 /**
3585  * ipr_store_reset_adapter - Reset the adapter
3586  * @dev:        device struct
3587  * @buf:        buffer
3588  * @count:      buffer size
3589  *
3590  * This function will reset the adapter.
3591  *
3592  * Return value:
3593  *      count on success / other on failure
3594  **/
3595 static ssize_t ipr_store_reset_adapter(struct device *dev,
3596                                        struct device_attribute *attr,
3597                                        const char *buf, size_t count)
3598 {
3599         struct Scsi_Host *shost = class_to_shost(dev);
3600         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3601         unsigned long lock_flags;
3602         int result = count;
3603
3604         if (!capable(CAP_SYS_ADMIN))
3605                 return -EACCES;
3606
3607         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3608         if (!ioa_cfg->in_reset_reload)
3609                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3610         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3611         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3612
3613         return result;
3614 }
3615
3616 static struct device_attribute ipr_ioa_reset_attr = {
3617         .attr = {
3618                 .name =         "reset_host",
3619                 .mode =         S_IWUSR,
3620         },
3621         .store = ipr_store_reset_adapter
3622 };
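/*
 * Example usage (hypothetical host number; the written value is
 * ignored, the write itself requests a normal shutdown and reset):
 *
 *	# echo 1 > /sys/class/scsi_host/host0/reset_host
 *
 * The write blocks until the reset/reload has completed.
 */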
3623
3624 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3625 /**
3626  * ipr_show_iopoll_weight - Show ipr polling mode
3627  * @dev:        class device struct
3628  * @buf:        buffer
3629  *
3630  * Return value:
3631  *      number of bytes printed to buffer
3632  **/
3633 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3634                                    struct device_attribute *attr, char *buf)
3635 {
3636         struct Scsi_Host *shost = class_to_shost(dev);
3637         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3638         unsigned long lock_flags = 0;
3639         int len;
3640
3641         spin_lock_irqsave(shost->host_lock, lock_flags);
3642         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3643         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3644
3645         return len;
3646 }
3647
3648 /**
3649  * ipr_store_iopoll_weight - Change the adapter's polling mode
3650  * @dev:        class device struct
3651  * @buf:        buffer
3652  *
3653  * Return value:
3654  *      number of bytes consumed from the buffer
3655  **/
3656 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3657                                         struct device_attribute *attr,
3658                                         const char *buf, size_t count)
3659 {
3660         struct Scsi_Host *shost = class_to_shost(dev);
3661         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3662         unsigned long user_iopoll_weight;
3663         unsigned long lock_flags = 0;
3664         int i;
3665
3666         if (!ioa_cfg->sis64) {
3667                 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3668                 return -EINVAL;
3669         }
3670         if (kstrtoul(buf, 10, &user_iopoll_weight))
3671                 return -EINVAL;
3672
3673         if (user_iopoll_weight > 256) {
3674                 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
3675                 return -EINVAL;
3676         }
3677
3678         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3679                 dev_info(&ioa_cfg->pdev->dev, "Specified blk-iopoll weight matches the current weight\n");
3680                 return strlen(buf);
3681         }
3682
3683         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3684                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3685                         blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3686         }
3687
3688         spin_lock_irqsave(shost->host_lock, lock_flags);
3689         ioa_cfg->iopoll_weight = user_iopoll_weight;
3690         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3691                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3692                         blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3693                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3694                         blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3695                 }
3696         }
3697         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3698
3699         return strlen(buf);
3700 }
3701
3702 static struct device_attribute ipr_iopoll_weight_attr = {
3703         .attr = {
3704                 .name =         "iopoll_weight",
3705                 .mode =         S_IRUGO | S_IWUSR,
3706         },
3707         .show = ipr_show_iopoll_weight,
3708         .store = ipr_store_iopoll_weight
3709 };
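/*
 * Example usage (hypothetical host number; only meaningful on sis64
 * adapters with more than one HRRQ vector):
 *
 *	# echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 *
 * A weight of 0 disables blk-iopoll; non-zero weights up to 256
 * re-initialize polling on every HRRQ except HRRQ 0.
 */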
3710
3711 /**
3712  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3713  * @buf_len:            buffer length
3714  *
3715  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3716  * list to use for microcode download
3717  *
3718  * Return value:
3719  *      pointer to sglist / NULL on failure
3720  **/
3721 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3722 {
3723         int sg_size, order, bsize_elem, num_elem, i, j;
3724         struct ipr_sglist *sglist;
3725         struct scatterlist *scatterlist;
3726         struct page *page;
3727
3728         /* Get the minimum size per scatter/gather element */
3729         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3730
3731         /* Get the actual size per element */
3732         order = get_order(sg_size);
3733
3734         /* Determine the actual number of bytes per element */
3735         bsize_elem = PAGE_SIZE * (1 << order);
3736
3737         /* Determine the actual number of sg entries needed */
3738         if (buf_len % bsize_elem)
3739                 num_elem = (buf_len / bsize_elem) + 1;
3740         else
3741                 num_elem = buf_len / bsize_elem;
3742
3743         /* Allocate a scatter/gather list for the DMA */
3744         sglist = kzalloc(sizeof(struct ipr_sglist) +
3745                          (sizeof(struct scatterlist) * (num_elem - 1)),
3746                          GFP_KERNEL);
3747
3748         if (sglist == NULL) {
3749                 ipr_trace;
3750                 return NULL;
3751         }
3752
3753         scatterlist = sglist->scatterlist;
3754         sg_init_table(scatterlist, num_elem);
3755
3756         sglist->order = order;
3757         sglist->num_sg = num_elem;
3758
3759         /* Allocate a bunch of sg elements */
3760         for (i = 0; i < num_elem; i++) {
3761                 page = alloc_pages(GFP_KERNEL, order);
3762                 if (!page) {
3763                         ipr_trace;
3764
3765                         /* Free up what we already allocated */
3766                         for (j = i - 1; j >= 0; j--)
3767                                 __free_pages(sg_page(&scatterlist[j]), order);
3768                         kfree(sglist);
3769                         return NULL;
3770                 }
3771
3772                 sg_set_page(&scatterlist[i], page, 0, 0);
3773         }
3774
3775         return sglist;
3776 }
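/*
 * Worked sizing example (assumes IPR_MAX_SGLIST == 64 and a 4 KiB
 * PAGE_SIZE; illustration only): for a 1 MiB image, sg_size is
 * 1048576 / 63 ~= 16644 bytes, get_order() rounds that up to
 * order 3 (32 KiB), so bsize_elem == 32768 and num_elem == 32
 * scatter/gather elements are allocated.
 */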
3777
3778 /**
3779  * ipr_free_ucode_buffer - Frees a microcode download buffer
3780  * @sglist:             scatter/gather list pointer
3781  *
3782  * Free a DMA'able ucode download buffer previously allocated with
3783  * ipr_alloc_ucode_buffer
3784  *
3785  * Return value:
3786  *      nothing
3787  **/
3788 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3789 {
3790         int i;
3791
3792         for (i = 0; i < sglist->num_sg; i++)
3793                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3794
3795         kfree(sglist);
3796 }
3797
3798 /**
3799  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3800  * @sglist:             scatter/gather list pointer
3801  * @buffer:             buffer pointer
3802  * @len:                buffer length
3803  *
3804  * Copy a microcode image from a user buffer into a buffer allocated by
3805  * ipr_alloc_ucode_buffer
3806  *
3807  * Return value:
3808  *      0 on success / other on failure
3809  **/
3810 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3811                                  u8 *buffer, u32 len)
3812 {
3813         int bsize_elem, i, result = 0;
3814         struct scatterlist *scatterlist;
3815         void *kaddr;
3816
3817         /* Determine the actual number of bytes per element */
3818         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3819
3820         scatterlist = sglist->scatterlist;
3821
3822         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3823                 struct page *page = sg_page(&scatterlist[i]);
3824
3825                 kaddr = kmap(page);
3826                 memcpy(kaddr, buffer, bsize_elem);
3827                 kunmap(page);
3828
3829                 scatterlist[i].length = bsize_elem;
3830
3831                 if (result != 0) {
3832                         ipr_trace;
3833                         return result;
3834                 }
3835         }
3836
3837         if (len % bsize_elem) {
3838                 struct page *page = sg_page(&scatterlist[i]);
3839
3840                 kaddr = kmap(page);
3841                 memcpy(kaddr, buffer, len % bsize_elem);
3842                 kunmap(page);
3843
3844                 scatterlist[i].length = len % bsize_elem;
3845         }
3846
3847         sglist->buffer_len = len;
3848         return result;
3849 }
3850
3851 /**
3852  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3853  * @ipr_cmd:            ipr command struct
3854  * @sglist:             scatter/gather list
3855  *
3856  * Builds a microcode download IOA data list (IOADL).
3857  *
3858  **/
3859 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3860                                     struct ipr_sglist *sglist)
3861 {
3862         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3863         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3864         struct scatterlist *scatterlist = sglist->scatterlist;
3865         int i;
3866
3867         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3868         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3869         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3870
3871         ioarcb->ioadl_len =
3872                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3873         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3874                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3875                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3876                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3877         }
3878
3879         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3880 }
3881
3882 /**
3883  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3884  * @ipr_cmd:    ipr command struct
3885  * @sglist:             scatter/gather list
3886  *
3887  * Builds a microcode download IOA data list (IOADL).
3888  *
3889  **/
3890 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3891                                   struct ipr_sglist *sglist)
3892 {
3893         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3894         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3895         struct scatterlist *scatterlist = sglist->scatterlist;
3896         int i;
3897
3898         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3899         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3900         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3901
3902         ioarcb->ioadl_len =
3903                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3904
3905         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3906                 ioadl[i].flags_and_data_len =
3907                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3908                 ioadl[i].address =
3909                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3910         }
3911
3912         ioadl[i-1].flags_and_data_len |=
3913                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3914 }
3915
3916 /**
3917  * ipr_update_ioa_ucode - Update IOA's microcode
3918  * @ioa_cfg:    ioa config struct
3919  * @sglist:             scatter/gather list
3920  *
3921  * Initiate an adapter reset to update the IOA's microcode
3922  *
3923  * Return value:
3924  *      0 on success / -EIO on failure
3925  **/
3926 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3927                                 struct ipr_sglist *sglist)
3928 {
3929         unsigned long lock_flags;
3930
3931         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3932         while (ioa_cfg->in_reset_reload) {
3933                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3934                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3935                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3936         }
3937
3938         if (ioa_cfg->ucode_sglist) {
3939                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3940                 dev_err(&ioa_cfg->pdev->dev,
3941                         "Microcode download already in progress\n");
3942                 return -EIO;
3943         }
3944
3945         sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3946                                         sglist->num_sg, PCI_DMA_TODEVICE);
3947
3948         if (!sglist->num_dma_sg) {
3949                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3950                 dev_err(&ioa_cfg->pdev->dev,
3951                         "Failed to map microcode download buffer!\n");
3952                 return -EIO;
3953         }
3954
3955         ioa_cfg->ucode_sglist = sglist;
3956         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3957         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3958         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3959
3960         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3961         ioa_cfg->ucode_sglist = NULL;
3962         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3963         return 0;
3964 }
3965
3966 /**
3967  * ipr_store_update_fw - Update the firmware on the adapter
3968  * @dev:        device struct
3969  * @buf:        buffer
3970  * @count:      buffer size
3971  *
3972  * This function will update the firmware on the adapter.
3973  *
3974  * Return value:
3975  *      count on success / other on failure
3976  **/
3977 static ssize_t ipr_store_update_fw(struct device *dev,
3978                                    struct device_attribute *attr,
3979                                    const char *buf, size_t count)
3980 {
3981         struct Scsi_Host *shost = class_to_shost(dev);
3982         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3983         struct ipr_ucode_image_header *image_hdr;
3984         const struct firmware *fw_entry;
3985         struct ipr_sglist *sglist;
3986         char fname[100];
3987         u8 *src;
3988         int len, result, dnld_size;
3989
3990         if (!capable(CAP_SYS_ADMIN))
3991                 return -EACCES;
3992
3993         len = scnprintf(fname, sizeof(fname), "%s", buf);
3994         if (len && fname[len - 1] == '\n') fname[len - 1] = '\0';
3995
3996         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3997                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3998                 return -EIO;
3999         }
4000
4001         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4002
4003         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4004         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4005         sglist = ipr_alloc_ucode_buffer(dnld_size);
4006
4007         if (!sglist) {
4008                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4009                 release_firmware(fw_entry);
4010                 return -ENOMEM;
4011         }
4012
4013         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4014
4015         if (result) {
4016                 dev_err(&ioa_cfg->pdev->dev,
4017                         "Microcode buffer copy to DMA buffer failed\n");
4018                 goto out;
4019         }
4020
4021         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4022
4023         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4024
4025         if (!result)
4026                 result = count;
4027 out:
4028         ipr_free_ucode_buffer(sglist);
4029         release_firmware(fw_entry);
4030         return result;
4031 }
4032
4033 static struct device_attribute ipr_update_fw_attr = {
4034         .attr = {
4035                 .name =         "update_fw",
4036                 .mode =         S_IWUSR,
4037         },
4038         .store = ipr_store_update_fw
4039 };
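/*
 * Example usage (hypothetical file name; the image must be visible to
 * the firmware loader, typically under /lib/firmware):
 *
 *	# echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The write returns count on success and blocks for the duration of
 * the download and the adapter reset, which can take many minutes.
 */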
4040
4041 /**
4042  * ipr_show_fw_type - Show the adapter's firmware type.
4043  * @dev:        class device struct
4044  * @buf:        buffer
4045  *
4046  * Return value:
4047  *      number of bytes printed to buffer
4048  **/
4049 static ssize_t ipr_show_fw_type(struct device *dev,
4050                                 struct device_attribute *attr, char *buf)
4051 {
4052         struct Scsi_Host *shost = class_to_shost(dev);
4053         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4054         unsigned long lock_flags = 0;
4055         int len;
4056
4057         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4058         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4059         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4060         return len;
4061 }
4062
4063 static struct device_attribute ipr_ioa_fw_type_attr = {
4064         .attr = {
4065                 .name =         "fw_type",
4066                 .mode =         S_IRUGO,
4067         },
4068         .show = ipr_show_fw_type
4069 };
4070
4071 static struct device_attribute *ipr_ioa_attrs[] = {
4072         &ipr_fw_version_attr,
4073         &ipr_log_level_attr,
4074         &ipr_diagnostics_attr,
4075         &ipr_ioa_state_attr,
4076         &ipr_ioa_reset_attr,
4077         &ipr_update_fw_attr,
4078         &ipr_ioa_fw_type_attr,
4079         &ipr_iopoll_weight_attr,
4080         NULL,
4081 };
4082
4083 #ifdef CONFIG_SCSI_IPR_DUMP
4084 /**
4085  * ipr_read_dump - Dump the adapter
4086  * @filp:               open sysfs file
4087  * @kobj:               kobject struct
4088  * @bin_attr:           bin_attribute struct
4089  * @buf:                buffer
4090  * @off:                offset
4091  * @count:              buffer size
4092  *
4093  * Return value:
4094  *      number of bytes read from the dump
4095  **/
4096 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4097                              struct bin_attribute *bin_attr,
4098                              char *buf, loff_t off, size_t count)
4099 {
4100         struct device *cdev = container_of(kobj, struct device, kobj);
4101         struct Scsi_Host *shost = class_to_shost(cdev);
4102         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4103         struct ipr_dump *dump;
4104         unsigned long lock_flags = 0;
4105         char *src;
4106         int len, sdt_end;
4107         size_t rc = count;
4108
4109         if (!capable(CAP_SYS_ADMIN))
4110                 return -EACCES;
4111
4112         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4113         dump = ioa_cfg->dump;
4114
4115         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4116                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4117                 return 0;
4118         }
4119         kref_get(&dump->kref);
4120         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4121
4122         if (off > dump->driver_dump.hdr.len) {
4123                 kref_put(&dump->kref, ipr_release_dump);
4124                 return 0;
4125         }
4126
4127         if (off + count > dump->driver_dump.hdr.len) {
4128                 count = dump->driver_dump.hdr.len - off;
4129                 rc = count;
4130         }
4131
4132         if (count && off < sizeof(dump->driver_dump)) {
4133                 if (off + count > sizeof(dump->driver_dump))
4134                         len = sizeof(dump->driver_dump) - off;
4135                 else
4136                         len = count;
4137                 src = (u8 *)&dump->driver_dump + off;
4138                 memcpy(buf, src, len);
4139                 buf += len;
4140                 off += len;
4141                 count -= len;
4142         }
4143
4144         off -= sizeof(dump->driver_dump);
4145
4146         if (ioa_cfg->sis64)
4147                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4148                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4149                            sizeof(struct ipr_sdt_entry));
4150         else
4151                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4152                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4153
4154         if (count && off < sdt_end) {
4155                 if (off + count > sdt_end)
4156                         len = sdt_end - off;
4157                 else
4158                         len = count;
4159                 src = (u8 *)&dump->ioa_dump + off;
4160                 memcpy(buf, src, len);
4161                 buf += len;
4162                 off += len;
4163                 count -= len;
4164         }
4165
4166         off -= sdt_end;
4167
4168         while (count) {
4169                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4170                         len = PAGE_ALIGN(off) - off;
4171                 else
4172                         len = count;
4173                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4174                 src += off & ~PAGE_MASK;
4175                 memcpy(buf, src, len);
4176                 buf += len;
4177                 off += len;
4178                 count -= len;
4179         }
4180
4181         kref_put(&dump->kref, ipr_release_dump);
4182         return rc;
4183 }
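/*
 * The dump image read back above is laid out as three regions: the
 * driver dump header first, then the SDT (sized per the SIS format,
 * as computed for sdt_end), then the IOA dump data itself. The data
 * region lives in an array of pages, so each copy step picks the page
 * with (off & PAGE_MASK) >> PAGE_SHIFT and the offset within it with
 * (off & ~PAGE_MASK).
 */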
4184
4185 /**
4186  * ipr_alloc_dump - Prepare for adapter dump
4187  * @ioa_cfg:    ioa config struct
4188  *
4189  * Return value:
4190  *      0 on success / other on failure
4191  **/
4192 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4193 {
4194         struct ipr_dump *dump;
4195         __be32 **ioa_data;
4196         unsigned long lock_flags = 0;
4197
4198         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4199
4200         if (!dump) {
4201                 ipr_err("Dump memory allocation failed\n");
4202                 return -ENOMEM;
4203         }
4204
4205         if (ioa_cfg->sis64)
4206                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4207         else
4208                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4209
4210         if (!ioa_data) {
4211                 ipr_err("Dump memory allocation failed\n");
4212                 kfree(dump);
4213                 return -ENOMEM;
4214         }
4215
4216         dump->ioa_dump.ioa_data = ioa_data;
4217
4218         kref_init(&dump->kref);
4219         dump->ioa_cfg = ioa_cfg;
4220
4221         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4222
4223         if (INACTIVE != ioa_cfg->sdt_state) {
4224                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4225                 vfree(dump->ioa_dump.ioa_data);
4226                 kfree(dump);
4227                 return 0;
4228         }
4229
4230         ioa_cfg->dump = dump;
4231         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4232         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4233                 ioa_cfg->dump_taken = 1;
4234                 schedule_work(&ioa_cfg->work_q);
4235         }
4236         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4237
4238         return 0;
4239 }
4240
4241 /**
4242  * ipr_free_dump - Free adapter dump memory
4243  * @ioa_cfg:    ioa config struct
4244  *
4245  * Return value:
4246  *      0 on success / other on failure
4247  **/
4248 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4249 {
4250         struct ipr_dump *dump;
4251         unsigned long lock_flags = 0;
4252
4253         ENTER;
4254
4255         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4256         dump = ioa_cfg->dump;
4257         if (!dump) {
4258                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4259                 return 0;
4260         }
4261
4262         ioa_cfg->dump = NULL;
4263         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4264
4265         kref_put(&dump->kref, ipr_release_dump);
4266
4267         LEAVE;
4268         return 0;
4269 }
4270
4271 /**
4272  * ipr_write_dump - Setup dump state of adapter
4273  * @filp:               open sysfs file
4274  * @kobj:               kobject struct
4275  * @bin_attr:           bin_attribute struct
4276  * @buf:                buffer
4277  * @off:                offset
4278  * @count:              buffer size
4279  *
4280  * Return value:
4281  *      number of bytes printed to buffer
4282  *      count on success / negative errno on failure
4283 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4284                               struct bin_attribute *bin_attr,
4285                               char *buf, loff_t off, size_t count)
4286 {
4287         struct device *cdev = container_of(kobj, struct device, kobj);
4288         struct Scsi_Host *shost = class_to_shost(cdev);
4289         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4290         int rc;
4291
4292         if (!capable(CAP_SYS_ADMIN))
4293                 return -EACCES;
4294
4295         if (buf[0] == '1')
4296                 rc = ipr_alloc_dump(ioa_cfg);
4297         else if (buf[0] == '0')
4298                 rc = ipr_free_dump(ioa_cfg);
4299         else
4300                 return -EINVAL;
4301
4302         if (rc)
4303                 return rc;
4304         else
4305                 return count;
4306 }
4307
4308 static struct bin_attribute ipr_dump_attr = {
4309         .attr = {
4310                 .name = "dump",
4311                 .mode = S_IRUSR | S_IWUSR,
4312         },
4313         .size = 0,
4314         .read = ipr_read_dump,
4315         .write = ipr_write_dump
4316 };
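/*
 * A sketch of driving the binary attribute above (hypothetical host
 * number): writing '1' arms dump collection, the dump is readable
 * once the adapter reaches DUMP_OBTAINED, and writing '0' frees it.
 *
 *      # echo 1 > /sys/class/scsi_host/host0/dump
 *      # dd if=/sys/class/scsi_host/host0/dump of=/tmp/ipr.dump
 *      # echo 0 > /sys/class/scsi_host/host0/dump
 */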
4317 #else
4318 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4319 #endif
4320
4321 /**
4322  * ipr_change_queue_depth - Change the device's queue depth
4323  * @sdev:       scsi device struct
4324  * @qdepth:     depth to set
4325  * @reason:     calling context
4326  *
4327  * Return value:
4328  *      actual depth set
4329  **/
4330 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4331                                   int reason)
4332 {
4333         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4334         struct ipr_resource_entry *res;
4335         unsigned long lock_flags = 0;
4336
4337         if (reason != SCSI_QDEPTH_DEFAULT)
4338                 return -EOPNOTSUPP;
4339
4340         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4341         res = (struct ipr_resource_entry *)sdev->hostdata;
4342
4343         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4344                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4345         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4346
4347         scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4348         return sdev->queue_depth;
4349 }
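/*
 * This is reached through the midlayer when user space writes the
 * per-device queue_depth attribute, e.g. (hypothetical device name):
 *
 *      # echo 16 > /sys/block/sda/device/queue_depth
 *
 * GATA (SATA) resources are capped at IPR_MAX_CMD_PER_ATA_LUN.
 */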
4350
4351 /**
4352  * ipr_change_queue_type - Change the device's queue type
4353  * @sdev:       scsi device struct
4354  * @tag_type:   type of tags to use
4355  *
4356  * Return value:
4357  *      actual queue type set
4358  **/
4359 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4360 {
4361         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4362         struct ipr_resource_entry *res;
4363         unsigned long lock_flags = 0;
4364
4365         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4366         res = (struct ipr_resource_entry *)sdev->hostdata;
4367         if (res && ipr_is_gscsi(res))
4368                 tag_type = scsi_change_queue_type(sdev, tag_type);
4369         else
4370                 tag_type = 0;
4371         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4372         return tag_type;
4373 }
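/*
 * Likewise reached through the midlayer via the per-device queue_type
 * attribute; only generic SCSI resources may switch tag type, all
 * others report untagged. For example (hypothetical device name):
 *
 *      # echo simple > /sys/block/sda/device/queue_type
 */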
4374
4375 /**
4376  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4377  * @dev:        device struct
4378  * @attr:       device attribute structure
4379  * @buf:        buffer
4380  *
4381  * Return value:
4382  *      number of bytes printed to buffer
4383  **/
4384 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4385 {
4386         struct scsi_device *sdev = to_scsi_device(dev);
4387         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4388         struct ipr_resource_entry *res;
4389         unsigned long lock_flags = 0;
4390         ssize_t len = -ENXIO;
4391
4392         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4393         res = (struct ipr_resource_entry *)sdev->hostdata;
4394         if (res)
4395                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4396         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4397         return len;
4398 }
4399
4400 static struct device_attribute ipr_adapter_handle_attr = {
4401         .attr = {
4402                 .name =         "adapter_handle",
4403                 .mode =         S_IRUSR,
4404         },
4405         .show = ipr_show_adapter_handle
4406 };
4407
4408 /**
4409  * ipr_show_resource_path - Show the resource path or the resource address for
4410  *                          this device.
4411  * @dev:        device struct
4412  * @attr:       device attribute structure
4413  * @buf:        buffer
4414  *
4415  * Return value:
4416  *      number of bytes printed to buffer
4417  **/
4418 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4419 {
4420         struct scsi_device *sdev = to_scsi_device(dev);
4421         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4422         struct ipr_resource_entry *res;
4423         unsigned long lock_flags = 0;
4424         ssize_t len = -ENXIO;
4425         char buffer[IPR_MAX_RES_PATH_LENGTH];
4426
4427         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4428         res = (struct ipr_resource_entry *)sdev->hostdata;
4429         if (res && ioa_cfg->sis64)
4430                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4431                                __ipr_format_res_path(res->res_path, buffer,
4432                                                      sizeof(buffer)));
4433         else if (res)
4434                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4435                                res->bus, res->target, res->lun);
4436
4437         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4438         return len;
4439 }
4440
4441 static struct device_attribute ipr_resource_path_attr = {
4442         .attr = {
4443                 .name =         "resource_path",
4444                 .mode =         S_IRUGO,
4445         },
4446         .show = ipr_show_resource_path
4447 };
4448
4449 /**
4450  * ipr_show_device_id - Show the device_id for this device.
4451  * @dev:        device struct
4452  * @attr:       device attribute structure
4453  * @buf:        buffer
4454  *
4455  * Return value:
4456  *      number of bytes printed to buffer
4457  **/
4458 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4459 {
4460         struct scsi_device *sdev = to_scsi_device(dev);
4461         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4462         struct ipr_resource_entry *res;
4463         unsigned long lock_flags = 0;
4464         ssize_t len = -ENXIO;
4465
4466         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4467         res = (struct ipr_resource_entry *)sdev->hostdata;
4468         if (res && ioa_cfg->sis64)
4469                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4470         else if (res)
4471                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4472
4473         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4474         return len;
4475 }
4476
4477 static struct device_attribute ipr_device_id_attr = {
4478         .attr = {
4479                 .name =         "device_id",
4480                 .mode =         S_IRUGO,
4481         },
4482         .show = ipr_show_device_id
4483 };
4484
4485 /**
4486  * ipr_show_resource_type - Show the resource type for this device.
4487  * @dev:        device struct
4488  * @attr:       device attribute structure
4489  * @buf:        buffer
4490  *
4491  * Return value:
4492  *      number of bytes printed to buffer
4493  **/
4494 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4495 {
4496         struct scsi_device *sdev = to_scsi_device(dev);
4497         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4498         struct ipr_resource_entry *res;
4499         unsigned long lock_flags = 0;
4500         ssize_t len = -ENXIO;
4501
4502         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4503         res = (struct ipr_resource_entry *)sdev->hostdata;
4504
4505         if (res)
4506                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4507
4508         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4509         return len;
4510 }
4511
4512 static struct device_attribute ipr_resource_type_attr = {
4513         .attr = {
4514                 .name =         "resource_type",
4515                 .mode =         S_IRUGO,
4516         },
4517         .show = ipr_show_resource_type
4518 };
4519
4520 static struct device_attribute *ipr_dev_attrs[] = {
4521         &ipr_adapter_handle_attr,
4522         &ipr_resource_path_attr,
4523         &ipr_device_id_attr,
4524         &ipr_resource_type_attr,
4525         NULL,
4526 };
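/*
 * The attributes above are created per scsi_device, e.g. under
 * /sys/class/scsi_device/0:0:1:0/device/ (hypothetical H:C:T:L).
 */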
4527
4528 /**
4529  * ipr_biosparam - Return the HSC mapping
4530  * @sdev:                       scsi device struct
4531  * @block_device:       block device pointer
4532  * @capacity:           capacity of the device
4533  * @parm:                       Array containing returned HSC values.
4534  *
4535  * This function generates the HSC parms that fdisk uses.
4536  * We want to make sure we return something that places partitions
4537  * on 4k boundaries for best performance with the IOA.
4538  *
4539  * Return value:
4540  *      0 on success
4541  **/
4542 static int ipr_biosparam(struct scsi_device *sdev,
4543                          struct block_device *block_device,
4544                          sector_t capacity, int *parm)
4545 {
4546         int heads, sectors;
4547         sector_t cylinders;
4548
4549         heads = 128;
4550         sectors = 32;
4551
4552         cylinders = capacity;
4553         sector_div(cylinders, (128 * 32));
4554
4555         /* return result */
4556         parm[0] = heads;
4557         parm[1] = sectors;
4558         parm[2] = cylinders;
4559
4560         return 0;
4561 }
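/*
 * Worked example of the fixed geometry above: 128 heads * 32
 * sectors/track = 4096 sectors per cylinder, i.e. 2 MiB per cylinder
 * with 512-byte sectors. Partitions that start on a cylinder boundary
 * are therefore 4 KiB aligned (in fact 2 MiB aligned). A 36 GiB disk
 * (75497472 sectors) reports 18432 cylinders.
 */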
4562
4563 /**
4564  * ipr_find_starget - Find target based on bus/target.
4565  * @starget:    scsi target struct
4566  *
4567  * Return value:
4568  *      resource entry pointer if found / NULL if not found
4569  **/
4570 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4571 {
4572         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4573         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4574         struct ipr_resource_entry *res;
4575
4576         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4577                 if ((res->bus == starget->channel) &&
4578                     (res->target == starget->id)) {
4579                         return res;
4580                 }
4581         }
4582
4583         return NULL;
4584 }
4585
4586 static struct ata_port_info sata_port_info;
4587
4588 /**
4589  * ipr_target_alloc - Prepare for commands to a SCSI target
4590  * @starget:    scsi target struct
4591  *
4592  * If the device is a SATA device, this function allocates an
4593  * ATA port with libata, else it does nothing.
4594  *
4595  * Return value:
4596  *      0 on success / non-0 on failure
4597  **/
4598 static int ipr_target_alloc(struct scsi_target *starget)
4599 {
4600         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4601         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4602         struct ipr_sata_port *sata_port;
4603         struct ata_port *ap;
4604         struct ipr_resource_entry *res;
4605         unsigned long lock_flags;
4606
4607         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4608         res = ipr_find_starget(starget);
4609         starget->hostdata = NULL;
4610
4611         if (res && ipr_is_gata(res)) {
4612                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4613                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4614                 if (!sata_port)
4615                         return -ENOMEM;
4616
4617                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4618                 if (ap) {
4619                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4620                         sata_port->ioa_cfg = ioa_cfg;
4621                         sata_port->ap = ap;
4622                         sata_port->res = res;
4623
4624                         res->sata_port = sata_port;
4625                         ap->private_data = sata_port;
4626                         starget->hostdata = sata_port;
4627                 } else {
4628                         kfree(sata_port);
4629                         return -ENOMEM;
4630                 }
4631         }
4632         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4633
4634         return 0;
4635 }
4636
4637 /**
4638  * ipr_target_destroy - Destroy a SCSI target
4639  * @starget:    scsi target struct
4640  *
4641  * If the device was a SATA device, this function frees the libata
4642  * ATA port, else it does nothing.
4643  *
4644  **/
4645 static void ipr_target_destroy(struct scsi_target *starget)
4646 {
4647         struct ipr_sata_port *sata_port = starget->hostdata;
4648         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4649         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4650
4651         if (ioa_cfg->sis64) {
4652                 if (!ipr_find_starget(starget)) {
4653                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4654                                 clear_bit(starget->id, ioa_cfg->array_ids);
4655                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4656                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4657                         else if (starget->channel == 0)
4658                                 clear_bit(starget->id, ioa_cfg->target_ids);
4659                 }
4660         }
4661
4662         if (sata_port) {
4663                 starget->hostdata = NULL;
4664                 ata_sas_port_destroy(sata_port->ap);
4665                 kfree(sata_port);
4666         }
4667 }
4668
4669 /**
4670  * ipr_find_sdev - Find device based on bus/target/lun.
4671  * @sdev:       scsi device struct
4672  *
4673  * Return value:
4674  *      resource entry pointer if found / NULL if not found
4675  **/
4676 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4677 {
4678         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4679         struct ipr_resource_entry *res;
4680
4681         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4682                 if ((res->bus == sdev->channel) &&
4683                     (res->target == sdev->id) &&
4684                     (res->lun == sdev->lun))
4685                         return res;
4686         }
4687
4688         return NULL;
4689 }
4690
4691 /**
4692  * ipr_slave_destroy - Unconfigure a SCSI device
4693  * @sdev:       scsi device struct
4694  *
4695  * Return value:
4696  *      nothing
4697  **/
4698 static void ipr_slave_destroy(struct scsi_device *sdev)
4699 {
4700         struct ipr_resource_entry *res;
4701         struct ipr_ioa_cfg *ioa_cfg;
4702         unsigned long lock_flags = 0;
4703
4704         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4705
4706         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4707         res = (struct ipr_resource_entry *) sdev->hostdata;
4708         if (res) {
4709                 if (res->sata_port)
4710                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4711                 sdev->hostdata = NULL;
4712                 res->sdev = NULL;
4713                 res->sata_port = NULL;
4714         }
4715         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4716 }
4717
4718 /**
4719  * ipr_slave_configure - Configure a SCSI device
4720  * @sdev:       scsi device struct
4721  *
4722  * This function configures the specified scsi device.
4723  *
4724  * Return value:
4725  *      0 on success
4726  **/
4727 static int ipr_slave_configure(struct scsi_device *sdev)
4728 {
4729         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4730         struct ipr_resource_entry *res;
4731         struct ata_port *ap = NULL;
4732         unsigned long lock_flags = 0;
4733         char buffer[IPR_MAX_RES_PATH_LENGTH];
4734
4735         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4736         res = sdev->hostdata;
4737         if (res) {
4738                 if (ipr_is_af_dasd_device(res))
4739                         sdev->type = TYPE_RAID;
4740                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4741                         sdev->scsi_level = 4;
4742                         sdev->no_uld_attach = 1;
4743                 }
4744                 if (ipr_is_vset_device(res)) {
4745                         blk_queue_rq_timeout(sdev->request_queue,
4746                                              IPR_VSET_RW_TIMEOUT);
4747                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4748                 }
4749                 if (ipr_is_gata(res) && res->sata_port)
4750                         ap = res->sata_port->ap;
4751                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4752
4753                 if (ap) {
4754                         scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4755                         ata_sas_slave_configure(sdev, ap);
4756                 } else
4757                         scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4758                 if (ioa_cfg->sis64)
4759                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4760                                     ipr_format_res_path(ioa_cfg,
4761                                 res->res_path, buffer, sizeof(buffer)));
4762                 return 0;
4763         }
4764         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4765         return 0;
4766 }
4767
4768 /**
4769  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4770  * @sdev:       scsi device struct
4771  *
4772  * This function initializes an ATA port so that future commands
4773  * sent through queuecommand will work.
4774  *
4775  * Return value:
4776  *      0 on success
4777  **/
4778 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4779 {
4780         struct ipr_sata_port *sata_port = NULL;
4781         int rc = -ENXIO;
4782
4783         ENTER;
4784         if (sdev->sdev_target)
4785                 sata_port = sdev->sdev_target->hostdata;
4786         if (sata_port) {
4787                 rc = ata_sas_port_init(sata_port->ap);
4788                 if (rc == 0)
4789                         rc = ata_sas_sync_probe(sata_port->ap);
4790         }
4791
4792         if (rc)
4793                 ipr_slave_destroy(sdev);
4794
4795         LEAVE;
4796         return rc;
4797 }
4798
4799 /**
4800  * ipr_slave_alloc - Prepare for commands to a device.
4801  * @sdev:       scsi device struct
4802  *
4803  * This function saves a pointer to the resource entry
4804  * in the scsi device struct if the device exists. We
4805  * can then use this pointer in ipr_queuecommand when
4806  * handling new commands.
4807  *
4808  * Return value:
4809  *      0 on success / -ENXIO if device does not exist
4810  **/
4811 static int ipr_slave_alloc(struct scsi_device *sdev)
4812 {
4813         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4814         struct ipr_resource_entry *res;
4815         unsigned long lock_flags;
4816         int rc = -ENXIO;
4817
4818         sdev->hostdata = NULL;
4819
4820         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4821
4822         res = ipr_find_sdev(sdev);
4823         if (res) {
4824                 res->sdev = sdev;
4825                 res->add_to_ml = 0;
4826                 res->in_erp = 0;
4827                 sdev->hostdata = res;
4828                 if (!ipr_is_naca_model(res))
4829                         res->needs_sync_complete = 1;
4830                 rc = 0;
4831                 if (ipr_is_gata(res)) {
4832                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4833                         return ipr_ata_slave_alloc(sdev);
4834                 }
4835         }
4836
4837         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4838
4839         return rc;
4840 }
4841
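/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Invoked by the SCSI midlayer during error recovery. Initiates an
 * adapter reset unless one is already in progress, then waits for
 * the reset/reload to finish.
 *
 * Return value:
 *      SUCCESS / FAILED
 **/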
4842 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4843 {
4844         struct ipr_ioa_cfg *ioa_cfg;
4845         unsigned long lock_flags = 0;
4846         int rc = SUCCESS;
4847
4848         ENTER;
4849         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4850         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4851
4852         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4853                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4854                 dev_err(&ioa_cfg->pdev->dev,
4855                         "Adapter being reset as a result of error recovery.\n");
4856
4857                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4858                         ioa_cfg->sdt_state = GET_DUMP;
4859         }
4860
4861         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4862         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4863         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4864
4865         /* If we got hit with a host reset while we were already resetting
4866          * the adapter for some reason and that reset failed, fail this one too. */
4867         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4868                 ipr_trace;
4869                 rc = FAILED;
4870         }
4871
4872         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4873         LEAVE;
4874         return rc;
4875 }
4876
4877 /**
4878  * ipr_device_reset - Reset the device
4879  * @ioa_cfg:    ioa config struct
4880  * @res:                resource entry struct
4881  *
4882  * This function issues a device reset to the affected device.
4883  * If the device is a SCSI device, a LUN reset will be sent
4884  * to the device first. If that does not work, a target reset
4885  * will be sent. If the device is a SATA device, a PHY reset will
4886  * be sent.
4887  *
4888  * Return value:
4889  *      0 on success / non-zero on failure
4890  **/
4891 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4892                             struct ipr_resource_entry *res)
4893 {
4894         struct ipr_cmnd *ipr_cmd;
4895         struct ipr_ioarcb *ioarcb;
4896         struct ipr_cmd_pkt *cmd_pkt;
4897         struct ipr_ioarcb_ata_regs *regs;
4898         u32 ioasc;
4899
4900         ENTER;
4901         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4902         ioarcb = &ipr_cmd->ioarcb;
4903         cmd_pkt = &ioarcb->cmd_pkt;
4904
4905         if (ipr_cmd->ioa_cfg->sis64) {
4906                 regs = &ipr_cmd->i.ata_ioadl.regs;
4907                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4908         } else
4909                 regs = &ioarcb->u.add_data.u.regs;
4910
4911         ioarcb->res_handle = res->res_handle;
4912         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4913         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4914         if (ipr_is_gata(res)) {
4915                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4916                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4917                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4918         }
4919
4920         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4921         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4922         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4923         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4924                 if (ipr_cmd->ioa_cfg->sis64)
4925                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4926                                sizeof(struct ipr_ioasa_gata));
4927                 else
4928                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4929                                sizeof(struct ipr_ioasa_gata));
4930         }
4931
4932         LEAVE;
4933         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4934 }
4935
4936 /**
4937  * ipr_sata_reset - Reset the SATA port
4938  * @link:       SATA link to reset
4939  * @classes:    class of the attached device
4940  *
4941  * This function issues a SATA phy reset to the affected ATA link.
4942  *
4943  * Return value:
4944  *      0 on success / non-zero on failure
4945  **/
4946 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4947                                 unsigned long deadline)
4948 {
4949         struct ipr_sata_port *sata_port = link->ap->private_data;
4950         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4951         struct ipr_resource_entry *res;
4952         unsigned long lock_flags = 0;
4953         int rc = -ENXIO;
4954
4955         ENTER;
4956         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4957         while (ioa_cfg->in_reset_reload) {
4958                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4959                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4960                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4961         }
4962
4963         res = sata_port->res;
4964         if (res) {
4965                 rc = ipr_device_reset(ioa_cfg, res);
4966                 *classes = res->ata_class;
4967         }
4968
4969         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4970         LEAVE;
4971         return rc;
4972 }
4973
4974 /**
4975  * __ipr_eh_dev_reset - Reset the device
4976  * @scsi_cmd:   scsi command struct
4977  *
4978  * This function issues a device reset to the affected device.
4979  * A LUN reset will be sent to the device first. If that does
4980  * not work, a target reset will be sent.
4981  *
4982  * Return value:
4983  *      SUCCESS / FAILED
4984  **/
4985 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4986 {
4987         struct ipr_cmnd *ipr_cmd;
4988         struct ipr_ioa_cfg *ioa_cfg;
4989         struct ipr_resource_entry *res;
4990         struct ata_port *ap;
4991         int rc = 0;
4992         struct ipr_hrr_queue *hrrq;
4993
4994         ENTER;
4995         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4996         res = scsi_cmd->device->hostdata;
4997
4998         if (!res)
4999                 return FAILED;
5000
5001         /*
5002          * If we are currently going through reset/reload, return failed. This will force the
5003          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5004          * reset to complete
5005          */
5006         if (ioa_cfg->in_reset_reload)
5007                 return FAILED;
5008         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5009                 return FAILED;
5010
5011         for_each_hrrq(hrrq, ioa_cfg) {
5012                 spin_lock(&hrrq->_lock);
5013                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5014                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5015                                 if (ipr_cmd->scsi_cmd)
5016                                         ipr_cmd->done = ipr_scsi_eh_done;
5017                                 if (ipr_cmd->qc)
5018                                         ipr_cmd->done = ipr_sata_eh_done;
5019                                 if (ipr_cmd->qc &&
5020                                     !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5021                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5022                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5023                                 }
5024                         }
5025                 }
5026                 spin_unlock(&hrrq->_lock);
5027         }
5028         res->resetting_device = 1;
5029         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5030
5031         if (ipr_is_gata(res) && res->sata_port) {
5032                 ap = res->sata_port->ap;
5033                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5034                 ata_std_error_handler(ap);
5035                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5036
5037                 for_each_hrrq(hrrq, ioa_cfg) {
5038                         spin_lock(&hrrq->_lock);
5039                         list_for_each_entry(ipr_cmd,
5040                                             &hrrq->hrrq_pending_q, queue) {
5041                                 if (ipr_cmd->ioarcb.res_handle ==
5042                                     res->res_handle) {
5043                                         rc = -EIO;
5044                                         break;
5045                                 }
5046                         }
5047                         spin_unlock(&hrrq->_lock);
5048                 }
5049         } else
5050                 rc = ipr_device_reset(ioa_cfg, res);
5051         res->resetting_device = 0;
5052         res->reset_occurred = 1;
5053
5054         LEAVE;
5055         return rc ? FAILED : SUCCESS;
5056 }
5057
5058 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5059 {
5060         int rc;
5061
5062         spin_lock_irq(cmd->device->host->host_lock);
5063         rc = __ipr_eh_dev_reset(cmd);
5064         spin_unlock_irq(cmd->device->host->host_lock);
5065
5066         return rc;
5067 }
5068
5069 /**
5070  * ipr_bus_reset_done - Op done function for bus reset.
5071  * @ipr_cmd:    ipr command struct
5072  *
5073  * This function is the op done function for a bus reset
5074  *
5075  * Return value:
5076  *      none
5077  **/
5078 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5079 {
5080         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5081         struct ipr_resource_entry *res;
5082
5083         ENTER;
5084         if (!ioa_cfg->sis64)
5085                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5086                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5087                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5088                                 break;
5089                         }
5090                 }
5091
5092         /*
5093          * If abort has not completed, indicate the reset has, else call the
5094          * abort's done function to wake the sleeping eh thread
5095          */
5096         if (ipr_cmd->sibling->sibling)
5097                 ipr_cmd->sibling->sibling = NULL;
5098         else
5099                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5100
5101         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5102         LEAVE;
5103 }
5104
5105 /**
5106  * ipr_abort_timeout - An abort task has timed out
5107  * @ipr_cmd:    ipr command struct
5108  *
5109  * This function handles when an abort task times out. If this
5110  * happens we issue a bus reset since we have resources tied
5111  * up that must be freed before returning to the midlayer.
5112  *
5113  * Return value:
5114  *      none
5115  **/
5116 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5117 {
5118         struct ipr_cmnd *reset_cmd;
5119         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5120         struct ipr_cmd_pkt *cmd_pkt;
5121         unsigned long lock_flags = 0;
5122
5123         ENTER;
5124         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5125         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5126                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5127                 return;
5128         }
5129
5130         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5131         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5132         ipr_cmd->sibling = reset_cmd;
5133         reset_cmd->sibling = ipr_cmd;
5134         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5135         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5136         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5137         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5138         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5139
5140         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5141         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5142         LEAVE;
5143 }
5144
5145 /**
5146  * ipr_cancel_op - Cancel specified op
5147  * @scsi_cmd:   scsi command struct
5148  *
5149  * This function cancels the specified op.
5150  *
5151  * Return value:
5152  *      SUCCESS / FAILED
5153  **/
5154 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5155 {
5156         struct ipr_cmnd *ipr_cmd;
5157         struct ipr_ioa_cfg *ioa_cfg;
5158         struct ipr_resource_entry *res;
5159         struct ipr_cmd_pkt *cmd_pkt;
5160         u32 ioasc, int_reg;
5161         int op_found = 0;
5162         struct ipr_hrr_queue *hrrq;
5163
5164         ENTER;
5165         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5166         res = scsi_cmd->device->hostdata;
5167
5168         /* If we are currently going through reset/reload, return failed.
5169          * This will force the mid-layer to call ipr_eh_host_reset,
5170          * which will then go to sleep and wait for the reset to complete
5171          */
5172         if (ioa_cfg->in_reset_reload ||
5173             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5174                 return FAILED;
5175         if (!res)
5176                 return FAILED;
5177
5178         /*
5179          * If we are aborting a timed out op, chances are that the timeout was caused
5180          * by a still not detected EEH error. In such cases, reading a register will
5181          * trigger the EEH recovery infrastructure.
5182          */
5183         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5184
5185         if (!ipr_is_gscsi(res))
5186                 return FAILED;
5187
5188         for_each_hrrq(hrrq, ioa_cfg) {
5189                 spin_lock(&hrrq->_lock);
5190                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5191                         if (ipr_cmd->scsi_cmd == scsi_cmd) {
5192                                 ipr_cmd->done = ipr_scsi_eh_done;
5193                                 op_found = 1;
5194                                 break;
5195                         }
5196                 }
5197                 spin_unlock(&hrrq->_lock);
5198         }
5199
5200         if (!op_found)
5201                 return SUCCESS;
5202
5203         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5204         ipr_cmd->ioarcb.res_handle = res->res_handle;
5205         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5206         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5207         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5208         ipr_cmd->u.sdev = scsi_cmd->device;
5209
5210         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5211                     scsi_cmd->cmnd[0]);
5212         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5213         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5214
5215         /*
5216          * If the abort task timed out and we sent a bus reset, we will get
5217          * one of the following responses to the abort
5218          */
5219         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5220                 ioasc = 0;
5221                 ipr_trace;
5222         }
5223
5224         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5225         if (!ipr_is_naca_model(res))
5226                 res->needs_sync_complete = 1;
5227
5228         LEAVE;
5229         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5230 }
5231
5232 /**
5233  * ipr_eh_abort - Abort a single op
5234  * @scsi_cmd:   scsi command struct
5235  *
5236  * Return value:
5237  *      SUCCESS / FAILED
5238  **/
5239 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5240 {
5241         unsigned long flags;
5242         int rc;
5243
5244         ENTER;
5245
5246         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5247         rc = ipr_cancel_op(scsi_cmd);
5248         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5249
5250         LEAVE;
5251         return rc;
5252 }
5253
5254 /**
5255  * ipr_handle_other_interrupt - Handle "other" interrupts
5256  * @ioa_cfg:    ioa config struct
5257  * @int_reg:    interrupt register
5258  *
5259  * Return value:
5260  *      IRQ_NONE / IRQ_HANDLED
5261  **/
5262 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5263                                               u32 int_reg)
5264 {
5265         irqreturn_t rc = IRQ_HANDLED;
5266         u32 int_mask_reg;
5267
5268         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5269         int_reg &= ~int_mask_reg;
5270
5271         /* If an interrupt on the adapter did not occur, ignore it.
5272          * Or in the case of SIS 64, check for a stage change interrupt.
5273          */
5274         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5275                 if (ioa_cfg->sis64) {
5276                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5277                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5278                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5279
5280                                 /* clear stage change */
5281                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5282                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5283                                 list_del(&ioa_cfg->reset_cmd->queue);
5284                                 del_timer(&ioa_cfg->reset_cmd->timer);
5285                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5286                                 return IRQ_HANDLED;
5287                         }
5288                 }
5289
5290                 return IRQ_NONE;
5291         }
5292
5293         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5294                 /* Mask the interrupt */
5295                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5296
5297                 /* Clear the interrupt */
5298                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5299                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5300
5301                 list_del(&ioa_cfg->reset_cmd->queue);
5302                 del_timer(&ioa_cfg->reset_cmd->timer);
5303                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5304         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5305                 if (ioa_cfg->clear_isr) {
5306                         if (ipr_debug && printk_ratelimit())
5307                                 dev_err(&ioa_cfg->pdev->dev,
5308                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5309                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5310                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5311                         return IRQ_NONE;
5312                 }
5313         } else {
5314                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5315                         ioa_cfg->ioa_unit_checked = 1;
5316                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5317                         dev_err(&ioa_cfg->pdev->dev,
5318                                 "No Host RRQ. 0x%08X\n", int_reg);
5319                 else
5320                         dev_err(&ioa_cfg->pdev->dev,
5321                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5322
5323                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5324                         ioa_cfg->sdt_state = GET_DUMP;
5325
5326                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5327                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5328         }
5329
5330         return rc;
5331 }
5332
5333 /**
5334  * ipr_isr_eh - Interrupt service routine error handler
5335  * @ioa_cfg:    ioa config struct
5336  * @msg:        message to log
 * @number:     error identifier logged with the message
5337  *
5338  * Return value:
5339  *      none
5340  **/
5341 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5342 {
5343         ioa_cfg->errors_logged++;
5344         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5345
5346         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5347                 ioa_cfg->sdt_state = GET_DUMP;
5348
5349         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5350 }
5351
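/**
 * ipr_process_hrrq - Reap completed commands from a host RRQ
 * @hrr_queue:  hrr queue to process
 * @budget:     maximum number of completions to reap, or -1 for no limit
 * @doneq:      list on which reaped commands are gathered
 *
 * An HRRQ entry is new while its toggle bit matches the queue's
 * current toggle_bit; when the walk wraps from hrrq_end back to
 * hrrq_start, toggle_bit is flipped so entries left over from the
 * previous pass are no longer seen as new.
 *
 * Return value:
 *      number of commands moved to @doneq
 **/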
5352 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5353                                                 struct list_head *doneq)
5354 {
5355         u32 ioasc;
5356         u16 cmd_index;
5357         struct ipr_cmnd *ipr_cmd;
5358         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5359         int num_hrrq = 0;
5360
5361         /* If interrupts are disabled, ignore the interrupt */
5362         if (!hrr_queue->allow_interrupts)
5363                 return 0;
5364
5365         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5366                hrr_queue->toggle_bit) {
5367
5368                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5369                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5370                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5371
5372                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5373                              cmd_index < hrr_queue->min_cmd_id)) {
5374                         ipr_isr_eh(ioa_cfg,
5375                                 "Invalid response handle from IOA:",
5376                                 cmd_index);
5377                         break;
5378                 }
5379
5380                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5381                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5382
5383                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5384
5385                 list_move_tail(&ipr_cmd->queue, doneq);
5386
5387                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5388                         hrr_queue->hrrq_curr++;
5389                 } else {
5390                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5391                         hrr_queue->toggle_bit ^= 1u;
5392                 }
5393                 num_hrrq++;
5394                 if (budget > 0 && num_hrrq >= budget)
5395                         break;
5396         }
5397
5398         return num_hrrq;
5399 }
5400
5401 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5402 {
5403         struct ipr_ioa_cfg *ioa_cfg;
5404         struct ipr_hrr_queue *hrrq;
5405         struct ipr_cmnd *ipr_cmd, *temp;
5406         unsigned long hrrq_flags;
5407         int completed_ops;
5408         LIST_HEAD(doneq);
5409
5410         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5411         ioa_cfg = hrrq->ioa_cfg;
5412
5413         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5414         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5415
5416         if (completed_ops < budget)
5417                 blk_iopoll_complete(iop);
5418         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5419
5420         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5421                 list_del(&ipr_cmd->queue);
5422                 del_timer(&ipr_cmd->timer);
5423                 ipr_cmd->fast_done(ipr_cmd);
5424         }
5425
5426         return completed_ops;
5427 }
5428
5429 /**
5430  * ipr_isr - Interrupt service routine
5431  * @irq:        irq number
5432  * @devp:       pointer to the hrr queue the interrupt is for
5433  *
5434  * Return value:
5435  *      IRQ_NONE / IRQ_HANDLED
5436  **/
5437 static irqreturn_t ipr_isr(int irq, void *devp)
5438 {
5439         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5440         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5441         unsigned long hrrq_flags = 0;
5442         u32 int_reg = 0;
5443         int num_hrrq = 0;
5444         int irq_none = 0;
5445         struct ipr_cmnd *ipr_cmd, *temp;
5446         irqreturn_t rc = IRQ_NONE;
5447         LIST_HEAD(doneq);
5448
5449         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5450         /* If interrupts are disabled, ignore the interrupt */
5451         if (!hrrq->allow_interrupts) {
5452                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5453                 return IRQ_NONE;
5454         }
5455
5456         while (1) {
5457                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5458                         rc = IRQ_HANDLED;
5459
5460                         if (!ioa_cfg->clear_isr)
5461                                 break;
5462
5463                         /* Clear the PCI interrupt */
5464                         num_hrrq = 0;
5465                         do {
5466                                 writel(IPR_PCII_HRRQ_UPDATED,
5467                                      ioa_cfg->regs.clr_interrupt_reg32);
5468                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5469                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5470                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5471
5472                 } else if (rc == IRQ_NONE && irq_none == 0) {
5473                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5474                         irq_none++;
5475                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5476                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5477                         ipr_isr_eh(ioa_cfg,
5478                                 "Error clearing HRRQ:", num_hrrq);
5479                         rc = IRQ_HANDLED;
5480                         break;
5481                 } else
5482                         break;
5483         }
5484
5485         if (unlikely(rc == IRQ_NONE))
5486                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5487
5488         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5489         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5490                 list_del(&ipr_cmd->queue);
5491                 del_timer(&ipr_cmd->timer);
5492                 ipr_cmd->fast_done(ipr_cmd);
5493         }
5494         return rc;
5495 }
5496
5497 /**
5498  * ipr_isr_mhrrq - Interrupt service routine for additional HRRQs
5499  * @irq:        irq number
5500  * @devp:       pointer to the hrr queue the interrupt is for
5501  *
5502  * Return value:
5503  *      IRQ_NONE / IRQ_HANDLED
5504  **/
5505 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5506 {
5507         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5508         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5509         unsigned long hrrq_flags = 0;
5510         struct ipr_cmnd *ipr_cmd, *temp;
5511         irqreturn_t rc = IRQ_NONE;
5512         LIST_HEAD(doneq);
5513
5514         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5515
5516         /* If interrupts are disabled, ignore the interrupt */
5517         if (!hrrq->allow_interrupts) {
5518                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5519                 return IRQ_NONE;
5520         }
5521
5522         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5523                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5524                        hrrq->toggle_bit) {
5525                         if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5526                                 blk_iopoll_sched(&hrrq->iopoll);
5527                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5528                         return IRQ_HANDLED;
5529                 }
5530         } else {
5531                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5532                         hrrq->toggle_bit)
5534                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5535                                 rc = IRQ_HANDLED;
5536         }
5537
5538         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5539
5540         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5541                 list_del(&ipr_cmd->queue);
5542                 del_timer(&ipr_cmd->timer);
5543                 ipr_cmd->fast_done(ipr_cmd);
5544         }
5545         return rc;
5546 }
5547
5548 /**
5549  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5550  * @ioa_cfg:    ioa config struct
5551  * @ipr_cmd:    ipr command struct
5552  *
5553  * Return value:
5554  *      0 on success / -1 on failure
5555  **/
5556 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5557                              struct ipr_cmnd *ipr_cmd)
5558 {
5559         int i, nseg;
5560         struct scatterlist *sg;
5561         u32 length;
5562         u32 ioadl_flags = 0;
5563         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5564         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5565         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5566
5567         length = scsi_bufflen(scsi_cmd);
5568         if (!length)
5569                 return 0;
5570
5571         nseg = scsi_dma_map(scsi_cmd);
5572         if (nseg < 0) {
5573                 if (printk_ratelimit())
5574                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5575                 return -1;
5576         }
5577
5578         ipr_cmd->dma_use_sg = nseg;
5579
5580         ioarcb->data_transfer_length = cpu_to_be32(length);
5581         ioarcb->ioadl_len =
5582                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5583
5584         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5585                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5586                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5587         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5588                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5589
5590         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5591                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5592                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5593                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5594         }
5595
5596         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5597         return 0;
5598 }
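
/*
 * For illustration, with hypothetical segment values: a two-segment
 * DMA_TO_DEVICE mapping of (bus 0x1000, 512 bytes) and (bus 0x2000,
 * 1024 bytes) leaves the following IOADL64 behind, with the LAST flag
 * OR'd into the final descriptor so the adapter knows where the list
 * ends:
 *
 *      ioadl64[0].flags    = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
 *      ioadl64[0].data_len = cpu_to_be32(512);
 *      ioadl64[0].address  = cpu_to_be64(0x1000);
 *      ioadl64[1].flags    = cpu_to_be32(IPR_IOADL_FLAGS_WRITE |
 *                                        IPR_IOADL_FLAGS_LAST);
 *      ioadl64[1].data_len = cpu_to_be32(1024);
 *      ioadl64[1].address  = cpu_to_be64(0x2000);
 */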
5599
5600 /**
5601  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5602  * @ioa_cfg:    ioa config struct
5603  * @ipr_cmd:    ipr command struct
5604  *
5605  * Return value:
5606  *      0 on success / -1 on failure
5607  **/
5608 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5609                            struct ipr_cmnd *ipr_cmd)
5610 {
5611         int i, nseg;
5612         struct scatterlist *sg;
5613         u32 length;
5614         u32 ioadl_flags = 0;
5615         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5616         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5617         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5618
5619         length = scsi_bufflen(scsi_cmd);
5620         if (!length)
5621                 return 0;
5622
5623         nseg = scsi_dma_map(scsi_cmd);
5624         if (nseg < 0) {
5625                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5626                 return -1;
5627         }
5628
5629         ipr_cmd->dma_use_sg = nseg;
5630
5631         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5632                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5633                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5634                 ioarcb->data_transfer_length = cpu_to_be32(length);
5635                 ioarcb->ioadl_len =
5636                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5637         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5638                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5639                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5640                 ioarcb->read_ioadl_len =
5641                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5642         }
5643
5644         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5645                 ioadl = ioarcb->u.add_data.u.ioadl;
5646                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5647                                     offsetof(struct ipr_ioarcb, u.add_data));
5648                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5649         }
5650
5651         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5652                 ioadl[i].flags_and_data_len =
5653                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5654                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5655         }
5656
5657         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5658         return 0;
5659 }
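
/*
 * Two details distinguish this 32-bit path from ipr_build_ioadl64()
 * above.  First, the descriptor packs flags and segment length into a
 * single big-endian word; a hypothetical 512-byte read segment would
 * be encoded as:
 *
 *      desc.flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_READ | 512);
 *      desc.address = cpu_to_be32(sg_dma_address(sg));
 *
 * Second, the ARRAY_SIZE() test is a fast path: a list short enough to
 * fit in the IOARCB's additional-data area is written inline, and the
 * read/write IOADL addresses are pointed back into the IOARCB itself,
 * sparing the adapter a separate DMA fetch of the list.
 */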
5660
5661 /**
5662  * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5663  * @scsi_cmd:   scsi command struct
5664  *
5665  * Return value:
5666  *      task attributes
5667  **/
5668 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5669 {
5670         u8 tag[2];
5671         u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5672
5673         if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5674                 switch (tag[0]) {
5675                 case MSG_SIMPLE_TAG:
5676                         rc = IPR_FLAGS_LO_SIMPLE_TASK;
5677                         break;
5678                 case MSG_HEAD_TAG:
5679                         rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5680                         break;
5681                 case MSG_ORDERED_TAG:
5682                         rc = IPR_FLAGS_LO_ORDERED_TASK;
5683                         break;
5684                 }
5685         }
5686
5687         return rc;
5688 }
5689
5690 /**
5691  * ipr_erp_done - Process completion of ERP for a device
5692  * @ipr_cmd:            ipr command struct
5693  *
5694  * This function copies the sense buffer into the scsi_cmd
5695  * struct and pushes the scsi_done function.
5696  *
5697  * Return value:
5698  *      nothing
5699  **/
5700 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5701 {
5702         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5703         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5704         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5705
5706         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5707                 scsi_cmd->result |= (DID_ERROR << 16);
5708                 scmd_printk(KERN_ERR, scsi_cmd,
5709                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5710         } else {
5711                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5712                        SCSI_SENSE_BUFFERSIZE);
5713         }
5714
5715         if (res) {
5716                 if (!ipr_is_naca_model(res))
5717                         res->needs_sync_complete = 1;
5718                 res->in_erp = 0;
5719         }
5720         scsi_dma_unmap(ipr_cmd->scsi_cmd);
5721         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5722         scsi_cmd->scsi_done(scsi_cmd);
5723 }
5724
5725 /**
5726  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5727  * @ipr_cmd:    ipr command struct
5728  *
5729  * Return value:
5730  *      none
5731  **/
5732 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5733 {
5734         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5735         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5736         dma_addr_t dma_addr = ipr_cmd->dma_addr;
5737
5738         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5739         ioarcb->data_transfer_length = 0;
5740         ioarcb->read_data_transfer_length = 0;
5741         ioarcb->ioadl_len = 0;
5742         ioarcb->read_ioadl_len = 0;
5743         ioasa->hdr.ioasc = 0;
5744         ioasa->hdr.residual_data_len = 0;
5745
5746         if (ipr_cmd->ioa_cfg->sis64)
5747                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5748                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5749         else {
5750                 ioarcb->write_ioadl_addr =
5751                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5752                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5753         }
5754 }
5755
5756 /**
5757  * ipr_erp_request_sense - Send request sense to a device
5758  * @ipr_cmd:    ipr command struct
5759  *
5760  * This function sends a request sense to a device as a result
5761  * of a check condition.
5762  *
5763  * Return value:
5764  *      nothing
5765  **/
5766 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5767 {
5768         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5769         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5770
5771         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5772                 ipr_erp_done(ipr_cmd);
5773                 return;
5774         }
5775
5776         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5777
5778         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5779         cmd_pkt->cdb[0] = REQUEST_SENSE;
5780         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5781         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5782         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5783         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5784
5785         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5786                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5787
5788         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5789                    IPR_REQUEST_SENSE_TIMEOUT * 2);
5790 }
5791
5792 /**
5793  * ipr_erp_cancel_all - Send cancel all to a device
5794  * @ipr_cmd:    ipr command struct
5795  *
5796  * This function sends a cancel all to a device to clear the
5797  * queue. If we are running TCQ on the device, QERR is set to 1,
5798  * which means all outstanding ops have been dropped on the floor.
5799  * Cancel all will return them to us.
5800  *
5801  * Return value:
5802  *      nothing
5803  **/
5804 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5805 {
5806         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5807         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5808         struct ipr_cmd_pkt *cmd_pkt;
5809
5810         res->in_erp = 1;
5811
5812         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5813
5814         if (!scsi_get_tag_type(scsi_cmd->device)) {
5815                 ipr_erp_request_sense(ipr_cmd);
5816                 return;
5817         }
5818
5819         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5820         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5821         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5822
5823         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5824                    IPR_CANCEL_ALL_TIMEOUT);
5825 }
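
/*
 * Taken together, the helpers above form the device ERP chain.  Each
 * stage runs from the interrupt path and completes into the next:
 *
 *      ipr_erp_start() (below)
 *          -> ipr_erp_cancel_all()         tagged queueing only
 *              -> ipr_erp_request_sense()
 *                  -> ipr_erp_done()       copy sense, complete command
 *
 * Untagged devices skip the Cancel All and go straight to Request
 * Sense, and ipr_erp_request_sense() bails out to ipr_erp_done() if
 * the preceding stage itself failed.
 */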
5826
5827 /**
5828  * ipr_dump_ioasa - Dump contents of IOASA
5829  * @ioa_cfg:    ioa config struct
5830  * @ipr_cmd:    ipr command struct
5831  * @res:                resource entry struct
5832  *
5833  * This function is invoked by the interrupt handler when ops
5834  * fail. It will log the IOASA if appropriate. Only called
5835  * for GPDD ops.
5836  *
5837  * Return value:
5838  *      none
5839  **/
5840 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5841                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5842 {
5843         int i;
5844         u16 data_len;
5845         u32 ioasc, fd_ioasc;
5846         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5847         __be32 *ioasa_data = (__be32 *)ioasa;
5848         int error_index;
5849
5850         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5851         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5852
5853         if (ioasc == 0)
5854                 return;
5855
5856         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5857                 return;
5858
5859         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5860                 error_index = ipr_get_error(fd_ioasc);
5861         else
5862                 error_index = ipr_get_error(ioasc);
5863
5864         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5865                 /* Don't log an error if the IOA already logged one */
5866                 if (ioasa->hdr.ilid != 0)
5867                         return;
5868
5869                 if (!ipr_is_gscsi(res))
5870                         return;
5871
5872                 if (ipr_error_table[error_index].log_ioasa == 0)
5873                         return;
5874         }
5875
5876         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5877
5878         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5879         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5880                 data_len = sizeof(struct ipr_ioasa64);
5881         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5882                 data_len = sizeof(struct ipr_ioasa);
5883
5884         ipr_err("IOASA Dump:\n");
5885
5886         for (i = 0; i < data_len / 4; i += 4) {
5887                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5888                         be32_to_cpu(ioasa_data[i]),
5889                         be32_to_cpu(ioasa_data[i+1]),
5890                         be32_to_cpu(ioasa_data[i+2]),
5891                         be32_to_cpu(ioasa_data[i+3]));
5892         }
5893 }
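
/*
 * The dump loop prints four big-endian words per line, with the byte
 * offset of the first word in the leading column.  With a hypothetical
 * data_len of 32 the output would be two lines:
 *
 *      00000000: AAAAAAAA BBBBBBBB CCCCCCCC DDDDDDDD
 *      00000010: EEEEEEEE FFFFFFFF 00112233 44556677
 */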
5894
5895 /**
5896  * ipr_gen_sense - Generate SCSI sense data from an IOASA
5897  * @ipr_cmd:    ipr command struct
5898  *
5899  *
5900  * Return value:
5901  *      none
5902  **/
5903 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5904 {
5905         u32 failing_lba;
5906         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5907         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5908         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5909         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5910
5911         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5912
5913         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5914                 return;
5915
5916         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5917
5918         if (ipr_is_vset_device(res) &&
5919             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5920             ioasa->u.vset.failing_lba_hi != 0) {
5921                 sense_buf[0] = 0x72;
5922                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5923                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5924                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5925
5926                 sense_buf[7] = 12;
5927                 sense_buf[8] = 0;
5928                 sense_buf[9] = 0x0A;
5929                 sense_buf[10] = 0x80;
5930
5931                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5932
5933                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5934                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5935                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5936                 sense_buf[15] = failing_lba & 0x000000ff;
5937
5938                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5939
5940                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5941                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5942                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5943                 sense_buf[19] = failing_lba & 0x000000ff;
5944         } else {
5945                 sense_buf[0] = 0x70;
5946                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5947                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5948                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5949
5950                 /* Illegal request */
5951                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5952                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5953                         sense_buf[7] = 10;      /* additional length */
5954
5955                         /* IOARCB was in error */
5956                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5957                                 sense_buf[15] = 0xC0;
5958                         else    /* Parameter data was invalid */
5959                                 sense_buf[15] = 0x80;
5960
5961                         sense_buf[16] =
5962                             ((IPR_FIELD_POINTER_MASK &
5963                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5964                         sense_buf[17] =
5965                             (IPR_FIELD_POINTER_MASK &
5966                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5967                 } else {
5968                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5969                                 if (ipr_is_vset_device(res))
5970                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5971                                 else
5972                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5973
5974                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
5975                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5976                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5977                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5978                                 sense_buf[6] = failing_lba & 0x000000ff;
5979                         }
5980
5981                         sense_buf[7] = 6;       /* additional length */
5982                 }
5983         }
5984 }
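
/*
 * Worked example with hypothetical values: IPR_IOASC_MED_DO_NOT_REALLOC
 * on a vset whose failing LBA is 0x0000000123456789 (failing_lba_hi =
 * 0x00000001, failing_lba_lo = 0x23456789) produces descriptor-format
 * sense data:
 *
 *      sense_buf[0]     = 0x72            descriptor format
 *      sense_buf[1..3]  = key/ASC/ASCQ    extracted from the IOASC
 *      sense_buf[7]     = 12              additional sense length
 *      sense_buf[8..10] = 0x00 0x0A 0x80  information descriptor, VALID
 *      sense_buf[12..19]= 00 00 00 01 23 45 67 89   64-bit failing LBA
 *
 * When there is no high LBA word, the fixed format (0x70) is used
 * instead and only a 32-bit LBA fits, in bytes 3..6.
 */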
5985
5986 /**
5987  * ipr_get_autosense - Copy autosense data to sense buffer
5988  * @ipr_cmd:    ipr command struct
5989  *
5990  * This function copies the autosense buffer to the buffer
5991  * in the scsi_cmd, if there is autosense available.
5992  *
5993  * Return value:
5994  *      1 if autosense was available / 0 if not
5995  **/
5996 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5997 {
5998         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5999         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6000
6001         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6002                 return 0;
6003
6004         if (ipr_cmd->ioa_cfg->sis64)
6005                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6006                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6007                            SCSI_SENSE_BUFFERSIZE));
6008         else
6009                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6010                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6011                            SCSI_SENSE_BUFFERSIZE));
6012         return 1;
6013 }
6014
6015 /**
6016  * ipr_erp_start - Process an error response for a SCSI op
6017  * @ioa_cfg:    ioa config struct
6018  * @ipr_cmd:    ipr command struct
6019  *
6020  * This function determines whether or not to initiate ERP
6021  * on the affected device.
6022  *
6023  * Return value:
6024  *      nothing
6025  **/
6026 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6027                               struct ipr_cmnd *ipr_cmd)
6028 {
6029         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6030         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6031         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6032         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6033
6034         if (!res) {
6035                 ipr_scsi_eh_done(ipr_cmd);
6036                 return;
6037         }
6038
6039         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6040                 ipr_gen_sense(ipr_cmd);
6041
6042         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6043
6044         switch (masked_ioasc) {
6045         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6046                 if (ipr_is_naca_model(res))
6047                         scsi_cmd->result |= (DID_ABORT << 16);
6048                 else
6049                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6050                 break;
6051         case IPR_IOASC_IR_RESOURCE_HANDLE:
6052         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6053                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6054                 break;
6055         case IPR_IOASC_HW_SEL_TIMEOUT:
6056                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6057                 if (!ipr_is_naca_model(res))
6058                         res->needs_sync_complete = 1;
6059                 break;
6060         case IPR_IOASC_SYNC_REQUIRED:
6061                 if (!res->in_erp)
6062                         res->needs_sync_complete = 1;
6063                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6064                 break;
6065         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6066         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6067                 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6068                 break;
6069         case IPR_IOASC_BUS_WAS_RESET:
6070         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6071                 /*
6072                  * Report the bus reset and ask for a retry. The device
6073                  * will give a CC/UA on the next command.
6074                  */
6075                 if (!res->resetting_device)
6076                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6077                 scsi_cmd->result |= (DID_ERROR << 16);
6078                 if (!ipr_is_naca_model(res))
6079                         res->needs_sync_complete = 1;
6080                 break;
6081         case IPR_IOASC_HW_DEV_BUS_STATUS:
6082                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6083                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6084                         if (!ipr_get_autosense(ipr_cmd)) {
6085                                 if (!ipr_is_naca_model(res)) {
6086                                         ipr_erp_cancel_all(ipr_cmd);
6087                                         return;
6088                                 }
6089                         }
6090                 }
6091                 if (!ipr_is_naca_model(res))
6092                         res->needs_sync_complete = 1;
6093                 break;
6094         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6095                 break;
6096         default:
6097                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6098                         scsi_cmd->result |= (DID_ERROR << 16);
6099                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6100                         res->needs_sync_complete = 1;
6101                 break;
6102         }
6103
6104         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6105         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6106         scsi_cmd->scsi_done(scsi_cmd);
6107 }
6108
6109 /**
6110  * ipr_scsi_done - mid-layer done function
6111  * @ipr_cmd:    ipr command struct
6112  *
6113  * This function is invoked by the interrupt handler for
6114  * ops generated by the SCSI mid-layer
6115  *
6116  * Return value:
6117  *      none
6118  **/
6119 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6120 {
6121         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6122         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6123         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6124         unsigned long hrrq_flags;
6125
6126         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6127
6128         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6129                 scsi_dma_unmap(scsi_cmd);
6130
6131                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6132                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6133                 scsi_cmd->scsi_done(scsi_cmd);
6134                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6135         } else {
6136                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6137                 ipr_erp_start(ioa_cfg, ipr_cmd);
6138                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6139         }
6140 }
6141
6142 /**
6143  * ipr_queuecommand - Queue a mid-layer request
6144  * @shost:              scsi host struct
6145  * @scsi_cmd:   scsi command struct
6146  *
6147  * This function queues a request generated by the mid-layer.
6148  *
6149  * Return value:
6150  *      0 on success
6151  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6152  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6153  **/
6154 static int ipr_queuecommand(struct Scsi_Host *shost,
6155                             struct scsi_cmnd *scsi_cmd)
6156 {
6157         struct ipr_ioa_cfg *ioa_cfg;
6158         struct ipr_resource_entry *res;
6159         struct ipr_ioarcb *ioarcb;
6160         struct ipr_cmnd *ipr_cmd;
6161         unsigned long hrrq_flags, lock_flags;
6162         int rc;
6163         struct ipr_hrr_queue *hrrq;
6164         int hrrq_id;
6165
6166         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6167
6168         scsi_cmd->result = (DID_OK << 16);
6169         res = scsi_cmd->device->hostdata;
6170
6171         if (ipr_is_gata(res) && res->sata_port) {
6172                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6173                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6174                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6175                 return rc;
6176         }
6177
6178         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6179         hrrq = &ioa_cfg->hrrq[hrrq_id];
6180
6181         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6182         /*
6183          * We are currently blocking all devices due to a host reset.
6184          * We have told the host to stop giving us new requests, but
6185          * ERP ops don't count. FIXME
6186          */
6187         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6188                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6189                 return SCSI_MLQUEUE_HOST_BUSY;
6190         }
6191
6192         /*
6193          * FIXME - Create scsi_set_host_offline interface
6194          *  and the ioa_is_dead check can be removed
6195          */
6196         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6197                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6198                 goto err_nodev;
6199         }
6200
6201         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6202         if (ipr_cmd == NULL) {
6203                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6204                 return SCSI_MLQUEUE_HOST_BUSY;
6205         }
6206         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6207
6208         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6209         ioarcb = &ipr_cmd->ioarcb;
6210
6211         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6212         ipr_cmd->scsi_cmd = scsi_cmd;
6213         ipr_cmd->done = ipr_scsi_eh_done;
6214
6215         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6216                 if (scsi_cmd->underflow == 0)
6217                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6218
6219                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6220                 if (ipr_is_gscsi(res) && res->reset_occurred) {
6221                         res->reset_occurred = 0;
6222                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6223                 }
6224                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6225                 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6226         }
6227
6228         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6229             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6230                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6231         }
6232
6233         if (ioa_cfg->sis64)
6234                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6235         else
6236                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6237
6238         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6239         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6240                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6241                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6242                 if (!rc)
6243                         scsi_dma_unmap(scsi_cmd);
6244                 return SCSI_MLQUEUE_HOST_BUSY;
6245         }
6246
6247         if (unlikely(hrrq->ioa_is_dead)) {
6248                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6249                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6250                 scsi_dma_unmap(scsi_cmd);
6251                 goto err_nodev;
6252         }
6253
6254         ioarcb->res_handle = res->res_handle;
6255         if (res->needs_sync_complete) {
6256                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6257                 res->needs_sync_complete = 0;
6258         }
6259         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6260         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6261         ipr_send_command(ipr_cmd);
6262         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6263         return 0;
6264
6265 err_nodev:
6266         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6267         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6268         scsi_cmd->result = (DID_NO_CONNECT << 16);
6269         scsi_cmd->scsi_done(scsi_cmd);
6270         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6271         return 0;
6272 }
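
/*
 * The locking pattern in ipr_queuecommand() is check / build / re-check:
 * hrrq->lock is taken just long enough to validate adapter state and
 * reserve a command block, dropped while the S/G list is DMA-mapped and
 * built, then retaken so allow_cmds/ioa_is_dead can be re-tested before
 * the command is actually sent.  In outline:
 *
 *      spin_lock(hrrq->lock);    check state, reserve ipr_cmd;
 *      spin_unlock(hrrq->lock);
 *      build the IOADL (DMA mapping runs without the lock held);
 *      spin_lock(hrrq->lock);    re-check state, ipr_send_command();
 *      spin_unlock(hrrq->lock);
 *
 * A command that loses the race to a reset is returned to hrrq_free_q
 * and the midlayer is asked to retry with SCSI_MLQUEUE_HOST_BUSY.
 */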
6273
6274 /**
6275  * ipr_ioctl - IOCTL handler
6276  * @sdev:       scsi device struct
6277  * @cmd:        IOCTL cmd
6278  * @arg:        IOCTL arg
6279  *
6280  * Return value:
6281  *      0 on success / other on failure
6282  **/
6283 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6284 {
6285         struct ipr_resource_entry *res;
6286
6287         res = (struct ipr_resource_entry *)sdev->hostdata;
6288         if (res && ipr_is_gata(res)) {
6289                 if (cmd == HDIO_GET_IDENTITY)
6290                         return -ENOTTY;
6291                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6292         }
6293
6294         return -EINVAL;
6295 }
6296
6297 /**
6298  * ipr_ioa_info - Get information about the card/driver
6299  * @host:       scsi host struct
6300  *
6301  * Return value:
6302  *      pointer to buffer with description string
6303  **/
6304 static const char *ipr_ioa_info(struct Scsi_Host *host)
6305 {
6306         static char buffer[512];
6307         struct ipr_ioa_cfg *ioa_cfg;
6308         unsigned long lock_flags = 0;
6309
6310         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6311
6312         spin_lock_irqsave(host->host_lock, lock_flags);
6313         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6314         spin_unlock_irqrestore(host->host_lock, lock_flags);
6315
6316         return buffer;
6317 }
6318
6319 static struct scsi_host_template driver_template = {
6320         .module = THIS_MODULE,
6321         .name = "IPR",
6322         .info = ipr_ioa_info,
6323         .ioctl = ipr_ioctl,
6324         .queuecommand = ipr_queuecommand,
6325         .eh_abort_handler = ipr_eh_abort,
6326         .eh_device_reset_handler = ipr_eh_dev_reset,
6327         .eh_host_reset_handler = ipr_eh_host_reset,
6328         .slave_alloc = ipr_slave_alloc,
6329         .slave_configure = ipr_slave_configure,
6330         .slave_destroy = ipr_slave_destroy,
6331         .target_alloc = ipr_target_alloc,
6332         .target_destroy = ipr_target_destroy,
6333         .change_queue_depth = ipr_change_queue_depth,
6334         .change_queue_type = ipr_change_queue_type,
6335         .bios_param = ipr_biosparam,
6336         .can_queue = IPR_MAX_COMMANDS,
6337         .this_id = -1,
6338         .sg_tablesize = IPR_MAX_SGLIST,
6339         .max_sectors = IPR_IOA_MAX_SECTORS,
6340         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6341         .use_clustering = ENABLE_CLUSTERING,
6342         .shost_attrs = ipr_ioa_attrs,
6343         .sdev_attrs = ipr_dev_attrs,
6344         .proc_name = IPR_NAME,
6345         .no_write_same = 1,
6346 };
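
/*
 * A hedged sketch of how a template like this is bound to an adapter
 * in a PCI probe path (the driver's real logic lives in ipr_probe_ioa()
 * and ipr_probe(), later in this file):
 *
 *      struct Scsi_Host *host;
 *
 *      host = scsi_host_alloc(&driver_template,
 *                             sizeof(struct ipr_ioa_cfg));
 *      if (!host)
 *              return -ENOMEM;
 *      ...
 *      rc = scsi_add_host(host, &pdev->dev);
 *
 * Midlayer callbacks such as .queuecommand and the EH handlers above
 * are then invoked against that host for every attached device.
 */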
6347
6348 /**
6349  * ipr_ata_phy_reset - libata phy_reset handler
6350  * @ap:         ata port to reset
6351  *
6352  **/
6353 static void ipr_ata_phy_reset(struct ata_port *ap)
6354 {
6355         unsigned long flags;
6356         struct ipr_sata_port *sata_port = ap->private_data;
6357         struct ipr_resource_entry *res = sata_port->res;
6358         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6359         int rc;
6360
6361         ENTER;
6362         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6363         while (ioa_cfg->in_reset_reload) {
6364                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6365                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6366                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6367         }
6368
6369         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6370                 goto out_unlock;
6371
6372         rc = ipr_device_reset(ioa_cfg, res);
6373
6374         if (rc) {
6375                 ap->link.device[0].class = ATA_DEV_NONE;
6376                 goto out_unlock;
6377         }
6378
6379         ap->link.device[0].class = res->ata_class;
6380         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6381                 ap->link.device[0].class = ATA_DEV_NONE;
6382
6383 out_unlock:
6384         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6385         LEAVE;
6386 }
6387
6388 /**
6389  * ipr_ata_post_internal - Cleanup after an internal command
6390  * @qc: ATA queued command
6391  *
6392  * Return value:
6393  *      none
6394  **/
6395 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6396 {
6397         struct ipr_sata_port *sata_port = qc->ap->private_data;
6398         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6399         struct ipr_cmnd *ipr_cmd;
6400         struct ipr_hrr_queue *hrrq;
6401         unsigned long flags;
6402
6403         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6404         while (ioa_cfg->in_reset_reload) {
6405                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6406                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6407                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6408         }
6409
6410         for_each_hrrq(hrrq, ioa_cfg) {
6411                 spin_lock(&hrrq->_lock);
6412                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6413                         if (ipr_cmd->qc == qc) {
6414                                 ipr_device_reset(ioa_cfg, sata_port->res);
6415                                 break;
6416                         }
6417                 }
6418                 spin_unlock(&hrrq->_lock);
6419         }
6420         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6421 }
6422
6423 /**
6424  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6425  * @regs:       destination
6426  * @tf: source ATA taskfile
6427  *
6428  * Return value:
6429  *      none
6430  **/
6431 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6432                              struct ata_taskfile *tf)
6433 {
6434         regs->feature = tf->feature;
6435         regs->nsect = tf->nsect;
6436         regs->lbal = tf->lbal;
6437         regs->lbam = tf->lbam;
6438         regs->lbah = tf->lbah;
6439         regs->device = tf->device;
6440         regs->command = tf->command;
6441         regs->hob_feature = tf->hob_feature;
6442         regs->hob_nsect = tf->hob_nsect;
6443         regs->hob_lbal = tf->hob_lbal;
6444         regs->hob_lbam = tf->hob_lbam;
6445         regs->hob_lbah = tf->hob_lbah;
6446         regs->ctl = tf->ctl;
6447 }
6448
6449 /**
6450  * ipr_sata_done - done function for SATA commands
6451  * @ipr_cmd:    ipr command struct
6452  *
6453  * This function is invoked by the interrupt handler for
6454  * ops generated by the SCSI mid-layer to SATA devices
6455  *
6456  * Return value:
6457  *      none
6458  **/
6459 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6460 {
6461         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6462         struct ata_queued_cmd *qc = ipr_cmd->qc;
6463         struct ipr_sata_port *sata_port = qc->ap->private_data;
6464         struct ipr_resource_entry *res = sata_port->res;
6465         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6466
6467         spin_lock(&ipr_cmd->hrrq->_lock);
6468         if (ipr_cmd->ioa_cfg->sis64)
6469                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6470                        sizeof(struct ipr_ioasa_gata));
6471         else
6472                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6473                        sizeof(struct ipr_ioasa_gata));
6474         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6475
6476         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6477                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6478
6479         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6480                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6481         else
6482                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6483         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6484         spin_unlock(&ipr_cmd->hrrq->_lock);
6485         ata_qc_complete(qc);
6486 }
6487
6488 /**
6489  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6490  * @ipr_cmd:    ipr command struct
6491  * @qc:         ATA queued command
6492  *
6493  **/
6494 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6495                                   struct ata_queued_cmd *qc)
6496 {
6497         u32 ioadl_flags = 0;
6498         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6499         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6500         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6501         int len = qc->nbytes;
6502         struct scatterlist *sg;
6503         unsigned int si;
6504         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6505
6506         if (len == 0)
6507                 return;
6508
6509         if (qc->dma_dir == DMA_TO_DEVICE) {
6510                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6511                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6512         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6513                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6514
6515         ioarcb->data_transfer_length = cpu_to_be32(len);
6516         ioarcb->ioadl_len =
6517                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6518         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6519                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6520
6521         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6522                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6523                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6524                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6525
6526                 last_ioadl64 = ioadl64;
6527                 ioadl64++;
6528         }
6529
6530         if (likely(last_ioadl64))
6531                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6532 }
6533
6534 /**
6535  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6536  * @ipr_cmd:    ipr command struct
6537  * @qc:         ATA queued command
6538  *
6539  **/
6540 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6541                                 struct ata_queued_cmd *qc)
6542 {
6543         u32 ioadl_flags = 0;
6544         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6545         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6546         struct ipr_ioadl_desc *last_ioadl = NULL;
6547         int len = qc->nbytes;
6548         struct scatterlist *sg;
6549         unsigned int si;
6550
6551         if (len == 0)
6552                 return;
6553
6554         if (qc->dma_dir == DMA_TO_DEVICE) {
6555                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6556                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6557                 ioarcb->data_transfer_length = cpu_to_be32(len);
6558                 ioarcb->ioadl_len =
6559                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6560         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6561                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6562                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6563                 ioarcb->read_ioadl_len =
6564                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6565         }
6566
6567         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6568                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6569                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6570
6571                 last_ioadl = ioadl;
6572                 ioadl++;
6573         }
6574
6575         if (likely(last_ioadl))
6576                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6577 }
6578
6579 /**
6580  * ipr_qc_defer - Reserve a command block for an ATA queued command
6581  * @qc: queued command
6582  *
6583  * Return value:
6584  *      0 on success / ATA_DEFER_LINK if the command must be deferred
6585  **/
6586 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6587 {
6588         struct ata_port *ap = qc->ap;
6589         struct ipr_sata_port *sata_port = ap->private_data;
6590         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6591         struct ipr_cmnd *ipr_cmd;
6592         struct ipr_hrr_queue *hrrq;
6593         int hrrq_id;
6594
6595         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6596         hrrq = &ioa_cfg->hrrq[hrrq_id];
6597
6598         qc->lldd_task = NULL;
6599         spin_lock(&hrrq->_lock);
6600         if (unlikely(hrrq->ioa_is_dead)) {
6601                 spin_unlock(&hrrq->_lock);
6602                 return 0;
6603         }
6604
6605         if (unlikely(!hrrq->allow_cmds)) {
6606                 spin_unlock(&hrrq->_lock);
6607                 return ATA_DEFER_LINK;
6608         }
6609
6610         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6611         if (ipr_cmd == NULL) {
6612                 spin_unlock(&hrrq->_lock);
6613                 return ATA_DEFER_LINK;
6614         }
6615
6616         qc->lldd_task = ipr_cmd;
6617         spin_unlock(&hrrq->_lock);
6618         return 0;
6619 }
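
/*
 * libata invokes ->qc_defer before ->qc_issue, so the command block
 * reserved here rides along in qc->lldd_task and is claimed again in
 * ipr_qc_issue() below.  Returning ATA_DEFER_LINK asks libata to retry
 * the whole link later instead of failing the command, while a dead
 * IOA returns 0 and lets the issue path surface the error.
 */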
6620
6621 /**
6622  * ipr_qc_issue - Issue a SATA qc to a device
6623  * @qc: queued command
6624  *
6625  * Return value:
6626  *      0 on success / AC_ERR_* on failure
6627  **/
6628 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6629 {
6630         struct ata_port *ap = qc->ap;
6631         struct ipr_sata_port *sata_port = ap->private_data;
6632         struct ipr_resource_entry *res = sata_port->res;
6633         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6634         struct ipr_cmnd *ipr_cmd;
6635         struct ipr_ioarcb *ioarcb;
6636         struct ipr_ioarcb_ata_regs *regs;
6637
6638         if (qc->lldd_task == NULL)
6639                 ipr_qc_defer(qc);
6640
6641         ipr_cmd = qc->lldd_task;
6642         if (ipr_cmd == NULL)
6643                 return AC_ERR_SYSTEM;
6644
6645         qc->lldd_task = NULL;
6646         spin_lock(&ipr_cmd->hrrq->_lock);
6647         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6648                         ipr_cmd->hrrq->ioa_is_dead)) {
6649                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6650                 spin_unlock(&ipr_cmd->hrrq->_lock);
6651                 return AC_ERR_SYSTEM;
6652         }
6653
6654         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6655         ioarcb = &ipr_cmd->ioarcb;
6656
6657         if (ioa_cfg->sis64) {
6658                 regs = &ipr_cmd->i.ata_ioadl.regs;
6659                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6660         } else
6661                 regs = &ioarcb->u.add_data.u.regs;
6662
6663         memset(regs, 0, sizeof(*regs));
6664         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6665
6666         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6667         ipr_cmd->qc = qc;
6668         ipr_cmd->done = ipr_sata_done;
6669         ipr_cmd->ioarcb.res_handle = res->res_handle;
6670         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6671         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6672         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6673         ipr_cmd->dma_use_sg = qc->n_elem;
6674
6675         if (ioa_cfg->sis64)
6676                 ipr_build_ata_ioadl64(ipr_cmd, qc);
6677         else
6678                 ipr_build_ata_ioadl(ipr_cmd, qc);
6679
6680         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6681         ipr_copy_sata_tf(regs, &qc->tf);
6682         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6683         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6684
6685         switch (qc->tf.protocol) {
6686         case ATA_PROT_NODATA:
6687         case ATA_PROT_PIO:
6688                 break;
6689
6690         case ATA_PROT_DMA:
6691                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6692                 break;
6693
6694         case ATAPI_PROT_PIO:
6695         case ATAPI_PROT_NODATA:
6696                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6697                 break;
6698
6699         case ATAPI_PROT_DMA:
6700                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6701                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6702                 break;
6703
6704         default:
6705                 WARN_ON(1);
6706                 spin_unlock(&ipr_cmd->hrrq->_lock);
6707                 return AC_ERR_INVALID;
6708         }
6709
6710         ipr_send_command(ipr_cmd);
6711         spin_unlock(&ipr_cmd->hrrq->_lock);
6712
6713         return 0;
6714 }
6715
6716 /**
6717  * ipr_qc_fill_rtf - Read result TF
6718  * @qc: ATA queued command
6719  *
6720  * Return value:
6721  *      true
6722  **/
6723 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6724 {
6725         struct ipr_sata_port *sata_port = qc->ap->private_data;
6726         struct ipr_ioasa_gata *g = &sata_port->ioasa;
6727         struct ata_taskfile *tf = &qc->result_tf;
6728
6729         tf->feature = g->error;
6730         tf->nsect = g->nsect;
6731         tf->lbal = g->lbal;
6732         tf->lbam = g->lbam;
6733         tf->lbah = g->lbah;
6734         tf->device = g->device;
6735         tf->command = g->status;
6736         tf->hob_nsect = g->hob_nsect;
6737         tf->hob_lbal = g->hob_lbal;
6738         tf->hob_lbam = g->hob_lbam;
6739         tf->hob_lbah = g->hob_lbah;
6740
6741         return true;
6742 }
6743
6744 static struct ata_port_operations ipr_sata_ops = {
6745         .phy_reset = ipr_ata_phy_reset,
6746         .hardreset = ipr_sata_reset,
6747         .post_internal_cmd = ipr_ata_post_internal,
6748         .qc_prep = ata_noop_qc_prep,
6749         .qc_defer = ipr_qc_defer,
6750         .qc_issue = ipr_qc_issue,
6751         .qc_fill_rtf = ipr_qc_fill_rtf,
6752         .port_start = ata_sas_port_start,
6753         .port_stop = ata_sas_port_stop
6754 };
6755
6756 static struct ata_port_info sata_port_info = {
6757         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6758         .pio_mask       = ATA_PIO4_ONLY,
6759         .mwdma_mask     = ATA_MWDMA2,
6760         .udma_mask      = ATA_UDMA6,
6761         .port_ops       = &ipr_sata_ops
6762 };
6763
6764 #ifdef CONFIG_PPC_PSERIES
6765 static const u16 ipr_blocked_processors[] = {
6766         PVR_NORTHSTAR,
6767         PVR_PULSAR,
6768         PVR_POWER4,
6769         PVR_ICESTAR,
6770         PVR_SSTAR,
6771         PVR_POWER4p,
6772         PVR_630,
6773         PVR_630p
6774 };
6775
6776 /**
6777  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6778  * @ioa_cfg:    ioa cfg struct
6779  *
6780  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6781  * certain pSeries hardware. This function determines whether the
6782  * given adapter is in one of these configurations.
6783  *
6784  * Return value:
6785  *      1 if adapter is not supported / 0 if adapter is supported
6786  **/
6787 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6788 {
6789         int i;
6790
6791         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6792                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6793                         if (pvr_version_is(ipr_blocked_processors[i]))
6794                                 return 1;
6795                 }
6796         }
6797         return 0;
6798 }
6799 #else
6800 #define ipr_invalid_adapter(ioa_cfg) 0
6801 #endif
6802
6803 /**
6804  * ipr_ioa_bringdown_done - IOA bring down completion.
6805  * @ipr_cmd:    ipr command struct
6806  *
6807  * This function processes the completion of an adapter bring down.
6808  * It wakes any reset sleepers.
6809  *
6810  * Return value:
6811  *      IPR_RC_JOB_RETURN
6812  **/
6813 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6814 {
6815         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6816         int i;
6817
6818         ENTER;
6819         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6820                 ipr_trace;
6821                 spin_unlock_irq(ioa_cfg->host->host_lock);
6822                 scsi_unblock_requests(ioa_cfg->host);
6823                 spin_lock_irq(ioa_cfg->host->host_lock);
6824         }
6825
6826         ioa_cfg->in_reset_reload = 0;
6827         ioa_cfg->reset_retries = 0;
6828         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6829                 spin_lock(&ioa_cfg->hrrq[i]._lock);
6830                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6831                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6832         }
6833         wmb();
6834
6835         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6836         wake_up_all(&ioa_cfg->reset_wait_q);
6837         LEAVE;
6838
6839         return IPR_RC_JOB_RETURN;
6840 }
6841
6842 /**
6843  * ipr_ioa_reset_done - IOA reset completion.
6844  * @ipr_cmd:    ipr command struct
6845  *
6846  * This function processes the completion of an adapter reset.
6847  * It schedules any necessary mid-layer add/removes and
6848  * wakes any reset sleepers.
6849  *
6850  * Return value:
6851  *      IPR_RC_JOB_RETURN
6852  **/
6853 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6854 {
6855         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6856         struct ipr_resource_entry *res;
6857         struct ipr_hostrcb *hostrcb, *temp;
6858         int i = 0, j;
6859
6860         ENTER;
6861         ioa_cfg->in_reset_reload = 0;
6862         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6863                 spin_lock(&ioa_cfg->hrrq[j]._lock);
6864                 ioa_cfg->hrrq[j].allow_cmds = 1;
6865                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6866         }
6867         wmb();
6868         ioa_cfg->reset_cmd = NULL;
6869         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6870
6871         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6872                 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6873                         ipr_trace;
6874                         break;
6875                 }
6876         }
6877         schedule_work(&ioa_cfg->work_q);
6878
6879         list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6880                 list_del(&hostrcb->queue);
6881                 if (i++ < IPR_NUM_LOG_HCAMS)
6882                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6883                 else
6884                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6885         }
6886
6887         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6888         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6889
6890         ioa_cfg->reset_retries = 0;
6891         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6892         wake_up_all(&ioa_cfg->reset_wait_q);
6893
6894         spin_unlock(ioa_cfg->host->host_lock);
6895         scsi_unblock_requests(ioa_cfg->host);
6896         spin_lock(ioa_cfg->host->host_lock);
6897
6898         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6899                 scsi_block_requests(ioa_cfg->host);
6900
6901         LEAVE;
6902         return IPR_RC_JOB_RETURN;
6903 }
6904
6905 /**
6906  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6907  * @supported_dev:      supported device struct
6908  * @vpids:                      vendor product id struct
6909  *
6910  * Return value:
6911  *      none
6912  **/
6913 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6914                                  struct ipr_std_inq_vpids *vpids)
6915 {
6916         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6917         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6918         supported_dev->num_records = 1;
6919         supported_dev->data_length =
6920                 cpu_to_be16(sizeof(struct ipr_supported_device));
6921         supported_dev->reserved = 0;
6922 }
6923
6924 /**
6925  * ipr_set_supported_devs - Send Set Supported Devices for a device
6926  * @ipr_cmd:    ipr command struct
6927  *
6928  * This function sends a Set Supported Devices to the adapter
6929  *
6930  * Return value:
6931  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6932  **/
6933 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6934 {
6935         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6936         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6937         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6938         struct ipr_resource_entry *res = ipr_cmd->u.res;
6939
6940         ipr_cmd->job_step = ipr_ioa_reset_done;
6941
6942         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6943                 if (!ipr_is_scsi_disk(res))
6944                         continue;
6945
6946                 ipr_cmd->u.res = res;
6947                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6948
6949                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6950                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6951                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6952
6953                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6954                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6955                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6956                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6957
6958                 ipr_init_ioadl(ipr_cmd,
6959                                ioa_cfg->vpd_cbs_dma +
6960                                  offsetof(struct ipr_misc_cbs, supp_dev),
6961                                sizeof(struct ipr_supported_device),
6962                                IPR_IOADL_FLAGS_WRITE_LAST);
6963
6964                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6965                            IPR_SET_SUP_DEVICE_TIMEOUT);
6966
6967                 if (!ioa_cfg->sis64)
6968                         ipr_cmd->job_step = ipr_set_supported_devs;
6969                 LEAVE;
6970                 return IPR_RC_JOB_RETURN;
6971         }
6972
6973         LEAVE;
6974         return IPR_RC_JOB_CONTINUE;
6975 }
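
/*
 * This function shows the reset job engine's idiom in miniature: a job
 * step either returns IPR_RC_JOB_CONTINUE, in which case the engine
 * runs the next job_step synchronously, or fires an asynchronous
 * request via ipr_do_req() and returns IPR_RC_JOB_RETURN after pointing
 * ipr_cmd->job_step at the routine to resume in.  Re-pointing job_step
 * at ipr_set_supported_devs itself (the !sis64 leg above) is how a
 * single step iterates over every disk on used_res_q.
 */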
6976
6977 /**
6978  * ipr_get_mode_page - Locate specified mode page
6979  * @mode_pages: mode page buffer
6980  * @page_code:  page code to find
6981  * @len:                minimum required length for mode page
6982  *
6983  * Return value:
6984  *      pointer to mode page / NULL on failure
6985  **/
6986 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6987                                u32 page_code, u32 len)
6988 {
6989         struct ipr_mode_page_hdr *mode_hdr;
6990         u32 page_length;
6991         u32 length;
6992
6993         if (!mode_pages || (mode_pages->hdr.length == 0))
6994                 return NULL;
6995
6996         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6997         mode_hdr = (struct ipr_mode_page_hdr *)
6998                 (mode_pages->data + mode_pages->hdr.block_desc_len);
6999
7000         while (length) {
7001                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7002                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7003                                 return mode_hdr;
7004                         break;
7005                 } else {
7006                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7007                                        mode_hdr->page_length);
7008                         length -= page_length;
7009                         mode_hdr = (struct ipr_mode_page_hdr *)
7010                                 ((unsigned long)mode_hdr + page_length);
7011                 }
7012         }
7013         return NULL;
7014 }
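/*
 * Illustrative sketch (compiled out, hypothetical helper name): how a
 * caller typically uses ipr_get_mode_page() -- request at least a full
 * page 28 structure and treat NULL as "page absent or too short".
 */
#if 0
static void ipr_example_find_page28(struct ipr_mode_pages *mode_pages)
{
        struct ipr_mode_page28 *page;

        page = ipr_get_mode_page(mode_pages, 0x28,
                                 sizeof(struct ipr_mode_page28));
        if (!page)
                return;

        /* page->entry_length and page->num_entries are now safe to use */
}
#endif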
7015
7016 /**
7017  * ipr_check_term_power - Check for term power errors
7018  * @ioa_cfg:    ioa config struct
7019  * @mode_pages: IOAFP mode pages buffer
7020  *
7021  * Check the IOAFP's mode page 28 for term power errors
7022  *
7023  * Return value:
7024  *      nothing
7025  **/
7026 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7027                                  struct ipr_mode_pages *mode_pages)
7028 {
7029         int i;
7030         int entry_length;
7031         struct ipr_dev_bus_entry *bus;
7032         struct ipr_mode_page28 *mode_page;
7033
7034         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7035                                       sizeof(struct ipr_mode_page28));
7036
        /* Guard against adapters that do not return mode page 28 */
        if (!mode_page)
                return;

7037         entry_length = mode_page->entry_length;
7038
7039         bus = mode_page->bus;
7040
7041         for (i = 0; i < mode_page->num_entries; i++) {
7042                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7043                         dev_err(&ioa_cfg->pdev->dev,
7044                                 "Term power is absent on scsi bus %d\n",
7045                                 bus->res_addr.bus);
7046                 }
7047
7048                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7049         }
7050 }
7051
7052 /**
7053  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7054  * @ioa_cfg:    ioa config struct
7055  *
7056  * Looks through the config table for SES devices. If an SES
7057  * device is found in the SES table with a maximum SCSI bus
7058  * speed, the bus speed is limited accordingly.
7059  *
7060  * Return value:
7061  *      none
7062  **/
7063 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7064 {
7065         u32 max_xfer_rate;
7066         int i;
7067
7068         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7069                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7070                                                        ioa_cfg->bus_attr[i].bus_width);
7071
7072                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7073                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7074         }
7075 }
7076
7077 /**
7078  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7079  * @ioa_cfg:    ioa config struct
7080  * @mode_pages: mode page 28 buffer
7081  *
7082  * Updates mode page 28 based on driver configuration
7083  *
7084  * Return value:
7085  *      none
7086  **/
7087 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7088                                           struct ipr_mode_pages *mode_pages)
7089 {
7090         int i, entry_length;
7091         struct ipr_dev_bus_entry *bus;
7092         struct ipr_bus_attributes *bus_attr;
7093         struct ipr_mode_page28 *mode_page;
7094
7095         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7096                                       sizeof(struct ipr_mode_page28));
7097
        /* Guard against adapters that do not return mode page 28 */
        if (!mode_page)
                return;

7098         entry_length = mode_page->entry_length;
7099
7100         /* Loop for each device bus entry */
7101         for (i = 0, bus = mode_page->bus;
7102              i < mode_page->num_entries;
7103              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7104                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7105                         dev_err(&ioa_cfg->pdev->dev,
7106                                 "Invalid resource address reported: 0x%08X\n",
7107                                 IPR_GET_PHYS_LOC(bus->res_addr));
7108                         continue;
7109                 }
7110
7111                 bus_attr = &ioa_cfg->bus_attr[i];
7112                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7113                 bus->bus_width = bus_attr->bus_width;
7114                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7115                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7116                 if (bus_attr->qas_enabled)
7117                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7118                 else
7119                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7120         }
7121 }
7122
7123 /**
7124  * ipr_build_mode_select - Build a mode select command
7125  * @ipr_cmd:    ipr command struct
7126  * @res_handle: resource handle to send command to
7127  * @parm:               Byte 1 of Mode Select command
7128  * @dma_addr:   DMA buffer address
7129  * @xfer_len:   data transfer length
7130  *
7131  * Return value:
7132  *      none
7133  **/
7134 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7135                                   __be32 res_handle, u8 parm,
7136                                   dma_addr_t dma_addr, u8 xfer_len)
7137 {
7138         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7139
7140         ioarcb->res_handle = res_handle;
7141         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7142         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7143         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7144         ioarcb->cmd_pkt.cdb[1] = parm;
7145         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7146
7147         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7148 }
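/*
 * Illustrative sketch (compiled out, hypothetical helper name): the
 * 6-byte MODE SELECT CDB built above, written out as a plain array.
 * Only bytes 0, 1 and 4 are set; the driver passes parm = 0x11,
 * i.e. the PF and SP bits of MODE SELECT(6).
 */
#if 0
static void ipr_example_mode_select_cdb(u8 parm, u8 xfer_len)
{
        u8 cdb[6] = {
                [0] = MODE_SELECT,      /* opcode 0x15 */
                [1] = parm,             /* e.g. 0x11 = PF | SP */
                [4] = xfer_len,         /* parameter list length */
        };

        (void)cdb;
}
#endif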
7149
7150 /**
7151  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7152  * @ipr_cmd:    ipr command struct
7153  *
7154  * This function sets up the SCSI bus attributes and sends
7155  * a Mode Select for Page 28 to activate them.
7156  *
7157  * Return value:
7158  *      IPR_RC_JOB_RETURN
7159  **/
7160 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7161 {
7162         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7163         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7164         int length;
7165
7166         ENTER;
7167         ipr_scsi_bus_speed_limit(ioa_cfg);
7168         ipr_check_term_power(ioa_cfg, mode_pages);
7169         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7170         length = mode_pages->hdr.length + 1;
7171         mode_pages->hdr.length = 0;
7172
7173         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7174                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7175                               length);
7176
7177         ipr_cmd->job_step = ipr_set_supported_devs;
7178         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7179                                     struct ipr_resource_entry, queue);
7180         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7181
7182         LEAVE;
7183         return IPR_RC_JOB_RETURN;
7184 }
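/*
 * Illustrative note (compiled out, hypothetical helper name) on the
 * header arithmetic above: the mode parameter header's length byte
 * excludes itself, hence the "+ 1" to get the full transfer size,
 * and the mode data length field is reserved in MODE SELECT
 * parameter data, hence clearing hdr.length before sending the
 * buffer back to the adapter.
 */
#if 0
static int ipr_example_select_len(struct ipr_mode_pages *mode_pages)
{
        int length = mode_pages->hdr.length + 1; /* total bytes to send */

        mode_pages->hdr.length = 0;     /* reserved in MODE SELECT data */
        return length;
}
#endif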
7185
7186 /**
7187  * ipr_build_mode_sense - Builds a mode sense command
7188  * @ipr_cmd:    ipr command struct
7189  * @res_handle:  resource handle to send command to
7190  * @parm:               Byte 2 of mode sense command
7191  * @dma_addr:   DMA address of mode sense buffer
7192  * @xfer_len:   Size of DMA buffer
7193  *
7194  * Return value:
7195  *      none
7196  **/
7197 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7198                                  __be32 res_handle,
7199                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7200 {
7201         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7202
7203         ioarcb->res_handle = res_handle;
7204         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7205         ioarcb->cmd_pkt.cdb[2] = parm;
7206         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7207         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7208
7209         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7210 }
7211
7212 /**
7213  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7214  * @ipr_cmd:    ipr command struct
7215  *
7216  * This function handles the failure of an IOA bringup command.
7217  *
7218  * Return value:
7219  *      IPR_RC_JOB_RETURN
7220  **/
7221 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7222 {
7223         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7224         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7225
7226         dev_err(&ioa_cfg->pdev->dev,
7227                 "0x%02X failed with IOASC: 0x%08X\n",
7228                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7229
7230         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7231         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7232         return IPR_RC_JOB_RETURN;
7233 }
7234
7235 /**
7236  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7237  * @ipr_cmd:    ipr command struct
7238  *
7239  * This function handles the failure of a Mode Sense to the IOAFP.
7240  * Some adapters do not handle all mode pages.
7241  *
7242  * Return value:
7243  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7244  **/
7245 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7246 {
7247         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7248         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7249
7250         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7251                 ipr_cmd->job_step = ipr_set_supported_devs;
7252                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7253                                             struct ipr_resource_entry, queue);
7254                 return IPR_RC_JOB_CONTINUE;
7255         }
7256
7257         return ipr_reset_cmd_failed(ipr_cmd);
7258 }
7259
7260 /**
7261  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7262  * @ipr_cmd:    ipr command struct
7263  *
7264  * This function sends a Page 28 mode sense to the IOA to
7265  * retrieve SCSI bus attributes.
7266  *
7267  * Return value:
7268  *      IPR_RC_JOB_RETURN
7269  **/
7270 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7271 {
7272         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7273
7274         ENTER;
7275         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7276                              0x28, ioa_cfg->vpd_cbs_dma +
7277                              offsetof(struct ipr_misc_cbs, mode_pages),
7278                              sizeof(struct ipr_mode_pages));
7279
7280         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7281         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7282
7283         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7284
7285         LEAVE;
7286         return IPR_RC_JOB_RETURN;
7287 }
7288
7289 /**
7290  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7291  * @ipr_cmd:    ipr command struct
7292  *
7293  * This function enables dual IOA RAID support if possible.
7294  *
7295  * Return value:
7296  *      IPR_RC_JOB_RETURN
7297  **/
7298 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7299 {
7300         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7301         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7302         struct ipr_mode_page24 *mode_page;
7303         int length;
7304
7305         ENTER;
7306         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7307                                       sizeof(struct ipr_mode_page24));
7308
7309         if (mode_page)
7310                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7311
7312         length = mode_pages->hdr.length + 1;
7313         mode_pages->hdr.length = 0;
7314
7315         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7316                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7317                               length);
7318
7319         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7320         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7321
7322         LEAVE;
7323         return IPR_RC_JOB_RETURN;
7324 }
7325
7326 /**
7327  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7328  * @ipr_cmd:    ipr command struct
7329  *
7330  * This function handles the failure of a Mode Sense to the IOAFP.
7331  * Some adapters do not handle all mode pages.
7332  *
7333  * Return value:
7334  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7335  **/
7336 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7337 {
7338         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7339
7340         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7341                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7342                 return IPR_RC_JOB_CONTINUE;
7343         }
7344
7345         return ipr_reset_cmd_failed(ipr_cmd);
7346 }
7347
7348 /**
7349  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7350  * @ipr_cmd:    ipr command struct
7351  *
7352  * This function sends a mode sense to the IOA to retrieve
7353  * the IOA Advanced Function Control mode page.
7354  *
7355  * Return value:
7356  *      IPR_RC_JOB_RETURN
7357  **/
7358 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7359 {
7360         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7361
7362         ENTER;
7363         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7364                              0x24, ioa_cfg->vpd_cbs_dma +
7365                              offsetof(struct ipr_misc_cbs, mode_pages),
7366                              sizeof(struct ipr_mode_pages));
7367
7368         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7369         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7370
7371         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7372
7373         LEAVE;
7374         return IPR_RC_JOB_RETURN;
7375 }
7376
7377 /**
7378  * ipr_init_res_table - Initialize the resource table
7379  * @ipr_cmd:    ipr command struct
7380  *
7381  * This function compares the existing resource table with the
7382  * config table returned by the adapter, then schedules adding
7383  * new devices to, and removing stale devices from, the mid-layer
7384  * as appropriate.
7385  *
7386  * Return value:
7387  *      IPR_RC_JOB_CONTINUE
7388  **/
7389 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7390 {
7391         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7392         struct ipr_resource_entry *res, *temp;
7393         struct ipr_config_table_entry_wrapper cfgtew;
7394         int entries, found, flag, i;
7395         LIST_HEAD(old_res);
7396
7397         ENTER;
7398         if (ioa_cfg->sis64)
7399                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7400         else
7401                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7402
7403         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7404                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7405
7406         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7407                 list_move_tail(&res->queue, &old_res);
7408
7409         if (ioa_cfg->sis64)
7410                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7411         else
7412                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7413
7414         for (i = 0; i < entries; i++) {
7415                 if (ioa_cfg->sis64)
7416                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7417                 else
7418                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7419                 found = 0;
7420
7421                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7422                         if (ipr_is_same_device(res, &cfgtew)) {
7423                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7424                                 found = 1;
7425                                 break;
7426                         }
7427                 }
7428
7429                 if (!found) {
7430                         if (list_empty(&ioa_cfg->free_res_q)) {
7431                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7432                                 break;
7433                         }
7434
7435                         found = 1;
7436                         res = list_entry(ioa_cfg->free_res_q.next,
7437                                          struct ipr_resource_entry, queue);
7438                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7439                         ipr_init_res_entry(res, &cfgtew);
7440                         res->add_to_ml = 1;
7441                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7442                         res->sdev->allow_restart = 1;
7443
7444                 if (found)
7445                         ipr_update_res_entry(res, &cfgtew);
7446         }
7447
7448         list_for_each_entry_safe(res, temp, &old_res, queue) {
7449                 if (res->sdev) {
7450                         res->del_from_ml = 1;
7451                         res->res_handle = IPR_INVALID_RES_HANDLE;
7452                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7453                 }
7454         }
7455
7456         list_for_each_entry_safe(res, temp, &old_res, queue) {
7457                 ipr_clear_res_target(res);
7458                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7459         }
7460
7461         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7462                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7463         else
7464                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7465
7466         LEAVE;
7467         return IPR_RC_JOB_CONTINUE;
7468 }
7469
7470 /**
7471  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7472  * @ipr_cmd:    ipr command struct
7473  *
7474  * This function sends a Query IOA Configuration command
7475  * to the adapter to retrieve the IOA configuration table.
7476  *
7477  * Return value:
7478  *      IPR_RC_JOB_RETURN
7479  **/
7480 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7481 {
7482         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7483         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7484         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7485         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7486
7487         ENTER;
7488         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7489                 ioa_cfg->dual_raid = 1;
7490         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7491                  ucode_vpd->major_release, ucode_vpd->card_type,
7492                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7493         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7494         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7495
7496         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7497         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7498         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7499         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7500
7501         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7502                        IPR_IOADL_FLAGS_READ_LAST);
7503
7504         ipr_cmd->job_step = ipr_init_res_table;
7505
7506         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7507
7508         LEAVE;
7509         return IPR_RC_JOB_RETURN;
7510 }
7511
7512 /**
7513  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7514  * @ipr_cmd:    ipr command struct
 * @flags:      inquiry flags (CDB byte 1, e.g. the EVPD bit)
 * @page:       page code to request
 * @dma_addr:   DMA address of the inquiry data buffer
 * @xfer_len:   data transfer length
7515  *
7516  * This utility function sends an inquiry to the adapter.
7517  *
7518  * Return value:
7519  *      none
7520  **/
7521 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7522                               dma_addr_t dma_addr, u8 xfer_len)
7523 {
7524         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7525
7526         ENTER;
7527         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7528         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7529
7530         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7531         ioarcb->cmd_pkt.cdb[1] = flags;
7532         ioarcb->cmd_pkt.cdb[2] = page;
7533         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7534
7535         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7536
7537         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7538         LEAVE;
7539 }
7540
7541 /**
7542  * ipr_inquiry_page_supported - Is the given inquiry page supported
7543  * @page0:              inquiry page 0 buffer
7544  * @page:               page code.
7545  *
7546  * This function determines if the specified inquiry page is supported.
7547  *
7548  * Return value:
7549  *      1 if page is supported / 0 if not
7550  **/
7551 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7552 {
7553         int i;
7554
7555         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7556                 if (page0->page[i] == page)
7557                         return 1;
7558
7559         return 0;
7560 }
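/*
 * Illustrative sketch (compiled out, hypothetical helper name): the
 * scan above clamps the device-reported page count in page0->len to
 * IPR_INQUIRY_PAGE0_ENTRIES so a malformed inquiry response cannot
 * drive the loop past the fixed-size page[] array.
 */
#if 0
static int ipr_example_count_pages(struct ipr_inquiry_page0 *page0)
{
        /* never trust a device-reported length beyond the buffer */
        return min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES);
}
#endif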
7561
7562 /**
7563  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7564  * @ipr_cmd:    ipr command struct
7565  *
7566  * This function sends a Page 0xD0 inquiry to the adapter
7567  * to retrieve adapter capabilities.
7568  *
7569  * Return value:
7570  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7571  **/
7572 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7573 {
7574         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7575         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7576         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7577
7578         ENTER;
7579         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7580         memset(cap, 0, sizeof(*cap));
7581
7582         if (ipr_inquiry_page_supported(page0, 0xD0)) {
7583                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7584                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7585                                   sizeof(struct ipr_inquiry_cap));
7586                 return IPR_RC_JOB_RETURN;
7587         }
7588
7589         LEAVE;
7590         return IPR_RC_JOB_CONTINUE;
7591 }
7592
7593 /**
7594  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7595  * @ipr_cmd:    ipr command struct
7596  *
7597  * This function sends a Page 3 inquiry to the adapter
7598  * to retrieve software VPD information.
7599  *
7600  * Return value:
7601  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7602  **/
7603 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7604 {
7605         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7606
7607         ENTER;
7608
7609         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7610
7611         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7612                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7613                           sizeof(struct ipr_inquiry_page3));
7614
7615         LEAVE;
7616         return IPR_RC_JOB_RETURN;
7617 }
7618
7619 /**
7620  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7621  * @ipr_cmd:    ipr command struct
7622  *
7623  * This function sends a Page 0 inquiry to the adapter
7624  * to retrieve supported inquiry pages.
7625  *
7626  * Return value:
7627  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7628  **/
7629 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7630 {
7631         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7632         char type[5];
7633
7634         ENTER;
7635
7636         /* Grab the type out of the VPD and store it away */
7637         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7638         type[4] = '\0';
7639         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7640
7641         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7642
7643         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7644                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7645                           sizeof(struct ipr_inquiry_page0));
7646
7647         LEAVE;
7648         return IPR_RC_JOB_RETURN;
7649 }
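/*
 * Illustrative sketch (compiled out, hypothetical helper name): the
 * adapter type parsing above treats the first four characters of the
 * product ID as hex digits, so an adapter reporting "5702" yields
 * ioa_cfg->type == 0x5702.
 */
#if 0
static unsigned long ipr_example_parse_type(const u8 *product_id)
{
        char type[5];

        memcpy(type, product_id, 4);
        type[4] = '\0';
        return simple_strtoul(type, NULL, 16);
}
#endif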
7650
7651 /**
7652  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7653  * @ipr_cmd:    ipr command struct
7654  *
7655  * This function sends a standard inquiry to the adapter.
7656  *
7657  * Return value:
7658  *      IPR_RC_JOB_RETURN
7659  **/
7660 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7661 {
7662         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7663
7664         ENTER;
7665         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7666
7667         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7668                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7669                           sizeof(struct ipr_ioa_vpd));
7670
7671         LEAVE;
7672         return IPR_RC_JOB_RETURN;
7673 }
7674
7675 /**
7676  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7677  * @ipr_cmd:    ipr command struct
7678  *
7679  * This function sends an Identify Host Request Response Queue
7680  * command to establish the HRRQ with the adapter.
7681  *
7682  * Return value:
7683  *      IPR_RC_JOB_RETURN
7684  **/
7685 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7686 {
7687         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7688         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7689         struct ipr_hrr_queue *hrrq;
7690
7691         ENTER;
7692         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7693         dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7694
7695         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7696                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7697
7698                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7699                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7700
7701                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7702                 if (ioa_cfg->sis64)
7703                         ioarcb->cmd_pkt.cdb[1] = 0x1;
7704
7705                 if (ioa_cfg->nvectors == 1)
7706                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7707                 else
7708                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7709
7710                 ioarcb->cmd_pkt.cdb[2] =
7711                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7712                 ioarcb->cmd_pkt.cdb[3] =
7713                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7714                 ioarcb->cmd_pkt.cdb[4] =
7715                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7716                 ioarcb->cmd_pkt.cdb[5] =
7717                         ((u64) hrrq->host_rrq_dma) & 0xff;
7718                 ioarcb->cmd_pkt.cdb[7] =
7719                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7720                 ioarcb->cmd_pkt.cdb[8] =
7721                         (sizeof(u32) * hrrq->size) & 0xff;
7722
7723                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7724                         ioarcb->cmd_pkt.cdb[9] =
7725                                         ioa_cfg->identify_hrrq_index;
7726
7727                 if (ioa_cfg->sis64) {
7728                         ioarcb->cmd_pkt.cdb[10] =
7729                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7730                         ioarcb->cmd_pkt.cdb[11] =
7731                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7732                         ioarcb->cmd_pkt.cdb[12] =
7733                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7734                         ioarcb->cmd_pkt.cdb[13] =
7735                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7736                 }
7737
7738                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7739                         ioarcb->cmd_pkt.cdb[14] =
7740                                         ioa_cfg->identify_hrrq_index;
7741
7742                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7743                            IPR_INTERNAL_TIMEOUT);
7744
7745                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7746                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7747
7748                 LEAVE;
7749                 return IPR_RC_JOB_RETURN;
7750         }
7751
7752         LEAVE;
7753         return IPR_RC_JOB_CONTINUE;
7754 }
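/*
 * Illustrative sketch (compiled out, hypothetical helper name): the
 * Identify Host RRQ CDB built above packs the 64-bit host RRQ DMA
 * address big-endian, low word in bytes 2..5 and, on SIS-64 adapters
 * only, high word in bytes 10..13.
 */
#if 0
static void ipr_example_pack_hrrq_addr(u8 *cdb, u64 dma)
{
        cdb[2] = (dma >> 24) & 0xff;
        cdb[3] = (dma >> 16) & 0xff;
        cdb[4] = (dma >> 8) & 0xff;
        cdb[5] = dma & 0xff;

        /* upper word, SIS-64 adapters only */
        cdb[10] = (dma >> 56) & 0xff;
        cdb[11] = (dma >> 48) & 0xff;
        cdb[12] = (dma >> 40) & 0xff;
        cdb[13] = (dma >> 32) & 0xff;
}
#endif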
7755
7756 /**
7757  * ipr_reset_timer_done - Adapter reset timer function
7758  * @ipr_cmd:    ipr command struct
7759  *
7760  * Description: This function is used in adapter reset processing
7761  * for timing events. If the reset_cmd pointer in the IOA
7762  * config struct does not point to this command, we are doing
7763  * nested resets and fail_all_ops will take care of freeing
7764  * the command block.
7765  *
7766  * Return value:
7767  *      none
7768  **/
7769 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7770 {
7771         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7772         unsigned long lock_flags = 0;
7773
7774         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7775
7776         if (ioa_cfg->reset_cmd == ipr_cmd) {
7777                 list_del(&ipr_cmd->queue);
7778                 ipr_cmd->done(ipr_cmd);
7779         }
7780
7781         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7782 }
7783
7784 /**
7785  * ipr_reset_start_timer - Start a timer for adapter reset job
7786  * @ipr_cmd:    ipr command struct
7787  * @timeout:    timeout value
7788  *
7789  * Description: This function is used in adapter reset processing
7790  * for timing events. If the reset_cmd pointer in the IOA
7791  * config struct does not point to this command, we are doing
7792  * nested resets and fail_all_ops will take care of freeing
7793  * the command block.
7794  *
7795  * Return value:
7796  *      none
7797  **/
7798 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7799                                   unsigned long timeout)
7800 {
7802         ENTER;
7803         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7804         ipr_cmd->done = ipr_reset_ioa_job;
7805
7806         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7807         ipr_cmd->timer.expires = jiffies + timeout;
7808         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7809         add_timer(&ipr_cmd->timer);
7810 }
7811
7812 /**
7813  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7814  * @ioa_cfg:    ioa cfg struct
7815  *
7816  * Return value:
7817  *      nothing
7818  **/
7819 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7820 {
7821         struct ipr_hrr_queue *hrrq;
7822
7823         for_each_hrrq(hrrq, ioa_cfg) {
7824                 spin_lock(&hrrq->_lock);
7825                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7826
7827                 /* Initialize Host RRQ pointers */
7828                 hrrq->hrrq_start = hrrq->host_rrq;
7829                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7830                 hrrq->hrrq_curr = hrrq->hrrq_start;
7831                 hrrq->toggle_bit = 1;
7832                 spin_unlock(&hrrq->_lock);
7833         }
7834         wmb();
7835
7836         ioa_cfg->identify_hrrq_index = 0;
7837         if (ioa_cfg->hrrq_num == 1)
7838                 atomic_set(&ioa_cfg->hrrq_index, 0);
7839         else
7840                 atomic_set(&ioa_cfg->hrrq_index, 1);
7841
7842         /* Zero out config table */
7843         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7844 }
7845
7846 /**
7847  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7848  * @ipr_cmd:    ipr command struct
7849  *
7850  * Return value:
7851  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7852  **/
7853 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7854 {
7855         unsigned long stage, stage_time;
7856         u32 feedback;
7857         volatile u32 int_reg;
7858         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7859         u64 maskval = 0;
7860
7861         feedback = readl(ioa_cfg->regs.init_feedback_reg);
7862         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7863         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7864
7865         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7866
7867         /* sanity check the stage_time value */
7868         if (stage_time == 0)
7869                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7870         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7871                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7872         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7873                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7874
7875         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7876                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7877                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7878                 stage_time = ioa_cfg->transop_timeout;
7879                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7880         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7881                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7882                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7883                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7884                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
7885                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7886                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7887                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7888                         return IPR_RC_JOB_CONTINUE;
7889                 }
7890         }
7891
7892         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7893         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7894         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7895         ipr_cmd->done = ipr_reset_ioa_job;
7896         add_timer(&ipr_cmd->timer);
7897
7898         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7899
7900         return IPR_RC_JOB_RETURN;
7901 }
7902
7903 /**
7904  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7905  * @ipr_cmd:    ipr command struct
7906  *
7907  * This function reinitializes some control blocks and
7908  * enables destructive diagnostics on the adapter.
7909  *
7910  * Return value:
7911  *      IPR_RC_JOB_RETURN
7912  **/
7913 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7914 {
7915         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7916         volatile u32 int_reg;
7917         volatile u64 maskval;
7918         int i;
7919
7920         ENTER;
7921         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7922         ipr_init_ioa_mem(ioa_cfg);
7923
7924         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7925                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7926                 ioa_cfg->hrrq[i].allow_interrupts = 1;
7927                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7928         }
7929         wmb();
7930         if (ioa_cfg->sis64) {
7931                 /* Set the adapter to the correct endian mode. */
7932                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7933                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7934         }
7935
7936         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7937
7938         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7939                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7940                        ioa_cfg->regs.clr_interrupt_mask_reg32);
7941                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7942                 return IPR_RC_JOB_CONTINUE;
7943         }
7944
7945         /* Enable destructive diagnostics on IOA */
7946         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7947
7948         if (ioa_cfg->sis64) {
7949                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7950                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7951                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7952         } else
7953                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7954
7955         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7956
7957         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7958
7959         if (ioa_cfg->sis64) {
7960                 ipr_cmd->job_step = ipr_reset_next_stage;
7961                 return IPR_RC_JOB_CONTINUE;
7962         }
7963
7964         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7965         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7966         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7967         ipr_cmd->done = ipr_reset_ioa_job;
7968         add_timer(&ipr_cmd->timer);
7969         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7970
7971         LEAVE;
7972         return IPR_RC_JOB_RETURN;
7973 }
7974
7975 /**
7976  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7977  * @ipr_cmd:    ipr command struct
7978  *
7979  * This function is invoked when an adapter dump has run out
7980  * of processing time.
7981  *
7982  * Return value:
7983  *      IPR_RC_JOB_CONTINUE
7984  **/
7985 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7986 {
7987         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7988
7989         if (ioa_cfg->sdt_state == GET_DUMP)
7990                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7991         else if (ioa_cfg->sdt_state == READ_DUMP)
7992                 ioa_cfg->sdt_state = ABORT_DUMP;
7993
7994         ioa_cfg->dump_timeout = 1;
7995         ipr_cmd->job_step = ipr_reset_alert;
7996
7997         return IPR_RC_JOB_CONTINUE;
7998 }
7999
8000 /**
8001  * ipr_unit_check_no_data - Log a unit check/no data error log
8002  * @ioa_cfg:            ioa config struct
8003  *
8004  * Logs an error indicating the adapter unit checked, but for some
8005  * reason, we were unable to fetch the unit check buffer.
8006  *
8007  * Return value:
8008  *      nothing
8009  **/
8010 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8011 {
8012         ioa_cfg->errors_logged++;
8013         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8014 }
8015
8016 /**
8017  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8018  * @ioa_cfg:            ioa config struct
8019  *
8020  * Fetches the unit check buffer from the adapter by clocking the data
8021  * through the mailbox register.
8022  *
8023  * Return value:
8024  *      nothing
8025  **/
8026 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8027 {
8028         unsigned long mailbox;
8029         struct ipr_hostrcb *hostrcb;
8030         struct ipr_uc_sdt sdt;
8031         int rc, length;
8032         u32 ioasc;
8033
8034         mailbox = readl(ioa_cfg->ioa_mailbox);
8035
8036         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8037                 ipr_unit_check_no_data(ioa_cfg);
8038                 return;
8039         }
8040
8041         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8042         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8043                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8044
8045         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8046             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8047             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8048                 ipr_unit_check_no_data(ioa_cfg);
8049                 return;
8050         }
8051
8052         /* Find length of the first sdt entry (UC buffer) */
8053         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8054                 length = be32_to_cpu(sdt.entry[0].end_token);
8055         else
8056                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8057                           be32_to_cpu(sdt.entry[0].start_token)) &
8058                           IPR_FMT2_MBX_ADDR_MASK;
8059
8060         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8061                              struct ipr_hostrcb, queue);
8062         list_del(&hostrcb->queue);
8063         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8064
8065         rc = ipr_get_ldump_data_section(ioa_cfg,
8066                                         be32_to_cpu(sdt.entry[0].start_token),
8067                                         (__be32 *)&hostrcb->hcam,
8068                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8069
8070         if (!rc) {
8071                 ipr_handle_log_data(ioa_cfg, hostrcb);
8072                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8073                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8074                     ioa_cfg->sdt_state == GET_DUMP)
8075                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8076         } else
8077                 ipr_unit_check_no_data(ioa_cfg);
8078
8079         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8080 }
8081
8082 /**
8083  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8084  * @ipr_cmd:    ipr command struct
8085  *
8086  * Description: This function retrieves the unit check buffer from the adapter.
8087  *
8088  * Return value:
8089  *      IPR_RC_JOB_RETURN
8090  **/
8091 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8092 {
8093         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8094
8095         ENTER;
8096         ioa_cfg->ioa_unit_checked = 0;
8097         ipr_get_unit_check_buffer(ioa_cfg);
8098         ipr_cmd->job_step = ipr_reset_alert;
8099         ipr_reset_start_timer(ipr_cmd, 0);
8100
8101         LEAVE;
8102         return IPR_RC_JOB_RETURN;
8103 }
8104
8105 /**
8106  * ipr_reset_restore_cfg_space - Restore PCI config space.
8107  * @ipr_cmd:    ipr command struct
8108  *
8109  * Description: This function restores the saved PCI config space of
8110  * the adapter, fails all outstanding ops back to the callers, and
8111  * fetches the dump/unit check if applicable to this reset.
8112  *
8113  * Return value:
8114  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8115  **/
8116 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8117 {
8118         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8119         u32 int_reg;
8120
8121         ENTER;
8122         ioa_cfg->pdev->state_saved = true;
8123         pci_restore_state(ioa_cfg->pdev);
8124
8125         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8126                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8127                 return IPR_RC_JOB_CONTINUE;
8128         }
8129
8130         ipr_fail_all_ops(ioa_cfg);
8131
8132         if (ioa_cfg->sis64) {
8133                 /* Set the adapter to the correct endian mode. */
8134                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8135                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8136         }
8137
8138         if (ioa_cfg->ioa_unit_checked) {
8139                 if (ioa_cfg->sis64) {
8140                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8141                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8142                         return IPR_RC_JOB_RETURN;
8143                 } else {
8144                         ioa_cfg->ioa_unit_checked = 0;
8145                         ipr_get_unit_check_buffer(ioa_cfg);
8146                         ipr_cmd->job_step = ipr_reset_alert;
8147                         ipr_reset_start_timer(ipr_cmd, 0);
8148                         return IPR_RC_JOB_RETURN;
8149                 }
8150         }
8151
8152         if (ioa_cfg->in_ioa_bringdown) {
8153                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8154         } else {
8155                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8156
8157                 if (GET_DUMP == ioa_cfg->sdt_state) {
8158                         ioa_cfg->sdt_state = READ_DUMP;
8159                         ioa_cfg->dump_timeout = 0;
8160                         if (ioa_cfg->sis64)
8161                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8162                         else
8163                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8164                         ipr_cmd->job_step = ipr_reset_wait_for_dump;
8165                         schedule_work(&ioa_cfg->work_q);
8166                         return IPR_RC_JOB_RETURN;
8167                 }
8168         }
8169
8170         LEAVE;
8171         return IPR_RC_JOB_CONTINUE;
8172 }
8173
8174 /**
8175  * ipr_reset_bist_done - BIST has completed on the adapter.
8176  * @ipr_cmd:    ipr command struct
8177  *
8178  * Description: Unblock config space and resume the reset process.
8179  *
8180  * Return value:
8181  *      IPR_RC_JOB_CONTINUE
8182  **/
8183 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8184 {
8185         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8186
8187         ENTER;
8188         if (ioa_cfg->cfg_locked)
8189                 pci_cfg_access_unlock(ioa_cfg->pdev);
8190         ioa_cfg->cfg_locked = 0;
8191         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8192         LEAVE;
8193         return IPR_RC_JOB_CONTINUE;
8194 }
8195
8196 /**
8197  * ipr_reset_start_bist - Run BIST on the adapter.
8198  * @ipr_cmd:    ipr command struct
8199  *
8200  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8201  *
8202  * Return value:
8203  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8204  **/
8205 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8206 {
8207         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8208         int rc = PCIBIOS_SUCCESSFUL;
8209
8210         ENTER;
8211         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8212                 writel(IPR_UPROCI_SIS64_START_BIST,
8213                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8214         else
8215                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8216
8217         if (rc == PCIBIOS_SUCCESSFUL) {
8218                 ipr_cmd->job_step = ipr_reset_bist_done;
8219                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8220                 rc = IPR_RC_JOB_RETURN;
8221         } else {
8222                 if (ioa_cfg->cfg_locked)
8223                         pci_cfg_access_unlock(ioa_cfg->pdev);
8224                 ioa_cfg->cfg_locked = 0;
8225                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8226                 rc = IPR_RC_JOB_CONTINUE;
8227         }
8228
8229         LEAVE;
8230         return rc;
8231 }
8232
8233 /**
8234  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8235  * @ipr_cmd:    ipr command struct
8236  *
8237  * Description: This clears PCI reset to the adapter and delays two seconds.
8238  *
8239  * Return value:
8240  *      IPR_RC_JOB_RETURN
8241  **/
8242 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8243 {
8244         ENTER;
8245         pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8246         ipr_cmd->job_step = ipr_reset_bist_done;
8247         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8248         LEAVE;
8249         return IPR_RC_JOB_RETURN;
8250 }
8251
8252 /**
8253  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8254  * @ipr_cmd:    ipr command struct
8255  *
8256  * Description: This asserts PCI reset to the adapter.
8257  *
8258  * Return value:
8259  *      IPR_RC_JOB_RETURN
8260  **/
8261 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8262 {
8263         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8264         struct pci_dev *pdev = ioa_cfg->pdev;
8265
8266         ENTER;
8267         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8268         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8269         ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8270         LEAVE;
8271         return IPR_RC_JOB_RETURN;
8272 }
8273
8274 /**
8275  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8276  * @ipr_cmd:    ipr command struct
8277  *
8278  * Description: This attempts to block config access to the IOA.
8279  *
8280  * Return value:
8281  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8282  **/
8283 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8284 {
8285         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8286         int rc = IPR_RC_JOB_CONTINUE;
8287
8288         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8289                 ioa_cfg->cfg_locked = 1;
8290                 ipr_cmd->job_step = ioa_cfg->reset;
8291         } else {
8292                 if (ipr_cmd->u.time_left) {
8293                         rc = IPR_RC_JOB_RETURN;
8294                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8295                         ipr_reset_start_timer(ipr_cmd,
8296                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8297                 } else {
8298                         ipr_cmd->job_step = ioa_cfg->reset;
8299                         dev_err(&ioa_cfg->pdev->dev,
8300                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8301                 }
8302         }
8303
8304         return rc;
8305 }
8306
8307 /**
8308  * ipr_reset_block_config_access - Block config access to the IOA
8309  * @ipr_cmd:    ipr command struct
8310  *
8311  * Description: This attempts to block config access to the IOA
8312  *
8313  * Return value:
8314  *      IPR_RC_JOB_CONTINUE
8315  **/
8316 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8317 {
8318         ipr_cmd->ioa_cfg->cfg_locked = 0;
8319         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8320         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8321         return IPR_RC_JOB_CONTINUE;
8322 }
8323
8324 /**
8325  * ipr_reset_allowed - Query whether or not IOA can be reset
8326  * @ioa_cfg:    ioa config struct
8327  *
8328  * Return value:
8329  *      0 if reset not allowed / non-zero if reset is allowed
8330  **/
8331 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8332 {
8333         volatile u32 temp_reg;
8334
8335         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8336         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8337 }
8338
8339 /**
8340  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8341  * @ipr_cmd:    ipr command struct
8342  *
8343  * Description: This function waits for adapter permission to run BIST,
8344  * then runs BIST. If the adapter does not grant permission within
8345  * a reasonable time, we reset the adapter anyway. The risk of
8346  * resetting the adapter without warning it is losing the adapter's
8347  * persistent error log: if the adapter is reset while it is writing
8348  * to its flash, that flash segment will have bad ECC and be
8349  * zeroed.
8350  *
8351  * Return value:
8352  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8353  **/
8354 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8355 {
8356         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8357         int rc = IPR_RC_JOB_RETURN;
8358
8359         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8360                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8361                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8362         } else {
8363                 ipr_cmd->job_step = ipr_reset_block_config_access;
8364                 rc = IPR_RC_JOB_CONTINUE;
8365         }
8366
8367         return rc;
8368 }
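/*
 * Illustrative sketch (compiled out, hypothetical helper name): both
 * ipr_reset_block_config_access_wait() and the function above share a
 * retry pattern -- burn down a time budget in
 * IPR_CHECK_FOR_RESET_TIMEOUT slices, re-arming the reset job timer
 * each pass, then proceed anyway once the budget is exhausted.
 */
#if 0
static int ipr_example_poll_with_budget(struct ipr_cmnd *ipr_cmd,
                                        int (*cond)(struct ipr_ioa_cfg *))
{
        if (!cond(ipr_cmd->ioa_cfg) && ipr_cmd->u.time_left) {
                ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
                ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
                return IPR_RC_JOB_RETURN;       /* try again later */
        }

        return IPR_RC_JOB_CONTINUE;             /* ready or timed out */
}
#endif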
8369
8370 /**
8371  * ipr_reset_alert - Alert the adapter of a pending reset
8372  * @ipr_cmd:    ipr command struct
8373  *
8374  * Description: This function alerts the adapter that it will be reset.
8375  * If memory space is not currently enabled, proceed directly
8376  * to running BIST on the adapter. The timer must always be started
8377  * so we guarantee we do not run BIST from ipr_isr.
8378  *
8379  * Return value:
8380  *      IPR_RC_JOB_RETURN
8381  **/
8382 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8383 {
8384         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8385         u16 cmd_reg;
8386         int rc;
8387
8388         ENTER;
8389         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8390
8391         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8392                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8393                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8394                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8395         } else {
8396                 ipr_cmd->job_step = ipr_reset_block_config_access;
8397         }
8398
8399         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8400         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8401
8402         LEAVE;
8403         return IPR_RC_JOB_RETURN;
8404 }
8405
8406 /**
8407  * ipr_reset_ucode_download_done - Microcode download completion
8408  * @ipr_cmd:    ipr command struct
8409  *
8410  * Description: This function unmaps the microcode download buffer.
8411  *
8412  * Return value:
8413  *      IPR_RC_JOB_CONTINUE
8414  **/
8415 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8416 {
8417         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8418         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8419
8420         pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8421                      sglist->num_sg, DMA_TO_DEVICE);
8422
8423         ipr_cmd->job_step = ipr_reset_alert;
8424         return IPR_RC_JOB_CONTINUE;
8425 }
8426
8427 /**
8428  * ipr_reset_ucode_download - Download microcode to the adapter
8429  * @ipr_cmd:    ipr command struct
8430  *
8431  * Description: This function checks to see if there is microcode
8432  * to download to the adapter. If there is, a download is performed.
8433  *
8434  * Return value:
8435  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8436  **/
8437 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8438 {
8439         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8440         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8441
8442         ENTER;
8443         ipr_cmd->job_step = ipr_reset_alert;
8444
8445         if (!sglist)
8446                 return IPR_RC_JOB_CONTINUE;
8447
8448         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8449         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8450         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8451         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8452         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8453         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8454         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8455
8456         if (ioa_cfg->sis64)
8457                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8458         else
8459                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8460         ipr_cmd->job_step = ipr_reset_ucode_download_done;
8461
8462         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8463                    IPR_WRITE_BUFFER_TIMEOUT);
8464
8465         LEAVE;
8466         return IPR_RC_JOB_RETURN;
8467 }
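/*
 * Illustrative sketch (compiled out, hypothetical helper name): the
 * WRITE BUFFER CDB built above carries a 24-bit parameter list length
 * big-endian in bytes 6..8, which is what the shift-and-mask
 * assignments implement.
 */
#if 0
static void ipr_example_write_buffer_len(u8 *cdb, u32 buffer_len)
{
        cdb[6] = (buffer_len >> 16) & 0xff;
        cdb[7] = (buffer_len >> 8) & 0xff;
        cdb[8] = buffer_len & 0xff;
}
#endif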
8468
8469 /**
8470  * ipr_reset_shutdown_ioa - Shutdown the adapter
8471  * @ipr_cmd:    ipr command struct
8472  *
8473  * Description: This function issues an adapter shutdown of the
8474  * specified type to the specified adapter as part of the
8475  * adapter reset job.
8476  *
8477  * Return value:
8478  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8479  **/
8480 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8481 {
8482         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8483         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8484         unsigned long timeout;
8485         int rc = IPR_RC_JOB_CONTINUE;
8486
8487         ENTER;
8488         if (shutdown_type != IPR_SHUTDOWN_NONE &&
8489                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8490                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8491                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8492                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8493                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8494
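                /*
                 * Pick a timeout to match the shutdown type; dual-IOA
                 * RAID configurations use an abbreviated timeout,
                 * presumably so the partner IOA can take over sooner.
                 */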
8495                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8496                         timeout = IPR_SHUTDOWN_TIMEOUT;
8497                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8498                         timeout = IPR_INTERNAL_TIMEOUT;
8499                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8500                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8501                 else
8502                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8503
8504                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8505
8506                 rc = IPR_RC_JOB_RETURN;
8507                 ipr_cmd->job_step = ipr_reset_ucode_download;
8508         } else
8509                 ipr_cmd->job_step = ipr_reset_alert;
8510
8511         LEAVE;
8512         return rc;
8513 }
8514
8515 /**
8516  * ipr_reset_ioa_job - Adapter reset job
8517  * @ipr_cmd:    ipr command struct
8518  *
8519  * Description: This function is the job router for the adapter reset job.
8520  *
8521  * Return value:
8522  *      none
8523  **/
8524 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8525 {
8526         u32 rc, ioasc;
8527         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8528
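        /*
         * Drive the reset state machine: each job_step either completes
         * synchronously and returns IPR_RC_JOB_CONTINUE (so we loop and
         * run the next step) or re-arms itself via a timer or command
         * completion and returns IPR_RC_JOB_RETURN, ending this pass.
         */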
8529         do {
8530                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8531
8532                 if (ioa_cfg->reset_cmd != ipr_cmd) {
8533                         /*
8534                          * We are doing nested adapter resets and this is
8535                          * not the current reset job.
8536                          */
8537                         list_add_tail(&ipr_cmd->queue,
8538                                         &ipr_cmd->hrrq->hrrq_free_q);
8539                         return;
8540                 }
8541
8542                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8543                         rc = ipr_cmd->job_step_failed(ipr_cmd);
8544                         if (rc == IPR_RC_JOB_RETURN)
8545                                 return;
8546                 }
8547
8548                 ipr_reinit_ipr_cmnd(ipr_cmd);
8549                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8550                 rc = ipr_cmd->job_step(ipr_cmd);
8551         } while (rc == IPR_RC_JOB_CONTINUE);
8552 }
8553
8554 /**
8555  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8556  * @ioa_cfg:            ioa config struct
8557  * @job_step:           first job step of reset job
8558  * @shutdown_type:      shutdown type
8559  *
8560  * Description: This function will initiate the reset of the given adapter
8561  * starting at the selected job step.
8562  * If the caller needs to wait on the completion of the reset,
8563  * the caller must sleep on the reset_wait_q.
8564  *
8565  * Return value:
8566  *      none
8567  **/
8568 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8569                                     int (*job_step) (struct ipr_cmnd *),
8570                                     enum ipr_shutdown_type shutdown_type)
8571 {
8572         struct ipr_cmnd *ipr_cmd;
8573         int i;
8574
8575         ioa_cfg->in_reset_reload = 1;
8576         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8577                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8578                 ioa_cfg->hrrq[i].allow_cmds = 0;
8579                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8580         }
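        /* Make the allow_cmds updates visible before blocking new requests */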
8581         wmb();
8582         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8583                 scsi_block_requests(ioa_cfg->host);
8584
8585         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8586         ioa_cfg->reset_cmd = ipr_cmd;
8587         ipr_cmd->job_step = job_step;
8588         ipr_cmd->u.shutdown_type = shutdown_type;
8589
8590         ipr_reset_ioa_job(ipr_cmd);
8591 }
8592
8593 /**
8594  * ipr_initiate_ioa_reset - Initiate an adapter reset
8595  * @ioa_cfg:            ioa config struct
8596  * @shutdown_type:      shutdown type
8597  *
8598  * Description: This function will initiate the reset of the given adapter.
8599  * If the caller needs to wait on the completion of the reset,
8600  * the caller must sleep on the reset_wait_q.
8601  *
8602  * Return value:
8603  *      none
8604  **/
8605 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8606                                    enum ipr_shutdown_type shutdown_type)
8607 {
8608         int i;
8609
8610         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8611                 return;
8612
8613         if (ioa_cfg->in_reset_reload) {
8614                 if (ioa_cfg->sdt_state == GET_DUMP)
8615                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8616                 else if (ioa_cfg->sdt_state == READ_DUMP)
8617                         ioa_cfg->sdt_state = ABORT_DUMP;
8618         }
8619
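        /*
         * Too many back-to-back reset failures: mark every HRRQ dead and
         * take the adapter offline instead of retrying forever.
         */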
8620         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8621                 dev_err(&ioa_cfg->pdev->dev,
8622                         "IOA taken offline - error recovery failed\n");
8623
8624                 ioa_cfg->reset_retries = 0;
8625                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8626                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8627                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
8628                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8629                 }
8630                 wmb();
8631
8632                 if (ioa_cfg->in_ioa_bringdown) {
8633                         ioa_cfg->reset_cmd = NULL;
8634                         ioa_cfg->in_reset_reload = 0;
8635                         ipr_fail_all_ops(ioa_cfg);
8636                         wake_up_all(&ioa_cfg->reset_wait_q);
8637
8638                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8639                                 spin_unlock_irq(ioa_cfg->host->host_lock);
8640                                 scsi_unblock_requests(ioa_cfg->host);
8641                                 spin_lock_irq(ioa_cfg->host->host_lock);
8642                         }
8643                         return;
8644                 } else {
8645                         ioa_cfg->in_ioa_bringdown = 1;
8646                         shutdown_type = IPR_SHUTDOWN_NONE;
8647                 }
8648         }
8649
8650         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8651                                 shutdown_type);
8652 }
8653
8654 /**
8655  * ipr_reset_freeze - Hold off all I/O activity
8656  * @ipr_cmd:    ipr command struct
8657  *
8658  * Description: If the PCI slot is frozen, hold off all I/O
8659  * activity; then, as soon as the slot is available again,
8660  * initiate an adapter reset.
8661  */
8662 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8663 {
8664         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8665         int i;
8666
8667         /* Disallow new interrupts, avoid loop */
8668         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8669                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8670                 ioa_cfg->hrrq[i].allow_interrupts = 0;
8671                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8672         }
8673         wmb();
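        /*
         * Park the reset command on the pending queue; its ->done handler
         * (ipr_reset_ioa_job) resumes the reset sequence once the command
         * is completed after the PCI slot recovers.
         */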
8674         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8675         ipr_cmd->done = ipr_reset_ioa_job;
8676         return IPR_RC_JOB_RETURN;
8677 }
8678
8679 /**
8680  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8681  * @pdev:       PCI device struct
8682  *
8683  * Description: This routine is called to tell us that the MMIO
8684  * access to the IOA has been restored.
8685  */
8686 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8687 {
8688         unsigned long flags = 0;
8689         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8690
8691         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8692         if (!ioa_cfg->probe_done)
8693                 pci_save_state(pdev);
8694         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8695         return PCI_ERS_RESULT_NEED_RESET;
8696 }
8697
8698 /**
8699  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8700  * @pdev:       PCI device struct
8701  *
8702  * Description: This routine is called to tell us that the PCI bus
8703  * is down. Can't do anything here, except put the device driver
8704  * into a holding pattern, waiting for the PCI bus to come back.
8705  */
8706 static void ipr_pci_frozen(struct pci_dev *pdev)
8707 {
8708         unsigned long flags = 0;
8709         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8710
8711         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8712         if (ioa_cfg->probe_done)
8713                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8714         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8715 }
8716
8717 /**
8718  * ipr_pci_slot_reset - Called when PCI slot has been reset.
8719  * @pdev:       PCI device struct
8720  *
8721  * Description: This routine is called by the pci error recovery
8722  * code after the PCI slot has been reset, just before we
8723  * should resume normal operations.
8724  */
8725 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8726 {
8727         unsigned long flags = 0;
8728         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8729
8730         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8731         if (ioa_cfg->probe_done) {
8732                 if (ioa_cfg->needs_warm_reset)
8733                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8734                 else
8735                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8736                                                 IPR_SHUTDOWN_NONE);
8737         } else
8738                 wake_up_all(&ioa_cfg->eeh_wait_q);
8739         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8740         return PCI_ERS_RESULT_RECOVERED;
8741 }
8742
8743 /**
8744  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8745  * @pdev:       PCI device struct
8746  *
8747  * Description: This routine is called when the PCI bus has
8748  * permanently failed.
8749  */
8750 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8751 {
8752         unsigned long flags = 0;
8753         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8754         int i;
8755
8756         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8757         if (ioa_cfg->probe_done) {
8758                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8759                         ioa_cfg->sdt_state = ABORT_DUMP;
8760                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8761                 ioa_cfg->in_ioa_bringdown = 1;
8762                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8763                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8764                         ioa_cfg->hrrq[i].allow_cmds = 0;
8765                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8766                 }
8767                 wmb();
8768                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8769         } else
8770                 wake_up_all(&ioa_cfg->eeh_wait_q);
8771         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8772 }
8773
8774 /**
8775  * ipr_pci_error_detected - Called when a PCI error is detected.
8776  * @pdev:       PCI device struct
8777  * @state:      PCI channel state
8778  *
8779  * Description: Called when a PCI error is detected.
8780  *
8781  * Return value:
8782  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8783  */
8784 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8785                                                pci_channel_state_t state)
8786 {
8787         switch (state) {
8788         case pci_channel_io_frozen:
8789                 ipr_pci_frozen(pdev);
8790                 return PCI_ERS_RESULT_CAN_RECOVER;
8791         case pci_channel_io_perm_failure:
8792                 ipr_pci_perm_failure(pdev);
8793                 return PCI_ERS_RESULT_DISCONNECT;
8795         default:
8796                 break;
8797         }
8798         return PCI_ERS_RESULT_NEED_RESET;
8799 }
8800
8801 /**
8802  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8803  * @ioa_cfg:    ioa cfg struct
8804  *
8805  * Description: This is the second phase of adapter initialization.
8806  * This function takes care of initializing the adapter to the point
8807  * where it can accept new commands.
8808  *
8809  * Return value:
8810  *      0 on success / -EIO on failure
8811  **/
8812 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8813 {
8814         int rc = 0;
8815         unsigned long host_lock_flags = 0;
8816
8817         ENTER;
8818         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8819         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8820         ioa_cfg->probe_done = 1;
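        /*
         * Kick off the bring-up reset job: do a full IOA reset if probe
         * found the card in an unknown state, otherwise start the job
         * directly at the enable-IOA step.
         */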
8821         if (ioa_cfg->needs_hard_reset) {
8822                 ioa_cfg->needs_hard_reset = 0;
8823                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8824         } else
8825                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8826                                         IPR_SHUTDOWN_NONE);
8827         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8828         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8829         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8830
8831         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8832                 rc = -EIO;
8833         } else if (ipr_invalid_adapter(ioa_cfg)) {
8834                 if (!ipr_testmode)
8835                         rc = -EIO;
8836
8837                 dev_err(&ioa_cfg->pdev->dev,
8838                         "Adapter not supported in this hardware configuration.\n");
8839         }
8840
8841         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8842
8843         LEAVE;
8844         return rc;
8845 }
8846
8847 /**
8848  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8849  * @ioa_cfg:    ioa config struct
8850  *
8851  * Return value:
8852  *      none
8853  **/
8854 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8855 {
8856         int i;
8857
8858         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8859                 if (ioa_cfg->ipr_cmnd_list[i])
8860                         pci_pool_free(ioa_cfg->ipr_cmd_pool,
8861                                       ioa_cfg->ipr_cmnd_list[i],
8862                                       ioa_cfg->ipr_cmnd_list_dma[i]);
8863
8864                 ioa_cfg->ipr_cmnd_list[i] = NULL;
8865         }
8866
8867         if (ioa_cfg->ipr_cmd_pool)
8868                 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8869
8870         kfree(ioa_cfg->ipr_cmnd_list);
8871         kfree(ioa_cfg->ipr_cmnd_list_dma);
8872         ioa_cfg->ipr_cmnd_list = NULL;
8873         ioa_cfg->ipr_cmnd_list_dma = NULL;
8874         ioa_cfg->ipr_cmd_pool = NULL;
8875 }
8876
8877 /**
8878  * ipr_free_mem - Frees memory allocated for an adapter
8879  * @ioa_cfg:    ioa cfg struct
8880  *
8881  * Return value:
8882  *      nothing
8883  **/
8884 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8885 {
8886         int i;
8887
8888         kfree(ioa_cfg->res_entries);
8889         pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8890                             ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8891         ipr_free_cmd_blks(ioa_cfg);
8892
8893         for (i = 0; i < ioa_cfg->hrrq_num; i++)
8894                 pci_free_consistent(ioa_cfg->pdev,
8895                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
8896                                         ioa_cfg->hrrq[i].host_rrq,
8897                                         ioa_cfg->hrrq[i].host_rrq_dma);
8898
8899         pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8900                             ioa_cfg->u.cfg_table,
8901                             ioa_cfg->cfg_table_dma);
8902
8903         for (i = 0; i < IPR_NUM_HCAMS; i++) {
8904                 pci_free_consistent(ioa_cfg->pdev,
8905                                     sizeof(struct ipr_hostrcb),
8906                                     ioa_cfg->hostrcb[i],
8907                                     ioa_cfg->hostrcb_dma[i]);
8908         }
8909
8910         ipr_free_dump(ioa_cfg);
8911         kfree(ioa_cfg->trace);
8912 }
8913
8914 /**
8915  * ipr_free_all_resources - Free all allocated resources for an adapter.
8916  * @ioa_cfg:    ioa config struct
8917  *
8918  * This function frees all allocated resources for the
8919  * specified adapter.
8920  *
8921  * Return value:
8922  *      none
8923  **/
8924 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8925 {
8926         struct pci_dev *pdev = ioa_cfg->pdev;
8927
8928         ENTER;
8929         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8930             ioa_cfg->intr_flag == IPR_USE_MSIX) {
8931                 int i;
8932                 for (i = 0; i < ioa_cfg->nvectors; i++)
8933                         free_irq(ioa_cfg->vectors_info[i].vec,
8934                                 &ioa_cfg->hrrq[i]);
8935         } else
8936                 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8937
8938         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
8939                 pci_disable_msi(pdev);
8940                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8941         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
8942                 pci_disable_msix(pdev);
8943                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8944         }
8945
8946         iounmap(ioa_cfg->hdw_dma_regs);
8947         pci_release_regions(pdev);
8948         ipr_free_mem(ioa_cfg);
8949         scsi_host_put(ioa_cfg->host);
8950         pci_disable_device(pdev);
8951         LEAVE;
8952 }
8953
8954 /**
8955  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8956  * @ioa_cfg:    ioa config struct
8957  *
8958  * Return value:
8959  *      0 on success / -ENOMEM on allocation failure
8960  **/
8961 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8962 {
8963         struct ipr_cmnd *ipr_cmd;
8964         struct ipr_ioarcb *ioarcb;
8965         dma_addr_t dma_addr;
8966         int i, entries_each_hrrq, hrrq_id = 0;
8967
8968         ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8969                                                 sizeof(struct ipr_cmnd), 512, 0);
8970
8971         if (!ioa_cfg->ipr_cmd_pool)
8972                 return -ENOMEM;
8973
8974         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8975         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8976
8977         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8978                 ipr_free_cmd_blks(ioa_cfg);
8979                 return -ENOMEM;
8980         }
8981
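        /*
         * Partition the command blocks across the HRRQs. With multiple
         * queues, HRRQ 0 is reserved for internal commands and the rest
         * of the blocks are split evenly among the remaining queues; any
         * remainder from the division is folded into the last queue below.
         */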
8982         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8983                 if (ioa_cfg->hrrq_num > 1) {
8984                         if (i == 0) {
8985                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8986                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
8987                                 ioa_cfg->hrrq[i].max_cmd_id =
8988                                         (entries_each_hrrq - 1);
8989                         } else {
8990                                 entries_each_hrrq =
8991                                         IPR_NUM_BASE_CMD_BLKS/
8992                                         (ioa_cfg->hrrq_num - 1);
8993                                 ioa_cfg->hrrq[i].min_cmd_id =
8994                                         IPR_NUM_INTERNAL_CMD_BLKS +
8995                                         (i - 1) * entries_each_hrrq;
8996                                 ioa_cfg->hrrq[i].max_cmd_id =
8997                                         (IPR_NUM_INTERNAL_CMD_BLKS +
8998                                         i * entries_each_hrrq - 1);
8999                         }
9000                 } else {
9001                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9002                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9003                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9004                 }
9005                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9006         }
9007
9008         BUG_ON(ioa_cfg->hrrq_num == 0);
9009
9010         i = IPR_NUM_CMD_BLKS -
9011                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9012         if (i > 0) {
9013                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9014                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9015         }
9016
9017         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9018                 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9019
9020                 if (!ipr_cmd) {
9021                         ipr_free_cmd_blks(ioa_cfg);
9022                         return -ENOMEM;
9023                 }
9024
9025                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9026                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9027                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9028
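                /*
                 * Each command block is DMA-mapped once at allocation time;
                 * the IOARCB carries the bus addresses of its embedded IOADL
                 * (scatter/gather list) and IOASA (status area) so the
                 * adapter can DMA to them directly.
                 */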
9029                 ioarcb = &ipr_cmd->ioarcb;
9030                 ipr_cmd->dma_addr = dma_addr;
9031                 if (ioa_cfg->sis64)
9032                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9033                 else
9034                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9035
9036                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9037                 if (ioa_cfg->sis64) {
9038                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9039                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9040                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9041                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9042                 } else {
9043                         ioarcb->write_ioadl_addr =
9044                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9045                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9046                         ioarcb->ioasa_host_pci_addr =
9047                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9048                 }
9049                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9050                 ipr_cmd->cmd_index = i;
9051                 ipr_cmd->ioa_cfg = ioa_cfg;
9052                 ipr_cmd->sense_buffer_dma = dma_addr +
9053                         offsetof(struct ipr_cmnd, sense_buffer);
9054
9055                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9056                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9057                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9058                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9059                         hrrq_id++;
9060         }
9061
9062         return 0;
9063 }
9064
9065 /**
9066  * ipr_alloc_mem - Allocate memory for an adapter
9067  * @ioa_cfg:    ioa config struct
9068  *
9069  * Return value:
9070  *      0 on success / non-zero for error
9071  **/
9072 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9073 {
9074         struct pci_dev *pdev = ioa_cfg->pdev;
9075         int i, rc = -ENOMEM;
9076
9077         ENTER;
9078         ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9079                                        ioa_cfg->max_devs_supported, GFP_KERNEL);
9080
9081         if (!ioa_cfg->res_entries)
9082                 goto out;
9083
9084         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9085                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9086                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9087         }
9088
9089         ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
9090                                                 sizeof(struct ipr_misc_cbs),
9091                                                 &ioa_cfg->vpd_cbs_dma);
9092
9093         if (!ioa_cfg->vpd_cbs)
9094                 goto out_free_res_entries;
9095
9096         if (ipr_alloc_cmd_blks(ioa_cfg))
9097                 goto out_free_vpd_cbs;
9098
9099         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9100                 ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
9101                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9102                                         &ioa_cfg->hrrq[i].host_rrq_dma);
9103
9104                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9105                         while (--i >= 0)
9106                                 pci_free_consistent(pdev,
9107                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9108                                         ioa_cfg->hrrq[i].host_rrq,
9109                                         ioa_cfg->hrrq[i].host_rrq_dma);
9110                         goto out_ipr_free_cmd_blocks;
9111                 }
9112                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9113         }
9114
9115         ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
9116                                                     ioa_cfg->cfg_table_size,
9117                                                     &ioa_cfg->cfg_table_dma);
9118
9119         if (!ioa_cfg->u.cfg_table)
9120                 goto out_free_host_rrq;
9121
9122         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9123                 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
9124                                                            sizeof(struct ipr_hostrcb),
9125                                                            &ioa_cfg->hostrcb_dma[i]);
9126
9127                 if (!ioa_cfg->hostrcb[i])
9128                         goto out_free_hostrcb_dma;
9129
9130                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9131                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9132                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9133                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9134         }
9135
9136         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9137                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9138
9139         if (!ioa_cfg->trace)
9140                 goto out_free_hostrcb_dma;
9141
9142         rc = 0;
9143 out:
9144         LEAVE;
9145         return rc;
9146
9147 out_free_hostrcb_dma:
9148         while (i-- > 0) {
9149                 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
9150                                     ioa_cfg->hostrcb[i],
9151                                     ioa_cfg->hostrcb_dma[i]);
9152         }
9153         pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
9154                             ioa_cfg->u.cfg_table,
9155                             ioa_cfg->cfg_table_dma);
9156 out_free_host_rrq:
9157         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9158                 pci_free_consistent(pdev,
9159                                 sizeof(u32) * ioa_cfg->hrrq[i].size,
9160                                 ioa_cfg->hrrq[i].host_rrq,
9161                                 ioa_cfg->hrrq[i].host_rrq_dma);
9162         }
9163 out_ipr_free_cmd_blocks:
9164         ipr_free_cmd_blks(ioa_cfg);
9165 out_free_vpd_cbs:
9166         pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9167                             ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9168 out_free_res_entries:
9169         kfree(ioa_cfg->res_entries);
9170         goto out;
9171 }
9172
9173 /**
9174  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9175  * @ioa_cfg:    ioa config struct
9176  *
9177  * Return value:
9178  *      none
9179  **/
9180 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9181 {
9182         int i;
9183
9184         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9185                 ioa_cfg->bus_attr[i].bus = i;
9186                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9187                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9188                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9189                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9190                 else
9191                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9192         }
9193 }
9194
9195 /**
9196  * ipr_init_regs - Initialize IOA registers
9197  * @ioa_cfg:    ioa config struct
9198  *
9199  * Return value:
9200  *      none
9201  **/
9202 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9203 {
9204         const struct ipr_interrupt_offsets *p;
9205         struct ipr_interrupts *t;
9206         void __iomem *base;
9207
9208         p = &ioa_cfg->chip_cfg->regs;
9209         t = &ioa_cfg->regs;
9210         base = ioa_cfg->hdw_dma_regs;
9211
9212         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9213         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9214         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9215         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9216         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9217         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9218         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9219         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9220         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9221         t->ioarrin_reg = base + p->ioarrin_reg;
9222         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9223         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9224         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9225         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9226         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9227         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9228
9229         if (ioa_cfg->sis64) {
9230                 t->init_feedback_reg = base + p->init_feedback_reg;
9231                 t->dump_addr_reg = base + p->dump_addr_reg;
9232                 t->dump_data_reg = base + p->dump_data_reg;
9233                 t->endian_swap_reg = base + p->endian_swap_reg;
9234         }
9235 }
9236
9237 /**
9238  * ipr_init_ioa_cfg - Initialize IOA config struct
9239  * @ioa_cfg:    ioa config struct
9240  * @host:               scsi host struct
9241  * @pdev:               PCI dev struct
9242  *
9243  * Return value:
9244  *      none
9245  **/
9246 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9247                              struct Scsi_Host *host, struct pci_dev *pdev)
9248 {
9249         int i;
9250
9251         ioa_cfg->host = host;
9252         ioa_cfg->pdev = pdev;
9253         ioa_cfg->log_level = ipr_log_level;
9254         ioa_cfg->doorbell = IPR_DOORBELL;
9255         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9256         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9257         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9258         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9259         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9260         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9261
9262         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9263         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9264         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9265         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9266         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9267         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9268         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9269         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9270         ioa_cfg->sdt_state = INACTIVE;
9271
9272         ipr_initialize_bus_attr(ioa_cfg);
9273         ioa_cfg->max_devs_supported = ipr_max_devs;
9274
9275         if (ioa_cfg->sis64) {
9276                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9277                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9278                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9279                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9280                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9281                                            + ((sizeof(struct ipr_config_table_entry64)
9282                                                * ioa_cfg->max_devs_supported)));
9283         } else {
9284                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9285                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9286                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9287                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9288                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9289                                            + ((sizeof(struct ipr_config_table_entry)
9290                                                * ioa_cfg->max_devs_supported)));
9291         }
9292
9293         host->max_channel = IPR_MAX_BUS_TO_SCAN;
9294         host->unique_id = host->host_no;
9295         host->max_cmd_len = IPR_MAX_CDB_LEN;
9296         host->can_queue = ioa_cfg->max_cmds;
9297         pci_set_drvdata(pdev, ioa_cfg);
9298
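        /*
         * HRRQ 0 shares the SCSI host lock; any additional queues get
         * their own locks so per-queue completion paths do not contend.
         */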
9299         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9300                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9301                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9302                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9303                 if (i == 0)
9304                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9305                 else
9306                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9307         }
9308 }
9309
9310 /**
9311  * ipr_get_chip_info - Find adapter chip information
9312  * @dev_id:             PCI device id struct
9313  *
9314  * Return value:
9315  *      ptr to chip information on success / NULL on failure
9316  **/
9317 static const struct ipr_chip_t *
9318 ipr_get_chip_info(const struct pci_device_id *dev_id)
9319 {
9320         int i;
9321
9322         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9323                 if (ipr_chip[i].vendor == dev_id->vendor &&
9324                     ipr_chip[i].device == dev_id->device)
9325                         return &ipr_chip[i];
9326         return NULL;
9327 }
9328
9329 /**
9330  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9331  *                                              during probe time
9332  * @ioa_cfg:    ioa config struct
9333  *
9334  * Return value:
9335  *      None
9336  **/
9337 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9338 {
9339         struct pci_dev *pdev = ioa_cfg->pdev;
9340
9341         if (pci_channel_offline(pdev)) {
9342                 wait_event_timeout(ioa_cfg->eeh_wait_q,
9343                                    !pci_channel_offline(pdev),
9344                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9345                 pci_restore_state(pdev);
9346         }
9347 }
9348
9349 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9350 {
9351         struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9352         int i, vectors;
9353
9354         for (i = 0; i < ARRAY_SIZE(entries); ++i)
9355                 entries[i].entry = i;
9356
9357         vectors = pci_enable_msix_range(ioa_cfg->pdev,
9358                                         entries, 1, ipr_number_of_msix);
9359         if (vectors < 0) {
9360                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9361                 return vectors;
9362         }
9363
9364         for (i = 0; i < vectors; i++)
9365                 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9366         ioa_cfg->nvectors = vectors;
9367
9368         return 0;
9369 }
9370
9371 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9372 {
9373         int i, vectors;
9374
9375         vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9376         if (vectors < 0) {
9377                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9378                 return vectors;
9379         }
9380
9381         for (i = 0; i < vectors; i++)
9382                 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9383         ioa_cfg->nvectors = vectors;
9384
9385         return 0;
9386 }
9387
9388 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9389 {
9390         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9391
9392         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9393                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9394                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9395                 ioa_cfg->vectors_info[vec_idx].
9396                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9397         }
9398 }
9399
9400 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9401 {
9402         int i, rc;
9403
9404         for (i = 1; i < ioa_cfg->nvectors; i++) {
9405                 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9406                         ipr_isr_mhrrq,
9407                         0,
9408                         ioa_cfg->vectors_info[i].desc,
9409                         &ioa_cfg->hrrq[i]);
9410                 if (rc) {
9411                         while (--i >= 0)
9412                                 free_irq(ioa_cfg->vectors_info[i].vec,
9413                                         &ioa_cfg->hrrq[i]);
9414                         return rc;
9415                 }
9416         }
9417         return 0;
9418 }
9419
9420 /**
9421  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9422  * @devp:               pointer to the ioa config struct
9423  *
9424  * Description: Simply set the msi_received flag to 1 indicating that
9425  * Message Signaled Interrupts are supported.
9426  *
9427  * Return value:
9428  *      IRQ_HANDLED
9429  **/
9430 static irqreturn_t ipr_test_intr(int irq, void *devp)
9431 {
9432         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9433         unsigned long lock_flags = 0;
9434         irqreturn_t rc = IRQ_HANDLED;
9435
9436         dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
9437         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9438
9439         ioa_cfg->msi_received = 1;
9440         wake_up(&ioa_cfg->msi_wait_q);
9441
9442         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9443         return rc;
9444 }
9445
9446 /**
9447  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9448  * @pdev:               PCI device struct
9449  *
9450  * Description: The return value from pci_enable_msi_range() cannot always be
9451  * trusted.  This routine sets up and initiates a test interrupt to determine
9452  * if the interrupt is received via the ipr_test_intr() service routine.
9453  * If the test fails, the driver will fall back to LSI.
9454  *
9455  * Return value:
9456  *      0 on success / non-zero on failure
9457  **/
9458 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9459 {
9460         int rc;
9461         volatile u32 int_reg;
9462         unsigned long lock_flags = 0;
9463
9464         ENTER;
9465
9466         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9467         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9468         ioa_cfg->msi_received = 0;
9469         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9470         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9471         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9472         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9473
9474         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9475                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9476         else
9477                 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9478         if (rc) {
9479                 dev_err(&pdev->dev, "Cannot assign irq %d\n", pdev->irq);
9480                 return rc;
9481         } else if (ipr_debug)
9482                 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9483
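        /* Fire a test interrupt and give it up to a second to arrive */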
9484         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9485         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9486         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9487         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9488         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9489
9490         if (!ioa_cfg->msi_received) {
9491                 /* MSI test failed */
9492                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9493                 rc = -EOPNOTSUPP;
9494         } else if (ipr_debug)
9495                 dev_info(&pdev->dev, "MSI test succeeded.\n");
9496
9497         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9498
9499         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9500                 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9501         else
9502                 free_irq(pdev->irq, ioa_cfg);
9503
9504         LEAVE;
9505
9506         return rc;
9507 }
9508
9509 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9510  * @pdev:               PCI device struct
9511  * @dev_id:             PCI device id struct
9512  *
9513  * Return value:
9514  *      0 on success / non-zero on failure
9515  **/
9516 static int ipr_probe_ioa(struct pci_dev *pdev,
9517                          const struct pci_device_id *dev_id)
9518 {
9519         struct ipr_ioa_cfg *ioa_cfg;
9520         struct Scsi_Host *host;
9521         unsigned long ipr_regs_pci;
9522         void __iomem *ipr_regs;
9523         int rc = PCIBIOS_SUCCESSFUL;
9524         volatile u32 mask, uproc, interrupts;
9525         unsigned long lock_flags, driver_lock_flags;
9526
9527         ENTER;
9528
9529         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9530         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9531
9532         if (!host) {
9533                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9534                 rc = -ENOMEM;
9535                 goto out;
9536         }
9537
9538         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9539         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9540         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9541
9542         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9543
9544         if (!ioa_cfg->ipr_chip) {
9545                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9546                         dev_id->vendor, dev_id->device);
9547                 goto out_scsi_host_put;
9548         }
9549
9550         /* set SIS 32 or SIS 64 */
9551         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9552         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9553         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9554         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9555
9556         if (ipr_transop_timeout)
9557                 ioa_cfg->transop_timeout = ipr_transop_timeout;
9558         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9559                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9560         else
9561                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9562
9563         ioa_cfg->revid = pdev->revision;
9564
9565         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9566
9567         ipr_regs_pci = pci_resource_start(pdev, 0);
9568
9569         rc = pci_request_regions(pdev, IPR_NAME);
9570         if (rc < 0) {
9571                 dev_err(&pdev->dev,
9572                         "Couldn't register memory range of registers\n");
9573                 goto out_scsi_host_put;
9574         }
9575
9576         rc = pci_enable_device(pdev);
9577
9578         if (rc || pci_channel_offline(pdev)) {
9579                 if (pci_channel_offline(pdev)) {
9580                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9581                         rc = pci_enable_device(pdev);
9582                 }
9583
9584                 if (rc) {
9585                         dev_err(&pdev->dev, "Cannot enable adapter\n");
9586                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9587                         goto out_release_regions;
9588                 }
9589         }
9590
9591         ipr_regs = pci_ioremap_bar(pdev, 0);
9592
9593         if (!ipr_regs) {
9594                 dev_err(&pdev->dev,
9595                         "Couldn't map memory range of registers\n");
9596                 rc = -ENOMEM;
9597                 goto out_disable;
9598         }
9599
9600         ioa_cfg->hdw_dma_regs = ipr_regs;
9601         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9602         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9603
9604         ipr_init_regs(ioa_cfg);
9605
9606         if (ioa_cfg->sis64) {
9607                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9608                 if (rc < 0) {
9609                         dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9610                         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9611                 }
9612         } else
9613                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9614
9615         if (rc < 0) {
9616                 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9617                 goto cleanup_nomem;
9618         }
9619
9620         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9621                                    ioa_cfg->chip_cfg->cache_line_size);
9622
9623         if (rc != PCIBIOS_SUCCESSFUL) {
9624                 dev_err(&pdev->dev, "Write of cache line size failed\n");
9625                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9626                 rc = -EIO;
9627                 goto cleanup_nomem;
9628         }
9629
9630         /* Issue MMIO read to ensure card is not in EEH */
9631         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9632         ipr_wait_for_pci_err_recovery(ioa_cfg);
9633
9634         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9635                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9636                         IPR_MAX_MSIX_VECTORS);
9637                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9638         }
9639
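        /* Prefer MSI-X, fall back to MSI, then to legacy interrupts (LSI) */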
9640         if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9641                         ipr_enable_msix(ioa_cfg) == 0)
9642                 ioa_cfg->intr_flag = IPR_USE_MSIX;
9643         else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9644                         ipr_enable_msi(ioa_cfg) == 0)
9645                 ioa_cfg->intr_flag = IPR_USE_MSI;
9646         else {
9647                 ioa_cfg->intr_flag = IPR_USE_LSI;
9648                 ioa_cfg->nvectors = 1;
9649                 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9650         }
9651
9652         pci_set_master(pdev);
9653
9654         if (pci_channel_offline(pdev)) {
9655                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9656                 pci_set_master(pdev);
9657                 if (pci_channel_offline(pdev)) {
9658                         rc = -EIO;
9659                         goto out_msi_disable;
9660                 }
9661         }
9662
9663         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9664             ioa_cfg->intr_flag == IPR_USE_MSIX) {
9665                 rc = ipr_test_msi(ioa_cfg, pdev);
9666                 if (rc == -EOPNOTSUPP) {
9667                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9668                         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9669                                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9670                                 pci_disable_msi(pdev);
9671                         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9672                                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9673                                 pci_disable_msix(pdev);
9674                         }
9675
9676                         ioa_cfg->intr_flag = IPR_USE_LSI;
9677                         ioa_cfg->nvectors = 1;
9678                 } else if (rc) {
9679                         goto out_msi_disable;
9680                 } else {
9682                         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9683                                 dev_info(&pdev->dev,
9684                                         "Request for %d MSIs succeeded with starting IRQ: %d\n",
9685                                         ioa_cfg->nvectors, pdev->irq);
9686                         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9687                                 dev_info(&pdev->dev,
9688                                         "Request for %d MSIXs succeeded.\n",
9689                                         ioa_cfg->nvectors);
9690                 }
9691         }
9692
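        /* Use one HRRQ per vector, bounded by online CPUs and the driver max */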
9693         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9694                                 (unsigned int)num_online_cpus(),
9695                                 (unsigned int)IPR_MAX_HRRQ_NUM);
9696
9697         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9698                 goto out_msi_disable;
9699
9700         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9701                 goto out_msi_disable;
9702
9703         rc = ipr_alloc_mem(ioa_cfg);
9704         if (rc < 0) {
9705                 dev_err(&pdev->dev,
9706                         "Couldn't allocate enough memory for device driver!\n");
9707                 goto out_msi_disable;
9708         }
9709
9710         /* Save away PCI config space for use following IOA reset */
9711         rc = pci_save_state(pdev);
9712
9713         if (rc != PCIBIOS_SUCCESSFUL) {
9714                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9715                 rc = -EIO;
9716                 goto cleanup_nolog;
9717         }
9718
9719         /*
9720          * If HRRQ updated interrupt is not masked, or reset alert is set,
9721          * the card is in an unknown state and needs a hard reset
9722          */
9723         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9724         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9725         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9726         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9727                 ioa_cfg->needs_hard_reset = 1;
9728         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9729                 ioa_cfg->needs_hard_reset = 1;
9730         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9731                 ioa_cfg->ioa_unit_checked = 1;
9732
9733         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9734         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9735         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9736
9737         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9738                         ioa_cfg->intr_flag == IPR_USE_MSIX) {
9739                 name_msi_vectors(ioa_cfg);
9740                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9741                         0,
9742                         ioa_cfg->vectors_info[0].desc,
9743                         &ioa_cfg->hrrq[0]);
9744                 if (!rc)
9745                         rc = ipr_request_other_msi_irqs(ioa_cfg);
9746         } else {
9747                 rc = request_irq(pdev->irq, ipr_isr,
9748                          IRQF_SHARED,
9749                          IPR_NAME, &ioa_cfg->hrrq[0]);
9750         }
9751         if (rc) {
9752                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9753                         pdev->irq, rc);
9754                 goto cleanup_nolog;
9755         }
9756
9757         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9758             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9759                 ioa_cfg->needs_warm_reset = 1;
9760                 ioa_cfg->reset = ipr_reset_slot_reset;
9761         } else
9762                 ioa_cfg->reset = ipr_reset_start_bist;
9763
9764         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9765         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9766         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9767
9768         LEAVE;
9769 out:
9770         return rc;
9771
9772 cleanup_nolog:
9773         ipr_free_mem(ioa_cfg);
9774 out_msi_disable:
9775         ipr_wait_for_pci_err_recovery(ioa_cfg);
9776         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9777                 pci_disable_msi(pdev);
9778         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9779                 pci_disable_msix(pdev);
9780 cleanup_nomem:
9781         iounmap(ipr_regs);
9782 out_disable:
9783         pci_disable_device(pdev);
9784 out_release_regions:
9785         pci_release_regions(pdev);
9786 out_scsi_host_put:
9787         scsi_host_put(host);
9788         goto out;
9789 }
9790
9791 /**
9792  * ipr_scan_vsets - Scans for VSET devices
9793  * @ioa_cfg:    ioa config struct
9794  *
9795  * Description: Since the VSET resources do not follow SAM in that we can have
9796  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9797  *
9798  * Return value:
9799  *      none
9800  **/
9801 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9802 {
9803         int target, lun;
9804
9805         for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9806                 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9807                         scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9808 }
9809
9810 /**
9811  * ipr_initiate_ioa_bringdown - Bring down an adapter
9812  * @ioa_cfg:            ioa config struct
9813  * @shutdown_type:      shutdown type
9814  *
9815  * Description: This function will initiate bringing down the adapter.
9816  * This consists of issuing an IOA shutdown to the adapter
9817  * to flush the cache, and running BIST.
9818  * If the caller needs to wait on the completion of the reset,
9819  * the caller must sleep on the reset_wait_q.
9820  *
9821  * Return value:
9822  *      none
9823  **/
9824 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9825                                        enum ipr_shutdown_type shutdown_type)
9826 {
9827         ENTER;
9828         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9829                 ioa_cfg->sdt_state = ABORT_DUMP;
9830         ioa_cfg->reset_retries = 0;
9831         ioa_cfg->in_ioa_bringdown = 1;
9832         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9833         LEAVE;
9834 }

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;
	ENTER;

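	/* Let any reset/reload already in flight finish before tearing down. */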
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
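	/* Ensure removing_ioa is visible to all CPUs before the bringdown starts. */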
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id entry
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

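	/*
	 * Discover devices: run the normal SCSI scan, then the sparse
	 * VSET scan, and finally expose the IOA itself as a device.
	 */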
	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

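	/*
	 * On SIS-64 adapters with multiple MSI-X vectors, service the
	 * secondary HRRQs through blk_iopoll so their completions are
	 * polled rather than handled entirely in interrupt context.
	 */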
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
		}
	}

	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It issues
 * a shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

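/*
 * PCI IDs this driver binds to. Entries are matched on vendor, device,
 * subvendor, and subdevice IDs; the final field (driver_data) carries
 * per-adapter IPR_USE_* quirk flags checked during probe.
 */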
static const struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

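/* PCI AER hooks so the driver can participate in PCI error recovery. */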
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	system event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:	unused
 *
 * Return value:
 *	NOTIFY_OK for shutdown events / NOTIFY_DONE for all other events
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}

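/*
 * Reboot notifier: calls ipr_halt() on restart, halt, or power-off so
 * each adapter gets a shutdown prepare before the system goes down.
 */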
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);