2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
/*
 * Driver-wide state.  Most of the variables below back the module
 * parameters declared further down via module_param_named(); the
 * initializers here are the parameter defaults.
 */
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
/* testmode, fastfail and debug default off; transop_timeout of 0 means
 * "use the driver's built-in default" (300s per the parameter description). */
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static unsigned int ipr_number_of_msix = 2;
/* NOTE(review): presumably guards ipr_ioa_head and other driver-global
 * state — confirm against the lock's use sites elsewhere in the file. */
102 static DEFINE_SPINLOCK(ipr_driver_lock);
104 /* This table describes the differences between DMA controller chips */
/*
 * Per-chip-family register layout.  Each entry gives the offsets of the
 * interrupt mask/clear/sense registers, the IOARRIN doorbell, and the
 * microprocessor interrupt registers within the adapter's register space.
 * Entries are referenced by index from ipr_chip[] below.
 */
105 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
106 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
109 .cache_line_size = 0x20,
113 .set_interrupt_mask_reg = 0x0022C,
114 .clr_interrupt_mask_reg = 0x00230,
115 .clr_interrupt_mask_reg32 = 0x00230,
116 .sense_interrupt_mask_reg = 0x0022C,
117 .sense_interrupt_mask_reg32 = 0x0022C,
118 .clr_interrupt_reg = 0x00228,
119 .clr_interrupt_reg32 = 0x00228,
120 .sense_interrupt_reg = 0x00224,
121 .sense_interrupt_reg32 = 0x00224,
122 .ioarrin_reg = 0x00404,
123 .sense_uproc_interrupt_reg = 0x00214,
124 .sense_uproc_interrupt_reg32 = 0x00214,
125 .set_uproc_interrupt_reg = 0x00214,
126 .set_uproc_interrupt_reg32 = 0x00214,
127 .clr_uproc_interrupt_reg = 0x00218,
128 .clr_uproc_interrupt_reg32 = 0x00218
131 { /* Snipe and Scamp */
134 .cache_line_size = 0x20,
138 .set_interrupt_mask_reg = 0x00288,
139 .clr_interrupt_mask_reg = 0x0028C,
140 .clr_interrupt_mask_reg32 = 0x0028C,
141 .sense_interrupt_mask_reg = 0x00288,
142 .sense_interrupt_mask_reg32 = 0x00288,
143 .clr_interrupt_reg = 0x00284,
144 .clr_interrupt_reg32 = 0x00284,
145 .sense_interrupt_reg = 0x00280,
146 .sense_interrupt_reg32 = 0x00280,
147 .ioarrin_reg = 0x00504,
148 .sense_uproc_interrupt_reg = 0x00290,
149 .sense_uproc_interrupt_reg32 = 0x00290,
150 .set_uproc_interrupt_reg = 0x00290,
151 .set_uproc_interrupt_reg32 = 0x00290,
152 .clr_uproc_interrupt_reg = 0x00294,
153 .clr_uproc_interrupt_reg32 = 0x00294
/* Third entry: used by the SIS-64 adapters in ipr_chip[] (CROC FPGA /
 * Crocodile).  NOTE(review): its opening "{ /" comment line is missing
 * from this excerpt — restore from the upstream source. */
159 .cache_line_size = 0x20,
163 .set_interrupt_mask_reg = 0x00010,
164 .clr_interrupt_mask_reg = 0x00018,
165 .clr_interrupt_mask_reg32 = 0x0001C,
166 .sense_interrupt_mask_reg = 0x00010,
167 .sense_interrupt_mask_reg32 = 0x00014,
168 .clr_interrupt_reg = 0x00008,
169 .clr_interrupt_reg32 = 0x0000C,
170 .sense_interrupt_reg = 0x00000,
171 .sense_interrupt_reg32 = 0x00004,
172 .ioarrin_reg = 0x00070,
173 .sense_uproc_interrupt_reg = 0x00020,
174 .sense_uproc_interrupt_reg32 = 0x00024,
175 .set_uproc_interrupt_reg = 0x00020,
176 .set_uproc_interrupt_reg32 = 0x00024,
177 .clr_uproc_interrupt_reg = 0x00028,
178 .clr_uproc_interrupt_reg32 = 0x0002C,
/* The SIS-64 family additionally exposes init-feedback, dump and
 * endianness-swap registers not present on the older chips. */
179 .init_feedback_reg = 0x0005C,
180 .dump_addr_reg = 0x00064,
181 .dump_data_reg = 0x00068,
182 .endian_swap_reg = 0x00084
/*
 * PCI vendor/device -> chip description.  Each entry selects the interrupt
 * mode (LSI vs MSI), the SIS interface level (32 vs 64 bit), the config
 * access method (PCI config space vs MMIO), and the register layout from
 * ipr_chip_cfg[] above.
 */
187 static const struct ipr_chip_t ipr_chip[] = {
188 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
189 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
194 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
/* Indexed by the max_speed module parameter (0-2): 80 MB/s, U160, U320. */
199 static int ipr_max_bus_speeds[] = {
200 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
/*
 * Module metadata and parameter declarations.  Parameters with permission
 * 0 are load-time only; fastfail and debug (S_IRUGO | S_IWUSR) are also
 * runtime-writable through sysfs.
 */
203 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
204 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
205 module_param_named(max_speed, ipr_max_speed, uint, 0);
206 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
207 module_param_named(log_level, ipr_log_level, uint, 0);
208 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
209 module_param_named(testmode, ipr_testmode, int, 0);
210 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
211 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
212 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
213 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
214 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
215 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
216 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
217 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
218 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
219 module_param_named(max_devs, ipr_max_devs, int, 0);
220 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
221 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
222 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
223 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(IPR_DRIVER_VERSION);
227 /* A constant array of IOASCs/URCs/Error Messages */
229 struct ipr_error_table_t ipr_error_table[] = {
230 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
231 "8155: An unknown error was received"},
233 "Soft underlength error"},
235 "Command to be cancelled not found"},
237 "Qualified success"},
238 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
239 "FFFE: Soft device bus error recovered by the IOA"},
240 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
241 "4101: Soft device bus fabric error"},
242 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
243 "FFFC: Logical block guard error recovered by the device"},
244 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
245 "FFFC: Logical block reference tag error recovered by the device"},
246 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
247 "4171: Recovered scatter list tag / sequence number error"},
248 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
249 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
250 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
251 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
252 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
253 "FFFD: Recovered logical block reference tag error detected by the IOA"},
254 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
255 "FFFD: Logical block guard error recovered by the IOA"},
256 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
257 "FFF9: Device sector reassign successful"},
258 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
259 "FFF7: Media error recovered by device rewrite procedures"},
260 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
261 "7001: IOA sector reassignment successful"},
262 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
263 "FFF9: Soft media error. Sector reassignment recommended"},
264 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
265 "FFF7: Media error recovered by IOA rewrite procedures"},
266 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
267 "FF3D: Soft PCI bus error recovered by the IOA"},
268 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
269 "FFF6: Device hardware error recovered by the IOA"},
270 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
271 "FFF6: Device hardware error recovered by the device"},
272 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
273 "FF3D: Soft IOA error recovered by the IOA"},
274 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
275 "FFFA: Undefined device response recovered by the IOA"},
276 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
277 "FFF6: Device bus error, message or command phase"},
278 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
279 "FFFE: Task Management Function failed"},
280 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
281 "FFF6: Failure prediction threshold exceeded"},
282 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
283 "8009: Impending cache battery pack failure"},
285 "34FF: Disk device format in progress"},
286 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
287 "9070: IOA requested reset"},
289 "Synchronization required"},
291 "No ready, IOA shutdown"},
293 "Not ready, IOA has been shutdown"},
294 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
295 "3020: Storage subsystem configuration error"},
297 "FFF5: Medium error, data unreadable, recommend reassign"},
299 "7000: Medium error, data unreadable, do not reassign"},
300 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
301 "FFF3: Disk media format bad"},
302 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
303 "3002: Addressed device failed to respond to selection"},
304 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
305 "3100: Device bus error"},
306 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
307 "3109: IOA timed out a device command"},
309 "3120: SCSI bus is not operational"},
310 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
311 "4100: Hard device bus fabric error"},
312 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
313 "310C: Logical block guard error detected by the device"},
314 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
315 "310C: Logical block reference tag error detected by the device"},
316 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
317 "4170: Scatter list tag / sequence number error"},
318 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
319 "8150: Logical block CRC error on IOA to Host transfer"},
320 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
321 "4170: Logical block sequence number error on IOA to Host transfer"},
322 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
323 "310D: Logical block reference tag error detected by the IOA"},
324 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
325 "310D: Logical block guard error detected by the IOA"},
326 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
327 "9000: IOA reserved area data check"},
328 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
329 "9001: IOA reserved area invalid data pattern"},
330 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
331 "9002: IOA reserved area LRC error"},
332 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
333 "Hardware Error, IOA metadata access error"},
334 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
335 "102E: Out of alternate sectors for disk storage"},
336 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
337 "FFF4: Data transfer underlength error"},
338 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
339 "FFF4: Data transfer overlength error"},
340 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
341 "3400: Logical unit failure"},
342 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
343 "FFF4: Device microcode is corrupt"},
344 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
345 "8150: PCI bus error"},
347 "Unsupported device bus message received"},
348 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
349 "FFF4: Disk device problem"},
350 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
351 "8150: Permanent IOA failure"},
352 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
353 "3010: Disk device returned wrong response to IOA"},
354 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
355 "8151: IOA microcode error"},
357 "Device bus status error"},
358 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
359 "8157: IOA error requiring IOA reset to recover"},
361 "ATA device status error"},
363 "Message reject received from the device"},
364 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
365 "8008: A permanent cache battery pack failure occurred"},
366 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
367 "9090: Disk unit has been modified after the last known status"},
368 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
369 "9081: IOA detected device error"},
370 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
371 "9082: IOA detected device error"},
372 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
373 "3110: Device bus error, message or command phase"},
374 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
375 "3110: SAS Command / Task Management Function failed"},
376 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
377 "9091: Incorrect hardware configuration change has been detected"},
378 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
379 "9073: Invalid multi-adapter configuration"},
380 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
381 "4010: Incorrect connection between cascaded expanders"},
382 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
383 "4020: Connections exceed IOA design limits"},
384 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
385 "4030: Incorrect multipath connection"},
386 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
387 "4110: Unsupported enclosure function"},
388 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
389 "FFF4: Command to logical unit failed"},
391 "Illegal request, invalid request type or request packet"},
393 "Illegal request, invalid resource handle"},
395 "Illegal request, commands not allowed to this device"},
397 "Illegal request, command not allowed to a secondary adapter"},
399 "Illegal request, command not allowed to a non-optimized resource"},
401 "Illegal request, invalid field in parameter list"},
403 "Illegal request, parameter not supported"},
405 "Illegal request, parameter value invalid"},
407 "Illegal request, command sequence error"},
409 "Illegal request, dual adapter support not enabled"},
410 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
411 "9031: Array protection temporarily suspended, protection resuming"},
412 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
413 "9040: Array protection temporarily suspended, protection resuming"},
414 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
415 "3140: Device bus not ready to ready transition"},
416 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
417 "FFFB: SCSI bus was reset"},
419 "FFFE: SCSI bus transition to single ended"},
421 "FFFE: SCSI bus transition to LVD"},
422 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
423 "FFFB: SCSI bus was reset by another initiator"},
424 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
425 "3029: A device replacement has occurred"},
426 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
427 "9051: IOA cache data exists for a missing or failed device"},
428 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
429 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
430 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
431 "9025: Disk unit is not supported at its physical location"},
432 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
433 "3020: IOA detected a SCSI bus configuration error"},
434 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
435 "3150: SCSI bus configuration error"},
436 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
437 "9074: Asymmetric advanced function disk configuration"},
438 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
439 "4040: Incomplete multipath connection between IOA and enclosure"},
440 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
441 "4041: Incomplete multipath connection between enclosure and device"},
442 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
443 "9075: Incomplete multipath connection between IOA and remote IOA"},
444 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
445 "9076: Configuration error, missing remote IOA"},
446 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
447 "4050: Enclosure does not support a required multipath function"},
448 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
449 "4070: Logically bad block written on device"},
450 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
451 "9041: Array protection temporarily suspended"},
452 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
453 "9042: Corrupt array parity detected on specified device"},
454 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
455 "9030: Array no longer protected due to missing or failed disk unit"},
456 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
457 "9071: Link operational transition"},
458 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
459 "9072: Link not operational transition"},
460 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
461 "9032: Array exposed but still protected"},
462 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
463 "70DD: Device forced failed by disrupt device command"},
464 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
465 "4061: Multipath redundancy level got better"},
466 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
467 "4060: Multipath redundancy level got worse"},
469 "Failure due to other device"},
470 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
471 "9008: IOA does not support functions expected by devices"},
472 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
473 "9010: Cache data associated with attached devices cannot be found"},
474 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
475 "9011: Cache data belongs to devices other than those attached"},
476 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
477 "9020: Array missing 2 or more devices with only 1 device present"},
478 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
479 "9021: Array missing 2 or more devices with 2 or more devices present"},
480 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
481 "9022: Exposed array is missing a required device"},
482 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
483 "9023: Array member(s) not at required physical locations"},
484 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
485 "9024: Array not functional due to present hardware configuration"},
486 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
487 "9026: Array not functional due to present hardware configuration"},
488 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
489 "9027: Array is missing a device and parity is out of sync"},
490 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
491 "9028: Maximum number of arrays already exist"},
492 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
493 "9050: Required cache data cannot be located for a disk unit"},
494 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
495 "9052: Cache data exists for a device that has been modified"},
496 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
497 "9054: IOA resources not available due to previous problems"},
498 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
499 "9092: Disk unit requires initialization before use"},
500 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
501 "9029: Incorrect hardware configuration change has been detected"},
502 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
503 "9060: One or more disk pairs are missing from an array"},
504 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
505 "9061: One or more disks are missing from an array"},
506 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
507 "9062: One or more disks are missing from an array"},
508 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
509 "9063: Maximum number of functional arrays has been exceeded"},
511 "Aborted command, invalid descriptor"},
513 "Command terminated by host"}
/*
 * Enclosure (SES device) product-ID table: maps a product id pattern to
 * the maximum supported bus speed in MB/s.  NOTE(review): 'X' and '*'
 * in the compare string appear to be wildcard/don't-care positions —
 * confirm against the matching code that consumes this table.
 */
516 static const struct ipr_ses_table_entry ipr_ses_table[] = {
517 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
518 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
519 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
520 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
521 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
522 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
523 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
524 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
525 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
526 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
527 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
528 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
529 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
533 * Function Prototypes
/* Forward declarations for routines defined later in this file. */
535 static int ipr_reset_alert(struct ipr_cmnd *);
536 static void ipr_process_ccn(struct ipr_cmnd *);
537 static void ipr_process_error(struct ipr_cmnd *);
538 static void ipr_reset_ioa_job(struct ipr_cmnd *);
539 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
540 enum ipr_shutdown_type);
542 #ifdef CONFIG_SCSI_IPR_TRACE
544 * ipr_trc_hook - Add a trace entry to the driver trace
545 * @ipr_cmd: ipr command struct
547 * @add_data: additional data
/*
 * Records opcode, type, command index, resource handle and caller data in
 * the adapter's circular trace buffer.  Return value: none.
 */
552 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
553 u8 type, u32 add_data)
555 struct ipr_trace_entry *trace_entry;
556 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* atomic_add_return makes slot claiming safe against concurrent tracers;
 * the modulo wraps the index around the fixed-size ring. */
558 trace_entry = &ioa_cfg->trace[atomic_add_return
559 (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
560 trace_entry->time = jiffies;
561 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
562 trace_entry->type = type;
/* ATA opcode lives in a different spot for SIS-64 vs SIS-32 command blocks */
563 if (ipr_cmd->ioa_cfg->sis64)
564 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
566 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
567 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
568 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
569 trace_entry->u.add_data = add_data;
/* When tracing is compiled out, the hook collapses to a no-op macro. */
573 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
577 * ipr_lock_and_done - Acquire lock and complete command
578 * @ipr_cmd: ipr command struct
/*
 * Wrapper completion: takes the Scsi_Host lock around the command's done()
 * callback, for completion paths that run without the host lock held.
 */
583 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
585 unsigned long lock_flags;
586 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
588 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
589 ipr_cmd->done(ipr_cmd);
590 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
594 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
595 * @ipr_cmd: ipr command struct
/*
 * Resets the IOARCB command packet and transfer lengths, restores the
 * ioadl address to point back at the command's own ioadl area (layout
 * differs for SIS-64 vs SIS-32), and clears per-command status so the
 * block can be reissued.  The hrrq_id is preserved across the memset.
 */
600 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
602 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
603 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
604 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
605 dma_addr_t dma_addr = ipr_cmd->dma_addr;
/* Save/restore hrrq_id around wiping the whole command packet */
608 hrrq_id = ioarcb->cmd_pkt.hrrq_id;
609 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
610 ioarcb->cmd_pkt.hrrq_id = hrrq_id;
611 ioarcb->data_transfer_length = 0;
612 ioarcb->read_data_transfer_length = 0;
613 ioarcb->ioadl_len = 0;
614 ioarcb->read_ioadl_len = 0;
/* SIS-64 uses a single 64-bit ioadl address; SIS-32 keeps separate
 * 32-bit read/write ioadl addresses (set to the same list here). */
616 if (ipr_cmd->ioa_cfg->sis64) {
617 ioarcb->u.sis64_addr_data.data_ioadl_addr =
618 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
619 ioasa64->u.gata.status = 0;
621 ioarcb->write_ioadl_addr =
622 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
623 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
624 ioasa->u.gata.status = 0;
627 ioasa->hdr.ioasc = 0;
628 ioasa->hdr.residual_data_len = 0;
629 ipr_cmd->scsi_cmd = NULL;
631 ipr_cmd->sense_buffer[0] = 0;
632 ipr_cmd->dma_use_sg = 0;
636 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
637 * @ipr_cmd: ipr command struct
/*
 * Full initialization for a freshly-allocated command: reinit the IOARCB,
 * clear scratch/sibling/eh_comp state, install the fast-path completion
 * callback and prepare the per-command timer.
 */
642 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
643 void (*fast_done) (struct ipr_cmnd *))
645 ipr_reinit_ipr_cmnd(ipr_cmd);
646 ipr_cmd->u.scratch = 0;
647 ipr_cmd->sibling = NULL;
648 ipr_cmd->eh_comp = NULL;
649 ipr_cmd->fast_done = fast_done;
650 init_timer(&ipr_cmd->timer);
654 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
655 * @ioa_cfg: ioa config struct
658 * pointer to ipr command struct
/*
 * Pops the first command off the given hrrq's free list; result is NULL
 * when the list is empty.  NOTE(review): takes no lock itself — the
 * caller presumably holds the appropriate hrrq lock; confirm at call
 * sites.  (The trailing "return ipr_cmd;" line is elided in this excerpt.)
 */
661 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
663 struct ipr_cmnd *ipr_cmd = NULL;
665 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
666 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
667 struct ipr_cmnd, queue);
668 list_del(&ipr_cmd->queue);
676 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
677 * @ioa_cfg: ioa config struct
680 * pointer to ipr command struct
/*
 * Convenience wrapper: always allocates from the initialization hrrq
 * (IPR_INIT_HRRQ) and initializes the command with the lock-taking
 * completion callback (ipr_lock_and_done).
 */
683 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
685 struct ipr_cmnd *ipr_cmd =
686 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
687 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
692 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
693 * @ioa_cfg: ioa config struct
694 * @clr_ints: interrupts to clear
696 * This function masks all interrupts on the adapter, then clears the
697 * interrupts specified in the mask
702 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
705 volatile u32 int_reg;
708 /* Stop new interrupts */
709 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
710 spin_lock(&ioa_cfg->hrrq[i]._lock);
711 ioa_cfg->hrrq[i].allow_interrupts = 0;
712 spin_unlock(&ioa_cfg->hrrq[i]._lock);
716 /* Set interrupt mask to stop all new interrupts */
/* SIS-64 mask register is 64 bits wide (writeq); SIS-32 uses writel.
 * NOTE(review): the "if (ioa_cfg->sis64)/else" lines around these two
 * writes are elided in this excerpt. */
718 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg)
720 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
722 /* Clear any pending interrupts */
724 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
725 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
/* Read back the sense register — presumably to flush the posted MMIO
 * writes before returning; confirm against other readl-flush sites. */
726 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
730 * ipr_save_pcix_cmd_reg - Save PCI-X command register
731 * @ioa_cfg: ioa config struct
734 * 0 on success / -EIO on failure
/*
 * Saves the device's PCI-X command register (so it can be restored after
 * reset) and enables data-parity-error recovery and relaxed ordering in
 * the saved copy.  If the device has no PCI-X capability, the early exit
 * (elided in this excerpt) returns success without saving anything.
 */
736 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
738 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
740 if (pcix_cmd_reg == 0)
743 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
744 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
745 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
/* Force on data parity error recovery + relaxed ordering in the saved value */
749 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
754 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
755 * @ioa_cfg: ioa config struct
758 * 0 on success / -EIO on failure
/*
 * Writes the previously saved (and modified) PCI-X command register value
 * back to the device — the restore half of ipr_save_pcix_cmd_reg().
 */
760 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
762 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
765 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
766 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
767 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
776 * ipr_sata_eh_done - done function for aborted SATA commands
777 * @ipr_cmd: ipr command struct
779 * This function is invoked for ops generated to SATA
780 * devices which are being aborted.
/*
 * Marks the queued ATA command failed (AC_ERR_OTHER / ATA_BUSY) and
 * returns the ipr command block to its hrrq free list.
 */
785 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
787 struct ata_queued_cmd *qc = ipr_cmd->qc;
788 struct ipr_sata_port *sata_port = qc->ap->private_data;
790 qc->err_mask |= AC_ERR_OTHER;
791 sata_port->ioasa.status |= ATA_BUSY;
792 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
797 * ipr_scsi_eh_done - mid-layer done function for aborted ops
798 * @ipr_cmd: ipr command struct
800 * This function is invoked by the interrupt handler for
801 * ops generated by the SCSI mid-layer which are being aborted.
/*
 * Fails the SCSI command with DID_ERROR, unmaps its DMA scatterlist,
 * completes it back to the mid-layer, wakes any error-handler thread
 * waiting on eh_comp, and returns the block to the hrrq free list.
 */
806 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
808 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
810 scsi_cmd->result |= (DID_ERROR << 16);
812 scsi_dma_unmap(ipr_cmd->scsi_cmd);
813 scsi_cmd->scsi_done(scsi_cmd);
/* Wake an eh thread waiting for this command, if any */
814 if (ipr_cmd->eh_comp)
815 complete(ipr_cmd->eh_comp);
816 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
820 * ipr_fail_all_ops - Fails all outstanding ops.
821 * @ioa_cfg: ioa config struct
823 * This function fails all outstanding ops.
/*
 * Walks every hrrq's pending queue under its lock and completes each
 * command with IPR_IOASC_IOA_WAS_RESET, dispatching to the SCSI or SATA
 * error-done routine depending on the command's origin.
 */
828 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
830 struct ipr_cmnd *ipr_cmd, *temp;
831 struct ipr_hrr_queue *hrrq;
834 for_each_hrrq(hrrq, ioa_cfg) {
835 spin_lock(&hrrq->_lock);
/* _safe iteration: each command is unlinked before being completed */
836 list_for_each_entry_safe(ipr_cmd,
837 temp, &hrrq->hrrq_pending_q, queue) {
838 list_del(&ipr_cmd->queue);
840 ipr_cmd->s.ioasa.hdr.ioasc =
841 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
842 ipr_cmd->s.ioasa.hdr.ilid =
843 cpu_to_be32(IPR_DRIVER_ILID);
/* Route to the matching abort-completion path (SCSI vs SATA) */
845 if (ipr_cmd->scsi_cmd)
846 ipr_cmd->done = ipr_scsi_eh_done;
847 else if (ipr_cmd->qc)
848 ipr_cmd->done = ipr_sata_eh_done;
850 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
851 IPR_IOASC_IOA_WAS_RESET);
/* Kill the command's timeout timer before completing it */
852 del_timer(&ipr_cmd->timer);
853 ipr_cmd->done(ipr_cmd);
855 spin_unlock(&hrrq->_lock);
861 * ipr_send_command - Send driver initiated requests.
862 * @ipr_cmd: ipr command struct
864 * This function sends a command to the adapter using the correct write call.
865 * In the case of sis64, calculate the ioarcb size required. Then or in the
/*
 * Rings the IOARRIN doorbell with the command's DMA address.  For SIS-64,
 * low-order address bits encode the IOARCB size (0x1 = default 256 bytes,
 * |0x4 selects the 512-byte form when the scatter list needs it) and a
 * 64-bit writeq is used; SIS-32 writes the plain 32-bit address.
 */
871 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
873 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
874 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
876 if (ioa_cfg->sis64) {
877 /* The default size is 256 bytes */
878 send_dma_addr |= 0x1;
880 /* If the number of ioadls * size of ioadl > 128 bytes,
881 then use a 512 byte ioarcb */
882 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
883 send_dma_addr |= 0x4;
884 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
886 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
890 * ipr_do_req - Send driver initiated requests.
891 * @ipr_cmd: ipr command struct
892 * @done: done function
893 * @timeout_func: timeout function
894 * @timeout: timeout value
896 * This function sends the specified command to the adapter with the
897 * timeout given. The done function is invoked on command completion.
/*
 * Queues the command on its hrrq pending list, arms the per-command
 * timer with @timeout_func, traces the start, and rings the doorbell.
 * @timeout is in jiffies (added directly to the current jiffies value).
 */
902 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
903 void (*done) (struct ipr_cmnd *),
904 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
906 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
908 ipr_cmd->done = done;
/* Legacy timer API: the command pointer rides in timer.data and the
 * handler is cast to the (unsigned long) callback signature. */
910 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
911 ipr_cmd->timer.expires = jiffies + timeout;
912 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
914 add_timer(&ipr_cmd->timer);
916 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
918 ipr_send_command(ipr_cmd);
922 * ipr_internal_cmd_done - Op done function for an internally generated op.
923 * @ipr_cmd: ipr command struct
925 * This function is the op done function for an internally generated,
926 * blocking op. It simply wakes the sleeping thread.
/* Pairs with ipr_send_blocking_cmd(): completes ipr_cmd->completion. */
931 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
933 if (ipr_cmd->sibling)
934 ipr_cmd->sibling = NULL;
936 complete(&ipr_cmd->completion);
940 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
941 * @ipr_cmd: ipr command struct
942 * @dma_addr: dma address
943 * @len: transfer length
944 * @flags: ioadl flag value
946 * This function initializes an ioadl in the case where there is only a single
/*
 * Builds a one-element ioadl (scatter list) describing a single DMA
 * segment, in the format appropriate for the adapter's SIS level, and
 * records the transfer length in the IOARCB.  For SIS-32 the read and
 * write directions use separate IOARCB length/list fields, selected by
 * whether @flags is IPR_IOADL_FLAGS_READ_LAST.
 */
952 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
955 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
956 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
958 ipr_cmd->dma_use_sg = 1;
960 if (ipr_cmd->ioa_cfg->sis64) {
961 ioadl64->flags = cpu_to_be32(flags);
962 ioadl64->data_len = cpu_to_be32(len);
963 ioadl64->address = cpu_to_be64(dma_addr);
965 ipr_cmd->ioarcb.ioadl_len =
966 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
967 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
/* SIS-32 packs flags and length into one big-endian word */
969 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
970 ioadl->address = cpu_to_be32(dma_addr);
972 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
973 ipr_cmd->ioarcb.read_ioadl_len =
974 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
975 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
977 ipr_cmd->ioarcb.ioadl_len =
978 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
979 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
985 * ipr_send_blocking_cmd - Send command and sleep on its completion.
986 * @ipr_cmd: ipr command struct
987 * @timeout_func: function to invoke if command times out
 * Must be entered with the host lock held; the lock is dropped while the
 * thread sleeps and re-taken before returning.
993 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
994 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
997 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
999 init_completion(&ipr_cmd->completion);
/* ipr_internal_cmd_done() completes the completion when the op finishes. */
1000 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1002 spin_unlock_irq(ioa_cfg->host->host_lock);
1003 wait_for_completion(&ipr_cmd->completion);
1004 spin_lock_irq(ioa_cfg->host->host_lock);
/* Pick an HRRQ for a new command.  With one queue everything maps to index 0
 * (the early-return body is not visible in this chunk); with multiple queues,
 * round-robin over indexes 1..hrrq_num-1, reserving queue 0 (IPR_INIT_HRRQ)
 * for internal/initialization traffic. */
1007 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1009 if (ioa_cfg->hrrq_num == 1)
1012 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
1016 * ipr_send_hcam - Send an HCAM to the adapter.
1017 * @ioa_cfg: ioa config struct
 * @type: HCAM subtype, placed in CDB byte 1
1019 * @hostrcb: hostrcb struct
1021 * This function will send a Host Controlled Async command to the adapter.
1022 * If HCAMs are currently not allowed to be issued to the adapter, it will
1023 * place the hostrcb on the free queue.
1028 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1029 struct ipr_hostrcb *hostrcb)
1031 struct ipr_cmnd *ipr_cmd;
1032 struct ipr_ioarcb *ioarcb;
1034 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1035 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
/* Track both the command and the hostrcb on their pending lists. */
1036 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1037 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1039 ipr_cmd->u.hostrcb = hostrcb;
1040 ioarcb = &ipr_cmd->ioarcb;
1042 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1043 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1044 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1045 ioarcb->cmd_pkt.cdb[1] = type;
/* CDB bytes 7/8 carry the big-endian allocation length of the HCAM buffer. */
1046 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1047 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1049 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1050 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
/* Route the completion to the CCN or error handler by HCAM type. */
1052 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1053 ipr_cmd->done = ipr_process_ccn;
1055 ipr_cmd->done = ipr_process_error;
1057 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1059 ipr_send_command(ipr_cmd);
/* HCAMs not allowed: park the hostrcb on the free queue (else arm). */
1061 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1066 * ipr_update_ata_class - Update the ata class in the resource entry
1067 * @res: resource entry struct
1068 * @proto: cfgte device bus protocol value
 * Maps the bus protocol to an ATA device class.  This is the interior of a
 * switch on @proto (the `switch`/`break` lines are not visible in this
 * chunk); paired case labels share one assignment.
1073 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1076 case IPR_PROTO_SATA:
1077 case IPR_PROTO_SAS_STP:
1078 res->ata_class = ATA_DEV_ATA;
1080 case IPR_PROTO_SATA_ATAPI:
1081 case IPR_PROTO_SAS_STP_ATAPI:
1082 res->ata_class = ATA_DEV_ATAPI;
/* Any other protocol: not an ATA device (default arm). */
1085 res->ata_class = ATA_DEV_UNKNOWN;
1091 * ipr_init_res_entry - Initialize a resource entry struct.
1092 * @res: resource entry struct
1093 * @cfgtew: config table entry wrapper struct
 * Fills a fresh resource entry from the adapter config-table entry.  SIS64
 * adapters assign bus/target numbers from per-type ID bitmaps; 32-bit SIS
 * takes the resource address verbatim from the config entry.
1098 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1099 struct ipr_config_table_entry_wrapper *cfgtew)
1103 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1104 struct ipr_resource_entry *gscsi_res = NULL;
1106 res->needs_sync_complete = 0;
1109 res->del_from_ml = 0;
1110 res->resetting_device = 0;
1112 res->sata_port = NULL;
1114 if (ioa_cfg->sis64) {
1115 proto = cfgtew->u.cfgte64->proto;
1116 res->res_flags = cfgtew->u.cfgte64->res_flags;
1117 res->qmodel = IPR_QUEUEING_MODEL64(res);
1118 res->type = cfgtew->u.cfgte64->res_type;
1120 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1121 sizeof(res->res_path));
1124 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1125 sizeof(res->dev_lun.scsi_lun));
1126 res->lun = scsilun_to_int(&res->dev_lun);
1128 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
/* Reuse the target id of an existing device with the same dev_id (e.g.
 * another LUN of the same device) before allocating a new one. */
1129 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1130 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1132 res->target = gscsi_res->target;
/* No match found: claim the next free target id from the bitmap. */
1137 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1138 ioa_cfg->max_devs_supported);
1139 set_bit(res->target, ioa_cfg->target_ids);
1141 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1142 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1144 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1145 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1146 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1147 ioa_cfg->max_devs_supported);
1148 set_bit(res->target, ioa_cfg->array_ids);
1149 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1150 res->bus = IPR_VSET_VIRTUAL_BUS;
1151 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1152 ioa_cfg->max_devs_supported);
1153 set_bit(res->target, ioa_cfg->vset_ids);
/* Fallback arm for any other resource type (its `} else {` line is not
 * visible in this chunk): allocate from the generic target id bitmap. */
1155 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1156 ioa_cfg->max_devs_supported);
1157 set_bit(res->target, ioa_cfg->target_ids);
/* 32-bit SIS: address and type come straight from the config entry. */
1160 proto = cfgtew->u.cfgte->proto;
1161 res->qmodel = IPR_QUEUEING_MODEL(res);
1162 res->flags = cfgtew->u.cfgte->flags;
1163 if (res->flags & IPR_IS_IOA_RESOURCE)
1164 res->type = IPR_RES_TYPE_IOAFP;
1166 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1168 res->bus = cfgtew->u.cfgte->res_addr.bus;
1169 res->target = cfgtew->u.cfgte->res_addr.target;
1170 res->lun = cfgtew->u.cfgte->res_addr.lun;
1171 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1174 ipr_update_ata_class(res, proto);
1178 * ipr_is_same_device - Determine if two devices are the same.
1179 * @res: resource entry struct
1180 * @cfgtew: config table entry wrapper struct
 * SIS64 identity is (dev_id, LUN); 32-bit SIS identity is the
 * bus/target/lun resource address.  The `return 1`/`return 0` lines are not
 * visible in this chunk.
1183 * 1 if the devices are the same / 0 otherwise
1185 static int ipr_is_same_device(struct ipr_resource_entry *res,
1186 struct ipr_config_table_entry_wrapper *cfgtew)
1188 if (res->ioa_cfg->sis64) {
1189 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1190 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1191 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1192 sizeof(cfgtew->u.cfgte64->lun))) {
1196 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1197 res->target == cfgtew->u.cfgte->res_addr.target &&
1198 res->lun == cfgtew->u.cfgte->res_addr.lun)
1206 * __ipr_format_res_path - Format the resource path for printing.
1207 * @res_path: resource path
 * @buffer: output buffer (returned to the caller)
1209 * @len: length of buffer provided
 * Renders the path as "XX-XX-...": a 0xff element terminates the path, and
 * the (i * 3) < len bound accounts for the 3 characters each "-XX" adds.
1214 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1220 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1221 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1222 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1228 * ipr_format_res_path - Format the resource path for printing.
1229 * @ioa_cfg: ioa config struct
1230 * @res_path: resource path
1232 * @len: length of buffer provided
1237 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1238 u8 *res_path, char *buffer, int len)
1243 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1244 __ipr_format_res_path(res_path, p, len - (buffer - p));
1249 * ipr_update_res_entry - Update the resource entry.
1250 * @res: resource entry struct
1251 * @cfgtew: config table entry wrapper struct
 * Refreshes an existing resource entry from a new config-table entry; on
 * SIS64 a changed resource path is logged against the attached sdev.
1256 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1257 struct ipr_config_table_entry_wrapper *cfgtew)
1259 char buffer[IPR_MAX_RES_PATH_LENGTH];
1263 if (res->ioa_cfg->sis64) {
1264 res->flags = cfgtew->u.cfgte64->flags;
1265 res->res_flags = cfgtew->u.cfgte64->res_flags;
1266 res->type = cfgtew->u.cfgte64->res_type;
1268 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1269 sizeof(struct ipr_std_inq_data));
1271 res->qmodel = IPR_QUEUEING_MODEL64(res);
1272 proto = cfgtew->u.cfgte64->proto;
1273 res->res_handle = cfgtew->u.cfgte64->res_handle;
1274 res->dev_id = cfgtew->u.cfgte64->dev_id;
1276 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1277 sizeof(res->dev_lun.scsi_lun));
/* Detect and record a changed resource path so it can be reported below. */
1279 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1280 sizeof(res->res_path))) {
1281 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1282 sizeof(res->res_path));
1286 if (res->sdev && new_path)
1287 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1288 ipr_format_res_path(res->ioa_cfg,
1289 res->res_path, buffer, sizeof(buffer)));
/* 32-bit SIS arm (its `} else {` line is not visible in this chunk). */
1291 res->flags = cfgtew->u.cfgte->flags;
1292 if (res->flags & IPR_IS_IOA_RESOURCE)
1293 res->type = IPR_RES_TYPE_IOAFP;
1295 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1297 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1298 sizeof(struct ipr_std_inq_data));
1300 res->qmodel = IPR_QUEUEING_MODEL(res);
1301 proto = cfgtew->u.cfgte->proto;
1302 res->res_handle = cfgtew->u.cfgte->res_handle;
1305 ipr_update_ata_class(res, proto);
1309 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1311 * @res: resource entry struct
1312 * @cfgtew: config table entry wrapper struct
 * Only meaningful on SIS64, where target ids are allocated from bitmaps by
 * ipr_init_res_entry(); this releases the id on the matching bitmap.
1317 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1319 struct ipr_resource_entry *gscsi_res = NULL;
1320 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
/* 32-bit SIS never allocated from the bitmaps; early return (the `return;`
 * line is not visible in this chunk). */
1322 if (!ioa_cfg->sis64)
1325 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1326 clear_bit(res->target, ioa_cfg->array_ids)
1327 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1328 clear_bit(res->target, ioa_cfg->vset_ids);
1329 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
/* The target id is shared by every LUN with the same dev_id; only release
 * it once no other resource entry still uses it. */
1330 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1331 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1333 clear_bit(res->target, ioa_cfg->target_ids);
1335 } else if (res->bus == 0)
1336 clear_bit(res->target, ioa_cfg->target_ids);
1340 * ipr_handle_config_change - Handle a config change from the adapter
1341 * @ioa_cfg: ioa config struct
 * @hostrcb: hostrcb containing the config change notification
 * Locates (or allocates) the resource entry named by the notification,
 * updates it, handles removal notifications, then re-arms the CCN HCAM.
1347 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1348 struct ipr_hostrcb *hostrcb)
1350 struct ipr_resource_entry *res = NULL;
1351 struct ipr_config_table_entry_wrapper cfgtew;
1352 __be32 cc_res_handle;
1356 if (ioa_cfg->sis64) {
1357 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1358 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1360 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1361 cc_res_handle = cfgtew.u.cfgte->res_handle;
/* Find the existing resource entry this notification refers to. */
1364 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1365 if (res->res_handle == cc_res_handle) {
/* Not found: this is a new device.  With no free entries left, just
 * re-arm the HCAM and bail. */
1372 if (list_empty(&ioa_cfg->free_res_q)) {
1373 ipr_send_hcam(ioa_cfg,
1374 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1379 res = list_entry(ioa_cfg->free_res_q.next,
1380 struct ipr_resource_entry, queue);
1382 list_del(&res->queue);
1383 ipr_init_res_entry(res, &cfgtew);
1384 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1387 ipr_update_res_entry(res, &cfgtew);
1389 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
/* Removal with an attached sdev: defer to the worker thread ... */
1391 res->del_from_ml = 1;
1392 res->res_handle = IPR_INVALID_RES_HANDLE;
1393 if (ioa_cfg->allow_ml_add_del)
1394 schedule_work(&ioa_cfg->work_q);
/* ... otherwise release the target id and recycle the entry now. */
1396 ipr_clear_res_target(res);
1397 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1399 } else if (!res->sdev || res->del_from_ml) {
1401 if (ioa_cfg->allow_ml_add_del)
1402 schedule_work(&ioa_cfg->work_q);
/* Re-arm the config-change HCAM so further notifications arrive. */
1405 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1409 * ipr_process_ccn - Op done function for a CCN.
1410 * @ipr_cmd: ipr command struct
1412 * This function is the op done function for a configuration
1413 * change notification host controlled async from the adapter.
1418 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1420 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1421 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1422 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* Retire the command and detach the hostrcb from the pending list. */
1424 list_del(&hostrcb->queue);
1425 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
/* Failure path: an IOA reset aborting the HCAM is expected and not logged;
 * any other IOASC is reported, then the HCAM is simply re-armed. */
1428 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1429 dev_err(&ioa_cfg->pdev->dev,
1430 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1432 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
/* Success path (its `} else {` line is not visible in this chunk). */
1434 ipr_handle_config_change(ioa_cfg, hostrcb);
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index of the last character of the field in @buf
 * @buf:	string to modify
 *
 * Walks backwards from @i over any run of trailing blanks, then appends a
 * single separating space and a NUL terminator after the last non-blank
 * character.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	for (; i && buf[i] == ' '; i--)
		;

	buf[++i] = ' ';
	buf[++i] = '\0';
	return i;
}
1459 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1460 * @prefix: string to print at start of printk
1461 * @hostrcb: hostrcb pointer
1462 * @vpd: vendor/product id/sn struct
 * Builds "VENDOR PRODUCT SERIAL" on one line, collapsing the space-padded
 * fixed-width VPD fields down to single-space separators.
1467 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1468 struct ipr_vpd *vpd)
/* +3 = two separator spaces plus the trailing NUL. */
1470 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1473 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1474 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1476 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1477 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1479 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1480 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1482 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1486 * ipr_log_vpd - Log the passed VPD to the error log.
1487 * @vpd: vendor/product id/sn struct
 * Two-line variant: vendor/product on one line, serial number on the next.
 * The buffer is reused for both; it is sized for the larger first line.
1492 static void ipr_log_vpd(struct ipr_vpd *vpd)
1494 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1495 + IPR_SERIAL_NUM_LEN];
1497 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1498 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1500 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1501 ipr_err("Vendor/Product ID: %s\n", buffer);
1503 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1504 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1505 ipr_err("    Serial Number: %s\n", buffer);
1509 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1510 * @prefix: string to print at start of printk
1511 * @hostrcb: hostrcb pointer
1512 * @vpd: vendor/product id/sn/wwn struct
 * Extended VPD = base VPD plus a 64-bit WWN, logged as two big-endian words.
1517 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1518 struct ipr_ext_vpd *vpd)
1520 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1521 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1522 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1526 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1527 * @vpd: vendor/product id/sn/wwn struct
 * Non-compact variant of ipr_log_ext_vpd_compact(): base VPD plus WWN.
1532 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1534 ipr_log_vpd(&vpd->vpd);
1535 ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1536 be32_to_cpu(vpd->wwid[1]));
1540 * ipr_log_enhanced_cache_error - Log a cache error.
1541 * @ioa_cfg: ioa config struct
1542 * @hostrcb: hostrcb struct
 * Dumps the current vs expected cache-directory/adapter pairing from a
 * type 12 error record (extended VPD form).
1547 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1548 struct ipr_hostrcb *hostrcb)
1550 struct ipr_hostrcb_type_12_error *error;
/* Same type 12 payload lives at different offsets in the 64-bit vs 32-bit
 * hostrcb layouts; the selecting `if (ioa_cfg->sis64)` / `else` lines are
 * not visible in this chunk. */
1553 error = &hostrcb->hcam.u.error64.u.type_12_error;
1555 error = &hostrcb->hcam.u.error.u.type_12_error;
1557 ipr_err("-----Current Configuration-----\n");
1558 ipr_err("Cache Directory Card Information:\n");
1559 ipr_log_ext_vpd(&error->ioa_vpd);
1560 ipr_err("Adapter Card Information:\n");
1561 ipr_log_ext_vpd(&error->cfc_vpd);
1563 ipr_err("-----Expected Configuration-----\n");
1564 ipr_err("Cache Directory Card Information:\n");
1565 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1566 ipr_err("Adapter Card Information:\n");
1567 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1569 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1570 be32_to_cpu(error->ioa_data[0]),
1571 be32_to_cpu(error->ioa_data[1]),
1572 be32_to_cpu(error->ioa_data[2]));
1576 * ipr_log_cache_error - Log a cache error.
1577 * @ioa_cfg: ioa config struct
1578 * @hostrcb: hostrcb struct
 * Legacy (type 02, plain VPD) counterpart of
 * ipr_log_enhanced_cache_error(); same report layout.
1583 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1584 struct ipr_hostrcb *hostrcb)
1586 struct ipr_hostrcb_type_02_error *error =
1587 &hostrcb->hcam.u.error.u.type_02_error;
1589 ipr_err("-----Current Configuration-----\n");
1590 ipr_err("Cache Directory Card Information:\n");
1591 ipr_log_vpd(&error->ioa_vpd);
1592 ipr_err("Adapter Card Information:\n");
1593 ipr_log_vpd(&error->cfc_vpd);
1595 ipr_err("-----Expected Configuration-----\n");
1596 ipr_err("Cache Directory Card Information:\n");
1597 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1598 ipr_err("Adapter Card Information:\n");
1599 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1601 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1602 be32_to_cpu(error->ioa_data[0]),
1603 be32_to_cpu(error->ioa_data[1]),
1604 be32_to_cpu(error->ioa_data[2]));
1608 * ipr_log_enhanced_config_error - Log a configuration error.
1609 * @ioa_cfg: ioa config struct
1610 * @hostrcb: hostrcb struct
 * Walks the per-device entries of a type 13 error record (extended VPD)
 * and logs each one.
1615 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1616 struct ipr_hostrcb *hostrcb)
1618 int errors_logged, i;
1619 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1620 struct ipr_hostrcb_type_13_error *error;
1622 error = &hostrcb->hcam.u.error.u.type_13_error;
1623 errors_logged = be32_to_cpu(error->errors_logged);
1625 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1626 be32_to_cpu(error->errors_detected), errors_logged);
1628 dev_entry = error->dev;
/* NOTE(review): errors_logged comes from the adapter and is not clamped
 * here against the dev[] array size -- presumably bounded by firmware. */
1630 for (i = 0; i < errors_logged; i++, dev_entry++) {
1633 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1634 ipr_log_ext_vpd(&dev_entry->vpd);
1636 ipr_err("-----New Device Information-----\n");
1637 ipr_log_ext_vpd(&dev_entry->new_vpd);
1639 ipr_err("Cache Directory Card Information:\n");
1640 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1642 ipr_err("Adapter Card Information:\n");
1643 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1648 * ipr_log_sis64_config_error - Log a device error.
1649 * @ioa_cfg: ioa config struct
1650 * @hostrcb: hostrcb struct
 * SIS64 (type 23) counterpart of ipr_log_enhanced_config_error(); devices
 * are identified by resource path instead of resource address.
1655 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1656 struct ipr_hostrcb *hostrcb)
1658 int errors_logged, i;
1659 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1660 struct ipr_hostrcb_type_23_error *error;
1661 char buffer[IPR_MAX_RES_PATH_LENGTH];
1663 error = &hostrcb->hcam.u.error64.u.type_23_error;
1664 errors_logged = be32_to_cpu(error->errors_logged);
1666 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1667 be32_to_cpu(error->errors_detected), errors_logged);
1669 dev_entry = error->dev;
1671 for (i = 0; i < errors_logged; i++, dev_entry++) {
1674 ipr_err("Device %d : %s", i + 1,
1675 __ipr_format_res_path(dev_entry->res_path,
1676 buffer, sizeof(buffer)));
1677 ipr_log_ext_vpd(&dev_entry->vpd);
1679 ipr_err("-----New Device Information-----\n");
1680 ipr_log_ext_vpd(&dev_entry->new_vpd);
1682 ipr_err("Cache Directory Card Information:\n");
1683 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1685 ipr_err("Adapter Card Information:\n");
1686 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1691 * ipr_log_config_error - Log a configuration error.
1692 * @ioa_cfg: ioa config struct
1693 * @hostrcb: hostrcb struct
 * Legacy (type 03, plain VPD) per-device error report; also dumps five
 * words of raw per-device IOA data.
1698 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1699 struct ipr_hostrcb *hostrcb)
1701 int errors_logged, i;
1702 struct ipr_hostrcb_device_data_entry *dev_entry;
1703 struct ipr_hostrcb_type_03_error *error;
1705 error = &hostrcb->hcam.u.error.u.type_03_error;
1706 errors_logged = be32_to_cpu(error->errors_logged);
1708 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1709 be32_to_cpu(error->errors_detected), errors_logged);
1711 dev_entry = error->dev;
1713 for (i = 0; i < errors_logged; i++, dev_entry++) {
1716 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1717 ipr_log_vpd(&dev_entry->vpd);
1719 ipr_err("-----New Device Information-----\n");
1720 ipr_log_vpd(&dev_entry->new_vpd);
1722 ipr_err("Cache Directory Card Information:\n");
1723 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1725 ipr_err("Adapter Card Information:\n");
1726 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1728 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1729 be32_to_cpu(dev_entry->ioa_data[0]),
1730 be32_to_cpu(dev_entry->ioa_data[1]),
1731 be32_to_cpu(dev_entry->ioa_data[2]),
1732 be32_to_cpu(dev_entry->ioa_data[3]),
1733 be32_to_cpu(dev_entry->ioa_data[4]));
1738 * ipr_log_enhanced_array_error - Log an array configuration error.
1739 * @ioa_cfg: ioa config struct
1740 * @hostrcb: hostrcb struct
 * Type 14 record: logs the array's last functional address and each member
 * disk's current vs expected location.  Members whose serial number is all
 * '0' are unused slots and are skipped.
1745 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1746 struct ipr_hostrcb *hostrcb)
1749 struct ipr_hostrcb_type_14_error *error;
1750 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1751 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1753 error = &hostrcb->hcam.u.error.u.type_14_error;
1757 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1758 error->protection_level,
1759 ioa_cfg->host->host_no,
1760 error->last_func_vset_res_addr.bus,
1761 error->last_func_vset_res_addr.target,
1762 error->last_func_vset_res_addr.lun);
1766 array_entry = error->array_member;
/* Clamp the adapter-supplied count to the array_member[] capacity. */
1767 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1768 ARRAY_SIZE(error->array_member));
1770 for (i = 0; i < num_entries; i++, array_entry++) {
1771 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1774 if (be32_to_cpu(error->exposed_mode_adn) == i)
1775 ipr_err("Exposed Array Member %d:\n", i);
1777 ipr_err("Array Member %d:\n", i);
1779 ipr_log_ext_vpd(&array_entry->vpd);
1780 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1781 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1782 "Expected Location");
1789 * ipr_log_array_error - Log an array configuration error.
1790 * @ioa_cfg: ioa config struct
1791 * @hostrcb: hostrcb struct
 * Legacy (type 04) counterpart of ipr_log_enhanced_array_error().
1796 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1797 struct ipr_hostrcb *hostrcb)
1800 struct ipr_hostrcb_type_04_error *error;
1801 struct ipr_hostrcb_array_data_entry *array_entry;
1802 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1804 error = &hostrcb->hcam.u.error.u.type_04_error;
1808 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1809 error->protection_level,
1810 ioa_cfg->host->host_no,
1811 error->last_func_vset_res_addr.bus,
1812 error->last_func_vset_res_addr.target,
1813 error->last_func_vset_res_addr.lun);
1817 array_entry = error->array_member;
/* The type 04 record holds a fixed 18 members, split across the
 * array_member and array_member2 arrays (see the switch-over below) --
 * unlike the enhanced form there is no adapter-supplied count to clamp. */
1819 for (i = 0; i < 18; i++) {
1820 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1823 if (be32_to_cpu(error->exposed_mode_adn) == i)
1824 ipr_err("Exposed Array Member %d:\n", i);
1826 ipr_err("Array Member %d:\n", i);
1828 ipr_log_vpd(&array_entry->vpd);
1830 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1831 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1832 "Expected Location");
/* Halfway point: continue with the second member array (the guarding
 * condition line is not visible in this chunk). */
1837 array_entry = error->array_member2;
1844 * ipr_log_hex_data - Log additional hex IOA error data.
1845 * @ioa_cfg: ioa config struct
1846 * @data: IOA error data
 * @len: length of @data in bytes
 * Hex-dumps the payload, four big-endian words per line.  The index i
 * counts 32-bit words (len / 4 total), stepping 4 words per printed row;
 * the byte offset printed at line start is therefore i*4.
1852 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
/* At default verbosity, cap the dump size. */
1859 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1860 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1862 for (i = 0; i < len / 4; i += 4) {
1863 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1864 be32_to_cpu(data[i]),
1865 be32_to_cpu(data[i+1]),
1866 be32_to_cpu(data[i+2]),
1867 be32_to_cpu(data[i+3]));
1872 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1873 * @ioa_cfg: ioa config struct
1874 * @hostrcb: hostrcb struct
 * Type 17 record: failure reason string, remote IOA VPD, then the
 * remaining record bytes as a hex dump.
1879 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1880 struct ipr_hostrcb *hostrcb)
1882 struct ipr_hostrcb_type_17_error *error;
/* 64-bit vs 32-bit hostrcb layout selection (the `if (ioa_cfg->sis64)` /
 * `else` lines are not visible in this chunk). */
1885 error = &hostrcb->hcam.u.error64.u.type_17_error;
1887 error = &hostrcb->hcam.u.error.u.type_17_error;
/* Force-terminate, then strip trailing whitespace from, the adapter-
 * supplied reason string before printing it. */
1889 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1890 strim(error->failure_reason);
1892 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1893 be32_to_cpu(hostrcb->hcam.u.error.prc));
1894 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
/* Dump whatever trails the fixed part of the record. */
1895 ipr_log_hex_data(ioa_cfg, error->data,
1896 be32_to_cpu(hostrcb->hcam.length) -
1897 (offsetof(struct ipr_hostrcb_error, u) +
1898 offsetof(struct ipr_hostrcb_type_17_error, data)));
1902 * ipr_log_dual_ioa_error - Log a dual adapter error.
1903 * @ioa_cfg: ioa config struct
1904 * @hostrcb: hostrcb struct
 * Legacy (type 07) counterpart of ipr_log_enhanced_dual_ioa_error().
1909 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1910 struct ipr_hostrcb *hostrcb)
1912 struct ipr_hostrcb_type_07_error *error;
1914 error = &hostrcb->hcam.u.error.u.type_07_error;
/* Force-terminate and strip the adapter-supplied reason string. */
1915 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1916 strim(error->failure_reason);
1918 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1919 be32_to_cpu(hostrcb->hcam.u.error.prc));
1920 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1921 ipr_log_hex_data(ioa_cfg, error->data,
1922 be32_to_cpu(hostrcb->hcam.length) -
1923 (offsetof(struct ipr_hostrcb_error, u) +
1924 offsetof(struct ipr_hostrcb_type_07_error, data)));
/* Human-readable names for the path-active bits of a fabric path_state. */
1927 static const struct {
1930 } path_active_desc[] = {
1931 { IPR_PATH_NO_INFO, "Path" },
1932 { IPR_PATH_ACTIVE, "Active path" },
1933 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
/* Human-readable names for the path-state bits of a fabric path_state. */
1936 static const struct {
1939 } path_state_desc[] = {
1940 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1941 { IPR_PATH_HEALTHY, "is healthy" },
1942 { IPR_PATH_DEGRADED, "is degraded" },
1943 { IPR_PATH_FAILED, "is failed" }
1947 * ipr_log_fabric_path - Log a fabric path error
1948 * @hostrcb: hostrcb struct
1949 * @fabric: fabric descriptor
 * Looks up matching active/state descriptor strings and prints the path
 * location, omitting cascade/phy fields whose value is the 0xff
 * "not present" sentinel.  The trailing ipr_err() is the fallback when no
 * descriptor entry matches.
1954 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1955 struct ipr_hostrcb_fabric_desc *fabric)
1958 u8 path_state = fabric->path_state;
1959 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1960 u8 state = path_state & IPR_PATH_STATE_MASK;
1962 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1963 if (path_active_desc[i].active != active)
1966 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1967 if (path_state_desc[j].state != state)
1970 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1971 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1972 path_active_desc[i].desc, path_state_desc[j].desc,
1974 } else if (fabric->cascaded_expander == 0xff) {
1975 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1976 path_active_desc[i].desc, path_state_desc[j].desc,
1977 fabric->ioa_port, fabric->phy);
1978 } else if (fabric->phy == 0xff) {
1979 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1980 path_active_desc[i].desc, path_state_desc[j].desc,
1981 fabric->ioa_port, fabric->cascaded_expander);
1983 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1984 path_active_desc[i].desc, path_state_desc[j].desc,
1985 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
/* Fallback: raw dump when the path_state matched no descriptor pair. */
1991 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1992 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1996 * ipr_log64_fabric_path - Log a fabric path error
1997 * @hostrcb: hostrcb struct
1998 * @fabric: fabric descriptor
 * SIS64 variant of ipr_log_fabric_path(): identifies the path by resource
 * path string instead of port/cascade/phy numbers.
2003 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2004 struct ipr_hostrcb64_fabric_desc *fabric)
2007 u8 path_state = fabric->path_state;
2008 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2009 u8 state = path_state & IPR_PATH_STATE_MASK;
2010 char buffer[IPR_MAX_RES_PATH_LENGTH];
2012 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2013 if (path_active_desc[i].active != active)
2016 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2017 if (path_state_desc[j].state != state)
2020 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2021 path_active_desc[i].desc, path_state_desc[j].desc,
2022 ipr_format_res_path(hostrcb->ioa_cfg,
2024 buffer, sizeof(buffer)));
/* Fallback when no descriptor pair matched the path_state. */
2029 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2030 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2031 buffer, sizeof(buffer)));
/* Human-readable names for the type bits of a path element's type_status. */
2034 static const struct {
2037 } path_type_desc[] = {
2038 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2039 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2040 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2041 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
/* Human-readable names for the status bits of a path element's type_status. */
2044 static const struct {
2047 } path_status_desc[] = {
2048 { IPR_PATH_CFG_NO_PROB, "Functional" },
2049 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2050 { IPR_PATH_CFG_FAILED, "Failed" },
2051 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2052 { IPR_PATH_NOT_DETECTED, "Missing" },
2053 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
/* SAS link rate strings, indexed by (link_rate & IPR_PHY_LINK_RATE_MASK);
 * only part of the table is visible in this chunk. */
2056 static const char *link_rate[] = {
2059 "phy reset problem",
2076 * ipr_log_path_elem - Log a fabric path element.
2077 * @hostrcb: hostrcb struct
2078 * @cfg: fabric path element struct
 * Prints one fabric path element with its type/status description, link
 * rate and WWN; cascade/phy fields equal to the 0xff sentinel are omitted
 * from the message.  The trailing ipr_hcam_err() is the fallback for an
 * unrecognized type/status combination.
2083 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2084 struct ipr_hostrcb_config_element *cfg)
2087 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2088 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
/* Empty slot in the descriptor list: nothing to report. */
2090 if (type == IPR_PATH_CFG_NOT_EXIST)
2093 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2094 if (path_type_desc[i].type != type)
2097 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2098 if (path_status_desc[j].status != status)
2101 if (type == IPR_PATH_CFG_IOA_PORT) {
2102 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2103 path_status_desc[j].desc, path_type_desc[i].desc,
2104 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2105 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2107 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2108 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2109 path_status_desc[j].desc, path_type_desc[i].desc,
2110 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2111 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2112 } else if (cfg->cascaded_expander == 0xff) {
2113 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2114 "WWN=%08X%08X\n", path_status_desc[j].desc,
2115 path_type_desc[i].desc, cfg->phy,
2116 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2117 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2118 } else if (cfg->phy == 0xff) {
2119 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2120 "WWN=%08X%08X\n", path_status_desc[j].desc,
2121 path_type_desc[i].desc, cfg->cascaded_expander,
2122 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2123 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2125 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2126 "WWN=%08X%08X\n", path_status_desc[j].desc,
2127 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2128 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2129 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
/* Fallback: raw element dump when type/status matched no descriptors. */
2136 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2137 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2138 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2139 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2143 * ipr_log64_path_elem - Log a fabric path element.
2144 * @hostrcb: hostrcb struct
2145 * @cfg: fabric path element struct
 * SIS64 variant of ipr_log_path_elem(): the element is located by resource
 * path; elements whose descriptor_id is not the SIS64 descriptor are
 * ignored.
2150 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2151 struct ipr_hostrcb64_config_element *cfg)
2154 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2155 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2156 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2157 char buffer[IPR_MAX_RES_PATH_LENGTH];
2159 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2162 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2163 if (path_type_desc[i].type != type)
2166 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2167 if (path_status_desc[j].status != status)
2170 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2171 path_status_desc[j].desc, path_type_desc[i].desc,
2172 ipr_format_res_path(hostrcb->ioa_cfg,
2173 cfg->res_path, buffer, sizeof(buffer)),
2174 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2175 be32_to_cpu(cfg->wwid[0]),
2176 be32_to_cpu(cfg->wwid[1]));
/* Fallback: raw element dump when type/status matched no descriptors. */
2180 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2181 "WWN=%08X%08X\n", cfg->type_status,
2182 ipr_format_res_path(hostrcb->ioa_cfg,
2183 cfg->res_path, buffer, sizeof(buffer)),
2184 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2185 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2189 * ipr_log_fabric_error - Log a fabric error.
2190 * @ioa_cfg: ioa config struct
2191 * @hostrcb: hostrcb struct
2196 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2197 struct ipr_hostrcb *hostrcb)
2199 struct ipr_hostrcb_type_20_error *error;
2200 struct ipr_hostrcb_fabric_desc *fabric;
2201 struct ipr_hostrcb_config_element *cfg;
2204 error = &hostrcb->hcam.u.error.u.type_20_error;
/* Force NUL termination before printing the adapter-supplied string. */
2205 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2206 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
/* add_len = bytes of the HCAM that remain after the fixed header;
 * consumed below as each variable-length fabric descriptor is walked. */
2208 add_len = be32_to_cpu(hostrcb->hcam.length) -
2209 (offsetof(struct ipr_hostrcb_error, u) +
2210 offsetof(struct ipr_hostrcb_type_20_error, desc));
2212 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2213 ipr_log_fabric_path(hostrcb, fabric);
2214 for_each_fabric_cfg(fabric, cfg)
2215 ipr_log_path_elem(hostrcb, cfg);
/* Step over this descriptor to the next one (fabric->length is the
 * descriptor's total size, including its config elements). */
2217 add_len -= be16_to_cpu(fabric->length);
2218 fabric = (struct ipr_hostrcb_fabric_desc *)
2219 ((unsigned long)fabric + be16_to_cpu(fabric->length));
/* Hex-dump whatever trailing data follows the last descriptor. */
2222 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2226 * ipr_log_sis64_array_error - Log a sis64 array error.
2227 * @ioa_cfg: ioa config struct
2228 * @hostrcb: hostrcb struct
2233 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2234 struct ipr_hostrcb *hostrcb)
2237 struct ipr_hostrcb_type_24_error *error;
2238 struct ipr_hostrcb64_array_data_entry *array_entry;
2239 char buffer[IPR_MAX_RES_PATH_LENGTH];
/* An all-'0' serial number marks an unused/blank array member slot. */
2240 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2242 error = &hostrcb->hcam.u.error64.u.type_24_error;
2246 ipr_err("RAID %s Array Configuration: %s\n",
2247 error->protection_level,
2248 ipr_format_res_path(ioa_cfg, error->last_res_path,
2249 buffer, sizeof(buffer)));
2253 array_entry = error->array_member;
/* Clamp to the array_member[] capacity so a bogus count from the
 * adapter cannot walk off the end of the record. */
2254 num_entries = min_t(u32, error->num_entries,
2255 ARRAY_SIZE(error->array_member));
2257 for (i = 0; i < num_entries; i++, array_entry++) {
/* Skip blank slots (serial number is all '0'). */
2259 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2262 if (error->exposed_mode_adn == i)
2263 ipr_err("Exposed Array Member %d:\n", i);
2265 ipr_err("Array Member %d:\n", i);
2267 ipr_err("Array Member %d:\n", i);
2268 ipr_log_ext_vpd(&array_entry->vpd);
2269 ipr_err("Current Location: %s\n",
2270 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2271 buffer, sizeof(buffer)));
2272 ipr_err("Expected Location: %s\n",
2273 ipr_format_res_path(ioa_cfg,
2274 array_entry->expected_res_path,
2275 buffer, sizeof(buffer)));
2282 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2283 * @ioa_cfg: ioa config struct
2284 * @hostrcb: hostrcb struct
2289 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2290 struct ipr_hostrcb *hostrcb)
2292 struct ipr_hostrcb_type_30_error *error;
2293 struct ipr_hostrcb64_fabric_desc *fabric;
2294 struct ipr_hostrcb64_config_element *cfg;
2297 error = &hostrcb->hcam.u.error64.u.type_30_error;
/* Force NUL termination before printing the adapter-supplied string. */
2299 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2300 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
/* Same walk as ipr_log_fabric_error(), but over the 64-bit (SIS64)
 * record layout. */
2302 add_len = be32_to_cpu(hostrcb->hcam.length) -
2303 (offsetof(struct ipr_hostrcb64_error, u) +
2304 offsetof(struct ipr_hostrcb_type_30_error, desc));
2306 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2307 ipr_log64_fabric_path(hostrcb, fabric);
2308 for_each_fabric_cfg(fabric, cfg)
2309 ipr_log64_path_elem(hostrcb, cfg);
2311 add_len -= be16_to_cpu(fabric->length);
2312 fabric = (struct ipr_hostrcb64_fabric_desc *)
2313 ((unsigned long)fabric + be16_to_cpu(fabric->length));
/* Hex-dump whatever trailing data follows the last descriptor. */
2316 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2320 * ipr_log_generic_error - Log an adapter error.
2321 * @ioa_cfg: ioa config struct
2322 * @hostrcb: hostrcb struct
2327 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2328 struct ipr_hostrcb *hostrcb)
/* Fallback logger for overlay IDs with no dedicated formatter:
 * hex-dump the raw HCAM payload. */
2330 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2331 be32_to_cpu(hostrcb->hcam.length));
2335 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc: IOASC value to look up (compared after masking with
 * IPR_IOASC_IOASC_MASK)
2338 * This function will return the index of into the ipr_error_table
2339 * for the specified IOASC. If the IOASC is not in the table,
2340 * 0 will be returned, which points to the entry used for unknown errors.
2343 * index into the ipr_error_table
2345 static u32 ipr_get_error(u32 ioasc)
/* Linear scan; entry 0 is the catch-all used for unknown IOASCs. */
2349 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2350 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2357 * ipr_handle_log_data - Log an adapter error.
2358 * @ioa_cfg: ioa config struct
2359 * @hostrcb: hostrcb struct
2361 * This function logs an adapter error to the system.
2366 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2367 struct ipr_hostrcb *hostrcb)
/* Only error-log-entry notifications are handled here. */
2372 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2375 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2376 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
/* The failing-device IOASC lives in the 64-bit record on SIS64
 * adapters and in the legacy record otherwise. */
2379 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2381 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2383 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2384 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2385 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2386 scsi_report_bus_reset(ioa_cfg->host,
2387 hostrcb->hcam.u.error.fd_res_addr.bus);
2390 error_index = ipr_get_error(ioasc);
/* log_hcam == 0 means this IOASC is configured not to be logged. */
2392 if (!ipr_error_table[error_index].log_hcam)
2395 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2397 /* Set indication we have logged an error */
2398 ioa_cfg->errors_logged++;
/* Skip the detailed dump below when the adapter's configured log
 * level is lower than this error requires. */
2400 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
/* Clamp a bogus adapter-reported length to the raw buffer size. */
2402 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2403 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
/* Dispatch to the formatter matching the HCAM overlay ID. */
2405 switch (hostrcb->hcam.overlay_id) {
2406 case IPR_HOST_RCB_OVERLAY_ID_2:
2407 ipr_log_cache_error(ioa_cfg, hostrcb);
2409 case IPR_HOST_RCB_OVERLAY_ID_3:
2410 ipr_log_config_error(ioa_cfg, hostrcb);
2412 case IPR_HOST_RCB_OVERLAY_ID_4:
2413 case IPR_HOST_RCB_OVERLAY_ID_6:
2414 ipr_log_array_error(ioa_cfg, hostrcb);
2416 case IPR_HOST_RCB_OVERLAY_ID_7:
2417 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2419 case IPR_HOST_RCB_OVERLAY_ID_12:
2420 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2422 case IPR_HOST_RCB_OVERLAY_ID_13:
2423 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2425 case IPR_HOST_RCB_OVERLAY_ID_14:
2426 case IPR_HOST_RCB_OVERLAY_ID_16:
2427 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2429 case IPR_HOST_RCB_OVERLAY_ID_17:
2430 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2432 case IPR_HOST_RCB_OVERLAY_ID_20:
2433 ipr_log_fabric_error(ioa_cfg, hostrcb);
2435 case IPR_HOST_RCB_OVERLAY_ID_23:
2436 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2438 case IPR_HOST_RCB_OVERLAY_ID_24:
2439 case IPR_HOST_RCB_OVERLAY_ID_26:
2440 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2442 case IPR_HOST_RCB_OVERLAY_ID_30:
2443 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2445 case IPR_HOST_RCB_OVERLAY_ID_1:
2446 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2448 ipr_log_generic_error(ioa_cfg, hostrcb);
2454 * ipr_process_error - Op done function for an adapter error log.
2455 * @ipr_cmd: ipr command struct
2457 * This function is the op done function for an error log host
2458 * controlled async from the adapter. It will log the error and
2459 * send the HCAM back to the adapter.
2464 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2466 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2467 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2468 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* fd_ioasc comes from the 64-bit record on SIS64, legacy otherwise. */
2472 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2474 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
/* Take the hostrcb off its pending list and return the command to
 * the HRRQ free queue before logging. */
2476 list_del(&hostrcb->queue);
2477 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2480 ipr_handle_log_data(ioa_cfg, hostrcb);
2481 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2482 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2483 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2484 dev_err(&ioa_cfg->pdev->dev,
2485 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
/* Re-arm: hand the hostrcb back to the adapter for the next event. */
2488 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2492 * ipr_timeout - An internally generated op has timed out.
2493 * @ipr_cmd: ipr command struct
2495 * This function blocks host requests and initiates an
 * adapter reset in response to an internal command timeout.
2501 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2503 unsigned long lock_flags = 0;
2504 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2507 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2509 ioa_cfg->errors_logged++;
2510 dev_err(&ioa_cfg->pdev->dev,
2511 "Adapter being reset due to command timeout.\n");
/* If a dump was requested, escalate to actually collecting it. */
2513 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2514 ioa_cfg->sdt_state = GET_DUMP;
/* Only reset if no reset is in flight, or if the timed-out command
 * is itself the reset command. */
2516 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2517 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2519 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2524 * ipr_oper_timeout - Adapter timed out transitioning to operational
2525 * @ipr_cmd: ipr command struct
2527 * This function blocks host requests and initiates an
 * adapter reset when the adapter fails to become operational in time.
2533 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2535 unsigned long lock_flags = 0;
2536 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2539 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2541 ioa_cfg->errors_logged++;
2542 dev_err(&ioa_cfg->pdev->dev,
2543 "Adapter timed out transitioning to operational.\n");
/* If a dump was requested, escalate to actually collecting it. */
2545 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2546 ioa_cfg->sdt_state = GET_DUMP;
2548 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
/* NOTE(review): retries are bumped here (under a condition elided
 * from this listing) before re-initiating the reset. */
2550 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2551 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2554 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2559 * ipr_find_ses_entry - Find matching SES in SES table
2560 * @res: resource entry struct of SES
2563 * pointer to SES table entry / NULL on failure
2565 static const struct ipr_ses_table_entry *
2566 ipr_find_ses_entry(struct ipr_resource_entry *res)
2569 struct ipr_std_inq_vpids *vpids;
2570 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2572 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
/* Compare the product ID byte-by-byte; an 'X' in the table's
 * compare mask means that byte must match, other values are
 * handled by branches elided from this listing. */
2573 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2574 if (ste->compare_product_id_byte[j] == 'X') {
2575 vpids = &res->std_inq_data.vpids;
2576 if (vpids->product_id[j] == ste->product_id[j])
/* All bytes matched: this table entry describes the SES. */
2584 if (matches == IPR_PROD_ID_LEN)
2592 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2593 * @ioa_cfg: ioa config struct
 * @bus: SCSI bus number to evaluate
2595 * @bus_width: bus width
2598 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2599 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2600 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2601 * max 160MHz = max 320MB/sec).
2603 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2605 struct ipr_resource_entry *res;
2606 const struct ipr_ses_table_entry *ste;
2607 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2609 /* Loop through each config table entry in the config table buffer */
2610 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
/* Only SES devices on the requested bus can impose a limit. */
2611 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2614 if (bus != res->bus)
2617 if (!(ste = ipr_find_ses_entry(res)))
/* Convert the table's limit (MB/s-oriented) into the 100KHz
 * units returned by this function, scaled by bus width. */
2620 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2623 return max_xfer_rate;
2627 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2628 * @ioa_cfg: ioa config struct
2629 * @max_delay: max delay in micro-seconds to wait
2631 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2634 * 0 on success / other on failure
2636 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2638 volatile u32 pcii_reg;
2641 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2642 while (delay < max_delay) {
2643 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* ACK bit set: the adapter has acknowledged; success path. */
2645 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2648 /* udelay cannot be used if delay is more than a few milliseconds */
2649 if ((delay / 1000) > MAX_UDELAY_MS)
2650 mdelay(delay / 1000);
2660 * ipr_get_sis64_dump_data_section - Dump IOA memory
2661 * @ioa_cfg: ioa config struct
2662 * @start_addr: adapter address to dump
2663 * @dest: destination kernel buffer
2664 * @length_in_words: length to dump in 4 byte words
2669 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2671 __be32 *dest, u32 length_in_words)
2675 for (i = 0; i < length_in_words; i++) {
/* Latch the adapter-side address into the dump address register,
 * then read the corresponding word through the dump data window. */
2676 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2677 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2685 * ipr_get_ldump_data_section - Dump IOA memory
2686 * @ioa_cfg: ioa config struct
2687 * @start_addr: adapter address to dump
2688 * @dest: destination kernel buffer
2689 * @length_in_words: length to dump in 4 byte words
2692 * 0 on success / -EIO on failure
2694 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2696 __be32 *dest, u32 length_in_words)
2698 volatile u32 temp_pcii_reg;
/* SIS64 adapters use the simpler register-window dump mechanism. */
2702 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2703 dest, length_in_words);
2705 /* Write IOA interrupt reg starting LDUMP state */
2706 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2707 ioa_cfg->regs.set_uproc_interrupt_reg32);
2709 /* Wait for IO debug acknowledge */
2710 if (ipr_wait_iodbg_ack(ioa_cfg,
2711 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2712 dev_err(&ioa_cfg->pdev->dev,
2713 "IOA dump long data transfer timeout\n");
2717 /* Signal LDUMP interlocked - clear IO debug ack */
2718 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2719 ioa_cfg->regs.clr_interrupt_reg);
2721 /* Write Mailbox with starting address */
2722 writel(start_addr, ioa_cfg->ioa_mailbox);
2724 /* Signal address valid - clear IOA Reset alert */
2725 writel(IPR_UPROCI_RESET_ALERT,
2726 ioa_cfg->regs.clr_uproc_interrupt_reg32);
/* Each iteration performs one word of the handshake-driven copy. */
2728 for (i = 0; i < length_in_words; i++) {
2729 /* Wait for IO debug acknowledge */
2730 if (ipr_wait_iodbg_ack(ioa_cfg,
2731 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2732 dev_err(&ioa_cfg->pdev->dev,
2733 "IOA dump short data transfer timeout\n");
2737 /* Read data from mailbox and increment destination pointer */
2738 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2741 /* For all but the last word of data, signal data received */
2742 if (i < (length_in_words - 1)) {
2743 /* Signal dump data received - Clear IO debug Ack */
2744 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2745 ioa_cfg->regs.clr_interrupt_reg);
2749 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2750 writel(IPR_UPROCI_RESET_ALERT,
2751 ioa_cfg->regs.set_uproc_interrupt_reg32);
2753 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2754 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2756 /* Signal dump data received - Clear IO debug Ack */
2757 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2758 ioa_cfg->regs.clr_interrupt_reg);
2760 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2761 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2763 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2765 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2775 #ifdef CONFIG_SCSI_IPR_DUMP
2777 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2778 * @ioa_cfg: ioa config struct
2779 * @pci_address: adapter address
2780 * @length: length of data to copy
2782 * Copy data from PCI adapter to kernel buffer.
2783 * Note: length MUST be a 4 byte multiple
2785 * 0 on success / other on failure
2787 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2788 unsigned long pci_address, u32 length)
2790 int bytes_copied = 0;
2791 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2793 unsigned long lock_flags = 0;
2794 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
/* Pick the dump size cap matching the table format (fmt3 vs fmt2). */
2797 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2799 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2801 while (bytes_copied < length &&
2802 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
/* Allocate a fresh page when the current one is full (or none yet). */
2803 if (ioa_dump->page_offset >= PAGE_SIZE ||
2804 ioa_dump->page_offset == 0) {
2805 page = (__be32 *)__get_free_page(GFP_ATOMIC);
/* Allocation failure: bail with what was copied so far. */
2809 return bytes_copied;
2812 ioa_dump->page_offset = 0;
2813 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2814 ioa_dump->next_page_index++;
2816 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
/* Copy at most the rest of the request or the rest of the page. */
2818 rem_len = length - bytes_copied;
2819 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2820 cur_len = min(rem_len, rem_page_len);
/* The actual hardware transfer runs under the host lock; an
 * ABORT_DUMP state observed here cancels the copy. */
2822 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2823 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2826 rc = ipr_get_ldump_data_section(ioa_cfg,
2827 pci_address + bytes_copied,
2828 &page[ioa_dump->page_offset / 4],
2829 (cur_len / sizeof(u32)));
2831 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2834 ioa_dump->page_offset += cur_len;
2835 bytes_copied += cur_len;
2843 return bytes_copied;
2847 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2848 * @hdr: dump entry header struct
2853 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
/* Stamp the common header fields every dump entry shares. */
2855 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2857 hdr->offset = sizeof(*hdr);
2858 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2862 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2863 * @ioa_cfg: ioa config struct
2864 * @driver_dump: driver dump struct
2869 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2870 struct ipr_driver_dump *driver_dump)
2872 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2874 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2875 driver_dump->ioa_type_entry.hdr.len =
2876 sizeof(struct ipr_dump_ioa_type_entry) -
2877 sizeof(struct ipr_dump_entry_header);
2878 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2879 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2880 driver_dump->ioa_type_entry.type = ioa_cfg->type;
/* Pack the firmware version as major.card.minor[0].minor[1] into a
 * single 32-bit word, one byte per component. */
2881 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2882 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2883 ucode_vpd->minor_release[1];
2884 driver_dump->hdr.num_entries++;
2888 * ipr_dump_version_data - Fill in the driver version in the dump.
2889 * @ioa_cfg: ioa config struct
2890 * @driver_dump: driver dump struct
2895 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2896 struct ipr_driver_dump *driver_dump)
2898 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2899 driver_dump->version_entry.hdr.len =
2900 sizeof(struct ipr_dump_version_entry) -
2901 sizeof(struct ipr_dump_entry_header);
2902 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2903 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
/* IPR_DRIVER_VERSION is a compile-time constant, so this strcpy
 * cannot overflow the fixed-size version field by surprise. */
2904 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2905 driver_dump->hdr.num_entries++;
2909 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2910 * @ioa_cfg: ioa config struct
2911 * @driver_dump: driver dump struct
2916 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2917 struct ipr_driver_dump *driver_dump)
2919 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2920 driver_dump->trace_entry.hdr.len =
2921 sizeof(struct ipr_dump_trace_entry) -
2922 sizeof(struct ipr_dump_entry_header);
2923 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2924 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
/* Snapshot the driver's in-memory trace buffer into the dump. */
2925 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2926 driver_dump->hdr.num_entries++;
2930 * ipr_dump_location_data - Fill in the IOA location in the dump.
2931 * @ioa_cfg: ioa config struct
2932 * @driver_dump: driver dump struct
2937 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2938 struct ipr_driver_dump *driver_dump)
2940 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2941 driver_dump->location_entry.hdr.len =
2942 sizeof(struct ipr_dump_location_entry) -
2943 sizeof(struct ipr_dump_entry_header);
2944 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2945 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
/* Record the PCI device name (e.g. bus/slot/function) as location. */
2946 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2947 driver_dump->hdr.num_entries++;
2951 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2952 * @ioa_cfg: ioa config struct
2953 * @dump: dump struct
2958 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2960 unsigned long start_addr, sdt_word;
2961 unsigned long lock_flags = 0;
2962 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2963 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2964 u32 num_entries, max_num_entries, start_off, end_off;
2965 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
2966 struct ipr_sdt *sdt;
2972 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Bail if the dump state changed before we got here. */
2974 if (ioa_cfg->sdt_state != READ_DUMP) {
2975 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* SIS64: give the adapter time to populate its dump table before
 * reading the mailbox. */
2979 if (ioa_cfg->sis64) {
2980 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2981 ssleep(IPR_DUMP_DELAY_SECONDS);
2982 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* The mailbox holds the adapter-side address of the SDT. */
2985 start_addr = readl(ioa_cfg->ioa_mailbox);
2987 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2988 dev_err(&ioa_cfg->pdev->dev,
2989 "Invalid dump table format: %lx\n", start_addr);
2990 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2994 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2996 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2998 /* Initialize the overall dump header */
2999 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3000 driver_dump->hdr.num_entries = 1;
3001 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3002 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3003 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3004 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
/* Fill in the four driver-side entries of the dump. */
3006 ipr_dump_version_data(ioa_cfg, driver_dump);
3007 ipr_dump_location_data(ioa_cfg, driver_dump);
3008 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3009 ipr_dump_trace_data(ioa_cfg, driver_dump);
3011 /* Update dump_header */
3012 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3014 /* IOA Dump entry */
3015 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3016 ioa_dump->hdr.len = 0;
3017 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3018 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3020 /* First entries in sdt are actually a list of dump addresses and
3021 lengths to gather the real dump data. sdt represents the pointer
3022 to the ioa generated dump table. Dump data will be extracted based
3023 on entries in this table */
3024 sdt = &ioa_dump->sdt;
3026 if (ioa_cfg->sis64) {
3027 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3028 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3030 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3031 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
/* Pull the SDT header plus entry array from the adapter. */
3034 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3035 (max_num_entries * sizeof(struct ipr_sdt_entry));
3036 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3037 bytes_to_copy / sizeof(__be32));
3039 /* Smart Dump table is ready to use and the first entry is valid */
3040 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3041 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3042 dev_err(&ioa_cfg->pdev->dev,
3043 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3044 rc, be32_to_cpu(sdt->hdr.state));
3045 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3046 ioa_cfg->sdt_state = DUMP_OBTAINED;
3047 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3051 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
/* Never trust the adapter's count beyond the table's capacity. */
3053 if (num_entries > max_num_entries)
3054 num_entries = max_num_entries;
3056 /* Update dump length to the actual data to be copied */
3057 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3059 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3061 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3063 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Walk each SDT entry and copy its region of adapter memory. */
3065 for (i = 0; i < num_entries; i++) {
3066 if (ioa_dump->hdr.len > max_dump_size) {
3067 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3071 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3072 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3074 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3076 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3077 end_off = be32_to_cpu(sdt->entry[i].end_token);
3079 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3080 bytes_to_copy = end_off - start_off;
/* Oversized regions are skipped and their entry invalidated. */
3085 if (bytes_to_copy > max_dump_size) {
3086 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3090 /* Copy data from adapter to driver buffers */
3091 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3094 ioa_dump->hdr.len += bytes_copied;
/* Short copy means the dump was truncated; mark and stop. */
3096 if (bytes_copied != bytes_to_copy) {
3097 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3104 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3106 /* Update dump_header */
3107 driver_dump->hdr.len += ioa_dump->hdr.len;
3109 ioa_cfg->sdt_state = DUMP_OBTAINED;
/* Dump support compiled out (!CONFIG_SCSI_IPR_DUMP): no-op stub. */
3114 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3118 * ipr_release_dump - Free adapter dump memory
3119 * @kref: kref struct
 *
 * kref release callback: runs when the last reference to the dump
 * is dropped via kref_put().
3124 static void ipr_release_dump(struct kref *kref)
3126 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3127 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3128 unsigned long lock_flags = 0;
/* Detach the dump from the adapter under the host lock so no new
 * references can be taken while it is being freed. */
3132 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3133 ioa_cfg->dump = NULL;
3134 ioa_cfg->sdt_state = INACTIVE;
3135 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3137 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3138 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3140 vfree(dump->ioa_dump.ioa_data);
3146 * ipr_worker_thread - Worker thread
3147 * @work: ioa config struct
3149 * Called at task level from a work thread. This function takes care
3150 * of adding and removing device from the mid-layer as configuration
3151 * changes are detected by the adapter.
3156 static void ipr_worker_thread(struct work_struct *work)
3158 unsigned long lock_flags;
3159 struct ipr_resource_entry *res;
3160 struct scsi_device *sdev;
3161 struct ipr_dump *dump;
3162 struct ipr_ioa_cfg *ioa_cfg =
3163 container_of(work, struct ipr_ioa_cfg, work_q);
3164 u8 bus, target, lun;
3168 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Dump collection takes priority over device add/remove work. */
3170 if (ioa_cfg->sdt_state == READ_DUMP) {
3171 dump = ioa_cfg->dump;
3173 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Hold a reference across the (lock-dropping) dump collection. */
3176 kref_get(&dump->kref);
3177 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3178 ipr_get_ioa_dump(ioa_cfg, dump);
3179 kref_put(&dump->kref, ipr_release_dump);
3181 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3182 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3183 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3184 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Midlayer add/remove is only safe when commands are allowed and
 * add/del is not blocked (e.g. during reset). */
3191 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3192 !ioa_cfg->allow_ml_add_del) {
3193 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Pass 1: remove devices flagged for deletion. The host lock is
 * dropped around scsi_remove_device(), which can sleep. */
3197 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3198 if (res->del_from_ml && res->sdev) {
3201 if (!scsi_device_get(sdev)) {
3202 if (!res->add_to_ml)
3203 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3205 res->del_from_ml = 0;
3206 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3207 scsi_remove_device(sdev);
3208 scsi_device_put(sdev);
3209 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Pass 2: add newly-discovered devices; coordinates are copied
 * before dropping the lock for the sleeping scsi_add_device(). */
3216 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3217 if (res->add_to_ml) {
3219 target = res->target;
3222 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3223 scsi_add_device(ioa_cfg->host, bus, target, lun);
3224 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3229 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Notify userspace (udev) that the host configuration changed. */
3230 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3234 #ifdef CONFIG_SCSI_IPR_TRACE
3236 * ipr_read_trace - Dump the adapter trace
3237 * @filp: open sysfs file
3238 * @kobj: kobject struct
3239 * @bin_attr: bin_attribute struct
 * @buf: destination buffer
 * @off: offset into the trace buffer
3242 * @count: buffer size
3245 * number of bytes printed to buffer
3247 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3248 struct bin_attribute *bin_attr,
3249 char *buf, loff_t off, size_t count)
3251 struct device *dev = container_of(kobj, struct device, kobj);
3252 struct Scsi_Host *shost = class_to_shost(dev);
3253 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3254 unsigned long lock_flags = 0;
/* Copy from the live trace buffer under the host lock so the
 * snapshot is internally consistent. */
3257 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3258 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3260 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs binary attribute exposing the adapter trace via ipr_read_trace. */
3265 static struct bin_attribute ipr_trace_attr = {
3271 .read = ipr_read_trace,
3276 * ipr_show_fw_version - Show the firmware version
3277 * @dev: class device struct
 * @attr: device attribute (unused)
 * @buf: output buffer
3281 * number of bytes printed to buffer
3283 static ssize_t ipr_show_fw_version(struct device *dev,
3284 struct device_attribute *attr, char *buf)
3286 struct Scsi_Host *shost = class_to_shost(dev);
3287 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3288 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3289 unsigned long lock_flags = 0;
/* Format the four version bytes from the cached inquiry page 3. */
3292 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3293 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3294 ucode_vpd->major_release, ucode_vpd->card_type,
3295 ucode_vpd->minor_release[0],
3296 ucode_vpd->minor_release[1]);
3297 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs "fw_version" attribute, backed by ipr_show_fw_version. */
3301 static struct device_attribute ipr_fw_version_attr = {
3303 .name = "fw_version",
3306 .show = ipr_show_fw_version,
3310 * ipr_show_log_level - Show the adapter's error logging level
3311 * @dev: class device struct
 * @attr: device attribute (unused)
 * @buf: output buffer
3315 * number of bytes printed to buffer
3317 static ssize_t ipr_show_log_level(struct device *dev,
3318 struct device_attribute *attr, char *buf)
3320 struct Scsi_Host *shost = class_to_shost(dev);
3321 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3322 unsigned long lock_flags = 0;
3325 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3326 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3327 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3332 * ipr_store_log_level - Change the adapter's error logging level
3333 * @dev: class device struct
 * @attr: device attribute (unused)
 * @buf: user-supplied decimal log level
 * @count: buffer size
3337 * number of bytes printed to buffer
3339 static ssize_t ipr_store_log_level(struct device *dev,
3340 struct device_attribute *attr,
3341 const char *buf, size_t count)
3343 struct Scsi_Host *shost = class_to_shost(dev);
3344 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3345 unsigned long lock_flags = 0;
/* Parse the new level and store it under the host lock. */
3347 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3348 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3349 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3353 static struct device_attribute ipr_log_level_attr = {
3355 .name = "log_level",
3356 .mode = S_IRUGO | S_IWUSR,
3358 .show = ipr_show_log_level,
3359 .store = ipr_store_log_level
3363 * ipr_store_diagnostics - IOA Diagnostics interface
3364 * @dev: device struct
3366 * @count: buffer size
3368 * This function will reset the adapter and wait a reasonable
3369 * amount of time for any errors that the adapter might log.
3372 * count on success / other on failure
3374 static ssize_t ipr_store_diagnostics(struct device *dev,
3375 struct device_attribute *attr,
3376 const char *buf, size_t count)
3378 struct Scsi_Host *shost = class_to_shost(dev);
3379 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3380 unsigned long lock_flags = 0;
/* Only privileged users may force a diagnostic reset. */
3383 if (!capable(CAP_SYS_ADMIN))
/* Wait for any in-flight reset/reload to finish before starting ours;
 * the lock is dropped around the sleep and re-taken to re-check. */
3386 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3387 while (ioa_cfg->in_reset_reload) {
3388 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3389 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3390 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Clear the error counter, then kick off a normal-shutdown reset and
 * wait for it to complete. */
3393 ioa_cfg->errors_logged = 0;
3394 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3396 if (ioa_cfg->in_reset_reload) {
3397 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3398 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3400 /* Wait for a second for any errors to be logged */
3403 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Success only if the adapter came back clean with no errors logged. */
3407 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3408 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3410 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Write-only sysfs attribute "run_diagnostics". */
3415 static struct device_attribute ipr_diagnostics_attr = {
3417 .name = "run_diagnostics",
3420 .store = ipr_store_diagnostics
3424 * ipr_show_adapter_state - Show the adapter's state
3425 * @class_dev: device struct
3429 * number of bytes printed to buffer
/* sysfs show handler: report "offline" when the initial HRRQ marks the
 * IOA dead, otherwise "online". */
3431 static ssize_t ipr_show_adapter_state(struct device *dev,
3432 struct device_attribute *attr, char *buf)
3434 struct Scsi_Host *shost = class_to_shost(dev);
3435 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3436 unsigned long lock_flags = 0;
3439 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3440 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3441 len = snprintf(buf, PAGE_SIZE, "offline\n");
3443 len = snprintf(buf, PAGE_SIZE, "online\n");
3444 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3449 * ipr_store_adapter_state - Change adapter state
3450 * @dev: device struct
3452 * @count: buffer size
3454 * This function will change the adapter's state.
3457 * count on success / other on failure
3459 static ssize_t ipr_store_adapter_state(struct device *dev,
3460 struct device_attribute *attr,
3461 const char *buf, size_t count)
3463 struct Scsi_Host *shost = class_to_shost(dev);
3464 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3465 unsigned long lock_flags;
3466 int result = count, i;
3468 if (!capable(CAP_SYS_ADMIN))
/* Writing "online" to a dead adapter revives it: clear the dead flag on
 * every HRRQ (each under its own _lock), reset retry/bringdown state,
 * then initiate a reset with no shutdown. */
3471 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3472 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3473 !strncmp(buf, "online", 6)) {
3474 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3475 spin_lock(&ioa_cfg->hrrq[i]._lock);
3476 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3477 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3480 ioa_cfg->reset_retries = 0;
3481 ioa_cfg->in_ioa_bringdown = 0;
3482 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3484 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Block until the reset/reload completes before returning to userspace. */
3485 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* Read/write sysfs attribute "online_state" (root-writable). */
3490 static struct device_attribute ipr_ioa_state_attr = {
3492 .name = "online_state",
3493 .mode = S_IRUGO | S_IWUSR,
3495 .show = ipr_show_adapter_state,
3496 .store = ipr_store_adapter_state
3500 * ipr_store_reset_adapter - Reset the adapter
3501 * @dev: device struct
3503 * @count: buffer size
3505 * This function will reset the adapter.
3508 * count on success / other on failure
3510 static ssize_t ipr_store_reset_adapter(struct device *dev,
3511 struct device_attribute *attr,
3512 const char *buf, size_t count)
3514 struct Scsi_Host *shost = class_to_shost(dev);
3515 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3516 unsigned long lock_flags;
3519 if (!capable(CAP_SYS_ADMIN))
/* Start a normal-shutdown reset unless one is already in flight, then
 * wait for it to finish before returning. */
3522 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3523 if (!ioa_cfg->in_reset_reload)
3524 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3525 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3526 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* Write-only sysfs attribute "reset_host". */
3531 static struct device_attribute ipr_ioa_reset_attr = {
3533 .name = "reset_host",
3536 .store = ipr_store_reset_adapter
/* Forward declaration: the poll callback registered with blk_iopoll below. */
3539 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3541 * ipr_show_iopoll_weight - Show ipr polling mode
3542 * @dev: class device struct
3546 * number of bytes printed to buffer
3548 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3549 struct device_attribute *attr, char *buf)
3551 struct Scsi_Host *shost = class_to_shost(dev);
3552 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3553 unsigned long lock_flags = 0;
3556 spin_lock_irqsave(shost->host_lock, lock_flags);
3557 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3558 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3564 * ipr_store_iopoll_weight - Change the adapter's polling mode
3565 * @dev: class device struct
3569 * number of bytes printed to buffer
3571 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3572 struct device_attribute *attr,
3573 const char *buf, size_t count)
3575 struct Scsi_Host *shost = class_to_shost(dev);
3576 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3577 unsigned long user_iopoll_weight;
3578 unsigned long lock_flags = 0;
/* blk-iopoll is only usable on 64-bit SIS adapters. */
3581 if (!ioa_cfg->sis64) {
3582 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3585 if (kstrtoul(buf, 10, &user_iopoll_weight))
/* Validate range and reject no-op updates. */
3588 if (user_iopoll_weight > 256) {
3589 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
3593 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3594 dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
/* Tear down polling on all secondary HRRQs (index 0 is serviced by the
 * primary interrupt path) before switching the weight. */
3598 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3599 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3600 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3601 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
/* Install the new weight and re-enable polling with it. */
3604 spin_lock_irqsave(shost->host_lock, lock_flags);
3605 ioa_cfg->iopoll_weight = user_iopoll_weight;
3606 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3607 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3608 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3609 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3610 ioa_cfg->iopoll_weight, ipr_iopoll);
3611 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3614 spin_unlock_irqrestore(shost->host_lock, lock_flags);
/* Read/write sysfs attribute "iopoll_weight" (root-writable). */
3619 static struct device_attribute ipr_iopoll_weight_attr = {
3621 .name = "iopoll_weight",
3622 .mode = S_IRUGO | S_IWUSR,
3624 .show = ipr_show_iopoll_weight,
3625 .store = ipr_store_iopoll_weight
3629 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3630 * @buf_len: buffer length
3632 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3633 * list to use for microcode download
3636 * pointer to sglist / NULL on failure
3638 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3640 int sg_size, order, bsize_elem, num_elem, i, j;
3641 struct ipr_sglist *sglist;
3642 struct scatterlist *scatterlist;
3645 /* Get the minimum size per scatter/gather element */
3646 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3648 /* Get the actual size per element */
3649 order = get_order(sg_size);
3651 /* Determine the actual number of bytes per element */
3652 bsize_elem = PAGE_SIZE * (1 << order);
3654 /* Determine the actual number of sg entries needed */
3655 if (buf_len % bsize_elem)
3656 num_elem = (buf_len / bsize_elem) + 1;
3658 num_elem = buf_len / bsize_elem;
3660 /* Allocate a scatter/gather list for the DMA */
/* num_elem - 1 because ipr_sglist already embeds one scatterlist entry. */
3661 sglist = kzalloc(sizeof(struct ipr_sglist) +
3662 (sizeof(struct scatterlist) * (num_elem - 1)),
3665 if (sglist == NULL) {
3670 scatterlist = sglist->scatterlist;
3671 sg_init_table(scatterlist, num_elem);
3673 sglist->order = order;
3674 sglist->num_sg = num_elem;
3676 /* Allocate a bunch of sg elements */
3677 for (i = 0; i < num_elem; i++) {
3678 page = alloc_pages(GFP_KERNEL, order);
3682 /* Free up what we already allocated */
/* Partial-failure unwind: release pages allocated so far, newest first. */
3683 for (j = i - 1; j >= 0; j--)
3684 __free_pages(sg_page(&scatterlist[j]), order);
3689 sg_set_page(&scatterlist[i], page, 0, 0);
3696 * ipr_free_ucode_buffer - Frees a microcode download buffer
3697 * @p_dnld: scatter/gather list pointer
3699 * Free a DMA'able ucode download buffer previously allocated with
3700 * ipr_alloc_ucode_buffer
3705 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
/* Release every page chunk at the order recorded at allocation time. */
3709 for (i = 0; i < sglist->num_sg; i++)
3710 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3716 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3717 * @sglist: scatter/gather list pointer
3718 * @buffer: buffer pointer
3719 * @len: buffer length
3721 * Copy a microcode image from a user buffer into a buffer allocated by
3722 * ipr_alloc_ucode_buffer
3725 * 0 on success / other on failure
3727 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3728 u8 *buffer, u32 len)
3730 int bsize_elem, i, result = 0;
3731 struct scatterlist *scatterlist;
3734 /* Determine the actual number of bytes per element */
3735 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3737 scatterlist = sglist->scatterlist;
/* Copy full-sized chunks first, mapping each sg page in turn. */
3739 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3740 struct page *page = sg_page(&scatterlist[i]);
3743 memcpy(kaddr, buffer, bsize_elem);
3746 scatterlist[i].length = bsize_elem;
/* Then copy the final partial chunk, if the image is not an exact
 * multiple of the element size. */
3754 if (len % bsize_elem) {
3755 struct page *page = sg_page(&scatterlist[i]);
3758 memcpy(kaddr, buffer, len % bsize_elem);
3761 scatterlist[i].length = len % bsize_elem;
3764 sglist->buffer_len = len;
3769 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3770 * @ipr_cmd: ipr command struct
3771 * @sglist: scatter/gather list
3773 * Builds a microcode download IOA data list (IOADL).
/* 64-bit descriptor variant: translate each DMA-mapped sg entry into an
 * ioadl64 descriptor for a write (host -> adapter) transfer. */
3776 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3777 struct ipr_sglist *sglist)
3779 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3780 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3781 struct scatterlist *scatterlist = sglist->scatterlist;
3784 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3785 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3786 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3789 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3790 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3791 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3792 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3793 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
/* Mark the final descriptor so the adapter knows the list ends here. */
3796 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3800 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3801 * @ipr_cmd: ipr command struct
3802 * @sglist: scatter/gather list
3804 * Builds a microcode download IOA data list (IOADL).
/* 32-bit descriptor variant of ipr_build_ucode_ioadl64: flags and length
 * share one big-endian word per entry. */
3807 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3808 struct ipr_sglist *sglist)
3810 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3811 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3812 struct scatterlist *scatterlist = sglist->scatterlist;
3815 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3816 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3817 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3820 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3822 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3823 ioadl[i].flags_and_data_len =
3824 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3826 cpu_to_be32(sg_dma_address(&scatterlist[i]));
/* Flag the last descriptor to terminate the list. */
3829 ioadl[i-1].flags_and_data_len |=
3830 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3834 * ipr_update_ioa_ucode - Update IOA's microcode
3835 * @ioa_cfg: ioa config struct
3836 * @sglist: scatter/gather list
3838 * Initiate an adapter reset to update the IOA's microcode
3841 * 0 on success / -EIO on failure
3843 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3844 struct ipr_sglist *sglist)
3846 unsigned long lock_flags;
/* Drain any reset already in progress before starting a download. */
3848 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3849 while (ioa_cfg->in_reset_reload) {
3850 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3851 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3852 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* ucode_sglist doubles as a "download in progress" flag. */
3855 if (ioa_cfg->ucode_sglist) {
3856 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3857 dev_err(&ioa_cfg->pdev->dev,
3858 "Microcode download already in progress\n");
/* DMA-map the image; failure means we cannot hand it to the adapter. */
3862 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3863 sglist->num_sg, DMA_TO_DEVICE);
3865 if (!sglist->num_dma_sg) {
3866 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3867 dev_err(&ioa_cfg->pdev->dev,
3868 "Failed to map microcode download buffer!\n");
/* Publish the buffer and reset the adapter; the reset job performs the
 * actual download. Wait for completion, then clear the pointer. */
3872 ioa_cfg->ucode_sglist = sglist;
3873 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3874 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3875 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3877 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3878 ioa_cfg->ucode_sglist = NULL;
3879 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3884 * ipr_store_update_fw - Update the firmware on the adapter
3885 * @class_dev: device struct
3887 * @count: buffer size
3889 * This function will update the firmware on the adapter.
3892 * count on success / other on failure
3894 static ssize_t ipr_store_update_fw(struct device *dev,
3895 struct device_attribute *attr,
3896 const char *buf, size_t count)
3898 struct Scsi_Host *shost = class_to_shost(dev);
3899 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3900 struct ipr_ucode_image_header *image_hdr;
3901 const struct firmware *fw_entry;
3902 struct ipr_sglist *sglist;
3905 int len, result, dnld_size;
3907 if (!capable(CAP_SYS_ADMIN))
/* The buffer holds a firmware filename; strip the trailing newline.
 * NOTE(review): fname[len-1] = '\0' assumes a non-empty buf -- verify
 * sysfs guarantees count > 0 here. */
3910 len = snprintf(fname, 99, "%s", buf);
3911 fname[len-1] = '\0';
3913 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3914 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
/* Skip past the vendor image header to the payload proper. */
3918 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3920 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3921 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3922 sglist = ipr_alloc_ucode_buffer(dnld_size);
3925 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3926 release_firmware(fw_entry);
3930 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3933 dev_err(&ioa_cfg->pdev->dev,
3934 "Microcode buffer copy to DMA buffer failed\n");
3938 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
3940 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
/* Common cleanup: free the sg buffer and drop the firmware image. */
3945 ipr_free_ucode_buffer(sglist);
3946 release_firmware(fw_entry);
/* Write-only sysfs attribute "update_fw". */
3950 static struct device_attribute ipr_update_fw_attr = {
3952 .name = "update_fw",
3955 .store = ipr_store_update_fw
3959 * ipr_show_fw_type - Show the adapter's firmware type.
3960 * @dev: class device struct
3964 * number of bytes printed to buffer
/* sysfs show handler: 1 for 64-bit SIS adapters, 0 otherwise. */
3966 static ssize_t ipr_show_fw_type(struct device *dev,
3967 struct device_attribute *attr, char *buf)
3969 struct Scsi_Host *shost = class_to_shost(dev);
3970 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3971 unsigned long lock_flags = 0;
3974 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3975 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3976 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only sysfs attribute exposing the firmware type. */
3980 static struct device_attribute ipr_ioa_fw_type_attr = {
3985 .show = ipr_show_fw_type
/* NULL-terminated table of host-level sysfs attributes registered with
 * the SCSI mid-layer for each adapter. */
3988 static struct device_attribute *ipr_ioa_attrs[] = {
3989 &ipr_fw_version_attr,
3990 &ipr_log_level_attr,
3991 &ipr_diagnostics_attr,
3992 &ipr_ioa_state_attr,
3993 &ipr_ioa_reset_attr,
3994 &ipr_update_fw_attr,
3995 &ipr_ioa_fw_type_attr,
3996 &ipr_iopoll_weight_attr,
#ifdef CONFIG_SCSI_IPR_DUMP
4002 * ipr_read_dump - Dump the adapter
4003 * @filp: open sysfs file
4004 * @kobj: kobject struct
4005 * @bin_attr: bin_attribute struct
4008 * @count: buffer size
4011 * number of bytes printed to buffer
4013 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4014 struct bin_attribute *bin_attr,
4015 char *buf, loff_t off, size_t count)
4017 struct device *cdev = container_of(kobj, struct device, kobj);
4018 struct Scsi_Host *shost = class_to_shost(cdev);
4019 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4020 struct ipr_dump *dump;
4021 unsigned long lock_flags = 0;
4026 if (!capable(CAP_SYS_ADMIN))
/* Only serve a fully-captured dump; take a kref so it cannot be freed
 * underneath us once we drop the host lock. */
4029 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4030 dump = ioa_cfg->dump;
4032 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4033 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4036 kref_get(&dump->kref);
4037 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Clamp the request to the dump's total length. */
4039 if (off > dump->driver_dump.hdr.len) {
4040 kref_put(&dump->kref, ipr_release_dump);
4044 if (off + count > dump->driver_dump.hdr.len) {
4045 count = dump->driver_dump.hdr.len - off;
/* Stage 1: copy out of the fixed driver_dump header region. */
4049 if (count && off < sizeof(dump->driver_dump)) {
4050 if (off + count > sizeof(dump->driver_dump))
4051 len = sizeof(dump->driver_dump) - off;
4054 src = (u8 *)&dump->driver_dump + off;
4055 memcpy(buf, src, len);
/* Stage 2: copy from the SDT table; its end depends on the SIS format. */
4061 off -= sizeof(dump->driver_dump);
4064 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4065 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4066 sizeof(struct ipr_sdt_entry));
4068 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4069 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4071 if (count && off < sdt_end) {
4072 if (off + count > sdt_end)
4073 len = sdt_end - off;
4076 src = (u8 *)&dump->ioa_dump + off;
4077 memcpy(buf, src, len);
/* Stage 3: copy from the page array of captured IOA data, one page at
 * a time so each copy stays within a single page. */
4086 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4087 len = PAGE_ALIGN(off) - off;
4090 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4091 src += off & ~PAGE_MASK;
4092 memcpy(buf, src, len);
4098 kref_put(&dump->kref, ipr_release_dump);
4103 * ipr_alloc_dump - Prepare for adapter dump
4104 * @ioa_cfg: ioa config struct
4107 * 0 on success / other on failure
4109 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4111 struct ipr_dump *dump;
4113 unsigned long lock_flags = 0;
4115 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4118 ipr_err("Dump memory allocation failed\n");
/* Page-pointer array sized per SIS format; vmalloc because it can be
 * large and needs no physical contiguity. */
4123 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4125 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4128 ipr_err("Dump memory allocation failed\n");
4133 dump->ioa_dump.ioa_data = ioa_data;
4135 kref_init(&dump->kref);
4136 dump->ioa_cfg = ioa_cfg;
4138 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Abort if another dump request won the race (state left INACTIVE only
 * when no dump is pending). */
4140 if (INACTIVE != ioa_cfg->sdt_state) {
4141 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4142 vfree(dump->ioa_dump.ioa_data);
4147 ioa_cfg->dump = dump;
4148 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
/* If the adapter is already dead, trigger the worker to capture now. */
4149 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4150 ioa_cfg->dump_taken = 1;
4151 schedule_work(&ioa_cfg->work_q);
4153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4159 * ipr_free_dump - Free adapter dump memory
4160 * @ioa_cfg: ioa config struct
4163 * 0 on success / other on failure
4165 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4167 struct ipr_dump *dump;
4168 unsigned long lock_flags = 0;
/* Detach the dump from the config under the lock, then drop our
 * reference; the kref release callback does the actual freeing. */
4172 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4173 dump = ioa_cfg->dump;
4175 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4179 ioa_cfg->dump = NULL;
4180 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4182 kref_put(&dump->kref, ipr_release_dump);
4189 * ipr_write_dump - Setup dump state of adapter
4190 * @filp: open sysfs file
4191 * @kobj: kobject struct
4192 * @bin_attr: bin_attribute struct
4195 * @count: buffer size
4198 * number of bytes printed to buffer
4200 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4201 struct bin_attribute *bin_attr,
4202 char *buf, loff_t off, size_t count)
4204 struct device *cdev = container_of(kobj, struct device, kobj);
4205 struct Scsi_Host *shost = class_to_shost(cdev);
4206 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4209 if (!capable(CAP_SYS_ADMIN))
/* '1' arms dump capture, '0' releases a captured dump. */
4213 rc = ipr_alloc_dump(ioa_cfg);
4214 else if (buf[0] == '0')
4215 rc = ipr_free_dump(ioa_cfg);
/* Binary sysfs attribute "dump" (root read/write). */
4225 static struct bin_attribute ipr_dump_attr = {
4228 .mode = S_IRUSR | S_IWUSR,
4231 .read = ipr_read_dump,
4232 .write = ipr_write_dump
/*
 * ipr_free_dump - stub used when CONFIG_SCSI_IPR_DUMP is disabled.
 * @ioa_cfg: ioa config struct (unused)
 *
 * Dump support is compiled out, so there is nothing to free; always
 * returns 0 so callers need not be conditionally compiled.
 * (Fix: dropped the stray semicolon after the function definition,
 * which is invalid ISO C at file scope.)
 */
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4239 * ipr_change_queue_depth - Change the device's queue depth
4240 * @sdev: scsi device struct
4241 * @qdepth: depth to set
4242 * @reason: calling context
4247 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4250 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4251 struct ipr_resource_entry *res;
4252 unsigned long lock_flags = 0;
/* Only the default queue-depth change reason is supported. */
4254 if (reason != SCSI_QDEPTH_DEFAULT)
4257 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4258 res = (struct ipr_resource_entry *)sdev->hostdata;
/* SATA (GATA) devices are capped at the per-ATA-LUN command limit. */
4260 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4261 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4262 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4264 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4265 return sdev->queue_depth;
4269 * ipr_change_queue_type - Change the device's queue type
4270 * @dsev: scsi device struct
4271 * @tag_type: type of tags to use
4274 * actual queue type set
4276 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4278 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4279 struct ipr_resource_entry *res;
4280 unsigned long lock_flags = 0;
4282 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4283 res = (struct ipr_resource_entry *)sdev->hostdata;
/* Tagged queuing only applies to generic SCSI devices that advertise
 * tag support; adapter firmware handles quiescing for us. */
4286 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4288 * We don't bother quiescing the device here since the
4289 * adapter firmware does it for us.
4291 scsi_set_tag_type(sdev, tag_type);
4294 scsi_activate_tcq(sdev, sdev->queue_depth);
4296 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4302 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4307 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4308 * @dev: device struct
4309 * @attr: device attribute structure
4313 * number of bytes printed to buffer
4315 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4317 struct scsi_device *sdev = to_scsi_device(dev);
4318 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4319 struct ipr_resource_entry *res;
4320 unsigned long lock_flags = 0;
/* -ENXIO is returned unchanged when the device has no resource entry. */
4321 ssize_t len = -ENXIO;
4323 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4324 res = (struct ipr_resource_entry *)sdev->hostdata;
4326 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4327 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only per-device sysfs attribute "adapter_handle". */
4331 static struct device_attribute ipr_adapter_handle_attr = {
4333 .name = "adapter_handle",
4336 .show = ipr_show_adapter_handle
4340 * ipr_show_resource_path - Show the resource path or the resource address for
4342 * @dev: device struct
4343 * @attr: device attribute structure
4347 * number of bytes printed to buffer
4349 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4351 struct scsi_device *sdev = to_scsi_device(dev);
4352 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4353 struct ipr_resource_entry *res;
4354 unsigned long lock_flags = 0;
4355 ssize_t len = -ENXIO;
4356 char buffer[IPR_MAX_RES_PATH_LENGTH];
4358 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4359 res = (struct ipr_resource_entry *)sdev->hostdata;
/* SIS64 adapters expose a formatted resource path; older adapters fall
 * back to the host:bus:target:lun address. */
4360 if (res && ioa_cfg->sis64)
4361 len = snprintf(buf, PAGE_SIZE, "%s\n",
4362 __ipr_format_res_path(res->res_path, buffer,
4365 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4366 res->bus, res->target, res->lun);
4368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only per-device sysfs attribute "resource_path". */
4372 static struct device_attribute ipr_resource_path_attr = {
4374 .name = "resource_path",
4377 .show = ipr_show_resource_path
4381 * ipr_show_device_id - Show the device_id for this device.
4382 * @dev: device struct
4383 * @attr: device attribute structure
4387 * number of bytes printed to buffer
4389 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4391 struct scsi_device *sdev = to_scsi_device(dev);
4392 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4393 struct ipr_resource_entry *res;
4394 unsigned long lock_flags = 0;
4395 ssize_t len = -ENXIO;
4397 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4398 res = (struct ipr_resource_entry *)sdev->hostdata;
/* SIS64 adapters report dev_id; older adapters report the LUN WWN. */
4399 if (res && ioa_cfg->sis64)
4400 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4402 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4404 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only per-device sysfs attribute "device_id". */
4408 static struct device_attribute ipr_device_id_attr = {
4410 .name = "device_id",
4413 .show = ipr_show_device_id
4417 * ipr_show_resource_type - Show the resource type for this device.
4418 * @dev: device struct
4419 * @attr: device attribute structure
4423 * number of bytes printed to buffer
4425 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4427 struct scsi_device *sdev = to_scsi_device(dev);
4428 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4429 struct ipr_resource_entry *res;
4430 unsigned long lock_flags = 0;
4431 ssize_t len = -ENXIO;
4433 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4434 res = (struct ipr_resource_entry *)sdev->hostdata;
4437 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4439 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only per-device sysfs attribute "resource_type". */
4443 static struct device_attribute ipr_resource_type_attr = {
4445 .name = "resource_type",
4448 .show = ipr_show_resource_type
/* NULL-terminated table of per-device sysfs attributes. */
4451 static struct device_attribute *ipr_dev_attrs[] = {
4452 &ipr_adapter_handle_attr,
4453 &ipr_resource_path_attr,
4454 &ipr_device_id_attr,
4455 &ipr_resource_type_attr,
4460 * ipr_biosparam - Return the HSC mapping
4461 * @sdev: scsi device struct
4462 * @block_device: block device pointer
4463 * @capacity: capacity of the device
4464 * @parm: Array containing returned HSC values.
4466 * This function generates the HSC parms that fdisk uses.
4467 * We want to make sure we return something that places partitions
4468 * on 4k boundaries for best performance with the IOA.
4473 static int ipr_biosparam(struct scsi_device *sdev,
4474 struct block_device *block_device,
4475 sector_t capacity, int *parm)
/* Fixed 128 heads x 32 sectors geometry => cylinders = capacity / 4096,
 * keeping partition starts 4k-aligned. sector_div handles 64-bit
 * capacity on 32-bit platforms. */
4483 cylinders = capacity;
4484 sector_div(cylinders, (128 * 32));
4489 parm[2] = cylinders;
4495 * ipr_find_starget - Find target based on bus/target.
4496 * @starget: scsi target struct
4499 * resource entry pointer if found / NULL if not found
4501 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4503 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4504 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4505 struct ipr_resource_entry *res;
/* Linear search of the in-use resource queue for a bus/target match.
 * NOTE(review): caller is expected to hold the host lock -- confirm. */
4507 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4508 if ((res->bus == starget->channel) &&
4509 (res->target == starget->id)) {
/* Defined later in the file; referenced when allocating SATA ports. */
4517 static struct ata_port_info sata_port_info;
4520 * ipr_target_alloc - Prepare for commands to a SCSI target
4521 * @starget: scsi target struct
4523 * If the device is a SATA device, this function allocates an
4524 * ATA port with libata, else it does nothing.
4527 * 0 on success / non-0 on failure
4529 static int ipr_target_alloc(struct scsi_target *starget)
4531 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4532 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4533 struct ipr_sata_port *sata_port;
4534 struct ata_port *ap;
4535 struct ipr_resource_entry *res;
4536 unsigned long lock_flags;
4538 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4539 res = ipr_find_starget(starget);
4540 starget->hostdata = NULL;
/* SATA target: drop the lock for the sleeping allocations, then link
 * the new sata_port to the resource entry and libata port. */
4542 if (res && ipr_is_gata(res)) {
4543 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4544 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4548 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4550 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4551 sata_port->ioa_cfg = ioa_cfg;
4553 sata_port->res = res;
4555 res->sata_port = sata_port;
4556 ap->private_data = sata_port;
4557 starget->hostdata = sata_port;
4563 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4569 * ipr_target_destroy - Destroy a SCSI target
4570 * @starget: scsi target struct
4572 * If the device was a SATA device, this function frees the libata
4573 * ATA port, else it does nothing.
4576 static void ipr_target_destroy(struct scsi_target *starget)
4578 struct ipr_sata_port *sata_port = starget->hostdata;
4579 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4580 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
/* On SIS64, if the target no longer has a resource entry, release its
 * virtual-bus ID from the matching allocation bitmap. */
4582 if (ioa_cfg->sis64) {
4583 if (!ipr_find_starget(starget)) {
4584 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4585 clear_bit(starget->id, ioa_cfg->array_ids);
4586 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4587 clear_bit(starget->id, ioa_cfg->vset_ids);
4588 else if (starget->channel == 0)
4589 clear_bit(starget->id, ioa_cfg->target_ids);
/* Free the libata port that ipr_target_alloc created, if any. */
4594 starget->hostdata = NULL;
4595 ata_sas_port_destroy(sata_port->ap);
4601 * ipr_find_sdev - Find device based on bus/target/lun.
4602 * @sdev: scsi device struct
4605 * resource entry pointer if found / NULL if not found
4607 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4609 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4610 struct ipr_resource_entry *res;
/* Linear search of the in-use resource queue on full bus/target/lun.
 * NOTE(review): caller is expected to hold the host lock -- confirm. */
4612 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4613 if ((res->bus == sdev->channel) &&
4614 (res->target == sdev->id) &&
4615 (res->lun == sdev->lun))
4623 * ipr_slave_destroy - Unconfigure a SCSI device
4624 * @sdev: scsi device struct
4629 static void ipr_slave_destroy(struct scsi_device *sdev)
4631 struct ipr_resource_entry *res;
4632 struct ipr_ioa_cfg *ioa_cfg;
4633 unsigned long lock_flags = 0;
4635 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
/* Detach the resource entry from the sdev; for SATA devices also mark
 * the libata device slot absent and break the sata_port link. */
4637 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4638 res = (struct ipr_resource_entry *) sdev->hostdata;
4641 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4642 sdev->hostdata = NULL;
4644 res->sata_port = NULL;
4646 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4650 * ipr_slave_configure - Configure a SCSI device
4651 * @sdev: scsi device struct
4653 * This function configures the specified scsi device.
4658 static int ipr_slave_configure(struct scsi_device *sdev)
4660 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4661 struct ipr_resource_entry *res;
4662 struct ata_port *ap = NULL;
4663 unsigned long lock_flags = 0;
4664 char buffer[IPR_MAX_RES_PATH_LENGTH];
4666 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4667 res = sdev->hostdata;
/* AF DASD resources present as RAID; AF DASD and the IOA resource
 * itself are driver-managed, so keep upper-layer drivers off them. */
4669 if (ipr_is_af_dasd_device(res))
4670 sdev->type = TYPE_RAID;
4671 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4672 sdev->scsi_level = 4;
4673 sdev->no_uld_attach = 1;
/* Volume sets get a longer timeout and a capped transfer size. */
4675 if (ipr_is_vset_device(res)) {
4676 blk_queue_rq_timeout(sdev->request_queue,
4677 IPR_VSET_RW_TIMEOUT);
4678 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4680 if (ipr_is_gata(res) && res->sata_port)
4681 ap = res->sata_port->ap;
4682 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* SATA devices are configured through libata with the ATA LUN depth;
 * everything else uses the host default depth. */
4685 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4686 ata_sas_slave_configure(sdev, ap);
4688 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4690 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4691 ipr_format_res_path(ioa_cfg,
4692 res->res_path, buffer, sizeof(buffer)));
4695 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4700 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4701 * @sdev: scsi device struct
4703 * This function initializes an ATA port so that future commands
4704 * sent through queuecommand will work.
4709 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4711 struct ipr_sata_port *sata_port = NULL;
/* The ipr_sata_port was stashed on the starget by target-alloc code. */
4715 if (sdev->sdev_target)
4716 sata_port = sdev->sdev_target->hostdata;
4718 rc = ata_sas_port_init(sata_port->ap);
4720 rc = ata_sas_sync_probe(sata_port->ap);
/* On probe failure, unwind via the normal destroy path (elided
 * error-check lines presumably gate this — TODO confirm). */
4724 ipr_slave_destroy(sdev);
4731 * ipr_slave_alloc - Prepare for commands to a device.
4732 * @sdev: scsi device struct
4734 * This function saves a pointer to the resource entry
4735 * in the scsi device struct if the device exists. We
4736 * can then use this pointer in ipr_queuecommand when
4737 * handling new commands.
4740 * 0 on success / -ENXIO if device does not exist
4742 static int ipr_slave_alloc(struct scsi_device *sdev)
4744 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4745 struct ipr_resource_entry *res;
4746 unsigned long lock_flags;
4749 sdev->hostdata = NULL;
4751 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Map the mid-layer's bus/target/lun onto our resource table entry. */
4753 res = ipr_find_sdev(sdev);
4758 sdev->hostdata = res;
4759 if (!ipr_is_naca_model(res))
4760 res->needs_sync_complete = 1;
/* SATA devices need additional libata port setup; drop the lock first
 * since ipr_ata_slave_alloc may sleep during probe. */
4762 if (ipr_is_gata(res)) {
4763 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4764 return ipr_ata_slave_alloc(sdev);
4768 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4774 * ipr_match_lun - Match function for specified LUN
4775 * @ipr_cmd: ipr command struct
4776 * @device: device to match (sdev)
4779 * 1 if command matches sdev / 0 if command does not match sdev
4781 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
/* Match callback for ipr_wait_for_ops: true iff this op carries a SCSI
 * command bound to the given scsi_device. */
4783 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4789 * ipr_wait_for_ops - Wait for matching commands to complete
4790 * @ipr_cmd: ipr command struct
4791 * @device: device to match (sdev)
4792 * @match: match function to use
4797 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4798 int (*match)(struct ipr_cmnd *, void *))
4800 struct ipr_cmnd *ipr_cmd;
4802 unsigned long flags;
4803 struct ipr_hrr_queue *hrrq;
4804 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4805 DECLARE_COMPLETION_ONSTACK(comp);
/* Pass 1: under each hrrq lock, attach our on-stack completion to every
 * pending op that matches, so its completion path wakes us. */
4811 for_each_hrrq(hrrq, ioa_cfg) {
4812 spin_lock_irqsave(hrrq->lock, flags);
4813 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4814 if (match(ipr_cmd, device)) {
/* NOTE(review): "∁" below is a mis-encoded "&comp;" from the
 * extraction — left byte-identical; fix when reconciling with the
 * pristine source. */
4815 ipr_cmd->eh_comp = ∁
4819 spin_unlock_irqrestore(hrrq->lock, flags);
4823 timeout = wait_for_completion_timeout(&comp, timeout);
/* Pass 2 (after wait/timeout): detach the completion pointers so a late
 * finish cannot dereference our dead stack frame. */
4828 for_each_hrrq(hrrq, ioa_cfg) {
4829 spin_lock_irqsave(hrrq->lock, flags);
4830 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4831 if (match(ipr_cmd, device)) {
4832 ipr_cmd->eh_comp = NULL;
4836 spin_unlock_irqrestore(hrrq->lock, flags);
4840 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
/* Presumably "wait" (declaration elided) tracks whether matches remain. */
4842 return wait ? FAILED : SUCCESS;
/* SCSI EH host-reset handler: kick off an adapter reset (unless one is
 * already in flight or the IOA is dead), wait for it, then report. */
4851 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4853 struct ipr_ioa_cfg *ioa_cfg;
4854 unsigned long lock_flags = 0;
4858 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4859 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4861 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4862 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4863 dev_err(&ioa_cfg->pdev->dev,
4864 "Adapter being reset as a result of error recovery.\n");
/* If a dump was requested, promote it so the reset path captures it. */
4866 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4867 ioa_cfg->sdt_state = GET_DUMP;
/* Drop the lock while sleeping for the reset/reload to finish. */
4870 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4871 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4872 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4874 /* If we got hit with a host reset while we were already resetting
4875 the adapter for some reason, and the reset failed. */
4876 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4881 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4887 * ipr_device_reset - Reset the device
4888 * @ioa_cfg: ioa config struct
4889 * @res: resource entry struct
4891 * This function issues a device reset to the affected device.
4892 * If the device is a SCSI device, a LUN reset will be sent
4893 * to the device first. If that does not work, a target reset
4894 * will be sent. If the device is a SATA device, a PHY reset will
4898 * 0 on success / non-zero on failure
4900 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4901 struct ipr_resource_entry *res)
4903 struct ipr_cmnd *ipr_cmd;
4904 struct ipr_ioarcb *ioarcb;
4905 struct ipr_cmd_pkt *cmd_pkt;
4906 struct ipr_ioarcb_ata_regs *regs;
4910 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4911 ioarcb = &ipr_cmd->ioarcb;
4912 cmd_pkt = &ioarcb->cmd_pkt;
/* SIS64 keeps the ATA register block out-of-line in the command's
 * ioadl area; SIS32 embeds it in the IOARCB add_data union. */
4914 if (ipr_cmd->ioa_cfg->sis64) {
4915 regs = &ipr_cmd->i.ata_ioadl.regs;
4916 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4918 regs = &ioarcb->u.add_data.u.regs;
/* Build an IOA-internal RESET DEVICE command for this resource. */
4920 ioarcb->res_handle = res->res_handle;
4921 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4922 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
/* SATA devices get a PHY reset variant with ATA status flags. */
4923 if (ipr_is_gata(res)) {
4924 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4925 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4926 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4929 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4930 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4931 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
/* Preserve the returned ATA taskfile/status for libata, picking the
 * IOASA layout matching the adapter generation. */
4932 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4933 if (ipr_cmd->ioa_cfg->sis64)
4934 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4935 sizeof(struct ipr_ioasa_gata));
4937 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4938 sizeof(struct ipr_ioasa_gata));
/* Any sense key in the IOASC means the reset failed. */
4942 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4946 * ipr_sata_reset - Reset the SATA port
4947 * @link: SATA link to reset
4948 * @classes: class of the attached device
4950 * This function issues a SATA phy reset to the affected ATA link.
4953 * 0 on success / non-zero on failure
4955 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4956 unsigned long deadline)
4958 struct ipr_sata_port *sata_port = link->ap->private_data;
4959 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4960 struct ipr_resource_entry *res;
4961 unsigned long lock_flags = 0;
4965 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* If the adapter is mid reset/reload, sleep (lock dropped) until it is
 * done; re-check under the lock since the state may flip repeatedly. */
4966 while (ioa_cfg->in_reset_reload) {
4967 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4968 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4969 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4972 res = sata_port->res;
/* Delegate to the generic device reset (PHY reset for SATA) and report
 * the device class libata expects. */
4974 rc = ipr_device_reset(ioa_cfg, res);
4975 *classes = res->ata_class;
4978 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4984 * ipr_eh_dev_reset - Reset the device
4985 * @scsi_cmd: scsi command struct
4987 * This function issues a device reset to the affected device.
4988 * A LUN reset will be sent to the device first. If that does
4989 * not work, a target reset will be sent.
/* Called with the host lock held (see ipr_eh_dev_reset wrapper). */
4994 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4996 struct ipr_cmnd *ipr_cmd;
4997 struct ipr_ioa_cfg *ioa_cfg;
4998 struct ipr_resource_entry *res;
4999 struct ata_port *ap;
5001 struct ipr_hrr_queue *hrrq;
5004 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5005 res = scsi_cmd->device->hostdata;
5011 * If we are currently going through reset/reload, return failed. This will force the
5012 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5015 if (ioa_cfg->in_reset_reload)
5017 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
/* Retarget the completion of every op outstanding on this resource to
 * the EH done handlers; for ATA ops also mark the qc as failed/timed out
 * so libata's EH picks them up. */
5020 for_each_hrrq(hrrq, ioa_cfg) {
5021 spin_lock(&hrrq->_lock);
5022 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5023 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5024 if (ipr_cmd->scsi_cmd)
5025 ipr_cmd->done = ipr_scsi_eh_done;
5027 ipr_cmd->done = ipr_sata_eh_done;
5029 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5030 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5031 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5035 spin_unlock(&hrrq->_lock);
5037 res->resetting_device = 1;
5038 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
/* SATA: run libata's standard EH (host lock dropped across it), then
 * re-scan pending queues; SCSI: issue our own device reset. */
5040 if (ipr_is_gata(res) && res->sata_port) {
5041 ap = res->sata_port->ap;
5042 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5043 ata_std_error_handler(ap);
5044 spin_lock_irq(scsi_cmd->device->host->host_lock);
5046 for_each_hrrq(hrrq, ioa_cfg) {
5047 spin_lock(&hrrq->_lock);
5048 list_for_each_entry(ipr_cmd,
5049 &hrrq->hrrq_pending_q, queue) {
5050 if (ipr_cmd->ioarcb.res_handle ==
5056 spin_unlock(&hrrq->_lock);
5059 rc = ipr_device_reset(ioa_cfg, res);
5060 res->resetting_device = 0;
5063 return rc ? FAILED : SUCCESS;
/* SCSI EH device-reset entry point: takes the host lock around the core
 * reset, then waits for all ops bound to this sdev to drain. */
5066 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5069 struct ipr_ioa_cfg *ioa_cfg;
5071 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5073 spin_lock_irq(cmd->device->host->host_lock);
5074 rc = __ipr_eh_dev_reset(cmd);
5075 spin_unlock_irq(cmd->device->host->host_lock);
/* Presumably only waited for on SUCCESS (elided condition) — verify. */
5078 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5084 * ipr_bus_reset_done - Op done function for bus reset.
5085 * @ipr_cmd: ipr command struct
5087 * This function is the op done function for a bus reset
5092 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5094 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5095 struct ipr_resource_entry *res;
/* On SIS32, find the resource this reset targeted and tell the mid-layer
 * its bus was reset (SIS64 presumably handles this elsewhere). */
5098 if (!ioa_cfg->sis64)
5099 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5100 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5101 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5107 * If abort has not completed, indicate the reset has, else call the
5108 * abort's done function to wake the sleeping eh thread
5110 if (ipr_cmd->sibling->sibling)
5111 ipr_cmd->sibling->sibling = NULL;
5113 ipr_cmd->sibling->done(ipr_cmd->sibling);
5115 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5120 * ipr_abort_timeout - An abort task has timed out
5121 * @ipr_cmd: ipr command struct
5123 * This function handles when an abort task times out. If this
5124 * happens we issue a bus reset since we have resources tied
5125 * up that must be freed before returning to the midlayer.
5130 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5132 struct ipr_cmnd *reset_cmd;
5133 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5134 struct ipr_cmd_pkt *cmd_pkt;
5135 unsigned long lock_flags = 0;
5138 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Bail out if the abort already completed or a full adapter reset is in
 * progress — nothing more for this timer to do. */
5139 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5140 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5144 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
/* Cross-link the abort and the reset via ->sibling so whichever finishes
 * second can wake the sleeping EH thread (see ipr_bus_reset_done). */
5145 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5146 ipr_cmd->sibling = reset_cmd;
5147 reset_cmd->sibling = ipr_cmd;
5148 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5149 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5150 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5151 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5152 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5154 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5155 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5160 * ipr_cancel_op - Cancel specified op
5161 * @scsi_cmd: scsi command struct
5163 * This function cancels specified op.
/* Called with the host lock held (see ipr_eh_abort wrapper). */
5168 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5170 struct ipr_cmnd *ipr_cmd;
5171 struct ipr_ioa_cfg *ioa_cfg;
5172 struct ipr_resource_entry *res;
5173 struct ipr_cmd_pkt *cmd_pkt;
5176 struct ipr_hrr_queue *hrrq;
5179 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5180 res = scsi_cmd->device->hostdata;
5182 /* If we are currently going through reset/reload, return failed.
5183 * This will force the mid-layer to call ipr_eh_host_reset,
5184 * which will then go to sleep and wait for the reset to complete
5186 if (ioa_cfg->in_reset_reload ||
5187 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5193 * If we are aborting a timed out op, chances are that the timeout was caused
5194 * by a still not detected EEH error. In such cases, reading a register will
5195 * trigger the EEH recovery infrastructure.
5197 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* Cancel-all is only meaningful for generic SCSI resources. */
5199 if (!ipr_is_gscsi(res))
/* If the command is still pending, retarget its completion to the EH
 * done handler before sending the cancel. */
5202 for_each_hrrq(hrrq, ioa_cfg) {
5203 spin_lock(&hrrq->_lock);
5204 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5205 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5206 ipr_cmd->done = ipr_scsi_eh_done;
5211 spin_unlock(&hrrq->_lock);
/* Issue a blocking CANCEL ALL REQUESTS for this resource; the abort
 * timeout handler escalates to a bus reset if it stalls. */
5217 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5218 ipr_cmd->ioarcb.res_handle = res->res_handle;
5219 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5220 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5221 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5222 ipr_cmd->u.sdev = scsi_cmd->device;
5224 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5226 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5227 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5230 * If the abort task timed out and we sent a bus reset, we will get
5231 * one the following responses to the abort
5233 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5238 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5239 if (!ipr_is_naca_model(res))
5240 res->needs_sync_complete = 1;
5243 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5247 * ipr_eh_abort - Abort a single op
5248 * @scsi_cmd: scsi command struct
/* SCSI EH abort entry point: run ipr_cancel_op under the host lock, then
 * wait for ops bound to the device to drain. */
5253 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5255 unsigned long flags;
5257 struct ipr_ioa_cfg *ioa_cfg;
5261 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5263 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5264 rc = ipr_cancel_op(scsi_cmd);
5265 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5268 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5274 * ipr_handle_other_interrupt - Handle "other" interrupts
5275 * @ioa_cfg: ioa config struct
5276 * @int_reg: interrupt register
5279 * IRQ_NONE / IRQ_HANDLED
5281 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5284 irqreturn_t rc = IRQ_HANDLED;
/* Drop bits that are currently masked before interpreting the status. */
5287 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5288 int_reg &= ~int_mask_reg;
5290 /* If an interrupt on the adapter did not occur, ignore it.
5291 * Or in the case of SIS 64, check for a stage change interrupt.
5293 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5294 if (ioa_cfg->sis64) {
5295 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5296 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
/* SIS64 IPL stage change: ack it and advance the reset job. */
5297 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5299 /* clear stage change */
5300 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5301 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5302 list_del(&ioa_cfg->reset_cmd->queue);
5303 del_timer(&ioa_cfg->reset_cmd->timer);
5304 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
/* Adapter transitioned to operational: mask+ack the interrupt and let
 * the in-flight reset job continue. */
5312 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5313 /* Mask the interrupt */
5314 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5316 /* Clear the interrupt */
5317 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5318 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5320 list_del(&ioa_cfg->reset_cmd->queue);
5321 del_timer(&ioa_cfg->reset_cmd->timer);
5322 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
/* Lone HRRQ-updated bit with nothing queued: spurious; optionally ack. */
5323 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5324 if (ioa_cfg->clear_isr) {
5325 if (ipr_debug && printk_ratelimit())
5326 dev_err(&ioa_cfg->pdev->dev,
5327 "Spurious interrupt detected. 0x%08X\n", int_reg);
5328 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5329 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
/* Fatal conditions: record/log, capture a dump if one was requested,
 * then mask everything and reset the adapter. */
5333 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5334 ioa_cfg->ioa_unit_checked = 1;
5335 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5336 dev_err(&ioa_cfg->pdev->dev,
5337 "No Host RRQ. 0x%08X\n", int_reg);
5339 dev_err(&ioa_cfg->pdev->dev,
5340 "Permanent IOA failure. 0x%08X\n", int_reg);
5342 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5343 ioa_cfg->sdt_state = GET_DUMP;
5345 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5346 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5353 * ipr_isr_eh - Interrupt service routine error handler
5354 * @ioa_cfg: ioa config struct
5355 * @msg: message to log
/* Logs the error, bumps the error counter, arms a dump capture if one
 * was pending, and resets the adapter without a shutdown. */
5360 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5362 ioa_cfg->errors_logged++;
5363 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5365 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5366 ioa_cfg->sdt_state = GET_DUMP;
5368 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/* Drain completed responses from one host request/response queue onto
 * @doneq, up to @budget entries (budget <= 0 means unlimited). Caller
 * presumably holds the hrrq lock — TODO confirm against callers. */
5371 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5372 struct list_head *doneq)
5376 struct ipr_cmnd *ipr_cmd;
5377 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5380 /* If interrupts are disabled, ignore the interrupt */
5381 if (!hrr_queue->allow_interrupts)
/* Each HRRQ entry is owned by the host while its toggle bit matches
 * ours; the bit flips each time the queue wraps. */
5384 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5385 hrr_queue->toggle_bit) {
5387 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5388 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5389 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
/* A handle outside this queue's window means the IOA is confused —
 * escalate (elided call, presumably ipr_isr_eh) rather than index OOB. */
5391 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5392 cmd_index < hrr_queue->min_cmd_id)) {
5394 "Invalid response handle from IOA: ",
5399 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5400 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5402 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5404 list_move_tail(&ipr_cmd->queue, doneq);
/* Advance, wrapping to the start and flipping our toggle expectation. */
5406 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5407 hrr_queue->hrrq_curr++;
5409 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5410 hrr_queue->toggle_bit ^= 1u;
5413 if (budget > 0 && num_hrrq >= budget)
/* blk_iopoll poll callback: drain up to @budget completions from this
 * queue under its lock, then run the completion callbacks lock-free. */
5420 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5422 struct ipr_ioa_cfg *ioa_cfg;
5423 struct ipr_hrr_queue *hrrq;
5424 struct ipr_cmnd *ipr_cmd, *temp;
5425 unsigned long hrrq_flags;
5429 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5430 ioa_cfg = hrrq->ioa_cfg;
5432 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5433 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
/* Fewer completions than budget means the queue is drained — re-enable
 * interrupt-driven mode for it. */
5435 if (completed_ops < budget)
5436 blk_iopoll_complete(iop);
5437 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5439 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5440 list_del(&ipr_cmd->queue);
5441 del_timer(&ipr_cmd->timer);
5442 ipr_cmd->fast_done(ipr_cmd);
5445 return completed_ops;
5449 * ipr_isr - Interrupt service routine
5451 * @devp: pointer to ioa config struct
5454 * IRQ_NONE / IRQ_HANDLED
5456 static irqreturn_t ipr_isr(int irq, void *devp)
5458 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5459 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5460 unsigned long hrrq_flags = 0;
5464 struct ipr_cmnd *ipr_cmd, *temp;
5465 irqreturn_t rc = IRQ_NONE;
5468 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5469 /* If interrupts are disabled, ignore the interrupt */
5470 if (!hrrq->allow_interrupts) {
5471 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Main loop (outer braces elided in this extract): drain the HRRQ, then
 * ack the PCI-level HRRQ interrupt if this adapter needs explicit acks. */
5476 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5479 if (!ioa_cfg->clear_isr)
5482 /* Clear the PCI interrupt */
/* Re-read after each ack; bounded retry in case the bit re-asserts. */
5485 writel(IPR_PCII_HRRQ_UPDATED,
5486 ioa_cfg->regs.clr_interrupt_reg32);
5487 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5488 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5489 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5491 } else if (rc == IRQ_NONE && irq_none == 0) {
5492 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
/* Could not quiesce the HRRQ bit within the retry budget — escalate
 * (elided call, presumably ipr_isr_eh). */
5494 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5495 int_reg & IPR_PCII_HRRQ_UPDATED) {
5497 "Error clearing HRRQ: ", num_hrrq);
/* Anything that was not an HRRQ completion goes through the "other"
 * interrupt decoder (stage change, unit check, etc.). */
5504 if (unlikely(rc == IRQ_NONE))
5505 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5507 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Run completion callbacks outside the queue lock. */
5508 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5509 list_del(&ipr_cmd->queue);
5510 del_timer(&ipr_cmd->timer);
5511 ipr_cmd->fast_done(ipr_cmd);
5517 * ipr_isr_mhrrq - Interrupt service routine
5519 * @devp: pointer to ioa config struct
5522 * IRQ_NONE / IRQ_HANDLED
/* ISR for the secondary (multi-vector) HRRQs: either hand off to
 * blk_iopoll or drain the queue inline. */
5524 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5526 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5527 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5528 unsigned long hrrq_flags = 0;
5529 struct ipr_cmnd *ipr_cmd, *temp;
5530 irqreturn_t rc = IRQ_NONE;
5533 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5535 /* If interrupts are disabled, ignore the interrupt */
5536 if (!hrrq->allow_interrupts) {
5537 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* iopoll mode (SIS64, multiple vectors, nonzero weight): if a response
 * is ready, schedule the poller instead of draining here. */
5541 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
5542 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5543 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5545 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5546 blk_iopoll_sched(&hrrq->iopoll);
5547 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Non-iopoll mode: drain inline if the toggle bit shows work pending. */
5551 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5554 if (ipr_process_hrrq(hrrq, -1, &doneq))
5558 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5560 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5561 list_del(&ipr_cmd->queue);
5562 del_timer(&ipr_cmd->timer);
5563 ipr_cmd->fast_done(ipr_cmd);
5569 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5570 * @ioa_cfg: ioa config struct
5571 * @ipr_cmd: ipr command struct
5574 * 0 on success / -1 on failure
5576 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5577 struct ipr_cmnd *ipr_cmd)
5580 struct scatterlist *sg;
5582 u32 ioadl_flags = 0;
5583 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5584 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5585 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5587 length = scsi_bufflen(scsi_cmd);
/* DMA-map the mid-layer scatterlist; nseg <= 0 handling is elided. */
5591 nseg = scsi_dma_map(scsi_cmd);
5593 if (printk_ratelimit())
5594 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5598 ipr_cmd->dma_use_sg = nseg;
5600 ioarcb->data_transfer_length = cpu_to_be32(length);
5602 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
/* Direction selects the descriptor flags and, for writes, the hint bit
 * in the command packet. */
5604 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5605 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5606 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5607 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5608 ioadl_flags = IPR_IOADL_FLAGS_READ;
/* One 64-bit descriptor per SG element; the last one is tagged LAST. */
5610 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5611 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5612 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5613 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5616 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5621 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5622 * @ioa_cfg: ioa config struct
5623 * @ipr_cmd: ipr command struct
5626 * 0 on success / -1 on failure
/* 32-bit (SIS32) counterpart of ipr_build_ioadl64. */
5628 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5629 struct ipr_cmnd *ipr_cmd)
5632 struct scatterlist *sg;
5634 u32 ioadl_flags = 0;
5635 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5636 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5637 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5639 length = scsi_bufflen(scsi_cmd);
5643 nseg = scsi_dma_map(scsi_cmd);
5645 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5649 ipr_cmd->dma_use_sg = nseg;
/* SIS32 keeps separate write vs read length/ioadl_len fields. */
5651 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5652 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5653 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5654 ioarcb->data_transfer_length = cpu_to_be32(length);
5656 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5657 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5658 ioadl_flags = IPR_IOADL_FLAGS_READ;
5659 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5660 ioarcb->read_ioadl_len =
5661 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* Small SG lists fit inline in the IOARCB add_data area, saving the
 * adapter a fetch from the external ioadl buffer. */
5664 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5665 ioadl = ioarcb->u.add_data.u.ioadl;
5666 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5667 offsetof(struct ipr_ioarcb, u.add_data));
5668 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5671 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5672 ioadl[i].flags_and_data_len =
5673 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5674 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5677 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5682 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5683 * @scsi_cmd: scsi command struct
/* Maps the mid-layer's queue-tag message to the IPR low-flags task
 * attribute; untagged is the default. */
5688 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5691 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5693 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5695 case MSG_SIMPLE_TAG:
5696 rc = IPR_FLAGS_LO_SIMPLE_TASK;
/* HEAD_OF_QUEUE case label elided in this extract. */
5699 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5701 case MSG_ORDERED_TAG:
5702 rc = IPR_FLAGS_LO_ORDERED_TASK;
5711 * ipr_erp_done - Process completion of ERP for a device
5712 * @ipr_cmd: ipr command struct
5714 * This function copies the sense buffer into the scsi_cmd
5715 * struct and pushes the scsi_done function.
5720 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5722 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5723 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5724 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* The request-sense itself failed: surface DID_ERROR; otherwise hand
 * the sense data we collected back to the mid-layer. */
5726 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5727 scsi_cmd->result |= (DID_ERROR << 16);
5728 scmd_printk(KERN_ERR, scsi_cmd,
5729 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5731 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5732 SCSI_SENSE_BUFFERSIZE);
5736 if (!ipr_is_naca_model(res))
5737 res->needs_sync_complete = 1;
/* Unmap, recycle the command block, and complete the SCSI command. */
5740 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5741 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5742 scsi_cmd->scsi_done(scsi_cmd);
5746 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5747 * @ipr_cmd: ipr command struct
/* Scrubs the command packet and IOASA so the block can be reissued for
 * error recovery, then restores the DMA addresses of its ioadl area. */
5752 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5754 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5755 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5756 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5758 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5759 ioarcb->data_transfer_length = 0;
5760 ioarcb->read_data_transfer_length = 0;
5761 ioarcb->ioadl_len = 0;
5762 ioarcb->read_ioadl_len = 0;
5763 ioasa->hdr.ioasc = 0;
5764 ioasa->hdr.residual_data_len = 0;
/* SIS64 and SIS32 stash the ioadl DMA address in different fields. */
5766 if (ipr_cmd->ioa_cfg->sis64)
5767 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5768 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5770 ioarcb->write_ioadl_addr =
5771 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5772 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5777 * ipr_erp_request_sense - Send request sense to a device
5778 * @ipr_cmd: ipr command struct
5780 * This function sends a request sense to a device as a result
5781 * of a check condition.
5786 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5788 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5789 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* Prior ERP step (e.g. cancel-all) failed — finish the command now
 * instead of piling another request on a broken device. */
5791 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5792 ipr_erp_done(ipr_cmd);
5796 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* Build a REQUEST SENSE CDB reading into the command's own sense
 * buffer; SYNC_OVERRIDE/NO_ULEN_CHK relax adapter-side checking. */
5798 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5799 cmd_pkt->cdb[0] = REQUEST_SENSE;
5800 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5801 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5802 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5803 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5805 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5806 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5808 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5809 IPR_REQUEST_SENSE_TIMEOUT * 2);
5813 * ipr_erp_cancel_all - Send cancel all to a device
5814 * @ipr_cmd: ipr command struct
5816 * This function sends a cancel all to a device to clear the
5817 * queue. If we are running TCQ on the device, QERR is set to 1,
5818 * which means all outstanding ops have been dropped on the floor.
5819 * Cancel all will return them to us.
5824 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5826 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5827 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5828 struct ipr_cmd_pkt *cmd_pkt;
5832 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* Untagged devices have no queue to flush — go straight to sense. */
5834 if (!scsi_get_tag_type(scsi_cmd->device)) {
5835 ipr_erp_request_sense(ipr_cmd);
5839 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5840 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5841 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
/* Chain: cancel-all completes into ipr_erp_request_sense. */
5843 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5844 IPR_CANCEL_ALL_TIMEOUT);
5848 * ipr_dump_ioasa - Dump contents of IOASA
5849 * @ioa_cfg: ioa config struct
5850 * @ipr_cmd: ipr command struct
5851 * @res: resource entry struct
5853 * This function is invoked by the interrupt handler when ops
5854 * fail. It will log the IOASA if appropriate. Only called
5860 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5861 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5865 u32 ioasc, fd_ioasc;
5866 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5867 __be32 *ioasa_data = (__be32 *)ioasa;
5870 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5871 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5876 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
/* Prefer the failing-device IOASC for bus-reset cases — it points at
 * the root cause rather than the collateral reset. */
5879 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5880 error_index = ipr_get_error(fd_ioasc);
5882 error_index = ipr_get_error(ioasc);
/* At sub-max log level, suppress duplicates and low-value entries. */
5884 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5885 /* Don't log an error if the IOA already logged one */
5886 if (ioasa->hdr.ilid != 0)
5889 if (!ipr_is_gscsi(res))
5892 if (ipr_error_table[error_index].log_ioasa == 0)
5896 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
/* Clamp the dump length to the IOASA struct for this adapter gen. */
5898 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5899 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5900 data_len = sizeof(struct ipr_ioasa64);
5901 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5902 data_len = sizeof(struct ipr_ioasa);
5904 ipr_err("IOASA Dump:\n");
/* Hex-dump four big-endian words per line. */
5906 for (i = 0; i < data_len / 4; i += 4) {
5907 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5908 be32_to_cpu(ioasa_data[i]),
5909 be32_to_cpu(ioasa_data[i+1]),
5910 be32_to_cpu(ioasa_data[i+2]),
5911 be32_to_cpu(ioasa_data[i+3]));
5916 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5918 * @sense_buf: sense data buffer
5923 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5926 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5927 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5928 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5929 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5931 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* Driver-generated IOASCs carry no device sense — nothing to build. */
5933 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5936 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
/* Vset medium errors with a >32-bit failing LBA need descriptor-format
 * sense (0x72) with an information descriptor holding the 64-bit LBA. */
5938 if (ipr_is_vset_device(res) &&
5939 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5940 ioasa->u.vset.failing_lba_hi != 0) {
5941 sense_buf[0] = 0x72;
5942 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5943 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5944 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5948 sense_buf[9] = 0x0A;
5949 sense_buf[10] = 0x80;
5951 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
/* High 32 bits of the LBA, big-endian byte order. */
5953 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5954 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5955 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5956 sense_buf[15] = failing_lba & 0x000000ff;
5958 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5960 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5961 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5962 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5963 sense_buf[19] = failing_lba & 0x000000ff;
/* Otherwise: fixed-format sense (0x70). */
5965 sense_buf[0] = 0x70;
5966 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5967 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5968 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5970 /* Illegal request */
5971 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5972 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID) {
5973 sense_buf[7] = 10; /* additional length */
5975 /* IOARCB was in error */
5976 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5977 sense_buf[15] = 0xC0;
5978 else /* Parameter data was invalid */
5979 sense_buf[15] = 0x80;
/* Field pointer bytes from the ioasc_specific word. */
5982 ((IPR_FIELD_POINTER_MASK &
5983 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5985 (IPR_FIELD_POINTER_MASK &
5986 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
/* Medium error: embed the 32-bit failing LBA in the info field and set
 * the Valid bit. */
5988 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5989 if (ipr_is_vset_device(res))
5990 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5992 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5994 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5995 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5996 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5997 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5998 sense_buf[6] = failing_lba & 0x000000ff;
6001 sense_buf[7] = 6; /* additional length */
6007 * ipr_get_autosense - Copy autosense data to sense buffer
6008 * @ipr_cmd: ipr command struct
6010 * This function copies the autosense buffer to the buffer
6011 * in the scsi_cmd, if there is autosense available.
6014 * 1 if autosense was available / 0 if not
6016 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6018 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6019 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
/* Bail out early if the adapter did not supply autosense data */
6021 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
/*
 * Copy from the 64-bit or 32-bit IOASA layout depending on adapter
 * generation; in both cases the copy length is clamped to
 * SCSI_SENSE_BUFFERSIZE so the midlayer buffer cannot overflow.
 */
6024 if (ipr_cmd->ioa_cfg->sis64)
6025 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6026 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6027 SCSI_SENSE_BUFFERSIZE));
6029 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6030 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6031 SCSI_SENSE_BUFFERSIZE));
6036 * ipr_erp_start - Process an error response for a SCSI op
6037 * @ioa_cfg: ioa config struct
6038 * @ipr_cmd: ipr command struct
6040 * This function determines whether or not to initiate ERP
6041 * on the affected device.
6046 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6047 struct ipr_cmnd *ipr_cmd)
6049 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6050 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6051 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* Mask off qualifier bits so related IOASCs hit the same switch case */
6052 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6055 ipr_scsi_eh_done(ipr_cmd);
/* Non-GSCSI devices get driver-synthesized sense unless the IOASC
 * already carries device bus status (handled in the switch below). */
6059 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6060 ipr_gen_sense(ipr_cmd);
6062 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
/* Map the adapter's IOASC onto a midlayer result / recovery action */
6064 switch (masked_ioasc) {
6065 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6066 if (ipr_is_naca_model(res))
6067 scsi_cmd->result |= (DID_ABORT << 16);
6069 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6071 case IPR_IOASC_IR_RESOURCE_HANDLE:
6072 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6073 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6075 case IPR_IOASC_HW_SEL_TIMEOUT:
6076 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6077 if (!ipr_is_naca_model(res))
6078 res->needs_sync_complete = 1;
6080 case IPR_IOASC_SYNC_REQUIRED:
6082 res->needs_sync_complete = 1;
6083 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6085 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6086 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6087 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6089 case IPR_IOASC_BUS_WAS_RESET:
6090 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6092 * Report the bus reset and ask for a retry. The device
6093 * will give CC/UA the next command.
6095 if (!res->resetting_device)
6096 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6097 scsi_cmd->result |= (DID_ERROR << 16);
6098 if (!ipr_is_naca_model(res))
6099 res->needs_sync_complete = 1;
6101 case IPR_IOASC_HW_DEV_BUS_STATUS:
/* Pass the device's own SCSI status through to the midlayer */
6102 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6103 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
/* No autosense available: start ERP by cancelling outstanding ops */
6104 if (!ipr_get_autosense(ipr_cmd)) {
6105 if (!ipr_is_naca_model(res)) {
6106 ipr_erp_cancel_all(ipr_cmd);
6111 if (!ipr_is_naca_model(res))
6112 res->needs_sync_complete = 1;
6114 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
/* Default handling: only sense keys above RECOVERED_ERROR are errors */
6117 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6118 scsi_cmd->result |= (DID_ERROR << 16);
6119 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6120 res->needs_sync_complete = 1;
/* Release DMA mappings, return the command block, complete the op */
6124 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6125 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6126 scsi_cmd->scsi_done(scsi_cmd);
6130 * ipr_scsi_done - mid-layer done function
6131 * @ipr_cmd: ipr command struct
6133 * This function is invoked by the interrupt handler for
6134 * ops generated by the SCSI mid-layer
6139 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6141 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6142 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6143 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6144 unsigned long hrrq_flags;
/* Propagate the adapter-reported residual to the midlayer */
6146 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
/* Fast path: no sense key means success — complete under the hrrq lock */
6148 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6149 scsi_dma_unmap(scsi_cmd);
6151 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6152 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6153 scsi_cmd->scsi_done(scsi_cmd);
6154 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
/* Error path: hand off to error-recovery processing */
6156 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6157 ipr_erp_start(ioa_cfg, ipr_cmd);
6158 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6163 * ipr_queuecommand - Queue a mid-layer request
6164 * @shost: scsi host struct
6165 * @scsi_cmd: scsi command struct
6167 * This function queues a request generated by the mid-layer.
6171 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6172 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6174 static int ipr_queuecommand(struct Scsi_Host *shost,
6175 struct scsi_cmnd *scsi_cmd)
6177 struct ipr_ioa_cfg *ioa_cfg;
6178 struct ipr_resource_entry *res;
6179 struct ipr_ioarcb *ioarcb;
6180 struct ipr_cmnd *ipr_cmd;
6181 unsigned long hrrq_flags, lock_flags;
6183 struct ipr_hrr_queue *hrrq;
6186 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6188 scsi_cmd->result = (DID_OK << 16);
6189 res = scsi_cmd->device->hostdata;
/* SATA devices are routed through libata instead of the SCSI path */
6191 if (ipr_is_gata(res) && res->sata_port) {
6192 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6193 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6194 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Pick a host response queue; all further state lives under its lock */
6198 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6199 hrrq = &ioa_cfg->hrrq[hrrq_id];
6201 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6203 * We are currently blocking all devices due to a host reset
6204 * We have told the host to stop giving us new requests, but
6205 * ERP ops don't count. FIXME
6207 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6208 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6209 return SCSI_MLQUEUE_HOST_BUSY;
6213 * FIXME - Create scsi_set_host_offline interface
6214 * and the ioa_is_dead check can be removed
6216 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6217 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Grab a free command block; push back to the midlayer if exhausted */
6221 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6222 if (ipr_cmd == NULL) {
6223 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6224 return SCSI_MLQUEUE_HOST_BUSY;
6226 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Build the IOARCB (adapter request block) from the SCSI command */
6228 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6229 ioarcb = &ipr_cmd->ioarcb;
6231 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6232 ipr_cmd->scsi_cmd = scsi_cmd;
6233 ipr_cmd->done = ipr_scsi_eh_done;
6235 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6236 if (scsi_cmd->underflow == 0)
6237 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6239 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6240 if (ipr_is_gscsi(res))
6241 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6242 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6243 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
/* Vendor-specific CDB opcodes (>= 0xC0) are sent as IOA commands */
6246 if (scsi_cmd->cmnd[0] >= 0xC0 &&
6247 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6248 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
/* Build the scatter/gather list in the format this adapter expects */
6252 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6254 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
/* Re-check queue state under the lock before committing the command */
6256 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6257 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6258 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6259 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6261 scsi_dma_unmap(scsi_cmd);
6262 return SCSI_MLQUEUE_HOST_BUSY;
6265 if (unlikely(hrrq->ioa_is_dead)) {
6266 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6267 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6268 scsi_dma_unmap(scsi_cmd);
6272 ioarcb->res_handle = res->res_handle;
/* Consume a pending sync-complete request for this resource */
6273 if (res->needs_sync_complete) {
6274 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6275 res->needs_sync_complete = 0;
6277 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6278 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6279 ipr_send_command(ipr_cmd);
6280 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Error exit: fail the command back to the midlayer as no-connect */
6284 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6285 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6286 scsi_cmd->result = (DID_NO_CONNECT << 16);
6287 scsi_cmd->scsi_done(scsi_cmd);
6288 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6293 * ipr_ioctl - IOCTL handler
6294 * @sdev: scsi device struct
6299 * 0 on success / other on failure
6301 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6303 struct ipr_resource_entry *res;
6305 res = (struct ipr_resource_entry *)sdev->hostdata;
/* SATA devices: forward ioctls to libata (HDIO_GET_IDENTITY excluded) */
6306 if (res && ipr_is_gata(res)) {
6307 if (cmd == HDIO_GET_IDENTITY)
6309 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6316 * ipr_info - Get information about the card/driver
6317 * @scsi_host: scsi host struct
6320 * pointer to buffer with description string
6322 static const char *ipr_ioa_info(struct Scsi_Host *host)
/* NOTE(review): static buffer is shared across all hosts; it is only
 * written under host_lock, but the returned pointer may be read after
 * the lock is dropped — existing behavior, kept as-is. */
6324 static char buffer[512];
6325 struct ipr_ioa_cfg *ioa_cfg;
6326 unsigned long lock_flags = 0;
6328 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6330 spin_lock_irqsave(host->host_lock, lock_flags);
6331 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6332 spin_unlock_irqrestore(host->host_lock, lock_flags);
/* SCSI midlayer host template: wires the ipr entry points (queueing,
 * error handling, device lifecycle, sysfs attributes) into the stack. */
6337 static struct scsi_host_template driver_template = {
6338 .module = THIS_MODULE,
6340 .info = ipr_ioa_info,
6342 .queuecommand = ipr_queuecommand,
6343 .eh_abort_handler = ipr_eh_abort,
6344 .eh_device_reset_handler = ipr_eh_dev_reset,
6345 .eh_host_reset_handler = ipr_eh_host_reset,
6346 .slave_alloc = ipr_slave_alloc,
6347 .slave_configure = ipr_slave_configure,
6348 .slave_destroy = ipr_slave_destroy,
6349 .target_alloc = ipr_target_alloc,
6350 .target_destroy = ipr_target_destroy,
6351 .change_queue_depth = ipr_change_queue_depth,
6352 .change_queue_type = ipr_change_queue_type,
6353 .bios_param = ipr_biosparam,
6354 .can_queue = IPR_MAX_COMMANDS,
6356 .sg_tablesize = IPR_MAX_SGLIST,
6357 .max_sectors = IPR_IOA_MAX_SECTORS,
6358 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6359 .use_clustering = ENABLE_CLUSTERING,
6360 .shost_attrs = ipr_ioa_attrs,
6361 .sdev_attrs = ipr_dev_attrs,
6362 .proc_name = IPR_NAME,
6367 * ipr_ata_phy_reset - libata phy_reset handler
6368 * @ap: ata port to reset
6371 static void ipr_ata_phy_reset(struct ata_port *ap)
6373 unsigned long flags;
6374 struct ipr_sata_port *sata_port = ap->private_data;
6375 struct ipr_resource_entry *res = sata_port->res;
6376 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
/* Wait out any in-progress adapter reset, re-taking the host lock
 * each time since wait_event sleeps */
6380 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6381 while (ioa_cfg->in_reset_reload) {
6382 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6383 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6384 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6387 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6390 rc = ipr_device_reset(ioa_cfg, res);
/* Failed reset: mark the link device absent */
6393 ap->link.device[0].class = ATA_DEV_NONE;
/* Successful reset: adopt the class reported by the resource entry */
6397 ap->link.device[0].class = res->ata_class;
6398 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6399 ap->link.device[0].class = ATA_DEV_NONE;
6402 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6407 * ipr_ata_post_internal - Cleanup after an internal command
6408 * @qc: ATA queued command
6413 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6415 struct ipr_sata_port *sata_port = qc->ap->private_data;
6416 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6417 struct ipr_cmnd *ipr_cmd;
6418 struct ipr_hrr_queue *hrrq;
6419 unsigned long flags;
/* Wait for any in-progress adapter reset to finish first */
6421 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6422 while (ioa_cfg->in_reset_reload) {
6423 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6424 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6425 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* If the qc is still pending on any response queue, reset the device
 * so the outstanding internal command is flushed */
6428 for_each_hrrq(hrrq, ioa_cfg) {
6429 spin_lock(&hrrq->_lock);
6430 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6431 if (ipr_cmd->qc == qc) {
6432 ipr_device_reset(ioa_cfg, sata_port->res);
6436 spin_unlock(&hrrq->_lock);
6438 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6442 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6443 * @regs: destination
6444 * @tf: source ATA taskfile
/* Straight field-for-field copy of the libata taskfile (including the
 * HOB shadow registers for 48-bit LBA commands) into the IOA's
 * register image. */
6449 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6450 struct ata_taskfile *tf)
6452 regs->feature = tf->feature;
6453 regs->nsect = tf->nsect;
6454 regs->lbal = tf->lbal;
6455 regs->lbam = tf->lbam;
6456 regs->lbah = tf->lbah;
6457 regs->device = tf->device;
6458 regs->command = tf->command;
6459 regs->hob_feature = tf->hob_feature;
6460 regs->hob_nsect = tf->hob_nsect;
6461 regs->hob_lbal = tf->hob_lbal;
6462 regs->hob_lbam = tf->hob_lbam;
6463 regs->hob_lbah = tf->hob_lbah;
6464 regs->ctl = tf->ctl;
6468 * ipr_sata_done - done function for SATA commands
6469 * @ipr_cmd: ipr command struct
6471 * This function is invoked by the interrupt handler for
6472 * ops generated by the SCSI mid-layer to SATA devices
6477 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6479 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6480 struct ata_queued_cmd *qc = ipr_cmd->qc;
6481 struct ipr_sata_port *sata_port = qc->ap->private_data;
6482 struct ipr_resource_entry *res = sata_port->res;
6483 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6485 spin_lock(&ipr_cmd->hrrq->_lock);
/* Save the GATA status registers from the proper IOASA layout so
 * ipr_qc_fill_rtf() can reconstruct the result taskfile later */
6486 if (ipr_cmd->ioa_cfg->sis64)
6487 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6488 sizeof(struct ipr_ioasa_gata));
6490 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6491 sizeof(struct ipr_ioasa_gata));
6492 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6494 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6495 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
/* Only sense keys above RECOVERED_ERROR are treated as hard failures */
6497 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6498 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6500 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6501 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6502 spin_unlock(&ipr_cmd->hrrq->_lock);
6503 ata_qc_complete(qc);
6507 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6508 * @ipr_cmd: ipr command struct
6509 * @qc: ATA queued command
6512 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6513 struct ata_queued_cmd *qc)
6515 u32 ioadl_flags = 0;
6516 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6517 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6518 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6519 int len = qc->nbytes;
6520 struct scatterlist *sg;
6522 dma_addr_t dma_addr = ipr_cmd->dma_addr;
/* Direction determines the per-descriptor flags and write indication */
6527 if (qc->dma_dir == DMA_TO_DEVICE) {
6528 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6529 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6530 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6531 ioadl_flags = IPR_IOADL_FLAGS_READ;
6533 ioarcb->data_transfer_length = cpu_to_be32(len);
6535 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
/* Point the adapter at the 64-bit IOADL embedded in this ipr_cmnd */
6536 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6537 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
/* One descriptor per scatterlist element */
6539 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6540 ioadl64->flags = cpu_to_be32(ioadl_flags);
6541 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6542 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6544 last_ioadl64 = ioadl64;
/* Mark the final descriptor so the adapter knows where the list ends */
6548 if (likely(last_ioadl64))
6549 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6553 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6554 * @ipr_cmd: ipr command struct
6555 * @qc: ATA queued command
6558 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6559 struct ata_queued_cmd *qc)
6561 u32 ioadl_flags = 0;
6562 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6563 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6564 struct ipr_ioadl_desc *last_ioadl = NULL;
6565 int len = qc->nbytes;
6566 struct scatterlist *sg;
/* 32-bit IOADL: length fields differ per direction (write vs read) */
6572 if (qc->dma_dir == DMA_TO_DEVICE) {
6573 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6574 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6575 ioarcb->data_transfer_length = cpu_to_be32(len);
6577 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6578 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6579 ioadl_flags = IPR_IOADL_FLAGS_READ;
6580 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6581 ioarcb->read_ioadl_len =
6582 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* Flags and length share one 32-bit field in the 32-bit descriptor */
6585 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6586 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6587 ioadl->address = cpu_to_be32(sg_dma_address(sg));
/* Mark the final descriptor so the adapter knows where the list ends */
6593 if (likely(last_ioadl))
6594 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6598 * ipr_qc_defer - Get a free ipr_cmd
6599 * @qc: queued command
6604 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6606 struct ata_port *ap = qc->ap;
6607 struct ipr_sata_port *sata_port = ap->private_data;
6608 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6609 struct ipr_cmnd *ipr_cmd;
6610 struct ipr_hrr_queue *hrrq;
6613 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6614 hrrq = &ioa_cfg->hrrq[hrrq_id];
/* Pre-reserve a command block under the hrrq lock; stash it in
 * qc->lldd_task for ipr_qc_issue() to pick up */
6616 qc->lldd_task = NULL;
6617 spin_lock(&hrrq->_lock);
6618 if (unlikely(hrrq->ioa_is_dead)) {
6619 spin_unlock(&hrrq->_lock);
/* Adapter not accepting commands: ask libata to defer on the link */
6623 if (unlikely(!hrrq->allow_cmds)) {
6624 spin_unlock(&hrrq->_lock);
6625 return ATA_DEFER_LINK;
6628 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6629 if (ipr_cmd == NULL) {
6630 spin_unlock(&hrrq->_lock);
6631 return ATA_DEFER_LINK;
6634 qc->lldd_task = ipr_cmd;
6635 spin_unlock(&hrrq->_lock);
6640 * ipr_qc_issue - Issue a SATA qc to a device
6641 * @qc: queued command
6646 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6648 struct ata_port *ap = qc->ap;
6649 struct ipr_sata_port *sata_port = ap->private_data;
6650 struct ipr_resource_entry *res = sata_port->res;
6651 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6652 struct ipr_cmnd *ipr_cmd;
6653 struct ipr_ioarcb *ioarcb;
6654 struct ipr_ioarcb_ata_regs *regs;
/* The command block was reserved by ipr_qc_defer() in qc->lldd_task */
6656 if (qc->lldd_task == NULL)
6659 ipr_cmd = qc->lldd_task;
6660 if (ipr_cmd == NULL)
6661 return AC_ERR_SYSTEM;
6663 qc->lldd_task = NULL;
6664 spin_lock(&ipr_cmd->hrrq->_lock);
/* Queue state may have changed since defer; recheck under the lock */
6665 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6666 ipr_cmd->hrrq->ioa_is_dead)) {
6667 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6668 spin_unlock(&ipr_cmd->hrrq->_lock);
6669 return AC_ERR_SYSTEM;
6672 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6673 ioarcb = &ipr_cmd->ioarcb;
/* ATA register image lives in a different place on sis64 adapters */
6675 if (ioa_cfg->sis64) {
6676 regs = &ipr_cmd->i.ata_ioadl.regs;
6677 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6679 regs = &ioarcb->u.add_data.u.regs;
6681 memset(regs, 0, sizeof(*regs));
6682 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6684 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6686 ipr_cmd->done = ipr_sata_done;
6687 ipr_cmd->ioarcb.res_handle = res->res_handle;
6688 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6689 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6690 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6691 ipr_cmd->dma_use_sg = qc->n_elem;
6694 ipr_build_ata_ioadl64(ipr_cmd, qc);
6696 ipr_build_ata_ioadl(ipr_cmd, qc);
6698 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6699 ipr_copy_sata_tf(regs, &qc->tf);
6700 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6701 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
/* Translate the libata protocol into IOA transfer-type flags */
6703 switch (qc->tf.protocol) {
6704 case ATA_PROT_NODATA:
6709 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6712 case ATAPI_PROT_PIO:
6713 case ATAPI_PROT_NODATA:
6714 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6717 case ATAPI_PROT_DMA:
6718 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6719 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
/* Unsupported protocol: release the command block and reject */
6724 spin_unlock(&ipr_cmd->hrrq->_lock);
6725 return AC_ERR_INVALID;
6728 ipr_send_command(ipr_cmd);
6729 spin_unlock(&ipr_cmd->hrrq->_lock);
6735 * ipr_qc_fill_rtf - Read result TF
6736 * @qc: ATA queued command
/* Rebuild the libata result taskfile from the GATA register image that
 * ipr_sata_done() saved into sata_port->ioasa. */
6741 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6743 struct ipr_sata_port *sata_port = qc->ap->private_data;
6744 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6745 struct ata_taskfile *tf = &qc->result_tf;
6747 tf->feature = g->error;
6748 tf->nsect = g->nsect;
6752 tf->device = g->device;
6753 tf->command = g->status;
6754 tf->hob_nsect = g->hob_nsect;
6755 tf->hob_lbal = g->hob_lbal;
6756 tf->hob_lbam = g->hob_lbam;
6757 tf->hob_lbah = g->hob_lbah;
6758 tf->ctl = g->alt_status;
/* libata port operations: route ATA command lifecycle callbacks to the
 * ipr adapter's passthrough path. */
6763 static struct ata_port_operations ipr_sata_ops = {
6764 .phy_reset = ipr_ata_phy_reset,
6765 .hardreset = ipr_sata_reset,
6766 .post_internal_cmd = ipr_ata_post_internal,
6767 .qc_prep = ata_noop_qc_prep,
6768 .qc_defer = ipr_qc_defer,
6769 .qc_issue = ipr_qc_issue,
6770 .qc_fill_rtf = ipr_qc_fill_rtf,
6771 .port_start = ata_sas_port_start,
6772 .port_stop = ata_sas_port_stop
/* Port capabilities advertised to libata: SATA with PIO4, MWDMA2 and
 * up to UDMA6 transfer modes. */
6775 static struct ata_port_info sata_port_info = {
6776 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6777 .pio_mask = ATA_PIO4_ONLY,
6778 .mwdma_mask = ATA_MWDMA2,
6779 .udma_mask = ATA_UDMA6,
6780 .port_ops = &ipr_sata_ops
6783 #ifdef CONFIG_PPC_PSERIES
/* PVR values of processors on which early Gemstone adapters misbehave */
6784 static const u16 ipr_blocked_processors[] = {
6796 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6797 * @ioa_cfg: ioa cfg struct
6799 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6800 * certain pSeries hardware. This function determines if the given
6801 * adapter is in one of these confgurations or not.
6804 * 1 if adapter is not supported / 0 if adapter is supported
6806 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
/* Only type 0x5702 adapters with PCI revision < 4 are affected */
6810 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6811 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6812 if (pvr_version_is(ipr_blocked_processors[i]))
/* Non-pSeries builds: every adapter is considered supported */
6819 #define ipr_invalid_adapter(ioa_cfg) 0
6823 * ipr_ioa_bringdown_done - IOA bring down completion.
6824 * @ipr_cmd: ipr command struct
6826 * This function processes the completion of an adapter bring down.
6827 * It wakes any reset sleepers.
6832 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6834 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Unless the IOA is being removed, let the midlayer resume issuing
 * requests (drop host_lock across the unblock call) */
6838 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6840 spin_unlock_irq(ioa_cfg->host->host_lock);
6841 scsi_unblock_requests(ioa_cfg->host);
6842 spin_lock_irq(ioa_cfg->host->host_lock);
6845 ioa_cfg->in_reset_reload = 0;
6846 ioa_cfg->reset_retries = 0;
/* Mark every response queue dead so no further commands are accepted */
6847 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6848 spin_lock(&ioa_cfg->hrrq[i]._lock);
6849 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6850 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6854 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6855 wake_up_all(&ioa_cfg->reset_wait_q);
6858 return IPR_RC_JOB_RETURN;
6862 * ipr_ioa_reset_done - IOA reset completion.
6863 * @ipr_cmd: ipr command struct
6865 * This function processes the completion of an adapter reset.
6866 * It schedules any necessary mid-layer add/removes and
6867 * wakes any reset sleepers.
6872 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6874 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6875 struct ipr_resource_entry *res;
6876 struct ipr_hostrcb *hostrcb, *temp;
6880 ioa_cfg->in_reset_reload = 0;
/* Re-enable command acceptance on every response queue */
6881 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6882 spin_lock(&ioa_cfg->hrrq[j]._lock);
6883 ioa_cfg->hrrq[j].allow_cmds = 1;
6884 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6887 ioa_cfg->reset_cmd = NULL;
6888 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
/* Kick the worker thread if resources need midlayer add/remove */
6890 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6891 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6896 schedule_work(&ioa_cfg->work_q);
/* Re-arm host-controlled async message (HCAM) buffers: the first
 * IPR_NUM_LOG_HCAMS go to error logging, the rest to config change */
6898 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6899 list_del(&hostrcb->queue);
6900 if (i++ < IPR_NUM_LOG_HCAMS)
6901 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6903 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6906 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6907 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6909 ioa_cfg->reset_retries = 0;
6910 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6911 wake_up_all(&ioa_cfg->reset_wait_q);
/* Drop host_lock while unblocking the midlayer queue */
6913 spin_unlock(ioa_cfg->host->host_lock);
6914 scsi_unblock_requests(ioa_cfg->host);
6915 spin_lock(ioa_cfg->host->host_lock);
6917 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6918 scsi_block_requests(ioa_cfg->host);
6921 return IPR_RC_JOB_RETURN;
6925 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6926 * @supported_dev: supported device struct
6927 * @vpids: vendor product id struct
/* Zero the buffer, then fill in one record carrying the device's
 * vendor/product IDs for the Set Supported Devices IOA command. */
6932 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6933 struct ipr_std_inq_vpids *vpids)
6935 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6936 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6937 supported_dev->num_records = 1;
6938 supported_dev->data_length =
6939 cpu_to_be16(sizeof(struct ipr_supported_device));
6940 supported_dev->reserved = 0;
6944 * ipr_set_supported_devs - Send Set Supported Devices for a device
6945 * @ipr_cmd: ipr command struct
6947 * This function sends a Set Supported Devices to the adapter
6950 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6952 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6954 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6955 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6956 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6957 struct ipr_resource_entry *res = ipr_cmd->u.res;
/* Default next step once all disks are processed */
6959 ipr_cmd->job_step = ipr_ioa_reset_done;
/* Resume iterating used resources from where the previous invocation
 * left off (ipr_cmd->u.res tracks the cursor across calls) */
6961 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6962 if (!ipr_is_scsi_disk(res))
6965 ipr_cmd->u.res = res;
6966 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6968 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6969 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6970 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6972 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6973 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6974 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6975 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6977 ipr_init_ioadl(ipr_cmd,
6978 ioa_cfg->vpd_cbs_dma +
6979 offsetof(struct ipr_misc_cbs, supp_dev),
6980 sizeof(struct ipr_supported_device),
6981 IPR_IOADL_FLAGS_WRITE_LAST);
6983 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6984 IPR_SET_SUP_DEVICE_TIMEOUT);
/* On 32-bit SIS adapters, loop back here for the next disk */
6986 if (!ioa_cfg->sis64)
6987 ipr_cmd->job_step = ipr_set_supported_devs;
6989 return IPR_RC_JOB_RETURN;
6993 return IPR_RC_JOB_CONTINUE;
6997 * ipr_get_mode_page - Locate specified mode page
6998 * @mode_pages: mode page buffer
6999 * @page_code: page code to find
7000 * @len: minimum required length for mode page
7003 * pointer to mode page / NULL on failure
7005 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7006 u32 page_code, u32 len)
7008 struct ipr_mode_page_hdr *mode_hdr;
7012 if (!mode_pages || (mode_pages->hdr.length == 0))
/* Remaining page data = total length minus the mode parameter header
 * and the block descriptors that precede the pages */
7015 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7016 mode_hdr = (struct ipr_mode_page_hdr *)
7017 (mode_pages->data + mode_pages->hdr.block_desc_len);
/* Walk the page list, matching on code and requiring enough payload */
7020 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7021 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
/* Advance to the next page header */
7025 page_length = (sizeof(struct ipr_mode_page_hdr) +
7026 mode_hdr->page_length);
7027 length -= page_length;
7028 mode_hdr = (struct ipr_mode_page_hdr *)
7029 ((unsigned long)mode_hdr + page_length);
7036 * ipr_check_term_power - Check for term power errors
7037 * @ioa_cfg: ioa config struct
7038 * @mode_pages: IOAFP mode pages buffer
7040 * Check the IOAFP's mode page 28 for term power errors
7045 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7046 struct ipr_mode_pages *mode_pages)
7050 struct ipr_dev_bus_entry *bus;
7051 struct ipr_mode_page28 *mode_page;
7053 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7054 sizeof(struct ipr_mode_page28));
7056 entry_length = mode_page->entry_length;
7058 bus = mode_page->bus;
/* Scan each bus entry and report any bus lacking termination power */
7060 for (i = 0; i < mode_page->num_entries; i++) {
7061 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7062 dev_err(&ioa_cfg->pdev->dev,
7063 "Term power is absent on scsi bus %d\n",
/* Entries are variable-sized: step by the reported entry_length */
7067 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7072 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7073 * @ioa_cfg: ioa config struct
7075 * Looks through the config table checking for SES devices. If
7076 * the SES device is in the SES table indicating a maximum SCSI
7077 * bus speed, the speed is limited for the bus.
7082 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
/* Clamp each bus's configured max transfer rate to the SES limit */
7087 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7088 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7089 ioa_cfg->bus_attr[i].bus_width);
7091 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7092 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7097 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7098 * @ioa_cfg: ioa config struct
7099 * @mode_pages: mode page 28 buffer
7101 * Updates mode page 28 based on driver configuration
7106 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7107 struct ipr_mode_pages *mode_pages)
7109 int i, entry_length;
7110 struct ipr_dev_bus_entry *bus;
7111 struct ipr_bus_attributes *bus_attr;
7112 struct ipr_mode_page28 *mode_page;
7114 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7115 sizeof(struct ipr_mode_page28));
7117 entry_length = mode_page->entry_length;
7119 /* Loop for each device bus entry */
7120 for (i = 0, bus = mode_page->bus;
7121 i < mode_page->num_entries;
7122 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
/* Sanity-check the bus number reported by the adapter */
7123 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7124 dev_err(&ioa_cfg->pdev->dev,
7125 "Invalid resource address reported: 0x%08X\n",
7126 IPR_GET_PHYS_LOC(bus->res_addr));
/* Push the driver's bus attributes into the mode page entry */
7130 bus_attr = &ioa_cfg->bus_attr[i];
7131 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7132 bus->bus_width = bus_attr->bus_width;
7133 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7134 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7135 if (bus_attr->qas_enabled)
7136 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7138 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7143 * ipr_build_mode_select - Build a mode select command
7144 * @ipr_cmd: ipr command struct
7145 * @res_handle: resource handle to send command to
7146 * @parm: Byte 2 of Mode Sense command
7147 * @dma_addr: DMA buffer address
7148 * @xfer_len: data transfer length
7153 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7154 __be32 res_handle, u8 parm,
7155 dma_addr_t dma_addr, u8 xfer_len)
7157 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
/* Fill in a 6-byte MODE SELECT CDB targeted at res_handle */
7159 ioarcb->res_handle = res_handle;
7160 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7161 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7162 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7163 ioarcb->cmd_pkt.cdb[1] = parm;
7164 ioarcb->cmd_pkt.cdb[4] = xfer_len;
/* Single-descriptor IOADL covering the mode parameter buffer */
7166 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7170 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7171 * @ipr_cmd: ipr command struct
7173 * This function sets up the SCSI bus attributes and sends
7174 * a Mode Select for Page 28 to activate them.
7179 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7181 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7182 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
/* Patch the sensed page 28 data in place before selecting it back */
7186 ipr_scsi_bus_speed_limit(ioa_cfg);
7187 ipr_check_term_power(ioa_cfg, mode_pages);
7188 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
/* Total transfer includes the header length byte itself; the mode data
 * length field must be zero on a MODE SELECT */
7189 length = mode_pages->hdr.length + 1;
7190 mode_pages->hdr.length = 0;
7192 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7193 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7196 ipr_cmd->job_step = ipr_set_supported_devs;
7197 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7198 struct ipr_resource_entry, queue);
7199 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7202 return IPR_RC_JOB_RETURN;
7206 * ipr_build_mode_sense - Builds a mode sense command
7207 * @ipr_cmd: ipr command struct
7208 * @res: resource entry struct
7209 * @parm: Byte 2 of mode sense command
7210 * @dma_addr: DMA address of mode sense buffer
7211 * @xfer_len: Size of DMA buffer
7216 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7218 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7220 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7222 ioarcb->res_handle = res_handle;
7223 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7224 ioarcb->cmd_pkt.cdb[2] = parm;
7225 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7226 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
/* MODE SENSE reads data from the adapter into the DMA buffer */
7228 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7232 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7233 * @ipr_cmd: ipr command struct
7235 * This function handles the failure of an IOA bringup command.
7240 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7242 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7243 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7245 dev_err(&ioa_cfg->pdev->dev,
7246 "0x%02X failed with IOASC: 0x%08X\n",
7247 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
/* Kick off a fresh adapter reset and retire this command to the free queue */
7249 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7250 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7251 return IPR_RC_JOB_RETURN;
7255 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7256 * @ipr_cmd: ipr command struct
7258 * This function handles the failure of a Mode Sense to the IOAFP.
7259 * Some adapters do not handle all mode pages.
7262 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7264 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7266 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7267 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* "Invalid request" just means this adapter lacks page 28 support:
 * skip the mode select step and continue the bringup sequence */
7269 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7270 ipr_cmd->job_step = ipr_set_supported_devs;
7271 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7272 struct ipr_resource_entry, queue);
7273 return IPR_RC_JOB_CONTINUE;
/* Anything else is a genuine failure; fall back to the generic handler */
7276 return ipr_reset_cmd_failed(ipr_cmd);
7280 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7281 * @ipr_cmd: ipr command struct
7283 * This function send a Page 28 mode sense to the IOA to
7284 * retrieve SCSI bus attributes.
7289 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7291 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7294 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7295 0x28, ioa_cfg->vpd_cbs_dma +
7296 offsetof(struct ipr_misc_cbs, mode_pages),
7297 sizeof(struct ipr_mode_pages));
/* On success continue with mode select; on failure let the dedicated
 * handler decide whether the page is simply unsupported */
7299 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7300 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7302 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7305 return IPR_RC_JOB_RETURN;
7309 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7310 * @ipr_cmd: ipr command struct
7312 * This function enables dual IOA RAID support if possible.
7317 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7319 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7320 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7321 struct ipr_mode_page24 *mode_page;
7325 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7326 sizeof(struct ipr_mode_page24));
/* Turn on dual-IOA advanced function support in the sensed page */
7329 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
/* Length includes the header length byte; mode data length must be 0
 * when sending the page back via MODE SELECT */
7331 length = mode_pages->hdr.length + 1;
7332 mode_pages->hdr.length = 0;
7334 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7335 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7338 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7339 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7342 return IPR_RC_JOB_RETURN;
7346 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7347 * @ipr_cmd: ipr command struct
7349 * This function handles the failure of a Mode Sense to the IOAFP.
7350 * Some adapters do not handle all mode pages.
7353 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7355 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7357 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* Page 24 unsupported: skip dual-IOA setup, go straight to page 28 */
7359 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7360 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7361 return IPR_RC_JOB_CONTINUE;
7364 return ipr_reset_cmd_failed(ipr_cmd);
7368 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7369 * @ipr_cmd: ipr command struct
7371 * This function send a mode sense to the IOA to retrieve
7372 * the IOA Advanced Function Control mode page.
7377 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7379 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7382 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7383 0x24, ioa_cfg->vpd_cbs_dma +
7384 offsetof(struct ipr_misc_cbs, mode_pages),
7385 sizeof(struct ipr_mode_pages));
7387 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7388 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7390 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7393 return IPR_RC_JOB_RETURN;
7397 * ipr_init_res_table - Initialize the resource table
7398 * @ipr_cmd: ipr command struct
7400 * This function looks through the existing resource table, comparing
7401 * it with the config table. This function will take care of old/new
7402 * devices and schedule adding/removing them from the mid-layer
7406 * IPR_RC_JOB_CONTINUE
7408 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7410 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7411 struct ipr_resource_entry *res, *temp;
7412 struct ipr_config_table_entry_wrapper cfgtew;
7413 int entries, found, flag, i;
/* sis64 adapters use the 64-bit config table layout throughout */
7418 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7420 flag = ioa_cfg->u.cfg_table->hdr.flags;
7422 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7423 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
/* Park every currently-known resource on old_res; entries still present
 * in the new config table are moved back below, leftovers are stale */
7425 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7426 list_move_tail(&res->queue, &old_res);
7429 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7431 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7433 for (i = 0; i < entries; i++) {
7435 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7437 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
/* Try to match this config table entry against a known resource */
7440 list_for_each_entry_safe(res, temp, &old_res, queue) {
7441 if (ipr_is_same_device(res, &cfgtew)) {
7442 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7449 if (list_empty(&ioa_cfg->free_res_q)) {
7450 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
/* New device: claim a free resource entry and initialize it */
7455 res = list_entry(ioa_cfg->free_res_q.next,
7456 struct ipr_resource_entry, queue);
7457 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7458 ipr_init_res_entry(res, &cfgtew);
7460 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7461 res->sdev->allow_restart = 1;
7464 ipr_update_res_entry(res, &cfgtew);
/* Resources left on old_res vanished from the config table: mark any
 * with a live sdev for removal from the mid-layer... */
7467 list_for_each_entry_safe(res, temp, &old_res, queue) {
7469 res->del_from_ml = 1;
7470 res->res_handle = IPR_INVALID_RES_HANDLE;
7471 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
/* ...and recycle the rest straight back to the free queue */
7475 list_for_each_entry_safe(res, temp, &old_res, queue) {
7476 ipr_clear_res_target(res);
7477 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
/* Only probe page 24 (dual IOA RAID) when both hardware and the module
 * parameter allow it */
7480 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7481 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7483 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7486 return IPR_RC_JOB_CONTINUE;
7490 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7491 * @ipr_cmd: ipr command struct
7493 * This function sends a Query IOA Configuration command
7494 * to the adapter to retrieve the IOA configuration table.
7499 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7501 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7502 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7503 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7504 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
/* Latch dual-IOA RAID capability discovered by the 0xD0 inquiry */
7507 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7508 ioa_cfg->dual_raid = 1;
7509 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7510 ucode_vpd->major_release, ucode_vpd->card_type,
7511 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7512 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7513 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7515 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
/* Buffer size goes into CDB bytes 6..8 as a big-endian 24-bit value */
7516 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7517 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7518 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7520 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7521 IPR_IOADL_FLAGS_READ_LAST);
7523 ipr_cmd->job_step = ipr_init_res_table;
7525 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7528 return IPR_RC_JOB_RETURN;
7532 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7533 * @ipr_cmd: ipr command struct
7535 * This utility function sends an inquiry to the adapter.
7540 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7541 dma_addr_t dma_addr, u8 xfer_len)
7543 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7546 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7547 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
/* Standard INQUIRY CDB: byte 1 = EVPD flags, byte 2 = VPD page code */
7549 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7550 ioarcb->cmd_pkt.cdb[1] = flags;
7551 ioarcb->cmd_pkt.cdb[2] = page;
7552 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7554 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7556 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7561 * ipr_inquiry_page_supported - Is the given inquiry page supported
7562 * @page0: inquiry page 0 buffer
7565 * This function determines if the specified inquiry page is supported.
7568 * 1 if page is supported / 0 if not
7570 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
/* Clamp to the buffer capacity in case the adapter reports a longer list */
7574 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7575 if (page0->page[i] == page)
7582 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7583 * @ipr_cmd: ipr command struct
7585 * This function sends a Page 0xD0 inquiry to the adapter
7586 * to retrieve adapter capabilities.
7589 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7591 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7593 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7594 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7595 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7598 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
/* Zeroed caps are the default when page 0xD0 is not supported */
7599 memset(cap, 0, sizeof(*cap));
7601 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7602 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7603 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7604 sizeof(struct ipr_inquiry_cap));
7605 return IPR_RC_JOB_RETURN;
7609 return IPR_RC_JOB_CONTINUE;
7613 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7614 * @ipr_cmd: ipr command struct
7616 * This function sends a Page 3 inquiry to the adapter
7617 * to retrieve software VPD information.
7620 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7622 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7624 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7628 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
/* EVPD=1, page 3: firmware/software VPD lands in vpd_cbs->page3_data */
7630 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7631 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7632 sizeof(struct ipr_inquiry_page3));
7635 return IPR_RC_JOB_RETURN;
7639 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7640 * @ipr_cmd: ipr command struct
7642 * This function sends a Page 0 inquiry to the adapter
7643 * to retrieve supported inquiry pages.
7646 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7648 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7650 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7655 /* Grab the type out of the VPD and store it away */
7656 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
/* First 4 chars of the product ID are the adapter type in hex */
7658 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7660 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7662 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7663 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7664 sizeof(struct ipr_inquiry_page0));
7667 return IPR_RC_JOB_RETURN;
7671 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7672 * @ipr_cmd: ipr command struct
7674 * This function sends a standard inquiry to the adapter.
7679 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7681 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7684 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
/* flags=0, page=0: standard (non-EVPD) inquiry into vpd_cbs->ioa_vpd */
7686 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7687 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7688 sizeof(struct ipr_ioa_vpd));
7691 return IPR_RC_JOB_RETURN;
7695 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7696 * @ipr_cmd: ipr command struct
7698 * This function send an Identify Host Request Response Queue
7699 * command to establish the HRRQ with the adapter.
7704 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7706 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7707 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7708 struct ipr_hrr_queue *hrrq;
7711 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7712 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
/* One Identify HRRQ command is issued per host RRQ; this function
 * re-queues itself (below) until every queue has been identified */
7714 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7715 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7717 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7718 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7720 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7722 ioarcb->cmd_pkt.cdb[1] = 0x1;
/* Queue selection only makes sense with multiple interrupt vectors */
7724 if (ioa_cfg->nvectors == 1)
7725 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7727 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
/* CDB bytes 2..5 carry the low 32 bits of the RRQ DMA address,
 * most-significant byte first */
7729 ioarcb->cmd_pkt.cdb[2] =
7730 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7731 ioarcb->cmd_pkt.cdb[3] =
7732 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7733 ioarcb->cmd_pkt.cdb[4] =
7734 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7735 ioarcb->cmd_pkt.cdb[5] =
7736 ((u64) hrrq->host_rrq_dma) & 0xff;
/* Bytes 7..8: queue size in bytes (u32 entries), big-endian */
7737 ioarcb->cmd_pkt.cdb[7] =
7738 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7739 ioarcb->cmd_pkt.cdb[8] =
7740 (sizeof(u32) * hrrq->size) & 0xff;
7742 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7743 ioarcb->cmd_pkt.cdb[9] =
7744 ioa_cfg->identify_hrrq_index;
/* sis64: bytes 10..13 carry the upper 32 bits of the 64-bit address */
7746 if (ioa_cfg->sis64) {
7747 ioarcb->cmd_pkt.cdb[10] =
7748 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7749 ioarcb->cmd_pkt.cdb[11] =
7750 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7751 ioarcb->cmd_pkt.cdb[12] =
7752 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7753 ioarcb->cmd_pkt.cdb[13] =
7754 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7757 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7758 ioarcb->cmd_pkt.cdb[14] =
7759 ioa_cfg->identify_hrrq_index;
7761 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7762 IPR_INTERNAL_TIMEOUT);
/* More queues left? Run this step again for the next index */
7764 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7765 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7768 return IPR_RC_JOB_RETURN;
7772 return IPR_RC_JOB_CONTINUE;
7776 * ipr_reset_timer_done - Adapter reset timer function
7777 * @ipr_cmd: ipr command struct
7779 * Description: This function is used in adapter reset processing
7780 * for timing events. If the reset_cmd pointer in the IOA
7781 * config struct is not this adapter's we are doing nested
7782 * resets and fail_all_ops will take care of freeing the
7788 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7790 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7791 unsigned long lock_flags = 0;
7793 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only complete the command if this reset is still the active one;
 * otherwise a nested reset owns cleanup (see description above) */
7795 if (ioa_cfg->reset_cmd == ipr_cmd) {
7796 list_del(&ipr_cmd->queue);
7797 ipr_cmd->done(ipr_cmd);
7800 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7804 * ipr_reset_start_timer - Start a timer for adapter reset job
7805 * @ipr_cmd: ipr command struct
7806 * @timeout: timeout value
7808 * Description: This function is used in adapter reset processing
7809 * for timing events. If the reset_cmd pointer in the IOA
7810 * config struct is not this adapter's we are doing nested
7811 * resets and fail_all_ops will take care of freeing the
7817 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7818 unsigned long timeout)
/* Park the command on the pending queue; ipr_reset_timer_done fires
 * after 'timeout' jiffies and resumes the reset job via ->done */
7822 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7823 ipr_cmd->done = ipr_reset_ioa_job;
7825 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7826 ipr_cmd->timer.expires = jiffies + timeout;
7827 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7828 add_timer(&ipr_cmd->timer);
7832 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7833 * @ioa_cfg: ioa cfg struct
7838 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7840 struct ipr_hrr_queue *hrrq;
7842 for_each_hrrq(hrrq, ioa_cfg) {
7843 spin_lock(&hrrq->_lock);
7844 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7846 /* Initialize Host RRQ pointers */
7847 hrrq->hrrq_start = hrrq->host_rrq;
7848 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7849 hrrq->hrrq_curr = hrrq->hrrq_start;
/* Toggle bit distinguishes new entries from stale ones after wrap */
7850 hrrq->toggle_bit = 1;
7851 spin_unlock(&hrrq->_lock);
7855 ioa_cfg->identify_hrrq_index = 0;
/* With multiple queues, index 0 is reserved; start dispatch at 1 */
7856 if (ioa_cfg->hrrq_num == 1)
7857 atomic_set(&ioa_cfg->hrrq_index, 0);
7859 atomic_set(&ioa_cfg->hrrq_index, 1);
7861 /* Zero out config table */
7862 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7866 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7867 * @ipr_cmd: ipr command struct
7870 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7872 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7874 unsigned long stage, stage_time;
7876 volatile u32 int_reg;
7877 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7880 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7881 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7882 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7884 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7886 /* sanity check the stage_time value */
7887 if (stage_time == 0)
7888 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7889 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7890 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7891 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7892 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7894 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7895 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
/* Read back the mask register to flush the posted write */
7896 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7897 stage_time = ioa_cfg->transop_timeout;
7898 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7899 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7900 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7901 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
/* Adapter already operational: mask stage-change + transop interrupts
 * in one 64-bit write and continue the job immediately */
7902 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7903 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7904 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7905 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7906 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7907 return IPR_RC_JOB_CONTINUE;
/* Otherwise arm a per-stage timeout and wait for the next stage-change
 * interrupt to drive the job forward */
7911 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7912 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7913 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7914 ipr_cmd->done = ipr_reset_ioa_job;
7915 add_timer(&ipr_cmd->timer);
7917 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7919 return IPR_RC_JOB_RETURN;
7923 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7924 * @ipr_cmd: ipr command struct
7926 * This function reinitializes some control blocks and
7927 * enables destructive diagnostics on the adapter.
7932 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7934 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7935 volatile u32 int_reg;
7936 volatile u64 maskval;
7940 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7941 ipr_init_ioa_mem(ioa_cfg);
/* Re-enable interrupt handling on every HRRQ under its own lock */
7943 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7944 spin_lock(&ioa_cfg->hrrq[i]._lock);
7945 ioa_cfg->hrrq[i].allow_interrupts = 1;
7946 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7949 if (ioa_cfg->sis64) {
7950 /* Set the adapter to the correct endian mode. */
7951 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
/* Read back to flush the posted write before proceeding */
7952 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7955 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7957 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7958 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7959 ioa_cfg->regs.clr_interrupt_mask_reg32);
7960 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7961 return IPR_RC_JOB_CONTINUE;
7964 /* Enable destructive diagnostics on IOA */
7965 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7967 if (ioa_cfg->sis64) {
/* Unmask stage-change (high word) and operational interrupts (low word)
 * with a single 64-bit write */
7968 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7969 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7970 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7972 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7974 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7976 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
/* sis64 adapters report IPL progress through the feedback register,
 * so hand off to the staged bringup path */
7978 if (ioa_cfg->sis64) {
7979 ipr_cmd->job_step = ipr_reset_next_stage;
7980 return IPR_RC_JOB_CONTINUE;
7983 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7984 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7985 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7986 ipr_cmd->done = ipr_reset_ioa_job;
7987 add_timer(&ipr_cmd->timer);
7988 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7991 return IPR_RC_JOB_RETURN;
7995 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7996 * @ipr_cmd: ipr command struct
7998 * This function is invoked when an adapter dump has run out
7999 * of processing time.
8002 * IPR_RC_JOB_CONTINUE
8004 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8006 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Demote the dump: not yet started -> keep waiting; in progress -> abort */
8008 if (ioa_cfg->sdt_state == GET_DUMP)
8009 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8010 else if (ioa_cfg->sdt_state == READ_DUMP)
8011 ioa_cfg->sdt_state = ABORT_DUMP;
8013 ioa_cfg->dump_timeout = 1;
8014 ipr_cmd->job_step = ipr_reset_alert;
8016 return IPR_RC_JOB_CONTINUE;
8020 * ipr_unit_check_no_data - Log a unit check/no data error log
8021 * @ioa_cfg: ioa config struct
8023 * Logs an error indicating the adapter unit checked, but for some
8024 * reason, we were unable to fetch the unit check buffer.
8029 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8031 ioa_cfg->errors_logged++;
8032 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8036 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8037 * @ioa_cfg: ioa config struct
8039 * Fetches the unit check buffer from the adapter by clocking the data
8040 * through the mailbox register.
8045 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8047 unsigned long mailbox;
8048 struct ipr_hostrcb *hostrcb;
8049 struct ipr_uc_sdt sdt;
8053 mailbox = readl(ioa_cfg->ioa_mailbox);
/* 32-bit adapters must present a format-2 SDT address in the mailbox */
8055 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8056 ipr_unit_check_no_data(ioa_cfg);
8060 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8061 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8062 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
/* Bail out unless the first SDT entry is valid and the table is ready */
8064 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8065 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8066 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8067 ipr_unit_check_no_data(ioa_cfg);
8071 /* Find length of the first sdt entry (UC buffer) */
8072 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8073 length = be32_to_cpu(sdt.entry[0].end_token);
8075 length = (be32_to_cpu(sdt.entry[0].end_token) -
8076 be32_to_cpu(sdt.entry[0].start_token)) &
8077 IPR_FMT2_MBX_ADDR_MASK;
8079 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8080 struct ipr_hostrcb, queue);
8081 list_del(&hostrcb->queue);
8082 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
/* Clamp the copy to the hostrcb buffer so a bogus length can't overrun */
8084 rc = ipr_get_ldump_data_section(ioa_cfg,
8085 be32_to_cpu(sdt.entry[0].start_token),
8086 (__be32 *)&hostrcb->hcam,
8087 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8090 ipr_handle_log_data(ioa_cfg, hostrcb);
8091 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8092 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8093 ioa_cfg->sdt_state == GET_DUMP)
8094 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8096 ipr_unit_check_no_data(ioa_cfg);
/* Return the hostrcb to the free pool in every case */
8098 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8102 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8103 * @ipr_cmd: ipr command struct
8105 * Description: This function will call to get the unit check buffer.
8110 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8112 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8115 ioa_cfg->ioa_unit_checked = 0;
8116 ipr_get_unit_check_buffer(ioa_cfg);
/* Alert the adapter next; zero timeout re-queues the job immediately */
8117 ipr_cmd->job_step = ipr_reset_alert;
8118 ipr_reset_start_timer(ipr_cmd, 0);
8121 return IPR_RC_JOB_RETURN;
8125 * ipr_reset_restore_cfg_space - Restore PCI config space.
8126 * @ipr_cmd: ipr command struct
8128 * Description: This function restores the saved PCI config space of
8129 * the adapter, fails all outstanding ops back to the callers, and
8130 * fetches the dump/unit check if applicable to this reset.
8133 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8135 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8137 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Mark the state saved so pci_restore_state() will actually restore */
8141 ioa_cfg->pdev->state_saved = true;
8142 pci_restore_state(ioa_cfg->pdev);
8144 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8145 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8146 return IPR_RC_JOB_CONTINUE;
8149 ipr_fail_all_ops(ioa_cfg);
8151 if (ioa_cfg->sis64) {
8152 /* Set the adapter to the correct endian mode. */
8153 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8154 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8157 if (ioa_cfg->ioa_unit_checked) {
/* sis64 needs a settle delay before fetching the unit check buffer;
 * 32-bit adapters can fetch it right away */
8158 if (ioa_cfg->sis64) {
8159 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8160 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8161 return IPR_RC_JOB_RETURN;
8163 ioa_cfg->ioa_unit_checked = 0;
8164 ipr_get_unit_check_buffer(ioa_cfg);
8165 ipr_cmd->job_step = ipr_reset_alert;
8166 ipr_reset_start_timer(ipr_cmd, 0);
8167 return IPR_RC_JOB_RETURN;
8171 if (ioa_cfg->in_ioa_bringdown) {
8172 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8174 ipr_cmd->job_step = ipr_reset_enable_ioa;
8176 if (GET_DUMP == ioa_cfg->sdt_state) {
8177 ioa_cfg->sdt_state = READ_DUMP;
8178 ioa_cfg->dump_timeout = 0;
8180 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8182 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8183 ipr_cmd->job_step = ipr_reset_wait_for_dump;
/* work_q runs the actual dump read in process context */
8184 schedule_work(&ioa_cfg->work_q);
8185 return IPR_RC_JOB_RETURN;
8190 return IPR_RC_JOB_CONTINUE;
8194 * ipr_reset_bist_done - BIST has completed on the adapter.
8195 * @ipr_cmd: ipr command struct
8197 * Description: Unblock config space and resume the reset process.
8200 * IPR_RC_JOB_CONTINUE
8202 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8204 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Release the config-space lock taken before BIST, if we hold it */
8207 if (ioa_cfg->cfg_locked)
8208 pci_cfg_access_unlock(ioa_cfg->pdev);
8209 ioa_cfg->cfg_locked = 0;
8210 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8212 return IPR_RC_JOB_CONTINUE;
8216 * ipr_reset_start_bist - Run BIST on the adapter.
8217 * @ipr_cmd: ipr command struct
8219 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8222 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8224 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8226 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8227 int rc = PCIBIOS_SUCCESSFUL;
/* Chips with MMIO-triggered BIST use a uproc register write instead of
 * the PCI config-space BIST register */
8230 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8231 writel(IPR_UPROCI_SIS64_START_BIST,
8232 ioa_cfg->regs.set_uproc_interrupt_reg32);
8234 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8236 if (rc == PCIBIOS_SUCCESSFUL) {
8237 ipr_cmd->job_step = ipr_reset_bist_done;
8238 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8239 rc = IPR_RC_JOB_RETURN;
/* Config write failed: drop the config lock and record a PCI error */
8241 if (ioa_cfg->cfg_locked)
8242 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8243 ioa_cfg->cfg_locked = 0;
8244 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8245 rc = IPR_RC_JOB_CONTINUE;
8253 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8254 * @ipr_cmd: ipr command struct
8256 * Description: This clears PCI reset to the adapter and delays two seconds.
8261 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8264 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
/* Give the adapter the post-reset settle time before restoring state */
8265 ipr_cmd->job_step = ipr_reset_bist_done;
8266 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8268 return IPR_RC_JOB_RETURN;
8272 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8273 * @ipr_cmd: ipr command struct
8275 * Description: This asserts PCI reset to the adapter.
8280 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8282 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8283 struct pci_dev *pdev = ioa_cfg->pdev;
/* Hold warm reset asserted for IPR_PCI_RESET_TIMEOUT, then deassert */
8286 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8287 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8288 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8290 return IPR_RC_JOB_RETURN;
8294 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8295 * @ipr_cmd: ipr command struct
8297 * Description: This attempts to block config access to the IOA.
8300 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8302 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8304 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8305 int rc = IPR_RC_JOB_CONTINUE;
/* Non-blocking attempt to fence user config-space access during reset */
8307 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8308 ioa_cfg->cfg_locked = 1;
8309 ipr_cmd->job_step = ioa_cfg->reset;
/* Lock busy: retry after a delay while budget (time_left) remains */
8311 if (ipr_cmd->u.time_left) {
8312 rc = IPR_RC_JOB_RETURN;
8313 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8314 ipr_reset_start_timer(ipr_cmd,
8315 IPR_CHECK_FOR_RESET_TIMEOUT);
/* Budget exhausted: proceed unlocked rather than stall the reset */
8317 ipr_cmd->job_step = ioa_cfg->reset;
8318 dev_err(&ioa_cfg->pdev->dev,
8319 "Timed out waiting to lock config access. Resetting anyway.\n");
8327 * ipr_reset_block_config_access - Block config access to the IOA
8328 * @ipr_cmd: ipr command struct
8330 * Description: This attempts to block config access to the IOA
8333 * IPR_RC_JOB_CONTINUE
8335 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
/* Seed the config-access-blocking wait loop: clear the locked flag,
 * set the retry budget, and hand off to the _wait job step. */
8337 	ipr_cmd->ioa_cfg->cfg_locked = 0;
8338 	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8339 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8340 	return IPR_RC_JOB_CONTINUE;
8344 * ipr_reset_allowed - Query whether or not IOA can be reset
8345 * @ioa_cfg: ioa config struct
8348 * 0 if reset not allowed / non-zero if reset is allowed
8350 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8352 	volatile u32 temp_reg;
/* Reset is allowed only when the IOA is not in a critical operation
 * (e.g. writing flash) per the sense interrupt register. */
8354 	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8355 	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8359 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8360 * @ipr_cmd: ipr command struct
8362 * Description: This function waits for adapter permission to run BIST,
8363 * then runs BIST. If the adapter does not give permission after a
8364 * reasonable time, we will reset the adapter anyway. The impact of
8365 * resetting the adapter without warning the adapter is the risk of
8366 * losing the persistent error log on the adapter. If the adapter is
8367 * reset while it is writing to the flash on the adapter, the flash
8368 * segment will have bad ECC and be zeroed.
8371 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8373 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8375 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8376 	int rc = IPR_RC_JOB_RETURN;
8378 	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
/* Adapter still busy and budget remains: poll again from the timer. */
8379 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8380 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
/* Either the adapter granted permission or we timed out; in both
 * cases move on to blocking config access and resetting. */
8382 		ipr_cmd->job_step = ipr_reset_block_config_access;
8383 		rc = IPR_RC_JOB_CONTINUE;
8390 * ipr_reset_alert - Alert the adapter of a pending reset
8391 * @ipr_cmd: ipr command struct
8393 * Description: This function alerts the adapter that it will be reset.
8394 * If memory space is not currently enabled, proceed directly
8395 * to running BIST on the adapter. The timer must always be started
8396 * so we guarantee we do not run BIST from ipr_isr.
8401 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8403 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Check memory-space enable; if MMIO is usable, warn the adapter of
 * the impending reset via the reset-alert uproc interrupt. */
8408 	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8410 	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8411 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8412 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8413 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
/* MMIO unusable: skip the alert and go straight to the reset path. */
8415 		ipr_cmd->job_step = ipr_reset_block_config_access;
/* Timer is always started so BIST never runs from ipr_isr context. */
8418 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8419 	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8422 	return IPR_RC_JOB_RETURN;
8426 * ipr_reset_ucode_download_done - Microcode download completion
8427 * @ipr_cmd: ipr command struct
8429 * Description: This function unmaps the microcode download buffer.
8432 * IPR_RC_JOB_CONTINUE
8434 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8436 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8437 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
/* NOTE(review): DMA_TO_DEVICE (dma_data_direction) is passed to the
 * legacy pci_unmap_sg API, which expects PCI_DMA_TODEVICE.  The values
 * are the same, but the API mix should be cleaned up (dma_unmap_sg). */
8439 	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8440 		     sglist->num_sg, DMA_TO_DEVICE);
8442 	ipr_cmd->job_step = ipr_reset_alert;
8443 	return IPR_RC_JOB_CONTINUE;
8447 * ipr_reset_ucode_download - Download microcode to the adapter
8448 * @ipr_cmd: ipr command struct
8450 * Description: This function checks to see if it there is microcode
8451 * to download to the adapter. If there is, a download is performed.
8454 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8456 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8458 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8459 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
/* No microcode staged: skip straight to the reset-alert step. */
8462 	ipr_cmd->job_step = ipr_reset_alert;
8465 	return IPR_RC_JOB_CONTINUE;
/* Build a WRITE BUFFER (download-and-save) CDB addressed to the IOA
 * itself; bytes 6-8 carry the 24-bit big-endian buffer length. */
8467 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8468 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8469 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8470 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8471 	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8472 	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8473 	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
/* SIS64 vs SIS32 adapters use different IOADL descriptor formats. */
8476 	ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8478 	ipr_build_ucode_ioadl(ipr_cmd, sglist);
8479 	ipr_cmd->job_step = ipr_reset_ucode_download_done;
8481 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8482 		   IPR_WRITE_BUFFER_TIMEOUT);
/* Request issued; the reset job resumes from the command's done path. */
8485 	return IPR_RC_JOB_RETURN;
8489 * ipr_reset_shutdown_ioa - Shutdown the adapter
8490 * @ipr_cmd: ipr command struct
8492 * Description: This function issues an adapter shutdown of the
8493 * specified type to the specified adapter as part of the
8494 * adapter reset job.
8497 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8499 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8501 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8502 	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8503 	unsigned long timeout;
8504 	int rc = IPR_RC_JOB_CONTINUE;
/* Only issue a shutdown command if one was requested and the adapter
 * is still alive; otherwise fall through to ipr_reset_alert below. */
8507 	if (shutdown_type != IPR_SHUTDOWN_NONE &&
8508 	    !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8509 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8510 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8511 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8512 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
/* Pick a timeout appropriate to the shutdown flavor. */
8514 		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8515 			timeout = IPR_SHUTDOWN_TIMEOUT;
8516 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8517 			timeout = IPR_INTERNAL_TIMEOUT;
8518 		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8519 			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8521 			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8523 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
/* Shutdown issued: next step after completion is microcode download. */
8525 		rc = IPR_RC_JOB_RETURN;
8526 		ipr_cmd->job_step = ipr_reset_ucode_download;
8528 		ipr_cmd->job_step = ipr_reset_alert;
8535 * ipr_reset_ioa_job - Adapter reset job
8536 * @ipr_cmd: ipr command struct
8538 * Description: This function is the job router for the adapter reset job.
8543 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
/* Job router for the reset state machine: repeatedly run the current
 * job_step while it returns IPR_RC_JOB_CONTINUE; a step returning
 * IPR_RC_JOB_RETURN has deferred continuation (timer or command done). */
8546 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8549 		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8551 		if (ioa_cfg->reset_cmd != ipr_cmd) {
8553 			 * We are doing nested adapter resets and this is
8554 			 * not the current reset job.
8556 			list_add_tail(&ipr_cmd->queue,
8557 					&ipr_cmd->hrrq->hrrq_free_q);
8561 		if (IPR_IOASC_SENSE_KEY(ioasc)) {
/* Previous step failed: let the failure handler decide whether the
 * job aborts here (IPR_RC_JOB_RETURN) or continues. */
8562 			rc = ipr_cmd->job_step_failed(ipr_cmd);
8563 			if (rc == IPR_RC_JOB_RETURN)
/* Recycle the command block and run the next step with the default
 * failure handler restored. */
8567 		ipr_reinit_ipr_cmnd(ipr_cmd);
8568 		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8569 		rc = ipr_cmd->job_step(ipr_cmd);
8570 	} while (rc == IPR_RC_JOB_CONTINUE);
8574 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8575 * @ioa_cfg: ioa config struct
8576 * @job_step: first job step of reset job
8577 * @shutdown_type: shutdown type
8579 * Description: This function will initiate the reset of the given adapter
8580 * starting at the selected job step.
8581 * If the caller needs to wait on the completion of the reset,
8582 * the caller must sleep on the reset_wait_q.
8587 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8588 				    int (*job_step) (struct ipr_cmnd *),
8589 				    enum ipr_shutdown_type shutdown_type)
8591 	struct ipr_cmnd *ipr_cmd;
/* Mark the reset in progress and stop every hrrq from accepting new
 * commands before kicking off the job. */
8594 	ioa_cfg->in_reset_reload = 1;
8595 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8596 		spin_lock(&ioa_cfg->hrrq[i]._lock);
8597 		ioa_cfg->hrrq[i].allow_cmds = 0;
8598 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
/* Block the SCSI midlayer unless we're tearing the adapter down. */
8601 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8602 		scsi_block_requests(ioa_cfg->host);
8604 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8605 	ioa_cfg->reset_cmd = ipr_cmd;
8606 	ipr_cmd->job_step = job_step;
8607 	ipr_cmd->u.shutdown_type = shutdown_type;
/* Enter the reset state machine at the caller-selected first step. */
8609 	ipr_reset_ioa_job(ipr_cmd);
8613 * ipr_initiate_ioa_reset - Initiate an adapter reset
8614 * @ioa_cfg: ioa config struct
8615 * @shutdown_type: shutdown type
8617 * Description: This function will initiate the reset of the given adapter.
8618 * If the caller needs to wait on the completion of the reset,
8619 * the caller must sleep on the reset_wait_q.
8624 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8625 				   enum ipr_shutdown_type shutdown_type)
/* Nothing to do if the adapter has already been declared dead. */
8629 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
/* A reset is already running: downgrade any in-flight dump state so
 * the nested reset doesn't race with dump collection. */
8632 	if (ioa_cfg->in_reset_reload) {
8633 		if (ioa_cfg->sdt_state == GET_DUMP)
8634 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8635 		else if (ioa_cfg->sdt_state == READ_DUMP)
8636 			ioa_cfg->sdt_state = ABORT_DUMP;
/* Too many consecutive reset attempts: give up and offline the IOA. */
8639 	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8640 		dev_err(&ioa_cfg->pdev->dev,
8641 			"IOA taken offline - error recovery failed\n");
8643 		ioa_cfg->reset_retries = 0;
8644 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8645 			spin_lock(&ioa_cfg->hrrq[i]._lock);
8646 			ioa_cfg->hrrq[i].ioa_is_dead = 1;
8647 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
/* If we were already bringing the adapter down, fail everything
 * outstanding and wake waiters instead of starting a new reset. */
8651 		if (ioa_cfg->in_ioa_bringdown) {
8652 			ioa_cfg->reset_cmd = NULL;
8653 			ioa_cfg->in_reset_reload = 0;
8654 			ipr_fail_all_ops(ioa_cfg);
8655 			wake_up_all(&ioa_cfg->reset_wait_q);
8657 			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
/* host_lock must be dropped around scsi_unblock_requests. */
8658 				spin_unlock_irq(ioa_cfg->host->host_lock);
8659 				scsi_unblock_requests(ioa_cfg->host);
8660 				spin_lock_irq(ioa_cfg->host->host_lock);
/* Dead adapter: convert this reset into a bringdown with no shutdown. */
8664 			ioa_cfg->in_ioa_bringdown = 1;
8665 			shutdown_type = IPR_SHUTDOWN_NONE;
8669 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8674 * ipr_reset_freeze - Hold off all I/O activity
8675 * @ipr_cmd: ipr command struct
8677 * Description: If the PCI slot is frozen, hold off all I/O
8678 * activity; then, as soon as the slot is available again,
8679 * initiate an adapter reset.
8681 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8683 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8686 	/* Disallow new interrupts, avoid loop */
8687 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8688 		spin_lock(&ioa_cfg->hrrq[i]._lock);
8689 		ioa_cfg->hrrq[i].allow_interrupts = 0;
8690 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
/* Park the reset command on the pending queue; when the PCI slot
 * recovers, its done handler re-enters the reset job. */
8693 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8694 	ipr_cmd->done = ipr_reset_ioa_job;
8695 	return IPR_RC_JOB_RETURN;
8699 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8700 * @pdev: PCI device struct
8702 * Description: This routine is called to tell us that the PCI bus
8703 * is down. Can't do anything here, except put the device driver
8704 * into a holding pattern, waiting for the PCI bus to come back.
8706 static void ipr_pci_frozen(struct pci_dev *pdev)
8708 	unsigned long flags = 0;
8709 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* PCI bus is down: start a reset whose first step (ipr_reset_freeze)
 * simply holds I/O until the slot comes back. */
8711 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8712 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8713 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8717 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8718 * @pdev: PCI device struct
8720 * Description: This routine is called by the pci error recovery
8721 * code after the PCI slot has been reset, just before we
8722 * should resume normal operations.
8724 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8726 	unsigned long flags = 0;
8727 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8729 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* Some adapters need a full warm reset; otherwise just restore the
 * saved config space and resume the reset job. */
8730 	if (ioa_cfg->needs_warm_reset)
8731 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8733 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8735 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8736 	return PCI_ERS_RESULT_RECOVERED;
8740 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8741 * @pdev: PCI device struct
8743 * Description: This routine is called when the PCI bus has
8744 * permanently failed.
8746 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8748 	unsigned long flags = 0;
8749 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8752 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8753 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8754 		ioa_cfg->sdt_state = ABORT_DUMP;
/* Force the very next reset attempt to be the last one so the
 * adapter is taken offline immediately. */
8755 	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8756 	ioa_cfg->in_ioa_bringdown = 1;
8757 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8758 		spin_lock(&ioa_cfg->hrrq[i]._lock);
8759 		ioa_cfg->hrrq[i].allow_cmds = 0;
8760 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
8763 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8764 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8768 * ipr_pci_error_detected - Called when a PCI error is detected.
8769 * @pdev: PCI device struct
8770 * @state: PCI channel state
8772 * Description: Called when a PCI error is detected.
8775 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8777 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8778 					       pci_channel_state_t state)
/* Dispatch on the channel state reported by the PCI error-recovery
 * core: frozen -> hold I/O and request a slot reset; permanent
 * failure -> offline the adapter and disconnect. */
8781 	case pci_channel_io_frozen:
8782 		ipr_pci_frozen(pdev);
8783 		return PCI_ERS_RESULT_NEED_RESET;
8784 	case pci_channel_io_perm_failure:
8785 		ipr_pci_perm_failure(pdev);
8786 		return PCI_ERS_RESULT_DISCONNECT;
/* Any other state: ask for a reset by default. */
8791 	return PCI_ERS_RESULT_NEED_RESET;
8795 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8796 * @ioa_cfg: ioa cfg struct
8798  * Description: This is the second phase of adapter initialization.
8799  * This function takes care of initializing the adapter to the point
8800 * where it can accept new commands.
8803 * 0 on success / -EIO on failure
8805 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8808 	unsigned long host_lock_flags = 0;
8811 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8812 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
/* Hard reset if probe determined one is required; otherwise start
 * the reset job at the enable-IOA step directly. */
8813 	if (ioa_cfg->needs_hard_reset) {
8814 		ioa_cfg->needs_hard_reset = 0;
8815 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8817 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
/* Sleep (lock dropped) until the reset/reload completes. */
8819 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8820 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8821 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8823 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8825 	} else if (ipr_invalid_adapter(ioa_cfg)) {
8829 		dev_err(&ioa_cfg->pdev->dev,
8830 			"Adapter not supported in this hardware configuration.\n");
8833 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8840 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8841 * @ioa_cfg: ioa config struct
8846 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
/* Return every command block to the DMA pool, destroy the pool, and
 * free the bookkeeping arrays; safe to call on a partially-allocated
 * state (all frees are NULL-guarded). */
8850 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8851 		if (ioa_cfg->ipr_cmnd_list[i])
8852 			pci_pool_free(ioa_cfg->ipr_cmd_pool,
8853 				      ioa_cfg->ipr_cmnd_list[i],
8854 				      ioa_cfg->ipr_cmnd_list_dma[i]);
8856 		ioa_cfg->ipr_cmnd_list[i] = NULL;
8859 	if (ioa_cfg->ipr_cmd_pool)
8860 		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8862 	kfree(ioa_cfg->ipr_cmnd_list);
8863 	kfree(ioa_cfg->ipr_cmnd_list_dma);
/* NULL the pointers so a later teardown pass doesn't double-free. */
8864 	ioa_cfg->ipr_cmnd_list = NULL;
8865 	ioa_cfg->ipr_cmnd_list_dma = NULL;
8866 	ioa_cfg->ipr_cmd_pool = NULL;
8870 * ipr_free_mem - Frees memory allocated for an adapter
8871 * @ioa_cfg: ioa cfg struct
8876 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
/* Undo ipr_alloc_mem in reverse order: resource table, VPD buffers,
 * command blocks, per-hrrq response queues, config table, HCAM
 * buffers, dump, and trace buffer. */
8880 	kfree(ioa_cfg->res_entries);
8881 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8882 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8883 	ipr_free_cmd_blks(ioa_cfg);
8885 	for (i = 0; i < ioa_cfg->hrrq_num; i++)
8886 		pci_free_consistent(ioa_cfg->pdev,
8887 				    sizeof(u32) * ioa_cfg->hrrq[i].size,
8888 				    ioa_cfg->hrrq[i].host_rrq,
8889 				    ioa_cfg->hrrq[i].host_rrq_dma);
8891 	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8892 			    ioa_cfg->u.cfg_table,
8893 			    ioa_cfg->cfg_table_dma);
8895 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8896 		pci_free_consistent(ioa_cfg->pdev,
8897 				    sizeof(struct ipr_hostrcb),
8898 				    ioa_cfg->hostrcb[i],
8899 				    ioa_cfg->hostrcb_dma[i]);
8902 	ipr_free_dump(ioa_cfg);
8903 	kfree(ioa_cfg->trace);
8907 * ipr_free_all_resources - Free all allocated resources for an adapter.
8908  * @ioa_cfg: ioa config struct
8910 * This function frees all allocated resources for the
8911 * specified adapter.
8916 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8918 	struct pci_dev *pdev = ioa_cfg->pdev;
/* Release IRQs first (per-vector for MSI/MSI-X, single legacy IRQ
 * otherwise), then disable MSI/MSI-X, unmap MMIO, release BARs, free
 * driver memory, drop the host reference, and disable the device. */
8921 	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8922 	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
8924 		for (i = 0; i < ioa_cfg->nvectors; i++)
8925 			free_irq(ioa_cfg->vectors_info[i].vec,
8928 		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8930 	if (ioa_cfg->intr_flag == IPR_USE_MSI) {
8931 		pci_disable_msi(pdev);
8932 		ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8933 	} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
8934 		pci_disable_msix(pdev);
8935 		ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8938 	iounmap(ioa_cfg->hdw_dma_regs);
8939 	pci_release_regions(pdev);
8940 	ipr_free_mem(ioa_cfg);
8941 	scsi_host_put(ioa_cfg->host);
8942 	pci_disable_device(pdev);
8947 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8948 * @ioa_cfg: ioa config struct
8951 * 0 on success / -ENOMEM on allocation failure
8953 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8955 	struct ipr_cmnd *ipr_cmd;
8956 	struct ipr_ioarcb *ioarcb;
8957 	dma_addr_t dma_addr;
8958 	int i, entries_each_hrrq, hrrq_id = 0;
/* DMA pool with 512-byte alignment as required by the adapter for
 * IOARCB addresses. */
8960 	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8961 						sizeof(struct ipr_cmnd), 512, 0);
8963 	if (!ioa_cfg->ipr_cmd_pool)
8966 	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8967 	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8969 	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
/* ipr_free_cmd_blks handles the partially-allocated state. */
8970 		ipr_free_cmd_blks(ioa_cfg);
/* Partition the command-id space across hrrqs: with multiple queues,
 * queue 0 gets the internal command blocks and the rest split the
 * base blocks evenly; with one queue it owns everything. */
8974 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8975 		if (ioa_cfg->hrrq_num > 1) {
8977 				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8978 				ioa_cfg->hrrq[i].min_cmd_id = 0;
8979 					ioa_cfg->hrrq[i].max_cmd_id =
8980 						(entries_each_hrrq - 1);
8983 					IPR_NUM_BASE_CMD_BLKS/
8984 					(ioa_cfg->hrrq_num - 1);
8985 				ioa_cfg->hrrq[i].min_cmd_id =
8986 					IPR_NUM_INTERNAL_CMD_BLKS +
8987 					(i - 1) * entries_each_hrrq;
8988 				ioa_cfg->hrrq[i].max_cmd_id =
8989 					(IPR_NUM_INTERNAL_CMD_BLKS +
8990 					i * entries_each_hrrq - 1);
8993 			entries_each_hrrq = IPR_NUM_CMD_BLKS;
8994 			ioa_cfg->hrrq[i].min_cmd_id = 0;
8995 			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8997 		ioa_cfg->hrrq[i].size = entries_each_hrrq;
9000 	BUG_ON(ioa_cfg->hrrq_num == 0);
/* Give any ids left over from integer division to the last hrrq. */
9002 	i = IPR_NUM_CMD_BLKS -
9003 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9005 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9006 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
/* Allocate and wire up each command block: IOARCB self-address,
 * IOADL and IOASA DMA addresses (SIS64 vs SIS32 layouts differ),
 * then park it on its owning hrrq's free queue. */
9009 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9010 		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9013 			ipr_free_cmd_blks(ioa_cfg);
9017 		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9018 		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9019 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9021 		ioarcb = &ipr_cmd->ioarcb;
9022 		ipr_cmd->dma_addr = dma_addr;
9024 			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9026 			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9028 		ioarcb->host_response_handle = cpu_to_be32(i << 2);
9029 		if (ioa_cfg->sis64) {
9030 			ioarcb->u.sis64_addr_data.data_ioadl_addr =
9031 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9032 			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9033 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9035 			ioarcb->write_ioadl_addr =
9036 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9037 			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9038 			ioarcb->ioasa_host_pci_addr =
9039 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9041 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9042 		ipr_cmd->cmd_index = i;
9043 		ipr_cmd->ioa_cfg = ioa_cfg;
9044 		ipr_cmd->sense_buffer_dma = dma_addr +
9045 			offsetof(struct ipr_cmnd, sense_buffer);
9047 		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9048 		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9049 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
/* Advance to the next hrrq once its id range is filled. */
9050 		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9058 * ipr_alloc_mem - Allocate memory for an adapter
9059 * @ioa_cfg: ioa config struct
9062 * 0 on success / non-zero for error
9064 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9066 	struct pci_dev *pdev = ioa_cfg->pdev;
9067 	int i, rc = -ENOMEM;
/* Resource table entries, all initially on the free list. */
9070 	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9071 				       ioa_cfg->max_devs_supported, GFP_KERNEL);
9073 	if (!ioa_cfg->res_entries)
9076 	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9077 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9078 		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9081 	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
9082 						sizeof(struct ipr_misc_cbs),
9083 						&ioa_cfg->vpd_cbs_dma);
9085 	if (!ioa_cfg->vpd_cbs)
9086 		goto out_free_res_entries;
/* Per-hrrq queues and locks; which lock is used appears to depend on
 * the single- vs multi-queue configuration (condition elided here). */
9088 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9089 		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9090 		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9091 		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9093 			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9095 			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9098 	if (ipr_alloc_cmd_blks(ioa_cfg))
9099 		goto out_free_vpd_cbs;
/* Host response-ring buffers, one DMA-coherent buffer per hrrq.  On
 * failure, free the ones allocated so far before unwinding. */
9101 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9102 		ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
9103 					sizeof(u32) * ioa_cfg->hrrq[i].size,
9104 					&ioa_cfg->hrrq[i].host_rrq_dma);
9106 		if (!ioa_cfg->hrrq[i].host_rrq) {
9108 				pci_free_consistent(pdev,
9109 					sizeof(u32) * ioa_cfg->hrrq[i].size,
9110 					ioa_cfg->hrrq[i].host_rrq,
9111 					ioa_cfg->hrrq[i].host_rrq_dma);
9112 			goto out_ipr_free_cmd_blocks;
9114 		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9117 	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
9118 						    ioa_cfg->cfg_table_size,
9119 						    &ioa_cfg->cfg_table_dma);
9121 	if (!ioa_cfg->u.cfg_table)
9122 		goto out_free_host_rrq;
/* HCAM buffers for host-controlled async messages (errors/config
 * change notifications); hostrcb_dma points at the hcam payload. */
9124 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
9125 		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
9126 							   sizeof(struct ipr_hostrcb),
9127 							   &ioa_cfg->hostrcb_dma[i]);
9129 		if (!ioa_cfg->hostrcb[i])
9130 			goto out_free_hostrcb_dma;
9132 		ioa_cfg->hostrcb[i]->hostrcb_dma =
9133 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9134 		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9135 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9138 	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9139 				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9141 	if (!ioa_cfg->trace)
9142 		goto out_free_hostrcb_dma;
/* Error unwind: free everything allocated above, newest first. */
9149 out_free_hostrcb_dma:
9151 		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
9152 				    ioa_cfg->hostrcb[i],
9153 				    ioa_cfg->hostrcb_dma[i]);
9155 	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
9156 			    ioa_cfg->u.cfg_table,
9157 			    ioa_cfg->cfg_table_dma);
9159 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9160 		pci_free_consistent(pdev,
9161 				    sizeof(u32) * ioa_cfg->hrrq[i].size,
9162 				    ioa_cfg->hrrq[i].host_rrq,
9163 				    ioa_cfg->hrrq[i].host_rrq_dma);
9165 out_ipr_free_cmd_blocks:
9166 	ipr_free_cmd_blks(ioa_cfg);
9168 	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9169 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9170 out_free_res_entries:
9171 	kfree(ioa_cfg->res_entries);
9176 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9177 * @ioa_cfg: ioa config struct
9182 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
/* Default every SCSI bus: QAS off, default width, and a max transfer
 * rate taken from the module parameter when it indexes a valid entry,
 * otherwise U160. */
9186 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9187 		ioa_cfg->bus_attr[i].bus = i;
9188 		ioa_cfg->bus_attr[i].qas_enabled = 0;
9189 		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9190 		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9191 			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9193 			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9198 * ipr_init_ioa_cfg - Initialize IOA config struct
9199 * @ioa_cfg: ioa config struct
9200 * @host: scsi host struct
9201 * @pdev: PCI dev struct
9206 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9207 			     struct Scsi_Host *host, struct pci_dev *pdev)
9209 	const struct ipr_interrupt_offsets *p;
9210 	struct ipr_interrupts *t;
/* Basic identity, eye-catcher labels (aid dump analysis), lists,
 * work queue, and wait queues. */
9213 	ioa_cfg->host = host;
9214 	ioa_cfg->pdev = pdev;
9215 	ioa_cfg->log_level = ipr_log_level;
9216 	ioa_cfg->doorbell = IPR_DOORBELL;
9217 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9218 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9219 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9220 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9221 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9222 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9224 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9225 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9226 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9227 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9228 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9229 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
9230 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
9231 	ioa_cfg->sdt_state = INACTIVE;
9233 	ipr_initialize_bus_attr(ioa_cfg);
9234 	ioa_cfg->max_devs_supported = ipr_max_devs;
/* SIS64 adapters support larger target/LUN/device limits. */
9236 	if (ioa_cfg->sis64) {
9237 		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9238 		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9239 		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9240 			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9242 		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9243 		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9244 		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9245 			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9247 	host->max_channel = IPR_MAX_BUS_TO_SCAN;
9248 	host->unique_id = host->host_no;
9249 	host->max_cmd_len = IPR_MAX_CDB_LEN;
9250 	host->can_queue = ioa_cfg->max_cmds;
9251 	pci_set_drvdata(pdev, ioa_cfg);
/* Translate chip-specific register offsets into absolute MMIO
 * addresses relative to the mapped BAR. */
9253 	p = &ioa_cfg->chip_cfg->regs;
9255 	base = ioa_cfg->hdw_dma_regs;
9257 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9258 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9259 	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9260 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9261 	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9262 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9263 	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9264 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9265 	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9266 	t->ioarrin_reg = base + p->ioarrin_reg;
9267 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9268 	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9269 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9270 	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9271 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9272 	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
/* Registers that exist only on SIS64 hardware. */
9274 	if (ioa_cfg->sis64) {
9275 		t->init_feedback_reg = base + p->init_feedback_reg;
9276 		t->dump_addr_reg = base + p->dump_addr_reg;
9277 		t->dump_data_reg = base + p->dump_data_reg;
9278 		t->endian_swap_reg = base + p->endian_swap_reg;
9283 * ipr_get_chip_info - Find adapter chip information
9284 * @dev_id: PCI device id struct
9287 * ptr to chip information on success / NULL on failure
9289 static const struct ipr_chip_t *
9290 ipr_get_chip_info(const struct pci_device_id *dev_id)
/* Linear scan of the static chip table for a vendor/device match. */
9294 	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9295 		if (ipr_chip[i].vendor == dev_id->vendor &&
9296 		    ipr_chip[i].device == dev_id->device)
9297 			return &ipr_chip[i];
9301 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9303 	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9304 	int i, err, vectors;
9306 	for (i = 0; i < ARRAY_SIZE(entries); ++i)
9307 		entries[i].entry = i;
/* pci_enable_msix returning > 0 means "only this many vectors are
 * available" -- retry with the reduced count until success or error. */
9309 	vectors = ipr_number_of_msix;
9311 	while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
9315 		pci_disable_msix(ioa_cfg->pdev);
/* Record the granted vectors for later request_irq/free_irq calls. */
9320 	for (i = 0; i < vectors; i++)
9321 		ioa_cfg->vectors_info[i].vec = entries[i].vector;
9322 	ioa_cfg->nvectors = vectors;
9328 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9330 	int i, err, vectors;
/* Like the MSI-X path: shrink the request while the PCI core reports
 * fewer vectors are available.  MSI vectors are consecutive IRQs
 * starting at pdev->irq. */
9332 	vectors = ipr_number_of_msix;
9334 	while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
9338 		pci_disable_msi(ioa_cfg->pdev);
9343 	for (i = 0; i < vectors; i++)
9344 		ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9345 	ioa_cfg->nvectors = vectors;
9351 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
/* Build a "host<no>-<vector>" description per vector for /proc/interrupts;
 * n reserves one byte for the NUL terminator. */
9353 	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9355 	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9356 		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9357 			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9358 		ioa_cfg->vectors_info[vec_idx].
9359 			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9363 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
/* Request IRQs for vectors 1..nvectors-1 (vector 0 is requested
 * elsewhere); on failure, free the vectors acquired so far. */
9367 	for (i = 1; i < ioa_cfg->nvectors; i++) {
9368 		rc = request_irq(ioa_cfg->vectors_info[i].vec,
9371 			ioa_cfg->vectors_info[i].desc,
9375 			free_irq(ioa_cfg->vectors_info[i].vec,
9384 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9385 * @pdev: PCI device struct
9387 * Description: Simply set the msi_received flag to 1 indicating that
9388 * Message Signaled Interrupts are supported.
9391 * 0 on success / non-zero on failure
9393 static irqreturn_t ipr_test_intr(int irq, void *devp)
9395 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9396 	unsigned long lock_flags = 0;
9397 	irqreturn_t rc = IRQ_HANDLED;
9399 	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9400 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Record that the test interrupt arrived and wake ipr_test_msi(). */
9402 	ioa_cfg->msi_received = 1;
9403 	wake_up(&ioa_cfg->msi_wait_q);
9405 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9410 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9411 * @pdev: PCI device struct
9413 * Description: The return value from pci_enable_msi() can not always be
9414 * trusted. This routine sets up and initiates a test interrupt to determine
9415 * if the interrupt is received via the ipr_test_intr() service routine.
9416 * If the tests fails, the driver will fall back to LSI.
9419 * 0 on success / non-zero on failure
9421 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9424 	volatile u32 int_reg;
9425 	unsigned long lock_flags = 0;
/* Prepare for the test: clear received flag, unmask only the debug-
 * acknowledge interrupt used as the test vector. */
9429 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9430 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
9431 	ioa_cfg->msi_received = 0;
9432 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9433 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
/* Read back to flush the posted write before releasing the lock. */
9434 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9435 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9437 	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9438 		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9440 		rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9442 		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9444 	} else if (ipr_debug)
9445 		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
/* Fire the test interrupt and wait up to 1 second for ipr_test_intr
 * to set msi_received. */
9447 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9448 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9449 	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9450 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9451 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9453 	if (!ioa_cfg->msi_received) {
9454 		/* MSI test failed */
9455 		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9457 	} else if (ipr_debug)
9458 		dev_info(&pdev->dev, "MSI test succeeded.\n");
9460 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Release the temporary test IRQ in either outcome. */
9462 	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9463 		free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9465 		free_irq(pdev->irq, ioa_cfg);
9472 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
9473 * @pdev: PCI device struct
9474 * @dev_id: PCI device id struct
9477 * 0 on success / non-zero on failure
9479 static int ipr_probe_ioa(struct pci_dev *pdev,
9480 const struct pci_device_id *dev_id)
9482 struct ipr_ioa_cfg *ioa_cfg;
9483 struct Scsi_Host *host;
9484 unsigned long ipr_regs_pci;
9485 void __iomem *ipr_regs;
9486 int rc = PCIBIOS_SUCCESSFUL;
/* volatile: these hold raw device-register snapshots read below */
9487 volatile u32 mask, uproc, interrupts;
9488 unsigned long lock_flags;
/* Stage 1: enable the PCI function */
9492 if ((rc = pci_enable_device(pdev))) {
9493 dev_err(&pdev->dev, "Cannot enable adapter\n");
9497 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
/* Stage 2: allocate the Scsi_Host; ioa_cfg lives in its hostdata */
9499 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9502 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9507 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9508 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9509 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
/* Stage 3: identify the chip from the PCI id and pull its config */
9511 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9513 if (!ioa_cfg->ipr_chip) {
9514 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9515 dev_id->vendor, dev_id->device);
9516 goto out_scsi_host_put;
9519 /* set SIS 32 or SIS 64 */
9520 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9521 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9522 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9523 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
/* Operational timeout: module parameter overrides everything, then the
 * per-device long-timeout flag, then the default. */
9525 if (ipr_transop_timeout)
9526 ioa_cfg->transop_timeout = ipr_transop_timeout;
9527 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9528 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9530 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9532 ioa_cfg->revid = pdev->revision;
/* Stage 4: claim and map BAR 0 (the adapter register window) */
9534 ipr_regs_pci = pci_resource_start(pdev, 0);
9536 rc = pci_request_regions(pdev, IPR_NAME);
9539 "Couldn't register memory range of registers\n");
9540 goto out_scsi_host_put;
9543 ipr_regs = pci_ioremap_bar(pdev, 0);
9547 "Couldn't map memory range of registers\n");
9549 goto out_release_regions;
9552 ioa_cfg->hdw_dma_regs = ipr_regs;
9553 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9554 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9556 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9558 pci_set_master(pdev);
/* Stage 5: DMA mask — try 64-bit on SIS64 hardware, fall back to 32-bit */
9560 if (ioa_cfg->sis64) {
9561 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9563 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9564 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9568 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9571 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9575 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9576 ioa_cfg->chip_cfg->cache_line_size);
9578 if (rc != PCIBIOS_SUCCESSFUL) {
9579 dev_err(&pdev->dev, "Write of cache line size failed\n");
/* Stage 6: interrupt mode selection — MSI-X, then MSI, then legacy LSI.
 * Clamp the module parameter to the driver's supported vector count. */
9584 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9585 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9586 IPR_MAX_MSIX_VECTORS);
9587 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9590 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9591 ipr_enable_msix(ioa_cfg) == 0)
9592 ioa_cfg->intr_flag = IPR_USE_MSIX;
9593 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9594 ipr_enable_msi(ioa_cfg) == 0)
9595 ioa_cfg->intr_flag = IPR_USE_MSI;
9597 ioa_cfg->intr_flag = IPR_USE_LSI;
9598 ioa_cfg->nvectors = 1;
9599 dev_info(&pdev->dev, "Cannot enable MSI.\n");
/* Verify MSI/MSI-X delivery actually works (some platforms route it
 * incorrectly); on -EOPNOTSUPP drop back to legacy LSI. */
9602 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9603 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9604 rc = ipr_test_msi(ioa_cfg, pdev);
9605 if (rc == -EOPNOTSUPP) {
9606 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9607 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9608 pci_disable_msi(pdev);
9609 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9610 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9611 pci_disable_msix(pdev);
9614 ioa_cfg->intr_flag = IPR_USE_LSI;
9615 ioa_cfg->nvectors = 1;
9618 goto out_msi_disable;
9620 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9621 dev_info(&pdev->dev,
9622 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9623 ioa_cfg->nvectors, pdev->irq);
9624 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9625 dev_info(&pdev->dev,
9626 "Request for %d MSIXs succeeded.",
/* One HRRQ per vector, bounded by online CPUs and the driver maximum */
9631 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9632 (unsigned int)num_online_cpus(),
9633 (unsigned int)IPR_MAX_HRRQ_NUM);
9635 /* Save away PCI config space for use following IOA reset */
9636 rc = pci_save_state(pdev);
9638 if (rc != PCIBIOS_SUCCESSFUL) {
9639 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9641 goto out_msi_disable;
9644 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9645 goto out_msi_disable;
9647 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9648 goto out_msi_disable;
/* Config table sizing differs between SIS64 and SIS32 layouts */
9651 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9652 + ((sizeof(struct ipr_config_table_entry64)
9653 * ioa_cfg->max_devs_supported)));
9655 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9656 + ((sizeof(struct ipr_config_table_entry)
9657 * ioa_cfg->max_devs_supported)));
9659 rc = ipr_alloc_mem(ioa_cfg);
9662 "Couldn't allocate enough memory for device driver!\n");
9663 goto out_msi_disable;
9667 * If HRRQ updated interrupt is not masked, or reset alert is set,
9668 * the card is in an unknown state and needs a hard reset
9670 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9671 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9672 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9673 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9674 ioa_cfg->needs_hard_reset = 1;
9675 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9676 ioa_cfg->needs_hard_reset = 1;
9677 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9678 ioa_cfg->ioa_unit_checked = 1;
9680 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9681 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9682 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Stage 7: install the real ISR(s) — one per MSI/MSI-X vector, or a
 * single shared handler for legacy interrupts. */
9684 if (ioa_cfg->intr_flag == IPR_USE_MSI
9685 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9686 name_msi_vectors(ioa_cfg);
9687 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9689 ioa_cfg->vectors_info[0].desc,
9692 rc = ipr_request_other_msi_irqs(ioa_cfg);
9694 rc = request_irq(pdev->irq, ipr_isr,
9696 IPR_NAME, &ioa_cfg->hrrq[0]);
9699 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
/* Obsidian-E revision 0 parts need a PCI warm reset instead of BIST */
9704 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9705 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9706 ioa_cfg->needs_warm_reset = 1;
9707 ioa_cfg->reset = ipr_reset_slot_reset;
9709 ioa_cfg->reset = ipr_reset_start_bist;
/* Publish the adapter on the global list (consumed by ipr_halt() etc.) */
9711 spin_lock(&ipr_driver_lock);
9712 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9713 spin_unlock(&ipr_driver_lock);
/* Error unwind: each label below releases everything acquired after the
 * corresponding point in the happy path, in reverse order. */
9720 ipr_free_mem(ioa_cfg);
9722 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9723 pci_disable_msi(pdev);
9724 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9725 pci_disable_msix(pdev);
9728 out_release_regions:
9729 pci_release_regions(pdev);
9731 scsi_host_put(host);
9733 pci_disable_device(pdev);
9738 * ipr_scan_vsets - Scans for VSET devices
9739 * @ioa_cfg: ioa config struct
9741 * Description: Since the VSET resources do not follow SAM in that we can have
9742 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9747 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
/* VSETs may have sparse LUNs with no LUN 0 (see header comment above),
 * so probe every target/LUN combination on the VSET bus explicitly
 * rather than relying on the normal SCSI LUN scan. */
9751 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9752 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9753 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9757 * ipr_initiate_ioa_bringdown - Bring down an adapter
9758 * @ioa_cfg: ioa config struct
9759 * @shutdown_type: shutdown type
9761 * Description: This function will initiate bringing down the adapter.
9762 * This consists of issuing an IOA shutdown to the adapter
9763 * to flush the cache, and running BIST.
9764 * If the caller needs to wait on the completion of the reset,
9765 * the caller must sleep on the reset_wait_q.
9770 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9771 enum ipr_shutdown_type shutdown_type)
/* Abandon any dump in progress — the adapter is going away */
9774 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9775 ioa_cfg->sdt_state = ABORT_DUMP;
/* Mark bringdown and kick off the (asynchronous) reset; callers that
 * need completion must sleep on reset_wait_q (see header comment). */
9776 ioa_cfg->reset_retries = 0;
9777 ioa_cfg->in_ioa_bringdown = 1;
9778 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9783 * __ipr_remove - Remove a single adapter
9784 * @pdev: pci device struct
9786 * Adapter hot plug remove entry point.
9791 static void __ipr_remove(struct pci_dev *pdev)
9793 unsigned long host_lock_flags = 0;
9794 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Wait out any reset/reload already in flight; the lock is dropped
 * while sleeping and retaken to re-check the flag. */
9798 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9799 while (ioa_cfg->in_reset_reload) {
9800 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9801 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9802 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Flag every HRRQ so no new commands are accepted during removal */
9805 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9806 spin_lock(&ioa_cfg->hrrq[i]._lock);
9807 ioa_cfg->hrrq[i].removing_ioa = 1;
9808 spin_unlock(&ioa_cfg->hrrq[i]._lock);
/* Shut the adapter down cleanly (flushes write cache) and wait for
 * the bringdown reset to finish before tearing anything down. */
9811 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9813 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9814 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9815 flush_work(&ioa_cfg->work_q);
9816 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9817 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Unpublish from the global adapter list used by ipr_halt() */
9819 spin_lock(&ipr_driver_lock);
9820 list_del(&ioa_cfg->queue);
9821 spin_unlock(&ipr_driver_lock);
9823 if (ioa_cfg->sdt_state == ABORT_DUMP)
9824 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9825 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9827 ipr_free_all_resources(ioa_cfg);
9833 * ipr_remove - IOA hot plug remove entry point
9834 * @pdev: pci device struct
9836 * Adapter hot plug remove entry point.
9841 static void ipr_remove(struct pci_dev *pdev)
9843 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Tear down sysfs trace/dump attributes and the SCSI host before the
 * low-level removal (__ipr_remove, invoked past this view). */
9847 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9849 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9851 scsi_remove_host(ioa_cfg->host);
9859 * ipr_probe - Adapter hot plug add entry point
9862 * 0 on success / non-zero on failure
9864 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9866 struct ipr_ioa_cfg *ioa_cfg;
/* Stage 1 probe: allocate and map everything (ipr_probe_ioa), then
 * stage 2 brings the adapter operational. */
9869 rc = ipr_probe_ioa(pdev, dev_id);
9874 ioa_cfg = pci_get_drvdata(pdev);
9875 rc = ipr_probe_ioa_part2(ioa_cfg);
9882 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
/* sysfs trace/dump attributes; on failure unwind the SCSI host too */
9889 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9893 scsi_remove_host(ioa_cfg->host);
9898 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9902 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9904 scsi_remove_host(ioa_cfg->host);
/* Discover devices: normal scan, then the sparse-LUN VSET scan, then
 * the IOA's own resource entry. */
9909 scsi_scan_host(ioa_cfg->host);
9910 ipr_scan_vsets(ioa_cfg);
9911 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9912 ioa_cfg->allow_ml_add_del = 1;
9913 ioa_cfg->host->max_channel = IPR_VSET_BUS;
9914 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
/* blk-iopoll only pays off on SIS64 with multiple vectors; HRRQ 0 is
 * left on the interrupt path (loop starts at i = 1). */
9916 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
9917 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9918 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9919 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
9920 ioa_cfg->iopoll_weight, ipr_iopoll);
9921 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
9925 schedule_work(&ioa_cfg->work_q);
9930 * ipr_shutdown - Shutdown handler.
9931 * @pdev: pci device struct
9933 * This function is invoked upon system shutdown/reboot. It will issue
9934 * an adapter shutdown to the adapter to flush the write cache.
9939 static void ipr_shutdown(struct pci_dev *pdev)
9941 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9942 unsigned long lock_flags = 0;
9945 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Disable iopoll on the secondary HRRQs (mirrors the enable in
 * ipr_probe; HRRQ 0 never uses iopoll, hence i starts at 1). */
9946 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
9947 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9948 ioa_cfg->iopoll_weight = 0;
9949 for (i = 1; i < ioa_cfg->hrrq_num; i++)
9950 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
/* Wait for any in-flight reset/reload, dropping the lock while asleep */
9953 while (ioa_cfg->in_reset_reload) {
9954 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9955 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9956 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Issue a normal shutdown (flushes the write cache) and block until
 * the bringdown completes. */
9959 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9960 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9961 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/*
 * PCI IDs claimed by this driver. Entries are matched on vendor/device
 * plus IBM subsystem IDs; the final field is driver_data, carrying
 * per-device quirk flags (IPR_USE_LONG_TRANSOP_TIMEOUT,
 * IPR_USE_PCI_WARM_RESET) consumed in ipr_probe_ioa().
 */
9964 static struct pci_device_id ipr_pci_table[] = {
9965 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9966 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
9967 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9968 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
9969 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9970 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
9971 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9972 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
9973 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9974 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
9975 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9976 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
9977 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9978 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
9979 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9980 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9981 IPR_USE_LONG_TRANSOP_TIMEOUT },
9982 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9983 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9984 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9985 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9986 IPR_USE_LONG_TRANSOP_TIMEOUT },
9987 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9988 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9989 IPR_USE_LONG_TRANSOP_TIMEOUT },
9990 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9991 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9992 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9993 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9994 IPR_USE_LONG_TRANSOP_TIMEOUT},
9995 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9996 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9997 IPR_USE_LONG_TRANSOP_TIMEOUT },
9998 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9999 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10000 IPR_USE_LONG_TRANSOP_TIMEOUT },
10001 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10002 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10003 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10004 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10005 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10006 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10007 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10008 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10009 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10010 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10011 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10012 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10013 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10014 IPR_USE_LONG_TRANSOP_TIMEOUT },
10015 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10016 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10017 IPR_USE_LONG_TRANSOP_TIMEOUT },
10018 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10019 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10020 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10021 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10022 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10023 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10024 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10025 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10026 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10027 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10028 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10029 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10030 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10031 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10032 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10033 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10034 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10035 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10036 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10037 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10038 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10039 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10040 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10041 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10042 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10043 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10044 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10045 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10046 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10047 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10050 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/* PCI EEH/AER error recovery callbacks (handlers defined earlier in file) */
10052 static const struct pci_error_handlers ipr_err_handler = {
10053 .error_detected = ipr_pci_error_detected,
10054 .slot_reset = ipr_pci_slot_reset,
/* PCI driver registration: ties the id table to the probe/remove/shutdown
 * entry points and the error-recovery handlers above. */
10057 static struct pci_driver ipr_driver = {
10059 .id_table = ipr_pci_table,
10060 .probe = ipr_probe,
10061 .remove = ipr_remove,
10062 .shutdown = ipr_shutdown,
10063 .err_handler = &ipr_err_handler,
10067 * ipr_halt_done - Shutdown prepare completion
10072 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
/* Completion for the shutdown-prepare command issued by ipr_halt():
 * just return the command block to its HRRQ free list. */
10074 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10078 * ipr_halt - Issue shutdown prepare to all adapters
10081 * NOTIFY_OK on success / NOTIFY_DONE on failure
10083 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10085 struct ipr_cmnd *ipr_cmd;
10086 struct ipr_ioa_cfg *ioa_cfg;
10087 unsigned long flags = 0;
/* Only act on actual restart/halt/power-off events */
10089 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10090 return NOTIFY_DONE;
10092 spin_lock(&ipr_driver_lock);
/* Walk every registered adapter and send it a shutdown-prepare, so each
 * IOA flushes its write cache before the system goes down. */
10094 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10095 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* Skip adapters that are not currently accepting commands */
10096 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
10097 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
/* Build and fire IPR_IOA_SHUTDOWN (prepare-for-normal); completion just
 * recycles the command via ipr_halt_done(). */
10101 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10102 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10103 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10104 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10105 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10107 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10108 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10110 spin_unlock(&ipr_driver_lock);
/* Reboot notifier wiring ipr_halt() into system shutdown/restart.
 * (Initializer fields are not visible in this view; presumably
 * .notifier_call = ipr_halt — confirm against full source.) */
10115 static struct notifier_block ipr_notifier = {
10120 * ipr_init - Module entry point
10123 * 0 on success / negative value on failure
10125 static int __init ipr_init(void)
10127 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10128 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
/* Register the reboot notifier first so ipr_halt() can flush adapter
 * caches on shutdown, then register the PCI driver (triggers probes). */
10130 register_reboot_notifier(&ipr_notifier);
10131 return pci_register_driver(&ipr_driver);
10135 * ipr_exit - Module unload
10137 * Module unload entry point.
10142 static void __exit ipr_exit(void)
/* Reverse of ipr_init(): drop the reboot notifier, then unregister the
 * PCI driver (runs ipr_remove for each bound adapter). */
10144 unregister_reboot_notifier(&ipr_notifier);
10145 pci_unregister_driver(&ipr_driver);
/* Module entry/exit registration */
10148 module_init(ipr_init);
10149 module_exit(ipr_exit);