2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
/*
 * Module-global driver state and tunables.
 *
 * ipr_ioa_head / ipr_driver_lock: list of every adapter instance managed
 * by this driver and the spinlock protecting that list.  The remaining
 * variables are tunables exposed as module parameters by the
 * module_param_named() calls later in this file.
 *
 * NOTE(review): this copy of the file appears garbled by extraction --
 * every line carries its original line-number prefix and some lines are
 * missing.  Only comments were added in this review; no code tokens were
 * changed.
 */
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static unsigned int ipr_number_of_msix = 2;
102 static DEFINE_SPINLOCK(ipr_driver_lock);
104 /* This table describes the differences between DMA controller chips */
/*
 * Per-chip-family MMIO register offset tables, referenced by ipr_chip[]
 * below.  Entry 0 = SIS-32 Gemstone class, entry 1 = SIS-32 Snipe/Scamp,
 * entry 2 = a SIS-64 family (presumably CRoC/Crocodile -- its opening
 * "{ /" comment line appears to have been lost in extraction).
 *
 * NOTE(review): the mailbox/set-mode fields and the "}," delimiters
 * between entries also appear elided; verify against the upstream
 * drivers/scsi/ipr.c before building.
 */
105 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
106 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
109 .cache_line_size = 0x20,
113 .set_interrupt_mask_reg = 0x0022C,
114 .clr_interrupt_mask_reg = 0x00230,
115 .clr_interrupt_mask_reg32 = 0x00230,
116 .sense_interrupt_mask_reg = 0x0022C,
117 .sense_interrupt_mask_reg32 = 0x0022C,
118 .clr_interrupt_reg = 0x00228,
119 .clr_interrupt_reg32 = 0x00228,
120 .sense_interrupt_reg = 0x00224,
121 .sense_interrupt_reg32 = 0x00224,
122 .ioarrin_reg = 0x00404,
123 .sense_uproc_interrupt_reg = 0x00214,
124 .sense_uproc_interrupt_reg32 = 0x00214,
125 .set_uproc_interrupt_reg = 0x00214,
126 .set_uproc_interrupt_reg32 = 0x00214,
127 .clr_uproc_interrupt_reg = 0x00218,
128 .clr_uproc_interrupt_reg32 = 0x00218
131 { /* Snipe and Scamp */
134 .cache_line_size = 0x20,
138 .set_interrupt_mask_reg = 0x00288,
139 .clr_interrupt_mask_reg = 0x0028C,
140 .clr_interrupt_mask_reg32 = 0x0028C,
141 .sense_interrupt_mask_reg = 0x00288,
142 .sense_interrupt_mask_reg32 = 0x00288,
143 .clr_interrupt_reg = 0x00284,
144 .clr_interrupt_reg32 = 0x00284,
145 .sense_interrupt_reg = 0x00280,
146 .sense_interrupt_reg32 = 0x00280,
147 .ioarrin_reg = 0x00504,
148 .sense_uproc_interrupt_reg = 0x00290,
149 .sense_uproc_interrupt_reg32 = 0x00290,
150 .set_uproc_interrupt_reg = 0x00290,
151 .set_uproc_interrupt_reg32 = 0x00290,
152 .clr_uproc_interrupt_reg = 0x00294,
153 .clr_uproc_interrupt_reg32 = 0x00294
/* Third entry: SIS-64 chips; note the extra init-feedback/dump/endian
 * registers that only exist on this family. */
159 .cache_line_size = 0x20,
163 .set_interrupt_mask_reg = 0x00010,
164 .clr_interrupt_mask_reg = 0x00018,
165 .clr_interrupt_mask_reg32 = 0x0001C,
166 .sense_interrupt_mask_reg = 0x00010,
167 .sense_interrupt_mask_reg32 = 0x00014,
168 .clr_interrupt_reg = 0x00008,
169 .clr_interrupt_reg32 = 0x0000C,
170 .sense_interrupt_reg = 0x00000,
171 .sense_interrupt_reg32 = 0x00004,
172 .ioarrin_reg = 0x00070,
173 .sense_uproc_interrupt_reg = 0x00020,
174 .sense_uproc_interrupt_reg32 = 0x00024,
175 .set_uproc_interrupt_reg = 0x00020,
176 .set_uproc_interrupt_reg32 = 0x00024,
177 .clr_uproc_interrupt_reg = 0x00028,
178 .clr_uproc_interrupt_reg32 = 0x0002C,
179 .init_feedback_reg = 0x0005C,
180 .dump_addr_reg = 0x00064,
181 .dump_data_reg = 0x00068,
182 .endian_swap_reg = 0x00084
/*
 * PCI vendor/device -> chip description map.  Each row selects the
 * interrupt style (LSI vs MSI), SIS generation (32 vs 64 bit), config
 * access method (PCI config space vs MMIO) and the register-offset
 * table from ipr_chip_cfg[] for one supported adapter chip.
 * NOTE(review): the closing "};" of this array appears elided.
 */
187 static const struct ipr_chip_t ipr_chip[] = {
188 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
189 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
194 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
/* SCSI bus rate limits indexed by the max_speed module parameter (0-2).
 * NOTE(review): the closing "};" of this array appears elided. */
199 static int ipr_max_bus_speeds[] = {
200 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
/*
 * Module metadata and parameter declarations.  Each module_param_named()
 * binds one of the file-scope tunables above to a /sys/module parameter;
 * only fastfail and debug are writable at runtime (S_IWUSR).
 */
203 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
204 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
205 module_param_named(max_speed, ipr_max_speed, uint, 0);
206 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
207 module_param_named(log_level, ipr_log_level, uint, 0);
208 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
209 module_param_named(testmode, ipr_testmode, int, 0);
210 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
211 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
212 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
213 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
214 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
215 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
216 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
217 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
218 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
219 module_param_named(max_devs, ipr_max_devs, int, 0);
220 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
221 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
222 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
223 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(IPR_DRIVER_VERSION);
227 /* A constant array of IOASCs/URCs/Error Messages */
/*
 * IOASC -> error message lookup table.  Each visible entry appears to be
 * { IOASC code, flag, log level, message string } -- presumably the flag
 * selects whether the IOASA is logged; verify field meaning against
 * struct ipr_error_table_t in ipr.h.
 *
 * NOTE(review): many entries here show only the message-string line; the
 * line carrying their "{0xXXXXXXXX, ..." initializer appears to have been
 * lost in extraction.  The closing "};" of the array also appears elided.
 * No entries were altered in this review.
 */
229 struct ipr_error_table_t ipr_error_table[] = {
230 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
231 "8155: An unknown error was received"},
233 "Soft underlength error"},
235 "Command to be cancelled not found"},
237 "Qualified success"},
238 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
239 "FFFE: Soft device bus error recovered by the IOA"},
240 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
241 "4101: Soft device bus fabric error"},
242 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
243 "FFFC: Logical block guard error recovered by the device"},
244 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
245 "FFFC: Logical block reference tag error recovered by the device"},
246 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
247 "4171: Recovered scatter list tag / sequence number error"},
248 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
249 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
250 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
251 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
252 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
253 "FFFD: Recovered logical block reference tag error detected by the IOA"},
254 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
255 "FFFD: Logical block guard error recovered by the IOA"},
256 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
257 "FFF9: Device sector reassign successful"},
258 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
259 "FFF7: Media error recovered by device rewrite procedures"},
260 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
261 "7001: IOA sector reassignment successful"},
262 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
263 "FFF9: Soft media error. Sector reassignment recommended"},
264 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
265 "FFF7: Media error recovered by IOA rewrite procedures"},
266 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
267 "FF3D: Soft PCI bus error recovered by the IOA"},
268 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
269 "FFF6: Device hardware error recovered by the IOA"},
270 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
271 "FFF6: Device hardware error recovered by the device"},
272 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
273 "FF3D: Soft IOA error recovered by the IOA"},
274 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
275 "FFFA: Undefined device response recovered by the IOA"},
276 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
277 "FFF6: Device bus error, message or command phase"},
278 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
279 "FFFE: Task Management Function failed"},
280 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
281 "FFF6: Failure prediction threshold exceeded"},
282 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
283 "8009: Impending cache battery pack failure"},
285 "Logical Unit in process of becoming ready"},
287 "Initializing command required"},
289 "34FF: Disk device format in progress"},
291 "Logical unit not accessible, target port in unavailable state"},
292 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
293 "9070: IOA requested reset"},
295 "Synchronization required"},
297 "IOA microcode download required"},
299 "Device bus connection is prohibited by host"},
301 "No ready, IOA shutdown"},
303 "Not ready, IOA has been shutdown"},
304 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
305 "3020: Storage subsystem configuration error"},
307 "FFF5: Medium error, data unreadable, recommend reassign"},
309 "7000: Medium error, data unreadable, do not reassign"},
310 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
311 "FFF3: Disk media format bad"},
312 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
313 "3002: Addressed device failed to respond to selection"},
314 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
315 "3100: Device bus error"},
316 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
317 "3109: IOA timed out a device command"},
319 "3120: SCSI bus is not operational"},
320 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
321 "4100: Hard device bus fabric error"},
322 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
323 "310C: Logical block guard error detected by the device"},
324 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
325 "310C: Logical block reference tag error detected by the device"},
326 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
327 "4170: Scatter list tag / sequence number error"},
328 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
329 "8150: Logical block CRC error on IOA to Host transfer"},
330 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
331 "4170: Logical block sequence number error on IOA to Host transfer"},
332 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
333 "310D: Logical block reference tag error detected by the IOA"},
334 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
335 "310D: Logical block guard error detected by the IOA"},
336 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
337 "9000: IOA reserved area data check"},
338 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
339 "9001: IOA reserved area invalid data pattern"},
340 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
341 "9002: IOA reserved area LRC error"},
342 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
343 "Hardware Error, IOA metadata access error"},
344 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
345 "102E: Out of alternate sectors for disk storage"},
346 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
347 "FFF4: Data transfer underlength error"},
348 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
349 "FFF4: Data transfer overlength error"},
350 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
351 "3400: Logical unit failure"},
352 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
353 "FFF4: Device microcode is corrupt"},
354 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
355 "8150: PCI bus error"},
357 "Unsupported device bus message received"},
358 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
359 "FFF4: Disk device problem"},
360 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
361 "8150: Permanent IOA failure"},
362 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
363 "3010: Disk device returned wrong response to IOA"},
364 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
365 "8151: IOA microcode error"},
367 "Device bus status error"},
368 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
369 "8157: IOA error requiring IOA reset to recover"},
371 "ATA device status error"},
373 "Message reject received from the device"},
374 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
375 "8008: A permanent cache battery pack failure occurred"},
376 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
377 "9090: Disk unit has been modified after the last known status"},
378 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
379 "9081: IOA detected device error"},
380 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
381 "9082: IOA detected device error"},
382 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
383 "3110: Device bus error, message or command phase"},
384 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
385 "3110: SAS Command / Task Management Function failed"},
386 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
387 "9091: Incorrect hardware configuration change has been detected"},
388 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
389 "9073: Invalid multi-adapter configuration"},
390 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
391 "4010: Incorrect connection between cascaded expanders"},
392 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
393 "4020: Connections exceed IOA design limits"},
394 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
395 "4030: Incorrect multipath connection"},
396 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
397 "4110: Unsupported enclosure function"},
398 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
399 "4120: SAS cable VPD cannot be read"},
400 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
401 "FFF4: Command to logical unit failed"},
403 "Illegal request, invalid request type or request packet"},
405 "Illegal request, invalid resource handle"},
407 "Illegal request, commands not allowed to this device"},
409 "Illegal request, command not allowed to a secondary adapter"},
411 "Illegal request, command not allowed to a non-optimized resource"},
413 "Illegal request, invalid field in parameter list"},
415 "Illegal request, parameter not supported"},
417 "Illegal request, parameter value invalid"},
419 "Illegal request, command sequence error"},
421 "Illegal request, dual adapter support not enabled"},
423 "Illegal request, another cable connector was physically disabled"},
425 "Illegal request, inconsistent group id/group count"},
426 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
427 "9031: Array protection temporarily suspended, protection resuming"},
428 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
429 "9040: Array protection temporarily suspended, protection resuming"},
430 {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
431 "4080: IOA exceeded maximum operating temperature"},
432 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
433 "4085: Service required"},
434 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
435 "3140: Device bus not ready to ready transition"},
436 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
437 "FFFB: SCSI bus was reset"},
439 "FFFE: SCSI bus transition to single ended"},
441 "FFFE: SCSI bus transition to LVD"},
442 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
443 "FFFB: SCSI bus was reset by another initiator"},
444 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
445 "3029: A device replacement has occurred"},
446 {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
447 "4102: Device bus fabric performance degradation"},
448 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
449 "9051: IOA cache data exists for a missing or failed device"},
450 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
451 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
452 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
453 "9025: Disk unit is not supported at its physical location"},
454 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
455 "3020: IOA detected a SCSI bus configuration error"},
456 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
457 "3150: SCSI bus configuration error"},
458 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
459 "9074: Asymmetric advanced function disk configuration"},
460 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
461 "4040: Incomplete multipath connection between IOA and enclosure"},
462 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
463 "4041: Incomplete multipath connection between enclosure and device"},
464 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
465 "9075: Incomplete multipath connection between IOA and remote IOA"},
466 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
467 "9076: Configuration error, missing remote IOA"},
468 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
469 "4050: Enclosure does not support a required multipath function"},
470 {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
471 "4121: Configuration error, required cable is missing"},
472 {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
473 "4122: Cable is not plugged into the correct location on remote IOA"},
474 {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
475 "4123: Configuration error, invalid cable vital product data"},
476 {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
477 "4124: Configuration error, both cable ends are plugged into the same IOA"},
478 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
479 "4070: Logically bad block written on device"},
480 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
481 "9041: Array protection temporarily suspended"},
482 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
483 "9042: Corrupt array parity detected on specified device"},
484 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
485 "9030: Array no longer protected due to missing or failed disk unit"},
486 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
487 "9071: Link operational transition"},
488 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
489 "9072: Link not operational transition"},
490 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
491 "9032: Array exposed but still protected"},
492 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
493 "70DD: Device forced failed by disrupt device command"},
494 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
495 "4061: Multipath redundancy level got better"},
496 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
497 "4060: Multipath redundancy level got worse"},
499 "Failure due to other device"},
500 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
501 "9008: IOA does not support functions expected by devices"},
502 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
503 "9010: Cache data associated with attached devices cannot be found"},
504 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
505 "9011: Cache data belongs to devices other than those attached"},
506 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
507 "9020: Array missing 2 or more devices with only 1 device present"},
508 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
509 "9021: Array missing 2 or more devices with 2 or more devices present"},
510 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
511 "9022: Exposed array is missing a required device"},
512 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
513 "9023: Array member(s) not at required physical locations"},
514 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
515 "9024: Array not functional due to present hardware configuration"},
516 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
517 "9026: Array not functional due to present hardware configuration"},
518 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
519 "9027: Array is missing a device and parity is out of sync"},
520 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
521 "9028: Maximum number of arrays already exist"},
522 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
523 "9050: Required cache data cannot be located for a disk unit"},
524 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
525 "9052: Cache data exists for a device that has been modified"},
526 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
527 "9054: IOA resources not available due to previous problems"},
528 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
529 "9092: Disk unit requires initialization before use"},
530 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
531 "9029: Incorrect hardware configuration change has been detected"},
532 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
533 "9060: One or more disk pairs are missing from an array"},
534 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
535 "9061: One or more disks are missing from an array"},
536 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
537 "9062: One or more disks are missing from an array"},
538 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
539 "9063: Maximum number of functional arrays has been exceeded"},
541 "Data protect, other volume set problem"},
543 "Aborted command, invalid descriptor"},
545 "Target operating conditions have changed, dual adapter takeover"},
547 "Aborted command, medium removal prevented"},
549 "Command terminated by host"},
551 "Aborted command, command terminated by host"}
/*
 * SCSI enclosure services (SES) device table: product-id string, a
 * 16-char compare mask (presumably 'X' = compare this byte, '*' =
 * wildcard -- verify against the lookup code), and max bus speed in
 * MB/s.  NOTE(review): the closing "};" appears elided.
 */
554 static const struct ipr_ses_table_entry ipr_ses_table[] = {
555 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
556 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
557 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
558 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
559 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
560 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
561 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
562 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
563 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
564 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
565 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
566 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
567 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
/* Forward declarations for routines defined later in the file. */
571 * Function Prototypes
573 static int ipr_reset_alert(struct ipr_cmnd *);
574 static void ipr_process_ccn(struct ipr_cmnd *);
575 static void ipr_process_error(struct ipr_cmnd *);
576 static void ipr_reset_ioa_job(struct ipr_cmnd *);
577 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
578 enum ipr_shutdown_type);
/*
 * Driver trace support: when CONFIG_SCSI_IPR_TRACE is set, record one
 * entry per event in the adapter's circular trace buffer (trace_index is
 * advanced atomically and wrapped modulo IPR_NUM_TRACE_ENTRIES);
 * otherwise the hook compiles to a no-op macro.
 * NOTE(review): the function's opening brace, the "else" of the sis64
 * conditional, the closing brace and the "#else"/"#endif" lines appear
 * to have been lost in extraction -- restore them from upstream.
 */
580 #ifdef CONFIG_SCSI_IPR_TRACE
582 * ipr_trc_hook - Add a trace entry to the driver trace
583 * @ipr_cmd: ipr command struct
585 * @add_data: additional data
590 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
591 u8 type, u32 add_data)
593 struct ipr_trace_entry *trace_entry;
594 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
596 trace_entry = &ioa_cfg->trace[atomic_add_return
597 (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
598 trace_entry->time = jiffies;
599 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
600 trace_entry->type = type;
/* sis64 adapters keep the ATA command in a different ioadl layout. */
601 if (ipr_cmd->ioa_cfg->sis64)
602 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
604 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
605 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
606 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
607 trace_entry->u.add_data = add_data;
611 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
/*
 * Completion wrapper that takes the SCSI host lock around the command's
 * done() callback; installed as fast_done for internally issued commands.
 */
615 * ipr_lock_and_done - Acquire lock and complete command
616 * @ipr_cmd: ipr command struct
621 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
623 unsigned long lock_flags;
624 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
626 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
627 ipr_cmd->done(ipr_cmd);
628 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/*
 * Reset a command block to a pristine state for reuse: clear the command
 * packet (preserving the HRRQ id), zero transfer lengths, repoint the
 * ioadl address for the adapter's SIS generation, and clear the IOASA
 * status fields.  NOTE(review): the function's opening/closing braces,
 * the "else" of the sis64 branch, and the hrrq_id declaration line
 * appear to have been lost in extraction.
 */
632 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
633 * @ipr_cmd: ipr command struct
638 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
640 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
641 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
642 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
643 dma_addr_t dma_addr = ipr_cmd->dma_addr;
/* Preserve the HRRQ id across the memset of the command packet. */
646 hrrq_id = ioarcb->cmd_pkt.hrrq_id;
647 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
648 ioarcb->cmd_pkt.hrrq_id = hrrq_id;
649 ioarcb->data_transfer_length = 0;
650 ioarcb->read_data_transfer_length = 0;
651 ioarcb->ioadl_len = 0;
652 ioarcb->read_ioadl_len = 0;
/* sis64 uses a single 64-bit ioadl address; sis32 has split r/w addrs. */
654 if (ipr_cmd->ioa_cfg->sis64) {
655 ioarcb->u.sis64_addr_data.data_ioadl_addr =
656 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
657 ioasa64->u.gata.status = 0;
659 ioarcb->write_ioadl_addr =
660 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
661 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
662 ioasa->u.gata.status = 0;
665 ioasa->hdr.ioasc = 0;
666 ioasa->hdr.residual_data_len = 0;
667 ipr_cmd->scsi_cmd = NULL;
669 ipr_cmd->sense_buffer[0] = 0;
670 ipr_cmd->dma_use_sg = 0;
/*
 * Fully initialize a command block: reinit the reusable fields, clear
 * the scratch/sibling state, install the fast-path completion callback
 * and (re)initialize the command timer.
 */
674 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
675 * @ipr_cmd: ipr command struct
680 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
681 void (*fast_done) (struct ipr_cmnd *))
683 ipr_reinit_ipr_cmnd(ipr_cmd);
684 ipr_cmd->u.scratch = 0;
685 ipr_cmd->sibling = NULL;
686 ipr_cmd->fast_done = fast_done;
687 init_timer(&ipr_cmd->timer);
/*
 * Pop the first free command block off the given HRRQ's free list, or
 * leave ipr_cmd NULL if the list is empty.  Caller holds the queue lock.
 * NOTE(review): the trailing "return ipr_cmd;" and closing braces appear
 * to have been lost in extraction.
 */
691 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
692 * @ioa_cfg: ioa config struct
695 * pointer to ipr command struct
698 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
700 struct ipr_cmnd *ipr_cmd = NULL;
702 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
703 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
704 struct ipr_cmnd, queue);
705 list_del(&ipr_cmd->queue);
/*
 * Get a free command block from the initialization HRRQ and initialize
 * it with the lock-taking completion wrapper.
 * NOTE(review): the trailing "return ipr_cmd;" appears elided.
 */
713 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
714 * @ioa_cfg: ioa config struct
717 * pointer to ipr command struct
720 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
722 struct ipr_cmnd *ipr_cmd =
723 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
724 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
/*
 * Disable interrupt delivery on every HRRQ, mask all adapter interrupts
 * (64-bit write on sis64, presumably gated by an elided "if (sis64)" /
 * "else" pair around the writeq/writel), clear the requested pending
 * interrupts, and read the sense register back to flush the writes.
 */
729 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
730 * @ioa_cfg: ioa config struct
731 * @clr_ints: interrupts to clear
733 * This function masks all interrupts on the adapter, then clears the
734 * interrupts specified in the mask
739 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
742 volatile u32 int_reg;
745 /* Stop new interrupts */
746 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
747 spin_lock(&ioa_cfg->hrrq[i]._lock);
748 ioa_cfg->hrrq[i].allow_interrupts = 0;
749 spin_unlock(&ioa_cfg->hrrq[i]._lock);
753 /* Set interrupt mask to stop all new interrupts */
755 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
757 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
759 /* Clear any pending interrupts */
761 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
762 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
/* Read back to post the MMIO writes before returning. */
763 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/*
 * Save the PCI-X command register so it can be restored after a chip
 * reset, forcing data-parity-error recovery and relaxed ordering on.
 * NOTE(review): the "return 0;" / "return -EIO;" statements and closing
 * braces appear to have been lost in extraction.
 */
767 * ipr_save_pcix_cmd_reg - Save PCI-X command register
768 * @ioa_cfg: ioa config struct
771 * 0 on success / -EIO on failure
773 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
775 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
/* Not a PCI-X device: nothing to save. */
777 if (pcix_cmd_reg == 0)
780 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
781 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
782 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
786 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
/*
 * Restore the previously saved PCI-X command register after an adapter
 * reset.  NOTE(review): the check that the capability exists and the
 * return statements appear to have been lost in extraction.
 */
791 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
792 * @ioa_cfg: ioa config struct
795 * 0 on success / -EIO on failure
797 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
799 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
802 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
803 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
804 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
/*
 * Completion handler for aborted SATA ops: flag the queued command with
 * a generic error, mark the port busy, and return the command block to
 * its HRRQ free list.  NOTE(review): the libata qc completion call that
 * upstream makes here appears to have been lost in extraction.
 */
813 * ipr_sata_eh_done - done function for aborted SATA commands
814 * @ipr_cmd: ipr command struct
816 * This function is invoked for ops generated to SATA
817 * devices which are being aborted.
822 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
824 struct ata_queued_cmd *qc = ipr_cmd->qc;
825 struct ipr_sata_port *sata_port = qc->ap->private_data;
827 qc->err_mask |= AC_ERR_OTHER;
828 sata_port->ioasa.status |= ATA_BUSY;
829 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
/*
 * Completion handler for aborted SCSI mid-layer ops: mark the command
 * failed (DID_ERROR), unmap its DMA, complete it back to the mid-layer,
 * and return the command block to its HRRQ free list.
 */
834 * ipr_scsi_eh_done - mid-layer done function for aborted ops
835 * @ipr_cmd: ipr command struct
837 * This function is invoked by the interrupt handler for
838 * ops generated by the SCSI mid-layer which are being aborted.
843 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
845 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
847 scsi_cmd->result |= (DID_ERROR << 16);
849 scsi_dma_unmap(ipr_cmd->scsi_cmd);
850 scsi_cmd->scsi_done(scsi_cmd);
851 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
/*
 * Fail every command still pending on every HRRQ: stamp each IOASA with
 * an IOA-was-reset IOASC, route completion through the appropriate
 * abort handler (SCSI vs SATA), cancel its timer, and invoke done().
 */
855 * ipr_fail_all_ops - Fails all outstanding ops.
856 * @ioa_cfg: ioa config struct
858 * This function fails all outstanding ops.
863 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
865 struct ipr_cmnd *ipr_cmd, *temp;
866 struct ipr_hrr_queue *hrrq;
869 for_each_hrrq(hrrq, ioa_cfg) {
870 spin_lock(&hrrq->_lock);
871 list_for_each_entry_safe(ipr_cmd,
872 temp, &hrrq->hrrq_pending_q, queue) {
873 list_del(&ipr_cmd->queue);
/* Record why the op failed in its IOASA before completing it. */
875 ipr_cmd->s.ioasa.hdr.ioasc =
876 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
877 ipr_cmd->s.ioasa.hdr.ilid =
878 cpu_to_be32(IPR_DRIVER_ILID);
880 if (ipr_cmd->scsi_cmd)
881 ipr_cmd->done = ipr_scsi_eh_done;
882 else if (ipr_cmd->qc)
883 ipr_cmd->done = ipr_sata_eh_done;
885 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
886 IPR_IOASC_IOA_WAS_RESET);
887 del_timer(&ipr_cmd->timer);
888 ipr_cmd->done(ipr_cmd);
890 spin_unlock(&hrrq->_lock);
/*
 * Hand a command to the adapter via the IOARRIN doorbell.  On sis64 the
 * low address bits encode the IOARCB size (bit 0 = 256 bytes; bit 2
 * selects a 512-byte IOARCB when the scatter list exceeds 128 bytes)
 * and a 64-bit write is used; sis32 uses a plain 32-bit write.
 * NOTE(review): the "else" between the writeq and writel paths appears
 * to have been lost in extraction.
 */
896 * ipr_send_command - Send driver initiated requests.
897 * @ipr_cmd: ipr command struct
899 * This function sends a command to the adapter using the correct write call.
900 * In the case of sis64, calculate the ioarcb size required. Then or in the
906 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
908 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
909 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
911 if (ioa_cfg->sis64) {
912 /* The default size is 256 bytes */
913 send_dma_addr |= 0x1;
915 /* If the number of ioadls * size of ioadl > 128 bytes,
916 then use a 512 byte ioarcb */
917 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
918 send_dma_addr |= 0x4;
919 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
921 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
/*
 * Queue a driver-initiated request: put it on the pending queue, install
 * the completion callback, arm the per-command timeout timer, trace the
 * start, and ring the adapter doorbell.
 */
925 * ipr_do_req - Send driver initiated requests.
926 * @ipr_cmd: ipr command struct
927 * @done: done function
928 * @timeout_func: timeout function
929 * @timeout: timeout value
931 * This function sends the specified command to the adapter with the
932 * timeout given. The done function is invoked on command completion.
937 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
938 void (*done) (struct ipr_cmnd *),
939 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
941 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
943 ipr_cmd->done = done;
/* Legacy (pre-timer_setup) timer API: data carries the cmd pointer. */
945 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
946 ipr_cmd->timer.expires = jiffies + timeout;
947 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
949 add_timer(&ipr_cmd->timer);
951 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
953 ipr_send_command(ipr_cmd);
957  * ipr_internal_cmd_done - Op done function for an internally generated op.
958  * @ipr_cmd:	ipr command struct
960  * This function is the op done function for an internally generated,
961  * blocking op. It simply wakes the sleeping thread.
966 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
968 	if (ipr_cmd->sibling)
969 		ipr_cmd->sibling = NULL;
	/* Wake whoever is blocked in ipr_send_blocking_cmd(). */
971 		complete(&ipr_cmd->completion);
975  * ipr_init_ioadl - initialize the ioadl for the correct SIS type
976  * @ipr_cmd:	ipr command struct
977  * @dma_addr:	dma address
978  * @len:	transfer length
979  * @flags:	ioadl flag value
981  * This function initializes an ioadl in the case where there is only a single
987 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
990 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
991 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	/* Single-descriptor scatter list covering the whole buffer. */
993 	ipr_cmd->dma_use_sg = 1;
995 	if (ipr_cmd->ioa_cfg->sis64) {
996 		ioadl64->flags = cpu_to_be32(flags);
997 		ioadl64->data_len = cpu_to_be32(len);
998 		ioadl64->address = cpu_to_be64(dma_addr);
1000 		ipr_cmd->ioarcb.ioadl_len =
1001 			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1002 		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		/* Legacy IOARCB packs flags and length into one word. */
1004 		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1005 		ioadl->address = cpu_to_be32(dma_addr);
		/* 32-bit adapters keep separate read/write ioadl fields;
		 * fill whichever direction this transfer uses. */
1007 		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1008 			ipr_cmd->ioarcb.read_ioadl_len =
1009 				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1010 			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1012 			ipr_cmd->ioarcb.ioadl_len =
1013 				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1014 			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1020  * ipr_send_blocking_cmd - Send command and sleep on its completion.
1021  * @ipr_cmd:	ipr command struct
1022  * @timeout_func:	function to invoke if command times out
1028 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1029 				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1032 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1034 	init_completion(&ipr_cmd->completion);
1035 	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
	/* Caller holds host_lock; drop it while sleeping so the completion
	 * (and the rest of the driver) can make progress, then retake it. */
1037 	spin_unlock_irq(ioa_cfg->host->host_lock);
1038 	wait_for_completion(&ipr_cmd->completion);
1039 	spin_lock_irq(ioa_cfg->host->host_lock);
/* Pick an HRR queue for a new op.  Queue 0 is reserved (single-queue case /
 * initialization); with multiple queues, round-robin across queues 1..n-1. */
1042 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1044 	if (ioa_cfg->hrrq_num == 1)
1047 	return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
1051  * ipr_send_hcam - Send an HCAM to the adapter.
1052  * @ioa_cfg:	ioa config struct
1054  * @hostrcb:	hostrcb struct
1056  * This function will send a Host Controlled Async command to the adapter.
1057  * If HCAMs are currently not allowed to be issued to the adapter, it will
1058  * place the hostrcb on the free queue.
1063 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1064 			  struct ipr_hostrcb *hostrcb)
1066 	struct ipr_cmnd *ipr_cmd;
1067 	struct ipr_ioarcb *ioarcb;
1069 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1070 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1071 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1072 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1074 		ipr_cmd->u.hostrcb = hostrcb;
1075 		ioarcb = &ipr_cmd->ioarcb;
		/* Build the HCAM CDB: cdb[1] selects CCN vs. error log;
		 * cdb[7..8] carry the hostrcb buffer length, big-endian. */
1077 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1078 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1079 		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1080 		ioarcb->cmd_pkt.cdb[1] = type;
1081 		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1082 		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1084 		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1085 			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1087 		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1088 			ipr_cmd->done = ipr_process_ccn;
1090 			ipr_cmd->done = ipr_process_error;
1092 		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1094 		ipr_send_command(ipr_cmd);
		/* HCAMs not allowed right now: park the hostrcb for later. */
1096 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1101  * ipr_update_ata_class - Update the ata class in the resource entry
1102  * @res:	resource entry struct
1103  * @proto:	cfgte device bus protocol value
1108 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
	/* Map the config-table bus protocol onto a libata device class. */
1111 	case IPR_PROTO_SATA:
1112 	case IPR_PROTO_SAS_STP:
1113 		res->ata_class = ATA_DEV_ATA;
1115 	case IPR_PROTO_SATA_ATAPI:
1116 	case IPR_PROTO_SAS_STP_ATAPI:
1117 		res->ata_class = ATA_DEV_ATAPI;
1120 		res->ata_class = ATA_DEV_UNKNOWN;
1126  * ipr_init_res_entry - Initialize a resource entry struct.
1127  * @res:	resource entry struct
1128  * @cfgtew:	config table entry wrapper struct
1133 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1134 			       struct ipr_config_table_entry_wrapper *cfgtew)
1138 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1139 	struct ipr_resource_entry *gscsi_res = NULL;
1141 	res->needs_sync_complete = 0;
1144 	res->del_from_ml = 0;
1145 	res->resetting_device = 0;
1146 	res->reset_occurred = 0;
1148 	res->sata_port = NULL;
1150 	if (ioa_cfg->sis64) {
1151 		proto = cfgtew->u.cfgte64->proto;
1152 		res->res_flags = cfgtew->u.cfgte64->res_flags;
1153 		res->qmodel = IPR_QUEUEING_MODEL64(res);
1154 		res->type = cfgtew->u.cfgte64->res_type;
1156 		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1157 			sizeof(res->res_path));
1160 		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1161 			sizeof(res->dev_lun.scsi_lun));
1162 		res->lun = scsilun_to_int(&res->dev_lun);
		/* sis64 has no fixed bus/target addressing: synthesize a
		 * target id.  Generic SCSI devices that share a dev_id reuse
		 * the same target; otherwise allocate from a per-class
		 * bitmap (target_ids / array_ids / vset_ids). */
1164 		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1165 			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1166 				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1168 					res->target = gscsi_res->target;
1173 				res->target = find_first_zero_bit(ioa_cfg->target_ids,
1174 								  ioa_cfg->max_devs_supported);
1175 				set_bit(res->target, ioa_cfg->target_ids);
1177 		} else if (res->type == IPR_RES_TYPE_IOAFP) {
1178 			res->bus = IPR_IOAFP_VIRTUAL_BUS;
1180 		} else if (res->type == IPR_RES_TYPE_ARRAY) {
1181 			res->bus = IPR_ARRAY_VIRTUAL_BUS;
1182 			res->target = find_first_zero_bit(ioa_cfg->array_ids,
1183 							  ioa_cfg->max_devs_supported);
1184 			set_bit(res->target, ioa_cfg->array_ids);
1185 		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1186 			res->bus = IPR_VSET_VIRTUAL_BUS;
1187 			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1188 							  ioa_cfg->max_devs_supported);
1189 			set_bit(res->target, ioa_cfg->vset_ids);
1191 			res->target = find_first_zero_bit(ioa_cfg->target_ids,
1192 							  ioa_cfg->max_devs_supported);
1193 			set_bit(res->target, ioa_cfg->target_ids);
		/* Legacy config table carries the bus/target/lun directly. */
1196 		proto = cfgtew->u.cfgte->proto;
1197 		res->qmodel = IPR_QUEUEING_MODEL(res);
1198 		res->flags = cfgtew->u.cfgte->flags;
1199 		if (res->flags & IPR_IS_IOA_RESOURCE)
1200 			res->type = IPR_RES_TYPE_IOAFP;
1202 			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1204 		res->bus = cfgtew->u.cfgte->res_addr.bus;
1205 		res->target = cfgtew->u.cfgte->res_addr.target;
1206 		res->lun = cfgtew->u.cfgte->res_addr.lun;
1207 		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1210 	ipr_update_ata_class(res, proto);
1214  * ipr_is_same_device - Determine if two devices are the same.
1215  * @res:	resource entry struct
1216  * @cfgtew:	config table entry wrapper struct
1219  *	1 if the devices are the same / 0 otherwise
1221 static int ipr_is_same_device(struct ipr_resource_entry *res,
1222 			      struct ipr_config_table_entry_wrapper *cfgtew)
1224 	if (res->ioa_cfg->sis64) {
		/* sis64 identity is (dev_id, lun); legacy identity is the
		 * bus/target/lun resource address. */
1225 		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1226 					sizeof(cfgtew->u.cfgte64->dev_id)) &&
1227 			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1228 					sizeof(cfgtew->u.cfgte64->lun))) {
1232 		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1233 		    res->target == cfgtew->u.cfgte->res_addr.target &&
1234 		    res->lun == cfgtew->u.cfgte->res_addr.lun)
1242  * __ipr_format_res_path - Format the resource path for printing.
1243  * @res_path:	resource path
1245  * @len:	length of buffer provided
1250 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
	/* Emit "XX-XX-..." hex segments; 0xff in res_path terminates the
	 * path.  (i * 3) bounds the output: each segment needs 3 chars. */
1256 	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1257 	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1258 		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1264 * ipr_format_res_path - Format the resource path for printing.
1265 * @ioa_cfg: ioa config struct
1266 * @res_path: resource path
1268 * @len: length of buffer provided
1273 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1274 u8 *res_path, char *buffer, int len)
1279 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1280 __ipr_format_res_path(res_path, p, len - (buffer - p));
1285  * ipr_update_res_entry - Update the resource entry.
1286  * @res:	resource entry struct
1287  * @cfgtew:	config table entry wrapper struct
1292 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1293 				 struct ipr_config_table_entry_wrapper *cfgtew)
1295 	char buffer[IPR_MAX_RES_PATH_LENGTH];
1299 	if (res->ioa_cfg->sis64) {
1300 		res->flags = cfgtew->u.cfgte64->flags;
1301 		res->res_flags = cfgtew->u.cfgte64->res_flags;
1302 		res->type = cfgtew->u.cfgte64->res_type;
1304 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1305 			sizeof(struct ipr_std_inq_data));
1307 		res->qmodel = IPR_QUEUEING_MODEL64(res);
1308 		proto = cfgtew->u.cfgte64->proto;
1309 		res->res_handle = cfgtew->u.cfgte64->res_handle;
1310 		res->dev_id = cfgtew->u.cfgte64->dev_id;
1312 		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1313 			sizeof(res->dev_lun.scsi_lun));
		/* Only note a "new path" when the resource path actually
		 * changed, so the log message below fires once per move. */
1315 		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1316 			sizeof(res->res_path))) {
1317 			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1318 				sizeof(res->res_path));
1322 		if (res->sdev && new_path)
1323 			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1324 				    ipr_format_res_path(res->ioa_cfg,
1325 					res->res_path, buffer, sizeof(buffer)));
1327 		res->flags = cfgtew->u.cfgte->flags;
1328 		if (res->flags & IPR_IS_IOA_RESOURCE)
1329 			res->type = IPR_RES_TYPE_IOAFP;
1331 			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1333 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1334 			sizeof(struct ipr_std_inq_data));
1336 		res->qmodel = IPR_QUEUEING_MODEL(res);
1337 		proto = cfgtew->u.cfgte->proto;
1338 		res->res_handle = cfgtew->u.cfgte->res_handle;
1341 	ipr_update_ata_class(res, proto);
1345  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1347  * @res:	resource entry struct
1348  * @cfgtew:	config table entry wrapper struct
1353 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1355 	struct ipr_resource_entry *gscsi_res = NULL;
1356 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	/* Target-id bitmaps only exist on sis64 adapters. */
1358 	if (!ioa_cfg->sis64)
1361 	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1362 		clear_bit(res->target, ioa_cfg->array_ids);
1363 	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1364 		clear_bit(res->target, ioa_cfg->vset_ids);
1365 	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		/* Generic SCSI targets may be shared: only release the id
		 * when no other in-use resource has the same dev_id. */
1366 		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1367 			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1369 		clear_bit(res->target, ioa_cfg->target_ids);
1371 	} else if (res->bus == 0)
1372 		clear_bit(res->target, ioa_cfg->target_ids);
1376  * ipr_handle_config_change - Handle a config change from the adapter
1377  * @ioa_cfg:	ioa config struct
1383 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1384 				     struct ipr_hostrcb *hostrcb)
1386 	struct ipr_resource_entry *res = NULL;
1387 	struct ipr_config_table_entry_wrapper cfgtew;
1388 	__be32 cc_res_handle;
1392 	if (ioa_cfg->sis64) {
1393 		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1394 		cc_res_handle = cfgtew.u.cfgte64->res_handle;
1396 		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1397 		cc_res_handle = cfgtew.u.cfgte->res_handle;
	/* Look up the resource this change notification refers to. */
1400 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1401 		if (res->res_handle == cc_res_handle) {
	/* Unknown resource: allocate a new entry from the free list.  If
	 * none are free, just re-issue the HCAM and drop the change. */
1408 		if (list_empty(&ioa_cfg->free_res_q)) {
1409 			ipr_send_hcam(ioa_cfg,
1410 				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1415 		res = list_entry(ioa_cfg->free_res_q.next,
1416 				 struct ipr_resource_entry, queue);
1418 		list_del(&res->queue);
1419 		ipr_init_res_entry(res, &cfgtew);
1420 		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1423 	ipr_update_res_entry(res, &cfgtew);
1425 	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		/* Device removed: either hand it to the worker thread to
		 * tear down the sdev, or free the entry immediately. */
1427 			res->del_from_ml = 1;
1428 			res->res_handle = IPR_INVALID_RES_HANDLE;
1429 			schedule_work(&ioa_cfg->work_q);
1431 			ipr_clear_res_target(res);
1432 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1434 	} else if (!res->sdev || res->del_from_ml) {
1436 			schedule_work(&ioa_cfg->work_q);
	/* Re-arm the CCN HCAM so we keep receiving change notifications. */
1439 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1443  * ipr_process_ccn - Op done function for a CCN.
1444  * @ipr_cmd:	ipr command struct
1446  * This function is the op done function for a configuration
1447  * change notification host controlled async from the adapter.
1452 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1454 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1455 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1456 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1458 	list_del(&hostrcb->queue);
1459 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	/* On failure, log it (unless we caused it by resetting the IOA)
	 * and re-issue the HCAM; on success, process the config change. */
1462 		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1463 			dev_err(&ioa_cfg->pdev->dev,
1464 				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1466 		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1468 		ipr_handle_config_change(ioa_cfg, hostrcb);
1473  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1474  * @i:		index into buffer
1475  * @buf:		string to modify
1477  * This function will strip all trailing whitespace, pad the end
1478  * of the string with a single space, and NULL terminate the string.
1481  *	new length of string
1483 static int strip_and_pad_whitespace(int i, char *buf)
	/* Walk backwards from index i over trailing spaces. */
1485 	while (i && buf[i] == ' ')
1493  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1494  * @prefix:	string to print at start of printk
1495  * @hostrcb:	hostrcb pointer
1496  * @vpd:		vendor/product id/sn struct
1501 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1502 				struct ipr_vpd *vpd)
	/* +3: room for two separator spaces and the terminating NUL. */
1504 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	/* Concatenate vendor, product and serial number, trimming the
	 * trailing blanks of each field down to a single space. */
1507 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1508 	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1510 	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1511 	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1513 	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1514 	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1516 	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1520  * ipr_log_vpd - Log the passed VPD to the error log.
1521  * @vpd:		vendor/product id/sn struct
1526 static void ipr_log_vpd(struct ipr_vpd *vpd)
	/* VPD fields are fixed-width and not NUL-terminated; copy them
	 * into a local buffer and terminate before printing. */
1528 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1529 		    + IPR_SERIAL_NUM_LEN];
1531 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1532 	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1534 	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1535 	ipr_err("Vendor/Product ID: %s\n", buffer);
1537 	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1538 	buffer[IPR_SERIAL_NUM_LEN] = '\0';
1539 	ipr_err("    Serial Number: %s\n", buffer);
1543  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1544  * @prefix:	string to print at start of printk
1545  * @hostrcb:	hostrcb pointer
1546  * @vpd:		vendor/product id/sn/wwn struct
1551 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1552 				    struct ipr_ext_vpd *vpd)
	/* Extended VPD = plain VPD plus the 64-bit WWN, logged as two
	 * big-endian 32-bit words. */
1554 	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1555 	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1556 		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1560  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1561  * @vpd:		vendor/product id/sn/wwn struct
1566 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
	/* Log the base VPD, then the 64-bit WWN as two 32-bit words. */
1568 	ipr_log_vpd(&vpd->vpd);
1569 	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1570 		be32_to_cpu(vpd->wwid[1]));
1574  * ipr_log_enhanced_cache_error - Log a cache error.
1575  * @ioa_cfg:	ioa config struct
1576  * @hostrcb:	hostrcb struct
1581 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1582 					 struct ipr_hostrcb *hostrcb)
1584 	struct ipr_hostrcb_type_12_error *error;
	/* sis64 adapters report via the 64-bit error layout. */
1587 		error = &hostrcb->hcam.u.error64.u.type_12_error;
1589 		error = &hostrcb->hcam.u.error.u.type_12_error;
1591 	ipr_err("-----Current Configuration-----\n");
1592 	ipr_err("Cache Directory Card Information:\n");
1593 	ipr_log_ext_vpd(&error->ioa_vpd);
1594 	ipr_err("Adapter Card Information:\n");
1595 	ipr_log_ext_vpd(&error->cfc_vpd);
1597 	ipr_err("-----Expected Configuration-----\n");
1598 	ipr_err("Cache Directory Card Information:\n");
1599 	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1600 	ipr_err("Adapter Card Information:\n");
1601 	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1603 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1604 		     be32_to_cpu(error->ioa_data[0]),
1605 		     be32_to_cpu(error->ioa_data[1]),
1606 		     be32_to_cpu(error->ioa_data[2]));
1610  * ipr_log_cache_error - Log a cache error.
1611  * @ioa_cfg:	ioa config struct
1612  * @hostrcb:	hostrcb struct
1617 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1618 				struct ipr_hostrcb *hostrcb)
	/* Legacy (non-enhanced) cache error: type 02 layout, plain VPD. */
1620 	struct ipr_hostrcb_type_02_error *error =
1621 		&hostrcb->hcam.u.error.u.type_02_error;
1623 	ipr_err("-----Current Configuration-----\n");
1624 	ipr_err("Cache Directory Card Information:\n");
1625 	ipr_log_vpd(&error->ioa_vpd);
1626 	ipr_err("Adapter Card Information:\n");
1627 	ipr_log_vpd(&error->cfc_vpd);
1629 	ipr_err("-----Expected Configuration-----\n");
1630 	ipr_err("Cache Directory Card Information:\n");
1631 	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1632 	ipr_err("Adapter Card Information:\n");
1633 	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1635 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1636 		     be32_to_cpu(error->ioa_data[0]),
1637 		     be32_to_cpu(error->ioa_data[1]),
1638 		     be32_to_cpu(error->ioa_data[2]));
1642  * ipr_log_enhanced_config_error - Log a configuration error.
1643  * @ioa_cfg:	ioa config struct
1644  * @hostrcb:	hostrcb struct
1649 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1650 					  struct ipr_hostrcb *hostrcb)
1652 	int errors_logged, i;
1653 	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1654 	struct ipr_hostrcb_type_13_error *error;
1656 	error = &hostrcb->hcam.u.error.u.type_13_error;
1657 	errors_logged = be32_to_cpu(error->errors_logged);
1659 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1660 		be32_to_cpu(error->errors_detected), errors_logged);
1662 	dev_entry = error->dev;
	/* One entry per logged device error, with old/new device VPD. */
1664 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1667 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1668 		ipr_log_ext_vpd(&dev_entry->vpd);
1670 		ipr_err("-----New Device Information-----\n");
1671 		ipr_log_ext_vpd(&dev_entry->new_vpd);
1673 		ipr_err("Cache Directory Card Information:\n");
1674 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1676 		ipr_err("Adapter Card Information:\n");
1677 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1682  * ipr_log_sis64_config_error - Log a device error.
1683  * @ioa_cfg:	ioa config struct
1684  * @hostrcb:	hostrcb struct
1689 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1690 				       struct ipr_hostrcb *hostrcb)
1692 	int errors_logged, i;
1693 	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1694 	struct ipr_hostrcb_type_23_error *error;
1695 	char buffer[IPR_MAX_RES_PATH_LENGTH];
1697 	error = &hostrcb->hcam.u.error64.u.type_23_error;
1698 	errors_logged = be32_to_cpu(error->errors_logged);
1700 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1701 		be32_to_cpu(error->errors_detected), errors_logged);
1703 	dev_entry = error->dev;
	/* sis64 entries identify devices by resource path, not bus/id. */
1705 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1708 		ipr_err("Device %d : %s", i + 1,
1709 			__ipr_format_res_path(dev_entry->res_path,
1710 					      buffer, sizeof(buffer)));
1711 		ipr_log_ext_vpd(&dev_entry->vpd);
1713 		ipr_err("-----New Device Information-----\n");
1714 		ipr_log_ext_vpd(&dev_entry->new_vpd);
1716 		ipr_err("Cache Directory Card Information:\n");
1717 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1719 		ipr_err("Adapter Card Information:\n");
1720 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1725  * ipr_log_config_error - Log a configuration error.
1726  * @ioa_cfg:	ioa config struct
1727  * @hostrcb:	hostrcb struct
1732 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1733 				 struct ipr_hostrcb *hostrcb)
1735 	int errors_logged, i;
1736 	struct ipr_hostrcb_device_data_entry *dev_entry;
1737 	struct ipr_hostrcb_type_03_error *error;
1739 	error = &hostrcb->hcam.u.error.u.type_03_error;
1740 	errors_logged = be32_to_cpu(error->errors_logged);
1742 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1743 		be32_to_cpu(error->errors_detected), errors_logged);
1745 	dev_entry = error->dev;
	/* Legacy type 03 entry: plain VPD plus five words of IOA data. */
1747 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1750 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1751 		ipr_log_vpd(&dev_entry->vpd);
1753 		ipr_err("-----New Device Information-----\n");
1754 		ipr_log_vpd(&dev_entry->new_vpd);
1756 		ipr_err("Cache Directory Card Information:\n");
1757 		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1759 		ipr_err("Adapter Card Information:\n");
1760 		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1762 		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1763 			be32_to_cpu(dev_entry->ioa_data[0]),
1764 			be32_to_cpu(dev_entry->ioa_data[1]),
1765 			be32_to_cpu(dev_entry->ioa_data[2]),
1766 			be32_to_cpu(dev_entry->ioa_data[3]),
1767 			be32_to_cpu(dev_entry->ioa_data[4]));
1772  * ipr_log_enhanced_array_error - Log an array configuration error.
1773  * @ioa_cfg:	ioa config struct
1774  * @hostrcb:	hostrcb struct
1779 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1780 					 struct ipr_hostrcb *hostrcb)
1783 	struct ipr_hostrcb_type_14_error *error;
1784 	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	/* All-'0' serial number marks an unused/empty member slot. */
1785 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1787 	error = &hostrcb->hcam.u.error.u.type_14_error;
1791 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1792 		error->protection_level,
1793 		ioa_cfg->host->host_no,
1794 		error->last_func_vset_res_addr.bus,
1795 		error->last_func_vset_res_addr.target,
1796 		error->last_func_vset_res_addr.lun);
1800 	array_entry = error->array_member;
	/* Clamp the adapter-supplied count to the struct's capacity. */
1801 	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1802 			    ARRAY_SIZE(error->array_member));
1804 	for (i = 0; i < num_entries; i++, array_entry++) {
1805 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1808 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1809 			ipr_err("Exposed Array Member %d:\n", i);
1811 			ipr_err("Array Member %d:\n", i);
1813 		ipr_log_ext_vpd(&array_entry->vpd);
1814 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1815 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1816 				 "Expected Location");
1823  * ipr_log_array_error - Log an array configuration error.
1824  * @ioa_cfg:	ioa config struct
1825  * @hostrcb:	hostrcb struct
1830 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1831 				struct ipr_hostrcb *hostrcb)
1834 	struct ipr_hostrcb_type_04_error *error;
1835 	struct ipr_hostrcb_array_data_entry *array_entry;
	/* All-'0' serial number marks an unused/empty member slot. */
1836 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1838 	error = &hostrcb->hcam.u.error.u.type_04_error;
1842 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1843 		error->protection_level,
1844 		ioa_cfg->host->host_no,
1845 		error->last_func_vset_res_addr.bus,
1846 		error->last_func_vset_res_addr.target,
1847 		error->last_func_vset_res_addr.lun);
1851 	array_entry = error->array_member;
	/* Type 04 layout has a fixed 18 member slots, split across the
	 * array_member and array_member2 fields. */
1853 	for (i = 0; i < 18; i++) {
1854 		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1857 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1858 			ipr_err("Exposed Array Member %d:\n", i);
1860 			ipr_err("Array Member %d:\n", i);
1862 		ipr_log_vpd(&array_entry->vpd);
1864 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1865 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1866 				 "Expected Location");
		/* Continue in the second member array. */
1871 			array_entry = error->array_member2;
1878  * ipr_log_hex_data - Log additional hex IOA error data.
1879  * @ioa_cfg:	ioa config struct
1880  * @data:		IOA error data
1886 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
	/* At the default log level, cap the dump length. */
1893 	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1894 		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
	/* i is a 32-bit-word index; each line prints 4 words, so the
	 * leading "%08X:" offset is i*4 bytes. */
1896 	for (i = 0; i < len / 4; i += 4) {
1897 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1898 			be32_to_cpu(data[i]),
1899 			be32_to_cpu(data[i+1]),
1900 			be32_to_cpu(data[i+2]),
1901 			be32_to_cpu(data[i+3]));
1906  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1907  * @ioa_cfg:	ioa config struct
1908  * @hostrcb:	hostrcb struct
1913 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1914 					    struct ipr_hostrcb *hostrcb)
1916 	struct ipr_hostrcb_type_17_error *error;
1919 		error = &hostrcb->hcam.u.error64.u.type_17_error;
1921 		error = &hostrcb->hcam.u.error.u.type_17_error;
	/* Force NUL termination before trimming/printing the adapter-
	 * supplied failure reason string. */
1923 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1924 	strim(error->failure_reason);
1926 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1927 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1928 	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	/* Hex-dump whatever trails the fixed part of the type 17 record. */
1929 	ipr_log_hex_data(ioa_cfg, error->data,
1930 			 be32_to_cpu(hostrcb->hcam.length) -
1931 			 (offsetof(struct ipr_hostrcb_error, u) +
1932 			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1936  * ipr_log_dual_ioa_error - Log a dual adapter error.
1937  * @ioa_cfg:	ioa config struct
1938  * @hostrcb:	hostrcb struct
1943 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1944 				   struct ipr_hostrcb *hostrcb)
1946 	struct ipr_hostrcb_type_07_error *error;
1948 	error = &hostrcb->hcam.u.error.u.type_07_error;
	/* Force NUL termination before trimming/printing the adapter-
	 * supplied failure reason string. */
1949 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1950 	strim(error->failure_reason);
1952 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1953 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1954 	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	/* Hex-dump whatever trails the fixed part of the type 07 record. */
1955 	ipr_log_hex_data(ioa_cfg, error->data,
1956 			 be32_to_cpu(hostrcb->hcam.length) -
1957 			 (offsetof(struct ipr_hostrcb_error, u) +
1958 			  offsetof(struct ipr_hostrcb_type_07_error, data)));
/* Human-readable names for the IPR_PATH_ACTIVE_MASK bits of a fabric
 * descriptor's path_state. */
1961 static const struct {
1964 } path_active_desc[] = {
1965 	{ IPR_PATH_NO_INFO, "Path" },
1966 	{ IPR_PATH_ACTIVE, "Active path" },
1967 	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
/* Human-readable names for the IPR_PATH_STATE_MASK bits of a fabric
 * descriptor's path_state. */
1970 static const struct {
1973 } path_state_desc[] = {
1974 	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1975 	{ IPR_PATH_HEALTHY, "is healthy" },
1976 	{ IPR_PATH_DEGRADED, "is degraded" },
1977 	{ IPR_PATH_FAILED, "is failed" }
1981  * ipr_log_fabric_path - Log a fabric path error
1982  * @hostrcb:	hostrcb struct
1983  * @fabric:		fabric descriptor
1988 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1989 				struct ipr_hostrcb_fabric_desc *fabric)
1992 	u8 path_state = fabric->path_state;
1993 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1994 	u8 state = path_state & IPR_PATH_STATE_MASK;
	/* Find the matching active/state descriptions, then print port,
	 * cascade and phy — 0xff in a field means "not applicable". */
1996 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1997 		if (path_active_desc[i].active != active)
2000 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2001 			if (path_state_desc[j].state != state)
2004 			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2005 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2006 					     path_active_desc[i].desc, path_state_desc[j].desc,
2008 			} else if (fabric->cascaded_expander == 0xff) {
2009 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2010 					     path_active_desc[i].desc, path_state_desc[j].desc,
2011 					     fabric->ioa_port, fabric->phy);
2012 			} else if (fabric->phy == 0xff) {
2013 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2014 					     path_active_desc[i].desc, path_state_desc[j].desc,
2015 					     fabric->ioa_port, fabric->cascaded_expander);
2017 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2018 					     path_active_desc[i].desc, path_state_desc[j].desc,
2019 					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
	/* No table entry matched: log the raw path state. */
2025 	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2026 		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2030  * ipr_log64_fabric_path - Log a fabric path error
2031  * @hostrcb:	hostrcb struct
2032  * @fabric:		fabric descriptor
2037 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2038 				  struct ipr_hostrcb64_fabric_desc *fabric)
2041 	u8 path_state = fabric->path_state;
2042 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2043 	u8 state = path_state & IPR_PATH_STATE_MASK;
2044 	char buffer[IPR_MAX_RES_PATH_LENGTH];
	/* Same lookup as ipr_log_fabric_path(), but sis64 descriptors
	 * identify the path by resource path instead of port/cascade/phy. */
2046 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2047 		if (path_active_desc[i].active != active)
2050 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2051 			if (path_state_desc[j].state != state)
2054 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2055 				     path_active_desc[i].desc, path_state_desc[j].desc,
2056 				     ipr_format_res_path(hostrcb->ioa_cfg,
2058 						buffer, sizeof(buffer)));
	/* No table entry matched: log the raw path state. */
2063 	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2064 		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2065 				    buffer, sizeof(buffer)));
/* Human-readable names for the IPR_PATH_CFG_TYPE_MASK bits of a path
 * config element's type_status. */
2068 static const struct {
2071 } path_type_desc[] = {
2072 	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
2073 	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
2074 	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2075 	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
/* Human-readable names for the IPR_PATH_CFG_STATUS_MASK bits of a path
 * config element's type_status. */
2078 static const struct {
2081 } path_status_desc[] = {
2082 	{ IPR_PATH_CFG_NO_PROB, "Functional" },
2083 	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
2084 	{ IPR_PATH_CFG_FAILED, "Failed" },
2085 	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
2086 	{ IPR_PATH_NOT_DETECTED, "Missing" },
2087 	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
/* Link rate strings, indexed by (link_rate & IPR_PHY_LINK_RATE_MASK). */
2090 static const char *link_rate[] = {
2093 	"phy reset problem",
2110  * ipr_log_path_elem - Log a fabric path element.
2111  * @hostrcb:	hostrcb struct
2112  * @cfg:		fabric path element struct
2117 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2118 			      struct ipr_hostrcb_config_element *cfg)
2121 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2122 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	/* Nothing to report for a non-existent element. */
2124 	if (type == IPR_PATH_CFG_NOT_EXIST)
2127 	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2128 		if (path_type_desc[i].type != type)
2131 		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2132 			if (path_status_desc[j].status != status)
			/* 0xff in cascade/phy means "not applicable";
			 * pick the message that matches what is valid. */
2135 			if (type == IPR_PATH_CFG_IOA_PORT) {
2136 				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2137 					     path_status_desc[j].desc, path_type_desc[i].desc,
2138 					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2139 					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2141 				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2142 					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2143 						     path_status_desc[j].desc, path_type_desc[i].desc,
2144 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2145 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2146 				} else if (cfg->cascaded_expander == 0xff) {
2147 					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2148 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2149 						     path_type_desc[i].desc, cfg->phy,
2150 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2151 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2152 				} else if (cfg->phy == 0xff) {
2153 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2154 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2155 						     path_type_desc[i].desc, cfg->cascaded_expander,
2156 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2157 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2159 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2160 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2161 						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2162 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2163 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
	/* No table entry matched: log the raw element fields. */
2170 	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2171 		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2172 		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2173 		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2177 * ipr_log64_path_elem - Log a fabric path element.
2178 * @hostrcb: hostrcb struct
2179 * @cfg: fabric path element struct
2184 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2185 struct ipr_hostrcb64_config_element *cfg)
2188 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2189 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2190 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2191 char buffer[IPR_MAX_RES_PATH_LENGTH];
/* Only SIS64 descriptors for existing path elements are loggable. */
2193 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2196 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2197 if (path_type_desc[i].type != type)
2200 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2201 if (path_status_desc[j].status != status)
/* SIS64 elements identify themselves by resource path, not cascade/phy. */
2204 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2205 path_status_desc[j].desc, path_type_desc[i].desc,
2206 ipr_format_res_path(hostrcb->ioa_cfg,
2207 cfg->res_path, buffer, sizeof(buffer)),
2208 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2209 be32_to_cpu(cfg->wwid[0]),
2210 be32_to_cpu(cfg->wwid[1]));
/* Fallback for unrecognized type/status combinations. */
2214 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2215 "WWN=%08X%08X\n", cfg->type_status,
2216 ipr_format_res_path(hostrcb->ioa_cfg,
2217 cfg->res_path, buffer, sizeof(buffer)),
2218 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2223 * ipr_log_fabric_error - Log a fabric error.
2224 * @ioa_cfg: ioa config struct
2225 * @hostrcb: hostrcb struct
2230 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2231 struct ipr_hostrcb *hostrcb)
2233 struct ipr_hostrcb_type_20_error *error;
2234 struct ipr_hostrcb_fabric_desc *fabric;
2235 struct ipr_hostrcb_config_element *cfg;
2238 error = &hostrcb->hcam.u.error.u.type_20_error;
/* Force NUL termination before printing the adapter-supplied string. */
2239 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2240 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
/* Bytes remaining after the fixed portion; consumed as fabric
 * descriptors are walked, with the tail hex-dumped at the end. */
2242 add_len = be32_to_cpu(hostrcb->hcam.length) -
2243 (offsetof(struct ipr_hostrcb_error, u) +
2244 offsetof(struct ipr_hostrcb_type_20_error, desc));
2246 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2247 ipr_log_fabric_path(hostrcb, fabric);
2248 for_each_fabric_cfg(fabric, cfg)
2249 ipr_log_path_elem(hostrcb, cfg);
2251 add_len -= be16_to_cpu(fabric->length);
/* Advance to the next variable-length fabric descriptor. */
2252 fabric = (struct ipr_hostrcb_fabric_desc *)
2253 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2256 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2260 * ipr_log_sis64_array_error - Log a sis64 array error.
2261 * @ioa_cfg: ioa config struct
2262 * @hostrcb: hostrcb struct
2267 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2268 struct ipr_hostrcb *hostrcb)
2271 struct ipr_hostrcb_type_24_error *error;
2272 struct ipr_hostrcb64_array_data_entry *array_entry;
2273 char buffer[IPR_MAX_RES_PATH_LENGTH];
/* All-'0' serial number marks an unused array member slot. */
2274 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2276 error = &hostrcb->hcam.u.error64.u.type_24_error;
2280 ipr_err("RAID %s Array Configuration: %s\n",
2281 error->protection_level,
2282 ipr_format_res_path(ioa_cfg, error->last_res_path,
2283 buffer, sizeof(buffer)));
2287 array_entry = error->array_member;
/* Clamp to the array size so a bogus count can't overrun the entries. */
2288 num_entries = min_t(u32, error->num_entries,
2289 ARRAY_SIZE(error->array_member));
2291 for (i = 0; i < num_entries; i++, array_entry++) {
/* Skip empty slots (serial number all zeros). */
2293 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2296 if (error->exposed_mode_adn == i)
2297 ipr_err("Exposed Array Member %d:\n", i);
2299 ipr_err("Array Member %d:\n", i);
2301 ipr_err("Array Member %d:\n", i);
2302 ipr_log_ext_vpd(&array_entry->vpd);
2303 ipr_err("Current Location: %s\n",
2304 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2305 buffer, sizeof(buffer)));
2306 ipr_err("Expected Location: %s\n",
2307 ipr_format_res_path(ioa_cfg,
2308 array_entry->expected_res_path,
2309 buffer, sizeof(buffer)));
2316 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2317 * @ioa_cfg: ioa config struct
2318 * @hostrcb: hostrcb struct
2323 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2324 struct ipr_hostrcb *hostrcb)
2326 struct ipr_hostrcb_type_30_error *error;
2327 struct ipr_hostrcb64_fabric_desc *fabric;
2328 struct ipr_hostrcb64_config_element *cfg;
2331 error = &hostrcb->hcam.u.error64.u.type_30_error;
/* Force NUL termination before printing the adapter-supplied string. */
2333 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2334 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
/* Remaining bytes after the fixed header; leftovers are hex-dumped. */
2336 add_len = be32_to_cpu(hostrcb->hcam.length) -
2337 (offsetof(struct ipr_hostrcb64_error, u) +
2338 offsetof(struct ipr_hostrcb_type_30_error, desc));
2340 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2341 ipr_log64_fabric_path(hostrcb, fabric);
2342 for_each_fabric_cfg(fabric, cfg)
2343 ipr_log64_path_elem(hostrcb, cfg);
2345 add_len -= be16_to_cpu(fabric->length);
/* Advance to the next variable-length fabric descriptor. */
2346 fabric = (struct ipr_hostrcb64_fabric_desc *)
2347 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2350 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2354 * ipr_log_generic_error - Log an adapter error.
2355 * @ioa_cfg: ioa config struct
2356 * @hostrcb: hostrcb struct
/* Last-resort logger: hex-dump the raw HCAM payload. */
2361 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2362 struct ipr_hostrcb *hostrcb)
2364 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2365 be32_to_cpu(hostrcb->hcam.length));
2369 * ipr_log_sis64_device_error - Log a sis64 device error.
2370 * @ioa_cfg: ioa config struct
2371 * @hostrcb: hostrcb struct
2376 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2377 struct ipr_hostrcb *hostrcb)
2379 struct ipr_hostrcb_type_21_error *error;
2380 char buffer[IPR_MAX_RES_PATH_LENGTH];
2382 error = &hostrcb->hcam.u.error64.u.type_21_error;
2384 ipr_err("-----Failing Device Information-----\n");
2385 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2386 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2387 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2388 ipr_err("Device Resource Path: %s\n",
2389 __ipr_format_res_path(error->res_path,
2390 buffer, sizeof(buffer)));
/* Force NUL termination on the adapter-supplied description strings. */
2391 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2392 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2393 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2394 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2395 ipr_err("SCSI Sense Data:\n");
2396 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2397 ipr_err("SCSI Command Descriptor Block: \n");
2398 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2400 ipr_err("Additional IOA Data:\n");
2401 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2405 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2408 * This function will return the index of into the ipr_error_table
2409 * for the specified IOASC. If the IOASC is not in the table,
2410 * 0 will be returned, which points to the entry used for unknown errors.
2413 * index into the ipr_error_table
2415 static u32 ipr_get_error(u32 ioasc)
/* Linear search; the masked IOASC must match the table entry exactly. */
2419 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2420 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2427 * ipr_handle_log_data - Log an adapter error.
2428 * @ioa_cfg: ioa config struct
2429 * @hostrcb: hostrcb struct
2431 * This function logs an adapter error to the system.
2436 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2437 struct ipr_hostrcb *hostrcb)
2441 struct ipr_hostrcb_type_21_error *error;
/* Only error-log notifications are handled here. */
2443 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2446 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2447 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
/* SIS64 and SIS32 adapters store the failing-device IOASC in
 * different error layouts. */
2450 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2452 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2454 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2455 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2456 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2457 scsi_report_bus_reset(ioa_cfg->host,
2458 hostrcb->hcam.u.error.fd_res_addr.bus);
2461 error_index = ipr_get_error(ioasc);
/* log_hcam == 0 means this IOASC is not worth logging. */
2463 if (!ipr_error_table[error_index].log_hcam)
/* Suppress noisy illegal-request failures at the default log level. */
2466 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2467 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2468 error = &hostrcb->hcam.u.error64.u.type_21_error;
2470 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2471 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2475 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2477 /* Set indication we have logged an error */
2478 ioa_cfg->errors_logged++;
2480 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
/* Clamp the reported length so the dispatchers never read past the
 * raw HCAM buffer. */
2482 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2483 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
/* Dispatch on overlay id to the matching format-specific logger. */
2485 switch (hostrcb->hcam.overlay_id) {
2486 case IPR_HOST_RCB_OVERLAY_ID_2:
2487 ipr_log_cache_error(ioa_cfg, hostrcb);
2489 case IPR_HOST_RCB_OVERLAY_ID_3:
2490 ipr_log_config_error(ioa_cfg, hostrcb);
2492 case IPR_HOST_RCB_OVERLAY_ID_4:
2493 case IPR_HOST_RCB_OVERLAY_ID_6:
2494 ipr_log_array_error(ioa_cfg, hostrcb);
2496 case IPR_HOST_RCB_OVERLAY_ID_7:
2497 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2499 case IPR_HOST_RCB_OVERLAY_ID_12:
2500 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2502 case IPR_HOST_RCB_OVERLAY_ID_13:
2503 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2505 case IPR_HOST_RCB_OVERLAY_ID_14:
2506 case IPR_HOST_RCB_OVERLAY_ID_16:
2507 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2509 case IPR_HOST_RCB_OVERLAY_ID_17:
2510 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2512 case IPR_HOST_RCB_OVERLAY_ID_20:
2513 ipr_log_fabric_error(ioa_cfg, hostrcb);
2515 case IPR_HOST_RCB_OVERLAY_ID_21:
2516 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2518 case IPR_HOST_RCB_OVERLAY_ID_23:
2519 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2521 case IPR_HOST_RCB_OVERLAY_ID_24:
2522 case IPR_HOST_RCB_OVERLAY_ID_26:
2523 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2525 case IPR_HOST_RCB_OVERLAY_ID_30:
2526 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2528 case IPR_HOST_RCB_OVERLAY_ID_1:
2529 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2531 ipr_log_generic_error(ioa_cfg, hostrcb);
2537 * ipr_process_error - Op done function for an adapter error log.
2538 * @ipr_cmd: ipr command struct
2540 * This function is the op done function for an error log host
2541 * controlled async from the adapter. It will log the error and
2542 * send the HCAM back to the adapter.
2547 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2549 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2550 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2551 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* The failing-device IOASC lives in different layouts on SIS64/SIS32. */
2555 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2557 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
/* Detach the hostrcb and return the command block to the free queue. */
2559 list_del(&hostrcb->queue);
2560 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2563 ipr_handle_log_data(ioa_cfg, hostrcb);
2564 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2565 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2566 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2567 dev_err(&ioa_cfg->pdev->dev,
2568 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
/* Re-arm the HCAM so the adapter can report the next error. */
2571 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2575 * ipr_timeout - An internally generated op has timed out.
2576 * @ipr_cmd: ipr command struct
2578 * This function blocks host requests and initiates an
2584 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2586 unsigned long lock_flags = 0;
2587 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2590 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2592 ioa_cfg->errors_logged++;
2593 dev_err(&ioa_cfg->pdev->dev,
2594 "Adapter being reset due to command timeout.\n");
/* Capture a dump if one was pending before resetting the adapter. */
2596 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2597 ioa_cfg->sdt_state = GET_DUMP;
/* Only reset if no reset is in flight or we own the reset command. */
2599 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2600 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2602 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2607 * ipr_oper_timeout - Adapter timed out transitioning to operational
2608 * @ipr_cmd: ipr command struct
2610 * This function blocks host requests and initiates an
2616 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2618 unsigned long lock_flags = 0;
2619 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2622 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2624 ioa_cfg->errors_logged++;
2625 dev_err(&ioa_cfg->pdev->dev,
2626 "Adapter timed out transitioning to operational.\n");
/* Capture a dump if one was pending before resetting the adapter. */
2628 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2629 ioa_cfg->sdt_state = GET_DUMP;
2631 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
/* Exhaust remaining retries so the reset path gives up promptly. */
2633 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2634 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2637 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2642 * ipr_find_ses_entry - Find matching SES in SES table
2643 * @res: resource entry struct of SES
2646 * pointer to SES table entry / NULL on failure
2648 static const struct ipr_ses_table_entry *
2649 ipr_find_ses_entry(struct ipr_resource_entry *res)
2652 struct ipr_std_inq_vpids *vpids;
2653 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2655 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
/* Compare product id byte-by-byte; 'X' in the compare mask means
 * the byte must match the table's product id exactly. */
2656 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2657 if (ste->compare_product_id_byte[j] == 'X') {
2658 vpids = &res->std_inq_data.vpids;
2659 if (vpids->product_id[j] == ste->product_id[j])
/* All bytes matched: this table entry describes the SES. */
2667 if (matches == IPR_PROD_ID_LEN)
2675 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2676 * @ioa_cfg: ioa config struct
2678 * @bus_width: bus width
2681 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2682 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2683 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2684 * max 160MHz = max 320MB/sec).
2686 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2688 struct ipr_resource_entry *res;
2689 const struct ipr_ses_table_entry *ste;
2690 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2692 /* Loop through each config table entry in the config table buffer */
2693 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
/* Only SES devices on the requested bus can limit the speed. */
2694 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2697 if (bus != res->bus)
2700 if (!(ste = ipr_find_ses_entry(res)))
/* Convert the table limit (MB/sec) into 100KHz clock units,
 * scaled by the bus width in bytes. */
2703 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2706 return max_xfer_rate;
2710 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2711 * @ioa_cfg: ioa config struct
2712 * @max_delay: max delay in micro-seconds to wait
2714 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2717 * 0 on success / other on failure
2719 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2721 volatile u32 pcii_reg;
2724 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2725 while (delay < max_delay) {
2726 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2728 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2731 /* udelay cannot be used if delay is more than a few milliseconds */
2732 if ((delay / 1000) > MAX_UDELAY_MS)
2733 mdelay(delay / 1000);
2743 * ipr_get_sis64_dump_data_section - Dump IOA memory
2744 * @ioa_cfg: ioa config struct
2745 * @start_addr: adapter address to dump
2746 * @dest: destination kernel buffer
2747 * @length_in_words: length to dump in 4 byte words
2752 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2754 __be32 *dest, u32 length_in_words)
/* Word-at-a-time: write the adapter address, then read the data
 * register through the SIS64 dump window. */
2758 for (i = 0; i < length_in_words; i++) {
2759 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2760 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2768 * ipr_get_ldump_data_section - Dump IOA memory
2769 * @ioa_cfg: ioa config struct
2770 * @start_addr: adapter address to dump
2771 * @dest: destination kernel buffer
2772 * @length_in_words: length to dump in 4 byte words
2775 * 0 on success / -EIO on failure
2777 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2779 __be32 *dest, u32 length_in_words)
2781 volatile u32 temp_pcii_reg;
/* SIS64 adapters use the simpler register-window dump path. */
2785 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2786 dest, length_in_words);
2788 /* Write IOA interrupt reg starting LDUMP state */
2789 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2790 ioa_cfg->regs.set_uproc_interrupt_reg32);
2792 /* Wait for IO debug acknowledge */
2793 if (ipr_wait_iodbg_ack(ioa_cfg,
2794 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2795 dev_err(&ioa_cfg->pdev->dev,
2796 "IOA dump long data transfer timeout\n");
2800 /* Signal LDUMP interlocked - clear IO debug ack */
2801 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2802 ioa_cfg->regs.clr_interrupt_reg);
2804 /* Write Mailbox with starting address */
2805 writel(start_addr, ioa_cfg->ioa_mailbox);
2807 /* Signal address valid - clear IOA Reset alert */
2808 writel(IPR_UPROCI_RESET_ALERT,
2809 ioa_cfg->regs.clr_uproc_interrupt_reg32);
/* Handshake one word at a time through the mailbox register. */
2811 for (i = 0; i < length_in_words; i++) {
2812 /* Wait for IO debug acknowledge */
2813 if (ipr_wait_iodbg_ack(ioa_cfg,
2814 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2815 dev_err(&ioa_cfg->pdev->dev,
2816 "IOA dump short data transfer timeout\n");
2820 /* Read data from mailbox and increment destination pointer */
2821 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2824 /* For all but the last word of data, signal data received */
2825 if (i < (length_in_words - 1)) {
2826 /* Signal dump data received - Clear IO debug Ack */
2827 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2828 ioa_cfg->regs.clr_interrupt_reg);
2832 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2833 writel(IPR_UPROCI_RESET_ALERT,
2834 ioa_cfg->regs.set_uproc_interrupt_reg32);
2836 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2837 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2839 /* Signal dump data received - Clear IO debug Ack */
2840 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2841 ioa_cfg->regs.clr_interrupt_reg);
2843 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2844 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2846 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2848 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2858 #ifdef CONFIG_SCSI_IPR_DUMP
2860 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2861 * @ioa_cfg: ioa config struct
2862 * @pci_address: adapter address
2863 * @length: length of data to copy
2865 * Copy data from PCI adapter to kernel buffer.
2866 * Note: length MUST be a 4 byte multiple
2868 * 0 on success / other on failure
2870 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2871 unsigned long pci_address, u32 length)
2873 int bytes_copied = 0;
2874 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2876 unsigned long lock_flags = 0;
2877 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
/* Dump size cap depends on the SDT format in use. */
2880 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2882 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2884 while (bytes_copied < length &&
2885 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
/* Allocate a fresh page when the current one is full (or on the
 * very first iteration). GFP_ATOMIC: may run with the lock held. */
2886 if (ioa_dump->page_offset >= PAGE_SIZE ||
2887 ioa_dump->page_offset == 0) {
2888 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2892 return bytes_copied;
2895 ioa_dump->page_offset = 0;
2896 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2897 ioa_dump->next_page_index++;
2899 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
/* Copy at most the smaller of what's left to copy and what fits
 * in the current page. */
2901 rem_len = length - bytes_copied;
2902 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2903 cur_len = min(rem_len, rem_page_len);
2905 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2906 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2909 rc = ipr_get_ldump_data_section(ioa_cfg,
2910 pci_address + bytes_copied,
2911 &page[ioa_dump->page_offset / 4],
2912 (cur_len / sizeof(u32)));
2914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2917 ioa_dump->page_offset += cur_len;
2918 bytes_copied += cur_len;
2926 return bytes_copied;
2930 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2931 * @hdr: dump entry header struct
2936 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2938 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
/* Data immediately follows the header. */
2940 hdr->offset = sizeof(*hdr);
2941 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2945 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2946 * @ioa_cfg: ioa config struct
2947 * @driver_dump: driver dump struct
2952 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2953 struct ipr_driver_dump *driver_dump)
2955 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2957 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2958 driver_dump->ioa_type_entry.hdr.len =
2959 sizeof(struct ipr_dump_ioa_type_entry) -
2960 sizeof(struct ipr_dump_entry_header);
2961 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2962 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2963 driver_dump->ioa_type_entry.type = ioa_cfg->type;
/* Pack major/card/minor microcode revision bytes into one u32. */
2964 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2965 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2966 ucode_vpd->minor_release[1];
2967 driver_dump->hdr.num_entries++;
2971 * ipr_dump_version_data - Fill in the driver version in the dump.
2972 * @ioa_cfg: ioa config struct
2973 * @driver_dump: driver dump struct
2978 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2979 struct ipr_driver_dump *driver_dump)
2981 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2982 driver_dump->version_entry.hdr.len =
2983 sizeof(struct ipr_dump_version_entry) -
2984 sizeof(struct ipr_dump_entry_header);
2985 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2986 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2987 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2988 driver_dump->hdr.num_entries++;
2992 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2993 * @ioa_cfg: ioa config struct
2994 * @driver_dump: driver dump struct
2999 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3000 struct ipr_driver_dump *driver_dump)
3002 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3003 driver_dump->trace_entry.hdr.len =
3004 sizeof(struct ipr_dump_trace_entry) -
3005 sizeof(struct ipr_dump_entry_header);
3006 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3007 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
/* Snapshot the driver's in-memory trace buffer into the dump. */
3008 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3009 driver_dump->hdr.num_entries++;
3013 * ipr_dump_location_data - Fill in the IOA location in the dump.
3014 * @ioa_cfg: ioa config struct
3015 * @driver_dump: driver dump struct
3020 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3021 struct ipr_driver_dump *driver_dump)
3023 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3024 driver_dump->location_entry.hdr.len =
3025 sizeof(struct ipr_dump_location_entry) -
3026 sizeof(struct ipr_dump_entry_header);
3027 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3028 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
/* Record the PCI device name as the adapter's location string. */
3029 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3030 driver_dump->hdr.num_entries++;
3034 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3035 * @ioa_cfg: ioa config struct
3036 * @dump: dump struct
3041 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3043 unsigned long start_addr, sdt_word;
3044 unsigned long lock_flags = 0;
3045 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3046 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3047 u32 num_entries, max_num_entries, start_off, end_off;
3048 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3049 struct ipr_sdt *sdt;
3055 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Bail unless a dump was actually requested. */
3057 if (ioa_cfg->sdt_state != READ_DUMP) {
3058 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Give SIS64 adapters time to settle before reading the mailbox. */
3062 if (ioa_cfg->sis64) {
3063 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3064 ssleep(IPR_DUMP_DELAY_SECONDS);
3065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* The mailbox holds the adapter-side address of the SDT. */
3068 start_addr = readl(ioa_cfg->ioa_mailbox);
3070 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3071 dev_err(&ioa_cfg->pdev->dev,
3072 "Invalid dump table format: %lx\n", start_addr);
3073 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3077 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3079 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3081 /* Initialize the overall dump header */
3082 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3083 driver_dump->hdr.num_entries = 1;
3084 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3085 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3086 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3087 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
/* Fill in the driver-side dump entries. */
3089 ipr_dump_version_data(ioa_cfg, driver_dump);
3090 ipr_dump_location_data(ioa_cfg, driver_dump);
3091 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3092 ipr_dump_trace_data(ioa_cfg, driver_dump);
3094 /* Update dump_header */
3095 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3097 /* IOA Dump entry */
3098 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3099 ioa_dump->hdr.len = 0;
3100 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3101 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3103 /* First entries in sdt are actually a list of dump addresses and
3104 lengths to gather the real dump data. sdt represents the pointer
3105 to the ioa generated dump table. Dump data will be extracted based
3106 on entries in this table */
3107 sdt = &ioa_dump->sdt;
3109 if (ioa_cfg->sis64) {
3110 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3111 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3113 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3114 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
/* Fetch the SDT itself from the adapter. */
3117 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3118 (max_num_entries * sizeof(struct ipr_sdt_entry));
3119 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3120 bytes_to_copy / sizeof(__be32));
3122 /* Smart Dump table is ready to use and the first entry is valid */
3123 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3124 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3125 dev_err(&ioa_cfg->pdev->dev,
3126 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3127 rc, be32_to_cpu(sdt->hdr.state));
3128 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3129 ioa_cfg->sdt_state = DUMP_OBTAINED;
3130 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3134 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3136 if (num_entries > max_num_entries)
3137 num_entries = max_num_entries;
3139 /* Update dump length to the actual data to be copied */
3140 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3142 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3144 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3146 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Walk the SDT entries and pull each valid section off the adapter. */
3148 for (i = 0; i < num_entries; i++) {
3149 if (ioa_dump->hdr.len > max_dump_size) {
3150 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3154 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3155 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3157 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
/* FMT2: the end token is an end offset, not a length. */
3159 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3160 end_off = be32_to_cpu(sdt->entry[i].end_token);
3162 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3163 bytes_to_copy = end_off - start_off;
3168 if (bytes_to_copy > max_dump_size) {
3169 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3173 /* Copy data from adapter to driver buffers */
3174 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3177 ioa_dump->hdr.len += bytes_copied;
/* Short copy means the dump was truncated or aborted. */
3179 if (bytes_copied != bytes_to_copy) {
3180 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3187 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3189 /* Update dump_header */
3190 driver_dump->hdr.len += ioa_dump->hdr.len;
3192 ioa_cfg->sdt_state = DUMP_OBTAINED;
/* Without CONFIG_SCSI_IPR_DUMP, adapter dumping compiles to a no-op. */
3197 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3201 * ipr_release_dump - Free adapter dump memory
3202 * @kref: kref struct
3207 static void ipr_release_dump(struct kref *kref)
3209 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3210 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3211 unsigned long lock_flags = 0;
/* Detach the dump from the adapter under the host lock, then free
 * the per-page buffers outside it. */
3215 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3216 ioa_cfg->dump = NULL;
3217 ioa_cfg->sdt_state = INACTIVE;
3218 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3220 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3221 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3223 vfree(dump->ioa_dump.ioa_data);
3229 * ipr_worker_thread - Worker thread
3230 * @work: ioa config struct
3232 * Called at task level from a work thread. This function takes care
3233 * of adding and removing device from the mid-layer as configuration
3234 * changes are detected by the adapter.
3239 static void ipr_worker_thread(struct work_struct *work)
3241 unsigned long lock_flags;
3242 struct ipr_resource_entry *res;
3243 struct scsi_device *sdev;
3244 struct ipr_dump *dump;
3245 struct ipr_ioa_cfg *ioa_cfg =
3246 container_of(work, struct ipr_ioa_cfg, work_q);
3247 u8 bus, target, lun;
3251 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* A pending dump request takes priority over device add/remove. */
3253 if (ioa_cfg->sdt_state == READ_DUMP) {
3254 dump = ioa_cfg->dump;
3256 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Hold a reference while dumping with the lock dropped. */
3259 kref_get(&dump->kref);
3260 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3261 ipr_get_ioa_dump(ioa_cfg, dump);
3262 kref_put(&dump->kref, ipr_release_dump);
3264 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3265 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3266 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3267 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3274 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Remove resources the adapter reported as gone. The lock is
 * dropped around scsi_remove_device(), which can sleep. */
3279 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3280 if (res->del_from_ml && res->sdev) {
3283 if (!scsi_device_get(sdev)) {
3284 if (!res->add_to_ml)
3285 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3287 res->del_from_ml = 0;
3288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3289 scsi_remove_device(sdev);
3290 scsi_device_put(sdev);
3291 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Register newly reported resources with the SCSI mid-layer. */
3298 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3299 if (res->add_to_ml) {
3301 target = res->target;
3304 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3305 scsi_add_device(ioa_cfg->host, bus, target, lun);
3306 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3311 ioa_cfg->scan_done = 1;
3312 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Notify userspace that the device scan finished. */
3313 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
/* NOTE(review): elided hunk — kernel-doc opening, braces and the
 * declaration/return of 'ret' are missing; code kept byte-identical. */
#ifdef CONFIG_SCSI_IPR_TRACE
3319 * ipr_read_trace - Dump the adapter trace
3320 * @filp: open sysfs file
3321 * @kobj: kobject struct
3322 * @bin_attr: bin_attribute struct
3325 * @count: buffer size
3328 * number of bytes printed to buffer
3330 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3331 struct bin_attribute *bin_attr,
3332 char *buf, loff_t off, size_t count)
3334 struct device *dev = container_of(kobj, struct device, kobj);
3335 struct Scsi_Host *shost = class_to_shost(dev);
3336 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3337 unsigned long lock_flags = 0;
/* Copy out of the trace buffer under the host lock so the trace cannot
 * change mid-read. */
3340 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3341 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3343 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs binary attribute exposing the adapter trace (read-only). */
3348 static struct bin_attribute ipr_trace_attr = {
3354 .read = ipr_read_trace,
/* NOTE(review): elided hunk — missing kernel-doc opening, braces,
 * 'len' declaration and return; code kept byte-identical. */
3359 * ipr_show_fw_version - Show the firmware version
3360 * @dev: class device struct
3364 * number of bytes printed to buffer
3366 static ssize_t ipr_show_fw_version(struct device *dev,
3367 struct device_attribute *attr, char *buf)
3369 struct Scsi_Host *shost = class_to_shost(dev);
3370 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3371 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3372 unsigned long lock_flags = 0;
/* Format the four firmware-version bytes from inquiry page 3 VPD while
 * holding the host lock. */
3375 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3376 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3377 ucode_vpd->major_release, ucode_vpd->card_type,
3378 ucode_vpd->minor_release[0],
3379 ucode_vpd->minor_release[1]);
3380 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Host sysfs attribute "fw_version". */
3384 static struct device_attribute ipr_fw_version_attr = {
3386 .name = "fw_version",
3389 .show = ipr_show_fw_version,
/* NOTE(review): elided hunk — missing kernel-doc opening, braces,
 * 'len' declaration and return; code kept byte-identical. */
3393 * ipr_show_log_level - Show the adapter's error logging level
3394 * @dev: class device struct
3398 * number of bytes printed to buffer
3400 static ssize_t ipr_show_log_level(struct device *dev,
3401 struct device_attribute *attr, char *buf)
3403 struct Scsi_Host *shost = class_to_shost(dev);
3404 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3405 unsigned long lock_flags = 0;
/* Read log_level under the host lock. */
3408 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3409 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3410 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* NOTE(review): elided hunk — missing kernel-doc opening, braces and
 * return of count; code kept byte-identical. */
3415 * ipr_store_log_level - Change the adapter's error logging level
3416 * @dev: class device struct
3420 * number of bytes printed to buffer
3422 static ssize_t ipr_store_log_level(struct device *dev,
3423 struct device_attribute *attr,
3424 const char *buf, size_t count)
3426 struct Scsi_Host *shost = class_to_shost(dev);
3427 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3428 unsigned long lock_flags = 0;
/* Parse the new level and store it under the host lock. NOTE(review):
 * simple_strtoul() performs no error checking on the input here. */
3430 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3431 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Host sysfs attribute "log_level" (read/write). */
3436 static struct device_attribute ipr_log_level_attr = {
3438 .name = "log_level",
3439 .mode = S_IRUGO | S_IWUSR,
3441 .show = ipr_show_log_level,
3442 .store = ipr_store_log_level
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * capability-failure return, the sleep, and the final return; code kept
 * byte-identical. */
3446 * ipr_store_diagnostics - IOA Diagnostics interface
3447 * @dev: device struct
3449 * @count: buffer size
3451 * This function will reset the adapter and wait a reasonable
3452 * amount of time for any errors that the adapter might log.
3455 * count on success / other on failure
3457 static ssize_t ipr_store_diagnostics(struct device *dev,
3458 struct device_attribute *attr,
3459 const char *buf, size_t count)
3461 struct Scsi_Host *shost = class_to_shost(dev);
3462 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3463 unsigned long lock_flags = 0;
/* Privileged operation: requires CAP_SYS_ADMIN. */
3466 if (!capable(CAP_SYS_ADMIN))
/* Wait out any in-flight reset before starting; the lock is dropped
 * while sleeping on reset_wait_q. */
3469 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3470 while (ioa_cfg->in_reset_reload) {
3471 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3472 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3473 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Clear the error counter and trigger a normal-shutdown reset. */
3476 ioa_cfg->errors_logged = 0;
3477 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3479 if (ioa_cfg->in_reset_reload) {
3480 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3481 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3483 /* Wait for a second for any errors to be logged */
3486 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Fail the diagnostics if a reset restarted or errors were logged. */
3490 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3491 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3493 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Host sysfs attribute "run_diagnostics" (write-only trigger). */
3498 static struct device_attribute ipr_diagnostics_attr = {
3500 .name = "run_diagnostics",
3503 .store = ipr_store_diagnostics
/* NOTE(review): elided hunk — missing kernel-doc opening, braces,
 * 'len' declaration, 'else' and return; code kept byte-identical. */
3507 * ipr_show_adapter_state - Show the adapter's state
3508 * @class_dev: device struct
3512 * number of bytes printed to buffer
3514 static ssize_t ipr_show_adapter_state(struct device *dev,
3515 struct device_attribute *attr, char *buf)
3517 struct Scsi_Host *shost = class_to_shost(dev);
3518 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3519 unsigned long lock_flags = 0;
/* Report "offline" when the initial HRRQ marks the IOA dead,
 * "online" otherwise. */
3522 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3523 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3524 len = snprintf(buf, PAGE_SIZE, "offline\n");
3526 len = snprintf(buf, PAGE_SIZE, "online\n");
3527 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * capability-failure return, the offline branch and final return; code
 * kept byte-identical. */
3532 * ipr_store_adapter_state - Change adapter state
3533 * @dev: device struct
3535 * @count: buffer size
3537 * This function will change the adapter's state.
3540 * count on success / other on failure
3542 static ssize_t ipr_store_adapter_state(struct device *dev,
3543 struct device_attribute *attr,
3544 const char *buf, size_t count)
3546 struct Scsi_Host *shost = class_to_shost(dev);
3547 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3548 unsigned long lock_flags;
3549 int result = count, i;
/* Privileged operation: requires CAP_SYS_ADMIN. */
3551 if (!capable(CAP_SYS_ADMIN))
/* "online" on a dead adapter: clear ioa_is_dead on every HRRQ (each
 * under its own _lock) and kick off a reset to bring it back. */
3554 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3555 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3556 !strncmp(buf, "online", 6)) {
3557 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3558 spin_lock(&ioa_cfg->hrrq[i]._lock);
3559 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3560 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3563 ioa_cfg->reset_retries = 0;
3564 ioa_cfg->in_ioa_bringdown = 0;
3565 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3567 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3568 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* Host sysfs attribute "online_state" (read/write). */
3573 static struct device_attribute ipr_ioa_state_attr = {
3575 .name = "online_state",
3576 .mode = S_IRUGO | S_IWUSR,
3578 .show = ipr_show_adapter_state,
3579 .store = ipr_store_adapter_state
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * capability-failure return and final return; code kept byte-identical. */
3583 * ipr_store_reset_adapter - Reset the adapter
3584 * @dev: device struct
3586 * @count: buffer size
3588 * This function will reset the adapter.
3591 * count on success / other on failure
3593 static ssize_t ipr_store_reset_adapter(struct device *dev,
3594 struct device_attribute *attr,
3595 const char *buf, size_t count)
3597 struct Scsi_Host *shost = class_to_shost(dev);
3598 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3599 unsigned long lock_flags;
/* Privileged operation: requires CAP_SYS_ADMIN. */
3602 if (!capable(CAP_SYS_ADMIN))
/* Start a normal-shutdown reset only if one isn't already running,
 * then wait (lock dropped) for it to complete. */
3605 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3606 if (!ioa_cfg->in_reset_reload)
3607 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3608 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3609 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* Host sysfs attribute "reset_host" (write-only trigger). */
3614 static struct device_attribute ipr_ioa_reset_attr = {
3616 .name = "reset_host",
3619 .store = ipr_store_reset_adapter
/* Forward declaration for the blk-iopoll callback used below. */
3622 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
/* NOTE(review): elided hunk — missing kernel-doc opening, braces,
 * 'len' declaration and return; code kept byte-identical. */
3624 * ipr_show_iopoll_weight - Show ipr polling mode
3625 * @dev: class device struct
3629 * number of bytes printed to buffer
3631 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3632 struct device_attribute *attr, char *buf)
3634 struct Scsi_Host *shost = class_to_shost(dev);
3635 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3636 unsigned long lock_flags = 0;
/* Read iopoll_weight under the host lock. */
3639 spin_lock_irqsave(shost->host_lock, lock_flags);
3640 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3641 spin_unlock_irqrestore(shost->host_lock, lock_flags);
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, 'i'
 * declaration, several early returns and the final return; code kept
 * byte-identical. */
3647 * ipr_store_iopoll_weight - Change the adapter's polling mode
3648 * @dev: class device struct
3652 * number of bytes printed to buffer
3654 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3655 struct device_attribute *attr,
3656 const char *buf, size_t count)
3658 struct Scsi_Host *shost = class_to_shost(dev);
3659 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3660 unsigned long user_iopoll_weight;
3661 unsigned long lock_flags = 0;
/* Input validation: sis64 adapters only, weight must parse and be
 * <= 256, and must differ from the current weight. */
3664 if (!ioa_cfg->sis64) {
3665 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3668 if (kstrtoul(buf, 10, &user_iopoll_weight))
3671 if (user_iopoll_weight > 256) {
3672 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
3676 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3677 dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
/* Disable iopoll on all non-initial HRRQs before changing the weight
 * (done outside the host lock)... */
3681 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3682 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3683 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
/* ...then store the new weight and re-init/enable iopoll under the
 * host lock. */
3686 spin_lock_irqsave(shost->host_lock, lock_flags);
3687 ioa_cfg->iopoll_weight = user_iopoll_weight;
3688 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3689 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3690 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3691 ioa_cfg->iopoll_weight, ipr_iopoll);
3692 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3695 spin_unlock_irqrestore(shost->host_lock, lock_flags);
/* Host sysfs attribute "iopoll_weight" (read/write). */
3700 static struct device_attribute ipr_iopoll_weight_attr = {
3702 .name = "iopoll_weight",
3703 .mode = S_IRUGO | S_IWUSR,
3705 .show = ipr_show_iopoll_weight,
3706 .store = ipr_store_iopoll_weight
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * 'page' declaration, error-path frees/returns and the final return of
 * 'sglist'; code kept byte-identical. */
3710 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3711 * @buf_len: buffer length
3713 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3714 * list to use for microcode download
3717 * pointer to sglist / NULL on failure
3719 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3721 int sg_size, order, bsize_elem, num_elem, i, j;
3722 struct ipr_sglist *sglist;
3723 struct scatterlist *scatterlist;
3726 /* Get the minimum size per scatter/gather element */
3727 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3729 /* Get the actual size per element */
3730 order = get_order(sg_size);
3732 /* Determine the actual number of bytes per element */
3733 bsize_elem = PAGE_SIZE * (1 << order);
3735 /* Determine the actual number of sg entries needed (round up) */
3736 if (buf_len % bsize_elem)
3737 num_elem = (buf_len / bsize_elem) + 1;
3739 num_elem = buf_len / bsize_elem;
3741 /* Allocate a scatter/gather list for the DMA */
3742 sglist = kzalloc(sizeof(struct ipr_sglist) +
3743 (sizeof(struct scatterlist) * (num_elem - 1)),
3746 if (sglist == NULL) {
3751 scatterlist = sglist->scatterlist;
3752 sg_init_table(scatterlist, num_elem);
3754 sglist->order = order;
3755 sglist->num_sg = num_elem;
3757 /* Allocate a bunch of sg elements; on failure, unwind pages
3758  * already allocated */
3758 for (i = 0; i < num_elem; i++) {
3759 page = alloc_pages(GFP_KERNEL, order);
3763 /* Free up what we already allocated */
3764 for (j = i - 1; j >= 0; j--)
3765 __free_pages(sg_page(&scatterlist[j]), order);
3770 sg_set_page(&scatterlist[i], page, 0, 0);
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, 'i'
 * declaration and the final kfree of the sglist; code kept byte-identical. */
3777 * ipr_free_ucode_buffer - Frees a microcode download buffer
3778 * @p_dnld: scatter/gather list pointer
3780 * Free a DMA'able ucode download buffer previously allocated with
3781 * ipr_alloc_ucode_buffer
3786 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
/* Release every page group backing the scatterlist at its stored order. */
3790 for (i = 0; i < sglist->num_sg; i++)
3791 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * kmap/kunmap calls around each memcpy and the final return; code kept
 * byte-identical. */
3797 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3798 * @sglist: scatter/gather list pointer
3799 * @buffer: buffer pointer
3800 * @len: buffer length
3802 * Copy a microcode image from a user buffer into a buffer allocated by
3803 * ipr_alloc_ucode_buffer
3806 * 0 on success / other on failure
3808 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3809 u8 *buffer, u32 len)
3811 int bsize_elem, i, result = 0;
3812 struct scatterlist *scatterlist;
3815 /* Determine the actual number of bytes per element */
3816 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3818 scatterlist = sglist->scatterlist;
/* Copy whole elements first... */
3820 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3821 struct page *page = sg_page(&scatterlist[i]);
3824 memcpy(kaddr, buffer, bsize_elem);
3827 scatterlist[i].length = bsize_elem;
/* ...then the trailing partial element, if any. */
3835 if (len % bsize_elem) {
3836 struct page *page = sg_page(&scatterlist[i]);
3839 memcpy(kaddr, buffer, len % bsize_elem);
3842 scatterlist[i].length = len % bsize_elem;
3845 sglist->buffer_len = len;
/* NOTE(review): elided hunk — missing kernel-doc opening, braces and 'i'
 * declaration; code kept byte-identical. 64-bit (sis64) variant of the
 * ucode IOADL builder below. */
3850 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3851 * @ipr_cmd: ipr command struct
3852 * @sglist: scatter/gather list
3854 * Builds a microcode download IOA data list (IOADL).
3857 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3858 struct ipr_sglist *sglist)
3860 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3861 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3862 struct scatterlist *scatterlist = sglist->scatterlist;
/* Mark the transfer as a write and record its total length
 * (big-endian, as the IOA expects). */
3865 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3866 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3867 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3870 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
/* One descriptor per mapped sg entry; flag the last one. */
3871 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3872 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3873 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3874 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3877 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
/* NOTE(review): elided hunk — missing kernel-doc opening, braces and 'i'
 * declaration; code kept byte-identical. 32-bit variant matching
 * ipr_build_ucode_ioadl64 above. */
3881 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3882 * @ipr_cmd: ipr command struct
3883 * @sglist: scatter/gather list
3885 * Builds a microcode download IOA data list (IOADL).
3888 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3889 struct ipr_sglist *sglist)
3891 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3892 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3893 struct scatterlist *scatterlist = sglist->scatterlist;
/* Mark the transfer as a write and record its total length. */
3896 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3897 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3898 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3901 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* Flags and length are packed into one 32-bit field per descriptor;
 * the last descriptor gets the LAST flag OR'd in. */
3903 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3904 ioadl[i].flags_and_data_len =
3905 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3907 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3910 ioadl[i-1].flags_and_data_len |=
3911 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, error
 * returns (-EIO) and final return 0; code kept byte-identical. */
3915 * ipr_update_ioa_ucode - Update IOA's microcode
3916 * @ioa_cfg: ioa config struct
3917 * @sglist: scatter/gather list
3919 * Initiate an adapter reset to update the IOA's microcode
3922 * 0 on success / -EIO on failure
3924 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3925 struct ipr_sglist *sglist)
3927 unsigned long lock_flags;
/* Wait out any in-flight reset before starting the download. */
3929 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3930 while (ioa_cfg->in_reset_reload) {
3931 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3932 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3933 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only one download may be in flight at a time. */
3936 if (ioa_cfg->ucode_sglist) {
3937 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3938 dev_err(&ioa_cfg->pdev->dev,
3939 "Microcode download already in progress\n");
/* Map the image for DMA; failure aborts the download. */
3943 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3944 sglist->scatterlist, sglist->num_sg,
3947 if (!sglist->num_dma_sg) {
3948 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3949 dev_err(&ioa_cfg->pdev->dev,
3950 "Failed to map microcode download buffer!\n");
/* Publish the sglist, reset the adapter (which performs the download),
 * and wait for completion before clearing the pointer. */
3954 ioa_cfg->ucode_sglist = sglist;
3955 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3957 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3959 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3960 ioa_cfg->ucode_sglist = NULL;
3961 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * 'fname'/'src' declarations, error returns and the final return; code
 * kept byte-identical. */
3966 * ipr_store_update_fw - Update the firmware on the adapter
3967 * @class_dev: device struct
3969 * @count: buffer size
3971 * This function will update the firmware on the adapter.
3974 * count on success / other on failure
3976 static ssize_t ipr_store_update_fw(struct device *dev,
3977 struct device_attribute *attr,
3978 const char *buf, size_t count)
3980 struct Scsi_Host *shost = class_to_shost(dev);
3981 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3982 struct ipr_ucode_image_header *image_hdr;
3983 const struct firmware *fw_entry;
3984 struct ipr_sglist *sglist;
3987 int len, result, dnld_size;
/* Privileged operation: requires CAP_SYS_ADMIN. */
3989 if (!capable(CAP_SYS_ADMIN))
/* Copy the filename and strip the trailing character. NOTE(review):
 * this drops the last byte unconditionally — presumably to remove the
 * sysfs-written newline; a name without a newline loses a character. */
3992 len = snprintf(fname, 99, "%s", buf);
3993 fname[len-1] = '\0';
3995 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3996 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
/* Skip the image header; only the payload is downloaded. */
4000 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4002 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4003 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4004 sglist = ipr_alloc_ucode_buffer(dnld_size);
4007 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4008 release_firmware(fw_entry);
4012 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4015 dev_err(&ioa_cfg->pdev->dev,
4016 "Microcode buffer copy to DMA buffer failed\n");
4020 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4022 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
/* Cleanup path: free the DMA buffer and release the firmware blob. */
4027 ipr_free_ucode_buffer(sglist);
4028 release_firmware(fw_entry);
/* Host sysfs attribute "update_fw" (write-only trigger). */
4032 static struct device_attribute ipr_update_fw_attr = {
4034 .name = "update_fw",
4037 .store = ipr_store_update_fw
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, 'len'
 * declaration and return; code kept byte-identical. */
4041 * ipr_show_fw_type - Show the adapter's firmware type.
4042 * @dev: class device struct
4046 * number of bytes printed to buffer
4048 static ssize_t ipr_show_fw_type(struct device *dev,
4049 struct device_attribute *attr, char *buf)
4051 struct Scsi_Host *shost = class_to_shost(dev);
4052 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4053 unsigned long lock_flags = 0;
/* Reports the sis64 flag (firmware interface generation) as 0/1. */
4056 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4057 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4058 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4062 static struct device_attribute ipr_ioa_fw_type_attr = {
4067 .show = ipr_show_fw_type
/* NULL-terminated (terminator elided here) table of host attributes
 * registered with the SCSI mid-layer. */
4070 static struct device_attribute *ipr_ioa_attrs[] = {
4071 &ipr_fw_version_attr,
4072 &ipr_log_level_attr,
4073 &ipr_diagnostics_attr,
4074 &ipr_ioa_state_attr,
4075 &ipr_ioa_reset_attr,
4076 &ipr_update_fw_attr,
4077 &ipr_ioa_fw_type_attr,
4078 &ipr_iopoll_weight_attr,
#ifdef CONFIG_SCSI_IPR_DUMP
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, local
 * declarations ('len', 'src', 'rc', 'sdt_end'), several adjustments of
 * buf/off/count between sections, and returns; code kept byte-identical. */
4084 * ipr_read_dump - Dump the adapter
4085 * @filp: open sysfs file
4086 * @kobj: kobject struct
4087 * @bin_attr: bin_attribute struct
4090 * @count: buffer size
4093 * number of bytes printed to buffer
4095 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4096 struct bin_attribute *bin_attr,
4097 char *buf, loff_t off, size_t count)
4099 struct device *cdev = container_of(kobj, struct device, kobj);
4100 struct Scsi_Host *shost = class_to_shost(cdev);
4101 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4102 struct ipr_dump *dump;
4103 unsigned long lock_flags = 0;
/* Privileged read: requires CAP_SYS_ADMIN. */
4108 if (!capable(CAP_SYS_ADMIN))
/* Pin the dump with a kref while copying, so a concurrent free can't
 * pull it out from under us. */
4111 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4112 dump = ioa_cfg->dump;
4114 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4115 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4118 kref_get(&dump->kref);
4119 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Clamp the request to the dump's total length. */
4121 if (off > dump->driver_dump.hdr.len) {
4122 kref_put(&dump->kref, ipr_release_dump);
4126 if (off + count > dump->driver_dump.hdr.len) {
4127 count = dump->driver_dump.hdr.len - off;
/* Section 1: the fixed driver_dump header structure. */
4131 if (count && off < sizeof(dump->driver_dump)) {
4132 if (off + count > sizeof(dump->driver_dump))
4133 len = sizeof(dump->driver_dump) - off;
4136 src = (u8 *)&dump->driver_dump + off;
4137 memcpy(buf, src, len);
4143 off -= sizeof(dump->driver_dump);
/* Section 2: the SDT table — its end depends on the SDT format
 * (entries-used count vs. the fixed FMT2 entry count). */
4146 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4147 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4148 sizeof(struct ipr_sdt_entry));
4150 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4151 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4153 if (count && off < sdt_end) {
4154 if (off + count > sdt_end)
4155 len = sdt_end - off;
4158 src = (u8 *)&dump->ioa_dump + off;
4159 memcpy(buf, src, len);
/* Section 3: page-array IOA data — copy at most up to the next page
 * boundary per iteration. */
4168 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4169 len = PAGE_ALIGN(off) - off;
4172 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4173 src += off & ~PAGE_MASK;
4174 memcpy(buf, src, len);
4180 kref_put(&dump->kref, ipr_release_dump);
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * 'ioa_data' declaration, error returns and the final return 0; code kept
 * byte-identical. */
4185 * ipr_alloc_dump - Prepare for adapter dump
4186 * @ioa_cfg: ioa config struct
4189 * 0 on success / other on failure
4191 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4193 struct ipr_dump *dump;
4195 unsigned long lock_flags = 0;
4197 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4200 ipr_err("Dump memory allocation failed\n");
/* Page-pointer array sized by SDT format (FMT3 for sis64 vs FMT2). */
4205 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4207 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4210 ipr_err("Dump memory allocation failed\n");
4215 dump->ioa_dump.ioa_data = ioa_data;
4217 kref_init(&dump->kref);
4218 dump->ioa_cfg = ioa_cfg;
/* Only install the dump if no dump activity is already in progress;
 * otherwise unwind the allocations. */
4220 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4222 if (INACTIVE != ioa_cfg->sdt_state) {
4223 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4224 vfree(dump->ioa_dump.ioa_data);
4229 ioa_cfg->dump = dump;
4230 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
/* If the adapter is already dead and no dump was taken, schedule the
 * worker to collect one immediately. */
4231 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4232 ioa_cfg->dump_taken = 1;
4233 schedule_work(&ioa_cfg->work_q);
4235 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* NOTE(review): elided hunk — missing kernel-doc opening, braces and
 * returns; code kept byte-identical. */
4241 * ipr_free_dump - Free adapter dump memory
4242 * @ioa_cfg: ioa config struct
4245 * 0 on success / other on failure
4247 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4249 struct ipr_dump *dump;
4250 unsigned long lock_flags = 0;
/* Detach the dump from the config under the lock, then drop the final
 * reference outside it (ipr_release_dump does the actual free). */
4254 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4255 dump = ioa_cfg->dump;
4257 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4261 ioa_cfg->dump = NULL;
4262 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4264 kref_put(&dump->kref, ipr_release_dump);
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * 'rc' declaration, the sis64 check and returns; code kept byte-identical. */
4271 * ipr_write_dump - Setup dump state of adapter
4272 * @filp: open sysfs file
4273 * @kobj: kobject struct
4274 * @bin_attr: bin_attribute struct
4277 * @count: buffer size
4280 * number of bytes printed to buffer
4282 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4283 struct bin_attribute *bin_attr,
4284 char *buf, loff_t off, size_t count)
4286 struct device *cdev = container_of(kobj, struct device, kobj);
4287 struct Scsi_Host *shost = class_to_shost(cdev);
4288 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
/* Privileged operation: requires CAP_SYS_ADMIN. */
4291 if (!capable(CAP_SYS_ADMIN))
/* Writing '1' arms a dump; '0' frees it (first-byte protocol). */
4295 rc = ipr_alloc_dump(ioa_cfg);
4296 else if (buf[0] == '0')
4297 rc = ipr_free_dump(ioa_cfg);
/* sysfs binary attribute "dump": read retrieves, write arms/frees. */
4307 static struct bin_attribute ipr_dump_attr = {
4310 .mode = S_IRUSR | S_IWUSR,
4313 .read = ipr_read_dump,
4314 .write = ipr_write_dump
/* Stub used when CONFIG_SCSI_IPR_DUMP is not set (the matching #else
 * is elided from this hunk). */
4317 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
/* NOTE(review): elided hunk — missing kernel-doc opening and braces;
 * code kept byte-identical. */
4321 * ipr_change_queue_depth - Change the device's queue depth
4322 * @sdev: scsi device struct
4323 * @qdepth: depth to set
4324 * @reason: calling context
4329 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4331 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4332 struct ipr_resource_entry *res;
4333 unsigned long lock_flags = 0;
/* Clamp the requested depth for SATA (GATA) devices to the per-ATA-LUN
 * command limit; res lookup is done under the host lock. */
4335 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4336 res = (struct ipr_resource_entry *)sdev->hostdata;
4338 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4339 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4342 scsi_change_queue_depth(sdev, qdepth);
4343 return sdev->queue_depth;
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * 'if (res)' guard line and return; code kept byte-identical. */
4347 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4348 * @dev: device struct
4349 * @attr: device attribute structure
4353 * number of bytes printed to buffer
4355 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4357 struct scsi_device *sdev = to_scsi_device(dev);
4358 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4359 struct ipr_resource_entry *res;
4360 unsigned long lock_flags = 0;
4361 ssize_t len = -ENXIO;
/* -ENXIO is returned when no resource entry backs this sdev. */
4363 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4364 res = (struct ipr_resource_entry *)sdev->hostdata;
4366 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4367 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Device sysfs attribute "adapter_handle". */
4371 static struct device_attribute ipr_adapter_handle_attr = {
4373 .name = "adapter_handle",
4376 .show = ipr_show_adapter_handle
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * 'else if (res)' line and return; code kept byte-identical. */
4380 * ipr_show_resource_path - Show the resource path or the resource address for
4382 * @dev: device struct
4383 * @attr: device attribute structure
4387 * number of bytes printed to buffer
4389 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4391 struct scsi_device *sdev = to_scsi_device(dev);
4392 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4393 struct ipr_resource_entry *res;
4394 unsigned long lock_flags = 0;
4395 ssize_t len = -ENXIO;
4396 char buffer[IPR_MAX_RES_PATH_LENGTH];
/* sis64 adapters expose a formatted resource path; older adapters fall
 * back to the host:bus:target:lun address form. */
4398 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4399 res = (struct ipr_resource_entry *)sdev->hostdata;
4400 if (res && ioa_cfg->sis64)
4401 len = snprintf(buf, PAGE_SIZE, "%s\n",
4402 __ipr_format_res_path(res->res_path, buffer,
4405 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4406 res->bus, res->target, res->lun);
4408 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Device sysfs attribute "resource_path". */
4412 static struct device_attribute ipr_resource_path_attr = {
4414 .name = "resource_path",
4417 .show = ipr_show_resource_path
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * 'else if (res)' line and return; code kept byte-identical. */
4421 * ipr_show_device_id - Show the device_id for this device.
4422 * @dev: device struct
4423 * @attr: device attribute structure
4427 * number of bytes printed to buffer
4429 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4431 struct scsi_device *sdev = to_scsi_device(dev);
4432 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4433 struct ipr_resource_entry *res;
4434 unsigned long lock_flags = 0;
4435 ssize_t len = -ENXIO;
/* sis64 reports dev_id; the fallback path reports the LUN WWN. */
4437 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4438 res = (struct ipr_resource_entry *)sdev->hostdata;
4439 if (res && ioa_cfg->sis64)
4440 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4442 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4444 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Device sysfs attribute "device_id". */
4448 static struct device_attribute ipr_device_id_attr = {
4450 .name = "device_id",
4453 .show = ipr_show_device_id
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * 'if (res)' guard and return; code kept byte-identical. */
4457 * ipr_show_resource_type - Show the resource type for this device.
4458 * @dev: device struct
4459 * @attr: device attribute structure
4463 * number of bytes printed to buffer
4465 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4467 struct scsi_device *sdev = to_scsi_device(dev);
4468 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4469 struct ipr_resource_entry *res;
4470 unsigned long lock_flags = 0;
4471 ssize_t len = -ENXIO;
4473 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4474 res = (struct ipr_resource_entry *)sdev->hostdata;
4477 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4479 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Device sysfs attribute "resource_type". */
4483 static struct device_attribute ipr_resource_type_attr = {
4485 .name = "resource_type",
4488 .show = ipr_show_resource_type
/* NULL-terminated (terminator elided here) table of per-device sysfs
 * attributes registered with the SCSI mid-layer. */
4491 static struct device_attribute *ipr_dev_attrs[] = {
4492 &ipr_adapter_handle_attr,
4493 &ipr_resource_path_attr,
4494 &ipr_device_id_attr,
4495 &ipr_resource_type_attr,
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * heads/sectors assignments (parm[0]/parm[1]) and return; code kept
 * byte-identical. */
4500 * ipr_biosparam - Return the HSC mapping
4501 * @sdev: scsi device struct
4502 * @block_device: block device pointer
4503 * @capacity: capacity of the device
4504 * @parm: Array containing returned HSC values.
4506 * This function generates the HSC parms that fdisk uses.
4507 * We want to make sure we return something that places partitions
4508 * on 4k boundaries for best performance with the IOA.
4513 static int ipr_biosparam(struct scsi_device *sdev,
4514 struct block_device *block_device,
4515 sector_t capacity, int *parm)
/* Cylinders = capacity / (128 heads * 32 sectors); sector_div handles
 * the 64-bit sector_t division portably. */
4523 cylinders = capacity;
4524 sector_div(cylinders, (128 * 32));
4529 parm[2] = cylinders;
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * 'return res;' inside the match and the trailing 'return NULL;'; code
 * kept byte-identical. */
4535 * ipr_find_starget - Find target based on bus/target.
4536 * @starget: scsi target struct
4539 * resource entry pointer if found / NULL if not found
4541 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4543 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4544 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4545 struct ipr_resource_entry *res;
/* Linear scan of the used-resource queue for a bus/target match. */
4547 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4548 if ((res->bus == starget->channel) &&
4549 (res->target == starget->id)) {
/* Port info template for SATA ports allocated in ipr_target_alloc(). */
4557 static struct ata_port_info sata_port_info;
/* NOTE(review): elided hunk — missing kernel-doc opening, braces, the
 * kzalloc-failure and ata_sas_port_alloc-failure paths, and returns;
 * code kept byte-identical. */
4560 * ipr_target_alloc - Prepare for commands to a SCSI target
4561 * @starget: scsi target struct
4563 * If the device is a SATA device, this function allocates an
4564 * ATA port with libata, else it does nothing.
4567 * 0 on success / non-0 on failure
4569 static int ipr_target_alloc(struct scsi_target *starget)
4571 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4572 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4573 struct ipr_sata_port *sata_port;
4574 struct ata_port *ap;
4575 struct ipr_resource_entry *res;
4576 unsigned long lock_flags;
4578 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4579 res = ipr_find_starget(starget);
4580 starget->hostdata = NULL;
/* SATA target: allocate the sata_port and libata port with the host
 * lock dropped (both allocations can sleep), then retake the lock to
 * wire up the cross-references. */
4582 if (res && ipr_is_gata(res)) {
4583 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4584 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4588 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4590 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4591 sata_port->ioa_cfg = ioa_cfg;
4593 sata_port->res = res;
4595 res->sata_port = sata_port;
4596 ap->private_data = sata_port;
4597 starget->hostdata = sata_port;
4603 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* NOTE(review): elided hunk — missing kernel-doc opening, braces and the
 * 'if (sata_port)' guard around the teardown; code kept byte-identical. */
4609 * ipr_target_destroy - Destroy a SCSI target
4610 * @starget: scsi target struct
4612 * If the device was a SATA device, this function frees the libata
4613 * ATA port, else it does nothing.
4616 static void ipr_target_destroy(struct scsi_target *starget)
4618 struct ipr_sata_port *sata_port = starget->hostdata;
4619 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4620 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
/* On sis64, release the virtual-bus target id bitmap slot if no
 * resource entry still references this target. */
4622 if (ioa_cfg->sis64) {
4623 if (!ipr_find_starget(starget)) {
4624 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4625 clear_bit(starget->id, ioa_cfg->array_ids);
4626 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4627 clear_bit(starget->id, ioa_cfg->vset_ids);
4628 else if (starget->channel == 0)
4629 clear_bit(starget->id, ioa_cfg->target_ids);
/* Tear down the libata port allocated in ipr_target_alloc(). */
4634 starget->hostdata = NULL;
4635 ata_sas_port_destroy(sata_port->ap);
4641 * ipr_find_sdev - Find device based on bus/target/lun.
4642 * @sdev: scsi device struct
4645 * resource entry pointer if found / NULL if not found
/* Caller must hold the host lock: walks ioa_cfg->used_res_q unlocked here. */
4647 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4649 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4650 struct ipr_resource_entry *res;
4652 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4653 if ((res->bus == sdev->channel) &&
4654 (res->target == sdev->id) &&
4655 (res->lun == sdev->lun))
4663 * ipr_slave_destroy - Unconfigure a SCSI device
4664 * @sdev: scsi device struct
4669 static void ipr_slave_destroy(struct scsi_device *sdev)
4671 struct ipr_resource_entry *res;
4672 struct ipr_ioa_cfg *ioa_cfg;
4673 unsigned long lock_flags = 0;
4675 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
/* Under the host lock, sever the resource <-> sdev association; for SATA
 * resources also mark the ATA link's device class as absent. */
4677 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4678 res = (struct ipr_resource_entry *) sdev->hostdata;
4681 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4682 sdev->hostdata = NULL;
4684 res->sata_port = NULL;
4686 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4690 * ipr_slave_configure - Configure a SCSI device
4691 * @sdev: scsi device struct
4693 * This function configures the specified scsi device.
4698 static int ipr_slave_configure(struct scsi_device *sdev)
4700 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4701 struct ipr_resource_entry *res;
4702 struct ata_port *ap = NULL;
4703 unsigned long lock_flags = 0;
4704 char buffer[IPR_MAX_RES_PATH_LENGTH];
4706 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4707 res = sdev->hostdata;
/* AF DASD devices are presented as RAID type; AF DASD and IOA resources are
 * hidden from upper-layer drivers (no_uld_attach). */
4709 if (ipr_is_af_dasd_device(res))
4710 sdev->type = TYPE_RAID;
4711 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4712 sdev->scsi_level = 4;
4713 sdev->no_uld_attach = 1;
/* Volume sets get a longer request timeout and a larger max transfer size. */
4715 if (ipr_is_vset_device(res)) {
4716 blk_queue_rq_timeout(sdev->request_queue,
4717 IPR_VSET_RW_TIMEOUT);
4718 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4720 if (ipr_is_gata(res) && res->sata_port)
4721 ap = res->sata_port->ap;
4722 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* SATA devices: use the ATA per-LUN queue depth and libata slave config. */
4725 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4726 ata_sas_slave_configure(sdev, ap);
4730 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4731 ipr_format_res_path(ioa_cfg,
4732 res->res_path, buffer, sizeof(buffer)));
4735 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4740 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4741 * @sdev: scsi device struct
4743 * This function initializes an ATA port so that future commands
4744 * sent through queuecommand will work.
4749 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4751 struct ipr_sata_port *sata_port = NULL;
/* sata_port was stashed in starget->hostdata by ipr_target_alloc(). */
4755 if (sdev->sdev_target)
4756 sata_port = sdev->sdev_target->hostdata;
4758 rc = ata_sas_port_init(sata_port->ap);
4760 rc = ata_sas_sync_probe(sata_port->ap);
/* On probe failure, undo the slave setup. */
4764 ipr_slave_destroy(sdev);
4771 * ipr_slave_alloc - Prepare for commands to a device.
4772 * @sdev: scsi device struct
4774 * This function saves a pointer to the resource entry
4775 * in the scsi device struct if the device exists. We
4776 * can then use this pointer in ipr_queuecommand when
4777 * handling new commands.
4780 * 0 on success / -ENXIO if device does not exist
4782 static int ipr_slave_alloc(struct scsi_device *sdev)
4784 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4785 struct ipr_resource_entry *res;
4786 unsigned long lock_flags;
4789 sdev->hostdata = NULL;
4791 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4793 res = ipr_find_sdev(sdev);
4798 sdev->hostdata = res;
/* Non-NACA models need a SYNC COMPLETE after (re)configuration. */
4799 if (!ipr_is_naca_model(res))
4800 res->needs_sync_complete = 1;
/* SATA: drop the lock (libata init may sleep) and finish via the ATA path. */
4802 if (ipr_is_gata(res)) {
4803 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4804 return ipr_ata_slave_alloc(sdev);
4808 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* SCSI EH host-reset handler: kick off an adapter reset (unless one is in
 * flight or the IOA is dead), wait for it to complete, then report result. */
4813 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4815 struct ipr_ioa_cfg *ioa_cfg;
4816 unsigned long lock_flags = 0;
4820 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4821 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4823 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4824 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4825 dev_err(&ioa_cfg->pdev->dev,
4826 "Adapter being reset as a result of error recovery.\n");
4828 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4829 ioa_cfg->sdt_state = GET_DUMP;
/* Sleep (lock dropped) until the reset/reload sequence finishes. */
4832 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4833 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4834 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4836 /* If we got hit with a host reset while we were already resetting
4837 the adapter for some reason, and the reset failed. */
4838 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4843 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4849 * ipr_device_reset - Reset the device
4850 * @ioa_cfg: ioa config struct
4851 * @res: resource entry struct
4853 * This function issues a device reset to the affected device.
4854 * If the device is a SCSI device, a LUN reset will be sent
4855 * to the device first. If that does not work, a target reset
4856 * will be sent. If the device is a SATA device, a PHY reset will
4860 * 0 on success / non-zero on failure
4862 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4863 struct ipr_resource_entry *res)
4865 struct ipr_cmnd *ipr_cmd;
4866 struct ipr_ioarcb *ioarcb;
4867 struct ipr_cmd_pkt *cmd_pkt;
4868 struct ipr_ioarcb_ata_regs *regs;
4872 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4873 ioarcb = &ipr_cmd->ioarcb;
4874 cmd_pkt = &ioarcb->cmd_pkt;
/* ATA register block lives in a different spot on SIS-64 vs SIS-32. */
4876 if (ipr_cmd->ioa_cfg->sis64) {
4877 regs = &ipr_cmd->i.ata_ioadl.regs;
4878 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4880 regs = &ioarcb->u.add_data.u.regs;
/* Build an IOA RESET DEVICE command; SATA devices get a PHY reset variant. */
4882 ioarcb->res_handle = res->res_handle;
4883 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4884 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4885 if (ipr_is_gata(res)) {
4886 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4887 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4888 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
/* Issue synchronously and recycle the command block afterwards. */
4891 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4892 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4893 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
/* Preserve the returned ATA taskfile status for libata consumers. */
4894 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4895 if (ipr_cmd->ioa_cfg->sis64)
4896 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4897 sizeof(struct ipr_ioasa_gata));
4899 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4900 sizeof(struct ipr_ioasa_gata));
4904 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4908 * ipr_sata_reset - Reset the SATA port
4909 * @link: SATA link to reset
4910 * @classes: class of the attached device
4912 * This function issues a SATA phy reset to the affected ATA link.
4915 * 0 on success / non-zero on failure
4917 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4918 unsigned long deadline)
4920 struct ipr_sata_port *sata_port = link->ap->private_data;
4921 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4922 struct ipr_resource_entry *res;
4923 unsigned long lock_flags = 0;
/* Wait out any in-progress adapter reset/reload before touching the device;
 * lock is dropped around the wait and re-taken each iteration. */
4927 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4928 while (ioa_cfg->in_reset_reload) {
4929 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4930 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4931 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4934 res = sata_port->res;
4936 rc = ipr_device_reset(ioa_cfg, res);
4937 *classes = res->ata_class;
4940 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4946 * ipr_eh_dev_reset - Reset the device
4947 * @scsi_cmd: scsi command struct
4949 * This function issues a device reset to the affected device.
4950 * A LUN reset will be sent to the device first. If that does
4951 * not work, a target reset will be sent.
/* Called with the host lock held (see ipr_eh_dev_reset wrapper below in the
 * full source). NOTE(review): listing elided; some branches/returns missing. */
4956 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4958 struct ipr_cmnd *ipr_cmd;
4959 struct ipr_ioa_cfg *ioa_cfg;
4960 struct ipr_resource_entry *res;
4961 struct ata_port *ap;
4963 struct ipr_hrr_queue *hrrq;
4966 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4967 res = scsi_cmd->device->hostdata;
4973 * If we are currently going through reset/reload, return failed. This will force the
4974 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4977 if (ioa_cfg->in_reset_reload)
4979 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
/* Redirect every op pending against this resource to the EH done handlers so
 * that the blocked EH thread is woken when they complete. */
4982 for_each_hrrq(hrrq, ioa_cfg) {
4983 spin_lock(&hrrq->_lock);
4984 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4985 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4986 if (ipr_cmd->scsi_cmd)
4987 ipr_cmd->done = ipr_scsi_eh_done;
4989 ipr_cmd->done = ipr_sata_eh_done;
4991 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4992 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4993 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4997 spin_unlock(&hrrq->_lock);
4999 res->resetting_device = 1;
5000 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
/* SATA path: hand the reset to libata's standard error handler; the host
 * lock must be released across that call. */
5002 if (ipr_is_gata(res) && res->sata_port) {
5003 ap = res->sata_port->ap;
5004 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5005 ata_std_error_handler(ap);
5006 spin_lock_irq(scsi_cmd->device->host->host_lock);
5008 for_each_hrrq(hrrq, ioa_cfg) {
5009 spin_lock(&hrrq->_lock);
5010 list_for_each_entry(ipr_cmd,
5011 &hrrq->hrrq_pending_q, queue) {
5012 if (ipr_cmd->ioarcb.res_handle ==
5018 spin_unlock(&hrrq->_lock);
5021 rc = ipr_device_reset(ioa_cfg, res);
5022 res->resetting_device = 0;
5023 res->reset_occurred = 1;
5026 return rc ? FAILED : SUCCESS;
/* Lock-taking wrapper around __ipr_eh_dev_reset() for the SCSI EH callback. */
5029 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5033 spin_lock_irq(cmd->device->host->host_lock);
5034 rc = __ipr_eh_dev_reset(cmd);
5035 spin_unlock_irq(cmd->device->host->host_lock);
5041 * ipr_bus_reset_done - Op done function for bus reset.
5042 * @ipr_cmd: ipr command struct
5044 * This function is the op done function for a bus reset
5049 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5051 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5052 struct ipr_resource_entry *res;
/* On SIS-32, tell the midlayer which bus was reset (resource lookup by handle). */
5055 if (!ioa_cfg->sis64)
5056 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5057 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5058 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5064 * If abort has not completed, indicate the reset has, else call the
5065 * abort's done function to wake the sleeping eh thread
5067 if (ipr_cmd->sibling->sibling)
5068 ipr_cmd->sibling->sibling = NULL;
5070 ipr_cmd->sibling->done(ipr_cmd->sibling);
5072 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5077 * ipr_abort_timeout - An abort task has timed out
5078 * @ipr_cmd: ipr command struct
5080 * This function handles when an abort task times out. If this
5081 * happens we issue a bus reset since we have resources tied
5082 * up that must be freed before returning to the midlayer.
5087 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5089 struct ipr_cmnd *reset_cmd;
5090 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5091 struct ipr_cmd_pkt *cmd_pkt;
5092 unsigned long lock_flags = 0;
/* Bail if the abort already completed or an adapter reset is in progress. */
5095 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5096 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5097 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5101 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
/* Link abort and bus-reset commands via ->sibling so ipr_bus_reset_done()
 * can tell whether the abort is still outstanding. */
5102 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5103 ipr_cmd->sibling = reset_cmd;
5104 reset_cmd->sibling = ipr_cmd;
5105 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5106 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5107 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5108 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5109 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5111 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5112 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5117 * ipr_cancel_op - Cancel specified op
5118 * @scsi_cmd: scsi command struct
5120 * This function cancels specified op.
/* Called with the host lock held (see ipr_eh_abort). NOTE(review): listing
 * elided — some early-return bodies are missing; verify against full source. */
5125 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5127 struct ipr_cmnd *ipr_cmd;
5128 struct ipr_ioa_cfg *ioa_cfg;
5129 struct ipr_resource_entry *res;
5130 struct ipr_cmd_pkt *cmd_pkt;
5133 struct ipr_hrr_queue *hrrq;
5136 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5137 res = scsi_cmd->device->hostdata;
5139 /* If we are currently going through reset/reload, return failed.
5140 * This will force the mid-layer to call ipr_eh_host_reset,
5141 * which will then go to sleep and wait for the reset to complete
5143 if (ioa_cfg->in_reset_reload ||
5144 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5150 * If we are aborting a timed out op, chances are that the timeout was caused
5151 * by a still not detected EEH error. In such cases, reading a register will
5152 * trigger the EEH recovery infrastructure.
5154 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* Only generic SCSI resources support CANCEL ALL REQUESTS. */
5156 if (!ipr_is_gscsi(res))
/* If the command is still pending on an HRRQ, redirect its completion to the
 * EH done handler so the midlayer command is released correctly. */
5159 for_each_hrrq(hrrq, ioa_cfg) {
5160 spin_lock(&hrrq->_lock);
5161 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5162 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5163 ipr_cmd->done = ipr_scsi_eh_done;
5168 spin_unlock(&hrrq->_lock);
/* Issue CANCEL ALL REQUESTS synchronously; ipr_abort_timeout escalates to a
 * bus reset if this abort itself times out. */
5174 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5175 ipr_cmd->ioarcb.res_handle = res->res_handle;
5176 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5177 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5178 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5179 ipr_cmd->u.sdev = scsi_cmd->device;
5181 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5183 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5184 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5187 * If the abort task timed out and we sent a bus reset, we will get
5188 * one the following responses to the abort
5190 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5195 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5196 if (!ipr_is_naca_model(res))
5197 res->needs_sync_complete = 1;
5200 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5204 * ipr_eh_abort - Abort a single op
5205 * @scsi_cmd: scsi command struct
5208 * 0 if scan in progress / 1 if scan is complete
/* NOTE(review): the kernel-doc header above appears mismatched in this elided
 * listing — the function below is ipr_scan_finished, not ipr_eh_abort. */
5210 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5212 unsigned long lock_flags;
5213 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
/* Scan is "finished" when the IOA is dead, scan_done is set, or twice the
 * transition-to-operational timeout has elapsed. */
5216 spin_lock_irqsave(shost->host_lock, lock_flags);
5217 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5219 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5221 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5226 * ipr_eh_host_reset - Reset the host adapter
5227 * @scsi_cmd: scsi command struct
/* NOTE(review): header above appears mismatched in this elided listing —
 * this is the abort EH callback, a lock-taking wrapper around ipr_cancel_op. */
5232 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5234 unsigned long flags;
5239 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5240 rc = ipr_cancel_op(scsi_cmd);
5241 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5248 * ipr_handle_other_interrupt - Handle "other" interrupts
5249 * @ioa_cfg: ioa config struct
5250 * @int_reg: interrupt register
5253 * IRQ_NONE / IRQ_HANDLED
5255 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5258 irqreturn_t rc = IRQ_HANDLED;
/* Mask off interrupts we are not sensing. */
5261 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5262 int_reg &= ~int_mask_reg;
5264 /* If an interrupt on the adapter did not occur, ignore it.
5265 * Or in the case of SIS 64, check for a stage change interrupt.
5267 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5268 if (ioa_cfg->sis64) {
5269 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5270 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5271 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5273 /* clear stage change */
5274 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5275 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
/* Resume the reset job that was waiting on the stage change. */
5276 list_del(&ioa_cfg->reset_cmd->queue);
5277 del_timer(&ioa_cfg->reset_cmd->timer);
5278 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
/* Adapter transitioned to operational: mask+clear the interrupt and resume
 * the pending reset job. */
5286 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5287 /* Mask the interrupt */
5288 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5290 /* Clear the interrupt */
5291 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5292 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5294 list_del(&ioa_cfg->reset_cmd->queue);
5295 del_timer(&ioa_cfg->reset_cmd->timer);
5296 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5297 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5298 if (ioa_cfg->clear_isr) {
5299 if (ipr_debug && printk_ratelimit())
5300 dev_err(&ioa_cfg->pdev->dev,
5301 "Spurious interrupt detected. 0x%08X\n", int_reg);
5302 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5303 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
/* Anything else is a fatal adapter error: record state and reset the IOA. */
5307 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5308 ioa_cfg->ioa_unit_checked = 1;
5309 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5310 dev_err(&ioa_cfg->pdev->dev,
5311 "No Host RRQ. 0x%08X\n", int_reg);
5313 dev_err(&ioa_cfg->pdev->dev,
5314 "Permanent IOA failure. 0x%08X\n", int_reg);
5316 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5317 ioa_cfg->sdt_state = GET_DUMP;
5319 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5320 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5327 * ipr_isr_eh - Interrupt service routine error handler
5328 * @ioa_cfg: ioa config struct
5329 * @msg: message to log
/* Logs the error (with @number appended), bumps the error counter, arms a
 * dump capture if one was requested, and resets the adapter. */
5334 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5336 ioa_cfg->errors_logged++;
5337 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5339 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5340 ioa_cfg->sdt_state = GET_DUMP;
5342 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/* Drain up to @budget completed responses from one host response ring queue,
 * moving the finished commands onto @doneq for the caller to complete outside
 * the lock. budget < 0 appears to mean "no limit" (see callers). Returns the
 * number of responses processed. Caller holds the hrrq lock. */
5345 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5346 struct list_head *doneq)
5350 struct ipr_cmnd *ipr_cmd;
5351 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5354 /* If interrupts are disabled, ignore the interrupt */
5355 if (!hrr_queue->allow_interrupts)
/* The toggle bit flips each time the ring wraps; entries whose toggle matches
 * the queue's expected value are new completions from the IOA. */
5358 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5359 hrr_queue->toggle_bit) {
5361 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5362 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5363 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
/* A handle outside this queue's command-id window is an adapter error. */
5365 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5366 cmd_index < hrr_queue->min_cmd_id)) {
5368 "Invalid response handle from IOA: ",
5373 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5374 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5376 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5378 list_move_tail(&ipr_cmd->queue, doneq);
/* Advance the ring pointer, wrapping (and flipping toggle) at the end. */
5380 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5381 hrr_queue->hrrq_curr++;
5383 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5384 hrr_queue->toggle_bit ^= 1u;
5387 if (budget > 0 && num_hrrq >= budget)
/* blk_iopoll poll callback: drain up to @budget completions from the hrrq
 * under its lock, re-enable interrupt-driven mode when under budget, then
 * complete the drained commands outside the lock. Returns ops completed. */
5394 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5396 struct ipr_ioa_cfg *ioa_cfg;
5397 struct ipr_hrr_queue *hrrq;
5398 struct ipr_cmnd *ipr_cmd, *temp;
5399 unsigned long hrrq_flags;
5403 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5404 ioa_cfg = hrrq->ioa_cfg;
5406 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5407 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5409 if (completed_ops < budget)
5410 blk_iopoll_complete(iop);
5411 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Finish each completed command with the lock dropped. */
5413 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5414 list_del(&ipr_cmd->queue);
5415 del_timer(&ipr_cmd->timer);
5416 ipr_cmd->fast_done(ipr_cmd);
5419 return completed_ops;
5423 * ipr_isr - Interrupt service routine
5425 * @devp: pointer to ioa config struct
5428 * IRQ_NONE / IRQ_HANDLED
/* NOTE(review): listing elided — the outer processing loop's braces and some
 * break statements are not visible; verify flow against the full source. */
5430 static irqreturn_t ipr_isr(int irq, void *devp)
5432 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5433 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5434 unsigned long hrrq_flags = 0;
5438 struct ipr_cmnd *ipr_cmd, *temp;
5439 irqreturn_t rc = IRQ_NONE;
5442 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5443 /* If interrupts are disabled, ignore the interrupt */
5444 if (!hrrq->allow_interrupts) {
5445 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Drain all completions (budget -1 = unlimited); if any were found and the
 * adapter requires explicit clearing, acknowledge HRRQ_UPDATED, re-reading
 * until it deasserts or the retry limit is hit. */
5450 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5453 if (!ioa_cfg->clear_isr)
5456 /* Clear the PCI interrupt */
5459 writel(IPR_PCII_HRRQ_UPDATED,
5460 ioa_cfg->regs.clr_interrupt_reg32);
5461 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5462 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5463 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5465 } else if (rc == IRQ_NONE && irq_none == 0) {
5466 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5468 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5469 int_reg & IPR_PCII_HRRQ_UPDATED) {
5471 "Error clearing HRRQ: ", num_hrrq);
/* Nothing on the ring: let the "other interrupt" handler classify it. */
5478 if (unlikely(rc == IRQ_NONE))
5479 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5481 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Complete drained commands outside the hrrq lock. */
5482 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5483 list_del(&ipr_cmd->queue);
5484 del_timer(&ipr_cmd->timer);
5485 ipr_cmd->fast_done(ipr_cmd);
5491 * ipr_isr_mhrrq - Interrupt service routine
5493 * @devp: pointer to ioa config struct
5496 * IRQ_NONE / IRQ_HANDLED
/* Per-queue ISR used when multiple HRRQs / MSI-X vectors are enabled. */
5498 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5500 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5501 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5502 unsigned long hrrq_flags = 0;
5503 struct ipr_cmnd *ipr_cmd, *temp;
5504 irqreturn_t rc = IRQ_NONE;
5507 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5509 /* If interrupts are disabled, ignore the interrupt */
5510 if (!hrrq->allow_interrupts) {
5511 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* With iopoll active (SIS-64, multiple vectors), just schedule the poller if
 * the ring head shows a new completion; the poller drains it later. */
5515 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5516 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5518 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5519 blk_iopoll_sched(&hrrq->iopoll);
5520 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Otherwise drain the ring inline, as in ipr_isr(). */
5524 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5527 if (ipr_process_hrrq(hrrq, -1, &doneq))
5531 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5533 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5534 list_del(&ipr_cmd->queue);
5535 del_timer(&ipr_cmd->timer);
5536 ipr_cmd->fast_done(ipr_cmd);
5542 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5543 * @ioa_cfg: ioa config struct
5544 * @ipr_cmd: ipr command struct
5547 * 0 on success / -1 on failure
5549 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5550 struct ipr_cmnd *ipr_cmd)
5553 struct scatterlist *sg;
5555 u32 ioadl_flags = 0;
5556 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5557 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5558 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5560 length = scsi_bufflen(scsi_cmd);
/* DMA-map the midlayer's scatterlist; failure is logged (rate-limited). */
5564 nseg = scsi_dma_map(scsi_cmd);
5566 if (printk_ratelimit())
5567 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5571 ipr_cmd->dma_use_sg = nseg;
5573 ioarcb->data_transfer_length = cpu_to_be32(length);
5575 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
/* Direction determines the descriptor flags; writes also set the IOARCB flag. */
5577 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5578 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5579 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5580 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5581 ioadl_flags = IPR_IOADL_FLAGS_READ;
5583 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5584 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5585 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5586 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
/* Mark the final descriptor so the IOA knows where the list ends. */
5589 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5594 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5595 * @ioa_cfg: ioa config struct
5596 * @ipr_cmd: ipr command struct
5599 * 0 on success / -1 on failure
/* 32-bit (SIS-32) variant of ipr_build_ioadl64: separate read/write length
 * fields and 32-bit descriptor addresses. */
5601 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5602 struct ipr_cmnd *ipr_cmd)
5605 struct scatterlist *sg;
5607 u32 ioadl_flags = 0;
5608 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5609 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5610 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5612 length = scsi_bufflen(scsi_cmd);
5616 nseg = scsi_dma_map(scsi_cmd);
5618 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5622 ipr_cmd->dma_use_sg = nseg;
5624 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5625 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5626 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5627 ioarcb->data_transfer_length = cpu_to_be32(length);
5629 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5630 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5631 ioadl_flags = IPR_IOADL_FLAGS_READ;
5632 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5633 ioarcb->read_ioadl_len =
5634 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* Small lists fit inline in the IOARCB's add_data area, saving a fetch. */
5637 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5638 ioadl = ioarcb->u.add_data.u.ioadl;
5639 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5640 offsetof(struct ipr_ioarcb, u.add_data));
5641 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5644 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5645 ioadl[i].flags_and_data_len =
5646 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5647 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5650 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5655 * ipr_erp_done - Process completion of ERP for a device
5656 * @ipr_cmd: ipr command struct
5658 * This function copies the sense buffer into the scsi_cmd
5659 * struct and pushes the scsi_done function.
5664 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5666 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5667 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5668 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* If the REQUEST SENSE itself failed, report DID_ERROR; otherwise hand the
 * captured sense data up to the midlayer. */
5670 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5671 scsi_cmd->result |= (DID_ERROR << 16);
5672 scmd_printk(KERN_ERR, scsi_cmd,
5673 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5675 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5676 SCSI_SENSE_BUFFERSIZE);
5680 if (!ipr_is_naca_model(res))
5681 res->needs_sync_complete = 1;
/* Unmap DMA, recycle the command block, and complete the SCSI command. */
5684 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5685 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5686 scsi_cmd->scsi_done(scsi_cmd);
5690 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5691 * @ipr_cmd: ipr command struct
/* Clears the command packet, length fields and status so the same ipr_cmnd
 * can be reissued for error recovery, then re-points the IOADL address at
 * the block's own descriptor area (offset differs on SIS-64 vs SIS-32). */
5696 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5698 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5699 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5700 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5702 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5703 ioarcb->data_transfer_length = 0;
5704 ioarcb->read_data_transfer_length = 0;
5705 ioarcb->ioadl_len = 0;
5706 ioarcb->read_ioadl_len = 0;
5707 ioasa->hdr.ioasc = 0;
5708 ioasa->hdr.residual_data_len = 0;
5710 if (ipr_cmd->ioa_cfg->sis64)
5711 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5712 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5714 ioarcb->write_ioadl_addr =
5715 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5716 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5721 * ipr_erp_request_sense - Send request sense to a device
5722 * @ipr_cmd: ipr command struct
5724 * This function sends a request sense to a device as a result
5725 * of a check condition.
5730 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5732 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5733 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* If the prior ERP step already failed, finish ERP instead of retrying. */
5735 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5736 ipr_erp_done(ipr_cmd);
5740 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* Build a REQUEST SENSE CDB targeting the command's own sense buffer. */
5742 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5743 cmd_pkt->cdb[0] = REQUEST_SENSE;
5744 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5745 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5746 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5747 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5749 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5750 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
/* Completion chains to ipr_erp_done(), which copies sense data upstream. */
5752 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5753 IPR_REQUEST_SENSE_TIMEOUT * 2);
5757 * ipr_erp_cancel_all - Send cancel all to a device
5758 * @ipr_cmd: ipr command struct
5760 * This function sends a cancel all to a device to clear the
5761 * queue. If we are running TCQ on the device, QERR is set to 1,
5762 * which means all outstanding ops have been dropped on the floor.
5763 * Cancel all will return them to us.
5768 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5770 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5771 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5772 struct ipr_cmd_pkt *cmd_pkt;
5776 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* Without tagged queuing there is nothing to cancel — skip straight to
 * REQUEST SENSE. */
5778 if (!scsi_cmd->device->simple_tags) {
5779 ipr_erp_request_sense(ipr_cmd);
5783 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5784 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5785 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
/* On completion, continue the ERP chain with REQUEST SENSE. */
5787 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5788 IPR_CANCEL_ALL_TIMEOUT);
5792 * ipr_dump_ioasa - Dump contents of IOASA
5793 * @ioa_cfg: ioa config struct
5794 * @ipr_cmd: ipr command struct
5795 * @res: resource entry struct
5797 * This function is invoked by the interrupt handler when ops
5798 * fail. It will log the IOASA if appropriate. Only called
5804 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5805 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5809 u32 ioasc, fd_ioasc;
5810 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5811 __be32 *ioasa_data = (__be32 *)ioasa;
5814 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5815 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5820 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
/* Prefer the failing-device IOASC for the error lookup after a bus reset. */
5823 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5824 error_index = ipr_get_error(fd_ioasc);
5826 error_index = ipr_get_error(ioasc);
/* Below max log level, skip entries the IOA already logged, non-GSCSI
 * resources, and errors flagged as not worth logging. */
5828 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5829 /* Don't log an error if the IOA already logged one */
5830 if (ioasa->hdr.ilid != 0)
5833 if (!ipr_is_gscsi(res))
5836 if (ipr_error_table[error_index].log_ioasa == 0)
5840 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
/* Clamp the dump length to the IOASA structure size for this SIS level. */
5842 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5843 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5844 data_len = sizeof(struct ipr_ioasa64);
5845 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5846 data_len = sizeof(struct ipr_ioasa);
5848 ipr_err("IOASA Dump:\n");
/* Hex-dump the IOASA four 32-bit words per line. */
5850 for (i = 0; i < data_len / 4; i += 4) {
5851 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5852 be32_to_cpu(ioasa_data[i]),
5853 be32_to_cpu(ioasa_data[i+1]),
5854 be32_to_cpu(ioasa_data[i+2]),
5855 be32_to_cpu(ioasa_data[i+3]));
5860 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5862 * @sense_buf: sense data buffer
/*
 * ipr_gen_sense - Synthesize SCSI sense data from the command's IOASC.
 * Builds descriptor-format (0x72) sense for vset devices with a 64-bit
 * failing LBA, otherwise fixed-format (0x70) sense, and marks the
 * command CHECK CONDITION. Driver-generated IOASCs are not translated.
 * NOTE(review): elided lines (braces/returns) in this excerpt; comments only.
 */
5867 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5870 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5871 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5872 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5873 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5875 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5877 if (ioasc >= IPR_FIRST_DRIVER_IOASC)	/* driver-internal code: no sense mapping */
5880 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
/* vset device with a failing LBA above 32 bits: descriptor-format sense */
5882 if (ipr_is_vset_device(res) &&
5883 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5884 ioasa->u.vset.failing_lba_hi != 0) {
5885 sense_buf[0] = 0x72;	/* descriptor format */
5886 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5887 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5888 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5892 sense_buf[9] = 0x0A;	/* additional descriptor length */
5893 sense_buf[10] = 0x80;	/* information field valid */
5895 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
/* upper 32 bits of the 64-bit failing LBA, big-endian in the descriptor */
5897 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5898 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5899 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5900 sense_buf[15] = failing_lba & 0x000000ff;
5902 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
/* lower 32 bits */
5904 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5905 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5906 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5907 sense_buf[19] = failing_lba & 0x000000ff;
/* all other cases: fixed-format sense */
5909 sense_buf[0] = 0x70;
5910 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5911 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5912 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5914 /* Illegal request */
5915 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5916 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5917 sense_buf[7] = 10; /* additional length */
5919 /* IOARCB was in error */
5920 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5921 sense_buf[15] = 0xC0;	/* SKSV + C/D: error in the CDB */
5922 else /* Parameter data was invalid */
5923 sense_buf[15] = 0x80;	/* SKSV only: error in parameter data */
/* field pointer (big-endian 16-bit) identifying the offending byte */
5926 ((IPR_FIELD_POINTER_MASK &
5927 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5929 (IPR_FIELD_POINTER_MASK &
5930 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5932 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5933 if (ipr_is_vset_device(res))
5934 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5936 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5938 sense_buf[0] |= 0x80; /* Or in the Valid bit */
/* fixed-format information field: 32-bit failing LBA */
5939 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5940 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5941 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5942 sense_buf[6] = failing_lba & 0x000000ff;
5945 sense_buf[7] = 6; /* additional length */
5951 * ipr_get_autosense - Copy autosense data to sense buffer
5952 * @ipr_cmd: ipr command struct
5954 * This function copies the autosense buffer to the buffer
5955 * in the scsi_cmd, if there is autosense available.
5958 * 1 if autosense was available / 0 if not
/*
 * ipr_get_autosense - Copy adapter-provided autosense into the scsi_cmnd.
 * Returns early (0 per the header comment above) when the IOASA does not
 * flag valid autosense; otherwise copies from the sis64 or sis32 IOASA
 * layout, truncated to SCSI_SENSE_BUFFERSIZE.
 */
5960 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5962 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5963 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5965 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5968 if (ipr_cmd->ioa_cfg->sis64)
5969 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5970 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5971 SCSI_SENSE_BUFFERSIZE));
5973 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5974 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5975 SCSI_SENSE_BUFFERSIZE));
5980 * ipr_erp_start - Process an error response for a SCSI op
5981 * @ioa_cfg: ioa config struct
5982 * @ipr_cmd: ipr command struct
5984 * This function determines whether or not to initiate ERP
5985 * on the affected device.
/*
 * ipr_erp_start - Begin error recovery for a failed SCSI op.
 * Maps the masked IOASC to a mid-layer result (retry, no-connect, error,
 * check condition with autosense/REQUEST SENSE via cancel-all, ...) and
 * flags devices that need a sync-complete on the next command. Falls
 * through to unmap DMA, free the command, and complete the scsi_cmnd.
 * NOTE(review): case bodies and break statements are elided in this excerpt.
 */
5990 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5991 struct ipr_cmnd *ipr_cmd)
5993 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5994 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5995 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5996 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5999 ipr_scsi_eh_done(ipr_cmd);
/* Synthesize sense for non-GSCSI devices (IOASC is not device status) */
6003 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6004 ipr_gen_sense(ipr_cmd);
6006 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6008 switch (masked_ioasc) {
6009 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6010 if (ipr_is_naca_model(res))
6011 scsi_cmd->result |= (DID_ABORT << 16);
6013 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6015 case IPR_IOASC_IR_RESOURCE_HANDLE:
6016 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6017 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6019 case IPR_IOASC_HW_SEL_TIMEOUT:
6020 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6021 if (!ipr_is_naca_model(res))
6022 res->needs_sync_complete = 1;
6024 case IPR_IOASC_SYNC_REQUIRED:
6026 res->needs_sync_complete = 1;
6027 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6029 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6030 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6031 scsi_cmd->result |= (DID_PASSTHROUGH << 16);	/* DID_PASSTHROUGH suppresses mid-layer retry */
6033 case IPR_IOASC_BUS_WAS_RESET:
6034 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6036 * Report the bus reset and ask for a retry. The device
6037 * will give CC/UA the next command.
6039 if (!res->resetting_device)
6040 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6041 scsi_cmd->result |= (DID_ERROR << 16);
6042 if (!ipr_is_naca_model(res))
6043 res->needs_sync_complete = 1;
6045 case IPR_IOASC_HW_DEV_BUS_STATUS:
6046 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);	/* pass device status through */
6047 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6048 if (!ipr_get_autosense(ipr_cmd)) {
6049 if (!ipr_is_naca_model(res)) {
6050 ipr_erp_cancel_all(ipr_cmd);	/* no autosense: start ERP to retrieve sense */
6055 if (!ipr_is_naca_model(res))
6056 res->needs_sync_complete = 1;
6058 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
/* default: errors above RECOVERED get DID_ERROR */
6061 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6062 scsi_cmd->result |= (DID_ERROR << 16);
6063 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6064 res->needs_sync_complete = 1;
/* common completion: unmap, recycle the command, notify the mid-layer */
6068 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6069 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6070 scsi_cmd->scsi_done(scsi_cmd);
6074 * ipr_scsi_done - mid-layer done function
6075 * @ipr_cmd: ipr command struct
6077 * This function is invoked by the interrupt handler for
6078 * ops generated by the SCSI mid-layer
/*
 * ipr_scsi_done - Completion for mid-layer SCSI ops.
 * Fast path (no sense key): unmap DMA, free the command, and complete
 * under the hrrq lock. Error path: run ipr_erp_start() under the same lock.
 */
6083 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6085 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6086 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6087 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6088 unsigned long hrrq_flags;
6090 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6092 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {	/* success fast path */
6093 scsi_dma_unmap(scsi_cmd);
6095 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6096 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6097 scsi_cmd->scsi_done(scsi_cmd);
6098 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6100 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6101 ipr_erp_start(ioa_cfg, ipr_cmd);	/* error path: classify and recover */
6102 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6107 * ipr_queuecommand - Queue a mid-layer request
6108 * @shost: scsi host struct
6109 * @scsi_cmd: scsi command struct
6111 * This function queues a request generated by the mid-layer.
6115 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6116 * SCSI_MLQUEUE_HOST_BUSY if host is busy
/*
 * ipr_queuecommand - Queue a mid-layer SCSI request to the adapter.
 * Routes SATA (GATA) devices to libata, picks an HRRQ, allocates an
 * ipr_cmnd, builds the IOARCB and IOADL, and fires the command. Returns
 * SCSI_MLQUEUE_HOST_BUSY when blocked or out of commands; completes with
 * DID_NO_CONNECT when the IOA is dead/removing or the resource is gone.
 * NOTE(review): goto labels and some early returns are elided in this excerpt.
 */
6118 static int ipr_queuecommand(struct Scsi_Host *shost,
6119 struct scsi_cmnd *scsi_cmd)
6121 struct ipr_ioa_cfg *ioa_cfg;
6122 struct ipr_resource_entry *res;
6123 struct ipr_ioarcb *ioarcb;
6124 struct ipr_cmnd *ipr_cmd;
6125 unsigned long hrrq_flags, lock_flags;
6127 struct ipr_hrr_queue *hrrq;
6130 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6132 scsi_cmd->result = (DID_OK << 16);
6133 res = scsi_cmd->device->hostdata;
/* SATA devices are driven through libata, under the host lock */
6135 if (ipr_is_gata(res) && res->sata_port) {
6136 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6137 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6138 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6142 hrrq_id = ipr_get_hrrq_index(ioa_cfg);	/* spread commands across HRRQs */
6143 hrrq = &ioa_cfg->hrrq[hrrq_id];
6145 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6147 * We are currently blocking all devices due to a host reset
6148 * We have told the host to stop giving us new requests, but
6149 * ERP ops don't count. FIXME
6151 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6152 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6153 return SCSI_MLQUEUE_HOST_BUSY;
6157 * FIXME - Create scsi_set_host_offline interface
6158 * and the ioa_is_dead check can be removed
6160 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6161 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6165 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6166 if (ipr_cmd == NULL) {
6167 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6168 return SCSI_MLQUEUE_HOST_BUSY;	/* no free command blocks */
6170 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6172 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6173 ioarcb = &ipr_cmd->ioarcb;
6175 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6176 ipr_cmd->scsi_cmd = scsi_cmd;
6177 ipr_cmd->done = ipr_scsi_eh_done;	/* EH completion until the cmd is committed */
6179 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6180 if (scsi_cmd->underflow == 0)
6181 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6183 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6184 if (ipr_is_gscsi(res) && res->reset_occurred) {
6185 res->reset_occurred = 0;
6186 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6188 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6189 if (scsi_cmd->flags & SCMD_TAGGED)
6190 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6192 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
/* vendor-unique CDBs (>= 0xC0) go to the IOA itself, not the device */
6195 if (scsi_cmd->cmnd[0] >= 0xC0 &&
6196 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6197 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
/* build the scatter/gather list for the SIS generation in use */
6201 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6203 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6205 spin_lock_irqsave(hrrq->lock, hrrq_flags);
/* re-check state under the lock: it may have changed since allocation */
6206 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6207 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6208 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6210 scsi_dma_unmap(scsi_cmd);
6211 return SCSI_MLQUEUE_HOST_BUSY;
6214 if (unlikely(hrrq->ioa_is_dead)) {
6215 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6216 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6217 scsi_dma_unmap(scsi_cmd);
6221 ioarcb->res_handle = res->res_handle;
6222 if (res->needs_sync_complete) {	/* one-shot flag set by the ERP paths */
6223 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6224 res->needs_sync_complete = 0;
6226 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6227 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6228 ipr_send_command(ipr_cmd);
6229 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* dead/removing IOA or missing resource: fail the command immediately */
6233 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6234 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6235 scsi_cmd->result = (DID_NO_CONNECT << 16);
6236 scsi_cmd->scsi_done(scsi_cmd);
6237 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6242 * ipr_ioctl - IOCTL handler
6243 * @sdev: scsi device struct
6248 * 0 on success / other on failure
/*
 * ipr_ioctl - Device ioctl handler; forwards SATA device ioctls to libata.
 * HDIO_GET_IDENTITY is handled specially (body elided in this excerpt).
 */
6250 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6252 struct ipr_resource_entry *res;
6254 res = (struct ipr_resource_entry *)sdev->hostdata;
6255 if (res && ipr_is_gata(res)) {	/* only SATA devices take this path */
6256 if (cmd == HDIO_GET_IDENTITY)
6258 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6265 * ipr_info - Get information about the card/driver
6266 * @scsi_host: scsi host struct
6269 * pointer to buffer with description string
/*
 * ipr_ioa_info - Return a description string for the adapter.
 * Formats into a static buffer under the host lock; returns that buffer
 * (return statement elided in this excerpt).
 */
6271 static const char *ipr_ioa_info(struct Scsi_Host *host)
6273 static char buffer[512];	/* shared static buffer; lock guards the sprintf */
6274 struct ipr_ioa_cfg *ioa_cfg;
6275 unsigned long lock_flags = 0;
6277 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6279 spin_lock_irqsave(host->host_lock, lock_flags);
6280 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6281 spin_unlock_irqrestore(host->host_lock, lock_flags);
/* SCSI host template: wires the ipr entry points into the SCSI mid-layer */
6286 static struct scsi_host_template driver_template = {
6287 .module = THIS_MODULE,
6289 .info = ipr_ioa_info,
6291 .queuecommand = ipr_queuecommand,
6292 .eh_abort_handler = ipr_eh_abort,
6293 .eh_device_reset_handler = ipr_eh_dev_reset,
6294 .eh_host_reset_handler = ipr_eh_host_reset,
6295 .slave_alloc = ipr_slave_alloc,
6296 .slave_configure = ipr_slave_configure,
6297 .slave_destroy = ipr_slave_destroy,
6298 .scan_finished = ipr_scan_finished,
6299 .target_alloc = ipr_target_alloc,
6300 .target_destroy = ipr_target_destroy,
6301 .change_queue_depth = ipr_change_queue_depth,
6302 .bios_param = ipr_biosparam,
6303 .can_queue = IPR_MAX_COMMANDS,
6305 .sg_tablesize = IPR_MAX_SGLIST,
6306 .max_sectors = IPR_IOA_MAX_SECTORS,
6307 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6308 .use_clustering = ENABLE_CLUSTERING,
6309 .shost_attrs = ipr_ioa_attrs,	/* sysfs attributes for the host */
6310 .sdev_attrs = ipr_dev_attrs,	/* sysfs attributes per device */
6311 .proc_name = IPR_NAME,
6317 * ipr_ata_phy_reset - libata phy_reset handler
6318 * @ap: ata port to reset
/*
 * ipr_ata_phy_reset - libata phy_reset: reset the SATA device behind @ap.
 * Waits out any in-progress adapter reset, issues a device reset, and
 * classifies the ATA device (NONE on failure or unknown class).
 */
6321 static void ipr_ata_phy_reset(struct ata_port *ap)
6323 unsigned long flags;
6324 struct ipr_sata_port *sata_port = ap->private_data;
6325 struct ipr_resource_entry *res = sata_port->res;
6326 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6330 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* drop the lock while sleeping for reset/reload to finish, then retake it */
6331 while (ioa_cfg->in_reset_reload) {
6332 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6333 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6334 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6337 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6340 rc = ipr_device_reset(ioa_cfg, res);
6343 ap->link.device[0].class = ATA_DEV_NONE;	/* reset failed: no device */
6347 ap->link.device[0].class = res->ata_class;
6348 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6349 ap->link.device[0].class = ATA_DEV_NONE;
6352 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6357 * ipr_ata_post_internal - Cleanup after an internal command
6358 * @qc: ATA queued command
/*
 * ipr_ata_post_internal - Cleanup after a libata internal command.
 * Waits for any adapter reset to finish, then resets the device if the
 * qc is still on an HRRQ pending queue (i.e. it never completed).
 */
6363 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6365 struct ipr_sata_port *sata_port = qc->ap->private_data;
6366 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6367 struct ipr_cmnd *ipr_cmd;
6368 struct ipr_hrr_queue *hrrq;
6369 unsigned long flags;
6371 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6372 while (ioa_cfg->in_reset_reload) {
6373 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6374 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6375 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* scan every HRRQ's pending queue for the qc's outstanding command */
6378 for_each_hrrq(hrrq, ioa_cfg) {
6379 spin_lock(&hrrq->_lock);
6380 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6381 if (ipr_cmd->qc == qc) {
6382 ipr_device_reset(ioa_cfg, sata_port->res);	/* abort via device reset */
6386 spin_unlock(&hrrq->_lock);
6388 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6392 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6393 * @regs: destination
6394 * @tf: source ATA taskfile
/*
 * ipr_copy_sata_tf - Copy an ATA taskfile into the IOARCB register block.
 * Field-by-field copy of both the primary and HOB (high-order byte)
 * registers plus the control register; no byte swapping (all u8 fields).
 */
6399 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6400 struct ata_taskfile *tf)
6402 regs->feature = tf->feature;
6403 regs->nsect = tf->nsect;
6404 regs->lbal = tf->lbal;
6405 regs->lbam = tf->lbam;
6406 regs->lbah = tf->lbah;
6407 regs->device = tf->device;
6408 regs->command = tf->command;
/* HOB registers carry the upper halves for 48-bit LBA commands */
6409 regs->hob_feature = tf->hob_feature;
6410 regs->hob_nsect = tf->hob_nsect;
6411 regs->hob_lbal = tf->hob_lbal;
6412 regs->hob_lbam = tf->hob_lbam;
6413 regs->hob_lbah = tf->hob_lbah;
6414 regs->ctl = tf->ctl;
6418 * ipr_sata_done - done function for SATA commands
6419 * @ipr_cmd: ipr command struct
6421 * This function is invoked by the interrupt handler for
6422 * ops generated by the SCSI mid-layer to SATA devices
/*
 * ipr_sata_done - Completion for SATA passthrough commands.
 * Snapshots the GATA IOASA into the sata_port, reports device resets,
 * converts the IOASC/status into a libata error mask, frees the command
 * and completes the qc.
 */
6427 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6429 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6430 struct ata_queued_cmd *qc = ipr_cmd->qc;
6431 struct ipr_sata_port *sata_port = qc->ap->private_data;
6432 struct ipr_resource_entry *res = sata_port->res;
6433 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6435 spin_lock(&ipr_cmd->hrrq->_lock);
/* copy the GATA status area from whichever IOASA layout is in use */
6436 if (ipr_cmd->ioa_cfg->sis64)
6437 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6438 sizeof(struct ipr_ioasa_gata));
6440 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6441 sizeof(struct ipr_ioasa_gata));
6442 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6444 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6445 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6447 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6448 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);	/* force at least AC_ERR_OTHER */
6450 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6451 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6452 spin_unlock(&ipr_cmd->hrrq->_lock);
6453 ata_qc_complete(qc);
6457 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6458 * @ipr_cmd: ipr command struct
6459 * @qc: ATA queued command
/*
 * ipr_build_ata_ioadl64 - Build the 64-bit IOADL for an ATA qc (sis64).
 * Sets transfer direction flags, points the IOARCB at the command's
 * embedded ioadl64 array, fills one descriptor per SG element, and marks
 * the final descriptor with the LAST flag.
 */
6462 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6463 struct ata_queued_cmd *qc)
6465 u32 ioadl_flags = 0;
6466 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6467 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6468 struct ipr_ioadl64_desc *last_ioadl64 = NULL;	/* tracks final descriptor for LAST flag */
6469 int len = qc->nbytes;
6470 struct scatterlist *sg;
6472 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6477 if (qc->dma_dir == DMA_TO_DEVICE) {
6478 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6479 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6480 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6481 ioadl_flags = IPR_IOADL_FLAGS_READ;
6483 ioarcb->data_transfer_length = cpu_to_be32(len);
6485 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
/* IOADL lives inside the command block; give the IOA its bus address */
6486 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6487 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6489 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6490 ioadl64->flags = cpu_to_be32(ioadl_flags);
6491 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6492 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6494 last_ioadl64 = ioadl64;
6498 if (likely(last_ioadl64))
6499 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6503 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6504 * @ipr_cmd: ipr command struct
6505 * @qc: ATA queued command
/*
 * ipr_build_ata_ioadl - Build the 32-bit IOADL for an ATA qc (sis32).
 * Unlike the sis64 variant, write and read transfers use separate
 * length/IOADL-length fields in the IOARCB. Flags and length are packed
 * together in each descriptor; the final one gets the LAST flag.
 */
6508 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6509 struct ata_queued_cmd *qc)
6511 u32 ioadl_flags = 0;
6512 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6513 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6514 struct ipr_ioadl_desc *last_ioadl = NULL;	/* tracks final descriptor for LAST flag */
6515 int len = qc->nbytes;
6516 struct scatterlist *sg;
6522 if (qc->dma_dir == DMA_TO_DEVICE) {
6523 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6524 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6525 ioarcb->data_transfer_length = cpu_to_be32(len);
6527 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6528 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6529 ioadl_flags = IPR_IOADL_FLAGS_READ;
6530 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6531 ioarcb->read_ioadl_len =
6532 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6535 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6536 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));	/* flags and length share one word */
6537 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6543 if (likely(last_ioadl))
6544 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6548 * ipr_qc_defer - Get a free ipr_cmd
6549 * @qc: queued command
/*
 * ipr_qc_defer - libata qc_defer: reserve an ipr_cmnd for the qc.
 * Picks an HRRQ, and under its lock either stashes a free command block
 * in qc->lldd_task or asks libata to defer (ATA_DEFER_LINK) when the
 * adapter is not accepting commands or none are free.
 */
6554 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6556 struct ata_port *ap = qc->ap;
6557 struct ipr_sata_port *sata_port = ap->private_data;
6558 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6559 struct ipr_cmnd *ipr_cmd;
6560 struct ipr_hrr_queue *hrrq;
6563 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6564 hrrq = &ioa_cfg->hrrq[hrrq_id];
6566 qc->lldd_task = NULL;	/* cleared so issue can detect a failed defer */
6567 spin_lock(&hrrq->_lock);
6568 if (unlikely(hrrq->ioa_is_dead)) {
6569 spin_unlock(&hrrq->_lock);
6573 if (unlikely(!hrrq->allow_cmds)) {
6574 spin_unlock(&hrrq->_lock);
6575 return ATA_DEFER_LINK;
6578 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6579 if (ipr_cmd == NULL) {
6580 spin_unlock(&hrrq->_lock);
6581 return ATA_DEFER_LINK;
6584 qc->lldd_task = ipr_cmd;	/* hand the reserved command to ipr_qc_issue() */
6585 spin_unlock(&hrrq->_lock);
6590 * ipr_qc_issue - Issue a SATA qc to a device
6591 * @qc: queued command
/*
 * ipr_qc_issue - libata qc_issue: send a SATA qc to the adapter.
 * Uses the ipr_cmnd reserved by ipr_qc_defer(), builds the ATA register
 * block and IOADL, translates the qc protocol into IOARCB flags, and
 * fires the command. Returns AC_ERR_* on failure.
 * NOTE(review): some case bodies/breaks are elided in this excerpt.
 */
6596 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6598 struct ata_port *ap = qc->ap;
6599 struct ipr_sata_port *sata_port = ap->private_data;
6600 struct ipr_resource_entry *res = sata_port->res;
6601 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6602 struct ipr_cmnd *ipr_cmd;
6603 struct ipr_ioarcb *ioarcb;
6604 struct ipr_ioarcb_ata_regs *regs;
6606 if (qc->lldd_task == NULL)	/* defer never reserved a command */
6609 ipr_cmd = qc->lldd_task;
6610 if (ipr_cmd == NULL)
6611 return AC_ERR_SYSTEM;
6613 qc->lldd_task = NULL;	/* consume the reservation */
6614 spin_lock(&ipr_cmd->hrrq->_lock);
/* state may have changed since defer: re-check under the lock */
6615 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6616 ipr_cmd->hrrq->ioa_is_dead)) {
6617 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6618 spin_unlock(&ipr_cmd->hrrq->_lock);
6619 return AC_ERR_SYSTEM;
6622 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6623 ioarcb = &ipr_cmd->ioarcb;
/* sis64 keeps the ATA regs after the IOARCB; sis32 embeds them in it */
6625 if (ioa_cfg->sis64) {
6626 regs = &ipr_cmd->i.ata_ioadl.regs;
6627 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6629 regs = &ioarcb->u.add_data.u.regs;
6631 memset(regs, 0, sizeof(*regs));
6632 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6634 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6636 ipr_cmd->done = ipr_sata_done;
6637 ipr_cmd->ioarcb.res_handle = res->res_handle;
6638 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6639 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6640 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6641 ipr_cmd->dma_use_sg = qc->n_elem;
6644 ipr_build_ata_ioadl64(ipr_cmd, qc);
6646 ipr_build_ata_ioadl(ipr_cmd, qc);
6648 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6649 ipr_copy_sata_tf(regs, &qc->tf);
6650 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6651 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
/* map the ATA protocol to DMA / packet-command flags */
6653 switch (qc->tf.protocol) {
6654 case ATA_PROT_NODATA:
6659 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6662 case ATAPI_PROT_PIO:
6663 case ATAPI_PROT_NODATA:
6664 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6667 case ATAPI_PROT_DMA:
6668 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6669 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
/* unsupported protocol: undo the queueing and reject */
6674 spin_unlock(&ipr_cmd->hrrq->_lock);
6675 return AC_ERR_INVALID;
6678 ipr_send_command(ipr_cmd);
6679 spin_unlock(&ipr_cmd->hrrq->_lock);
6685 * ipr_qc_fill_rtf - Read result TF
6686 * @qc: ATA queued command
/*
 * ipr_qc_fill_rtf - libata qc_fill_rtf: copy the saved GATA status area
 * into the qc's result taskfile (error->feature, status->command, plus
 * the HOB registers). Some field copies are elided in this excerpt.
 */
6691 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6693 struct ipr_sata_port *sata_port = qc->ap->private_data;
6694 struct ipr_ioasa_gata *g = &sata_port->ioasa;	/* snapshot taken in ipr_sata_done() */
6695 struct ata_taskfile *tf = &qc->result_tf;
6697 tf->feature = g->error;	/* error register reads back through feature */
6698 tf->nsect = g->nsect;
6702 tf->device = g->device;
6703 tf->command = g->status;	/* status register reads back through command */
6704 tf->hob_nsect = g->hob_nsect;
6705 tf->hob_lbal = g->hob_lbal;
6706 tf->hob_lbam = g->hob_lbam;
6707 tf->hob_lbah = g->hob_lbah;
/* libata port operations implemented by this driver */
6712 static struct ata_port_operations ipr_sata_ops = {
6713 .phy_reset = ipr_ata_phy_reset,
6714 .hardreset = ipr_sata_reset,
6715 .post_internal_cmd = ipr_ata_post_internal,
6716 .qc_prep = ata_noop_qc_prep,	/* no separate prep step; issue does it all */
6717 .qc_defer = ipr_qc_defer,
6718 .qc_issue = ipr_qc_issue,
6719 .qc_fill_rtf = ipr_qc_fill_rtf,
6720 .port_start = ata_sas_port_start,
6721 .port_stop = ata_sas_port_stop
/* Port capabilities advertised to libata for attached SATA devices */
6724 static struct ata_port_info sata_port_info = {
6725 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6726 .pio_mask = ATA_PIO4_ONLY,
6727 .mwdma_mask = ATA_MWDMA2,
6728 .udma_mask = ATA_UDMA6,
6729 .port_ops = &ipr_sata_ops
#ifdef CONFIG_PPC_PSERIES
/* PVR values of processors on which early Gemstone adapters misbehave
 * (array entries elided in this excerpt) */
6733 static const u16 ipr_blocked_processors[] = {
6745 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6746 * @ioa_cfg: ioa cfg struct
6748 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6749 * certain pSeries hardware. This function determines if the given
6750 * adapter is in one of these configurations or not.
6753 * 1 if adapter is not supported / 0 if adapter is supported
6755 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
/* only 0x5702 adapters below PCI revision 4 are affected */
6759 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6760 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6761 if (pvr_version_is(ipr_blocked_processors[i]))
/* non-pSeries builds: every adapter is considered supported */
6768 #define ipr_invalid_adapter(ioa_cfg) 0
6772 * ipr_ioa_bringdown_done - IOA bring down completion.
6773 * @ipr_cmd: ipr command struct
6775 * This function processes the completion of an adapter bring down.
6776 * It wakes any reset sleepers.
/*
 * ipr_ioa_bringdown_done - Completion of an adapter bring-down.
 * Unblocks the host (unless the IOA is being removed), marks every HRRQ
 * dead, frees the command, and wakes reset waiters.
 * Return: IPR_RC_JOB_RETURN.
 */
6781 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6783 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6787 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
/* scsi_unblock_requests may sleep/schedule: drop the host lock around it */
6789 spin_unlock_irq(ioa_cfg->host->host_lock);
6790 scsi_unblock_requests(ioa_cfg->host);
6791 spin_lock_irq(ioa_cfg->host->host_lock);
6794 ioa_cfg->in_reset_reload = 0;
6795 ioa_cfg->reset_retries = 0;
6796 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6797 spin_lock(&ioa_cfg->hrrq[i]._lock);
6798 ioa_cfg->hrrq[i].ioa_is_dead = 1;	/* no further commands on any HRRQ */
6799 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6803 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6804 wake_up_all(&ioa_cfg->reset_wait_q);
6807 return IPR_RC_JOB_RETURN;
6811 * ipr_ioa_reset_done - IOA reset completion.
6812 * @ipr_cmd: ipr command struct
6814 * This function processes the completion of an adapter reset.
6815 * It schedules any necessary mid-layer add/removes and
6816 * wakes any reset sleepers.
/*
 * ipr_ioa_reset_done - Completion of a successful adapter reset.
 * Re-enables command processing on every HRRQ, kicks off the mid-layer
 * add/remove worker for changed resources, re-arms the HCAM buffers,
 * reports the vset bus reset, and wakes reset waiters.
 * Return: IPR_RC_JOB_RETURN.
 */
6821 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6823 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6824 struct ipr_resource_entry *res;
6825 struct ipr_hostrcb *hostrcb, *temp;
6829 ioa_cfg->in_reset_reload = 0;
6830 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6831 spin_lock(&ioa_cfg->hrrq[j]._lock);
6832 ioa_cfg->hrrq[j].allow_cmds = 1;	/* accept new commands again */
6833 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6836 ioa_cfg->reset_cmd = NULL;
6837 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
/* schedule mid-layer add/removes for resources that changed */
6839 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6840 if (res->add_to_ml || res->del_from_ml) {
6845 schedule_work(&ioa_cfg->work_q);
/* re-arm HCAMs: the first IPR_NUM_LOG_HCAMS as log-data, the rest as config-change */
6847 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6848 list_del(&hostrcb->queue);
6849 if (i++ < IPR_NUM_LOG_HCAMS)
6850 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6852 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6855 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6856 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6858 ioa_cfg->reset_retries = 0;
6859 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6860 wake_up_all(&ioa_cfg->reset_wait_q);
/* drop the host lock while unblocking; mid-layer may re-enter the driver */
6862 spin_unlock(ioa_cfg->host->host_lock);
6863 scsi_unblock_requests(ioa_cfg->host);
6864 spin_lock(ioa_cfg->host->host_lock);
6866 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6867 scsi_block_requests(ioa_cfg->host);	/* another reset started meanwhile */
6869 schedule_work(&ioa_cfg->work_q);
6871 return IPR_RC_JOB_RETURN;
6875 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6876 * @supported_dev: supported device struct
6877 * @vpids: vendor product id struct
/*
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer:
 * zero the structure, copy in the vendor/product IDs, and set the
 * record count and payload length.
 */
6882 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6883 struct ipr_std_inq_vpids *vpids)
6885 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6886 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6887 supported_dev->num_records = 1;
6888 supported_dev->data_length =
6889 cpu_to_be16(sizeof(struct ipr_supported_device));
6890 supported_dev->reserved = 0;
6894 * ipr_set_supported_devs - Send Set Supported Devices for a device
6895 * @ipr_cmd: ipr command struct
6897 * This function sends a Set Supported Devices to the adapter
6900 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/*
 * ipr_set_supported_devs - Reset-job step: issue Set Supported Devices
 * for the next SCSI disk on the used-resource queue. Re-arms itself as
 * the next job step on sis32 so every disk is covered; when the list is
 * exhausted the job continues into ipr_ioa_reset_done.
 * Return: IPR_RC_JOB_RETURN after sending / IPR_RC_JOB_CONTINUE when done.
 */
6902 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6904 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6905 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6906 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6907 struct ipr_resource_entry *res = ipr_cmd->u.res;	/* resume point in the resource list */
6909 ipr_cmd->job_step = ipr_ioa_reset_done;	/* default next step if no disks remain */
6911 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6912 if (!ipr_is_scsi_disk(res))
6915 ipr_cmd->u.res = res;	/* remember where to continue next invocation */
6916 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6918 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6919 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6920 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6922 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6923 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6924 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6925 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6927 ipr_init_ioadl(ipr_cmd,
6928 ioa_cfg->vpd_cbs_dma +
6929 offsetof(struct ipr_misc_cbs, supp_dev),
6930 sizeof(struct ipr_supported_device),
6931 IPR_IOADL_FLAGS_WRITE_LAST);
6933 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6934 IPR_SET_SUP_DEVICE_TIMEOUT);
6936 if (!ioa_cfg->sis64)
6937 ipr_cmd->job_step = ipr_set_supported_devs;	/* loop: one request per disk */
6939 return IPR_RC_JOB_RETURN;
6943 return IPR_RC_JOB_CONTINUE;
6947 * ipr_get_mode_page - Locate specified mode page
6948 * @mode_pages: mode page buffer
6949 * @page_code: page code to find
6950 * @len: minimum required length for mode page
6953 * pointer to mode page / NULL on failure
/*
 * ipr_get_mode_page - Walk a MODE SENSE buffer looking for @page_code.
 * Skips the header and block descriptors, then steps page-by-page,
 * checking each page is at least @len bytes. Returns NULL on failure
 * (return statements elided in this excerpt).
 */
6955 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6956 u32 page_code, u32 len)
6958 struct ipr_mode_page_hdr *mode_hdr;
6962 if (!mode_pages || (mode_pages->hdr.length == 0))
/* hdr.length excludes itself; subtract the 3 remaining header bytes + descriptors */
6965 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6966 mode_hdr = (struct ipr_mode_page_hdr *)
6967 (mode_pages->data + mode_pages->hdr.block_desc_len);
6970 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6971 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))	/* must fit caller's struct */
/* not this page: advance past it */
6975 page_length = (sizeof(struct ipr_mode_page_hdr) +
6976 mode_hdr->page_length);
6977 length -= page_length;
6978 mode_hdr = (struct ipr_mode_page_hdr *)
6979 ((unsigned long)mode_hdr + page_length);
6986 * ipr_check_term_power - Check for term power errors
6987 * @ioa_cfg: ioa config struct
6988 * @mode_pages: IOAFP mode pages buffer
6990 * Check the IOAFP's mode page 28 for term power errors
/*
 * ipr_check_term_power - Scan mode page 28 bus entries and log an error
 * for any SCSI bus reporting absent termination power.
 */
6995 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6996 struct ipr_mode_pages *mode_pages)
7000 struct ipr_dev_bus_entry *bus;
7001 struct ipr_mode_page28 *mode_page;
7003 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7004 sizeof(struct ipr_mode_page28));
7006 entry_length = mode_page->entry_length;	/* per-entry stride reported by the IOA */
7008 bus = mode_page->bus;
7010 for (i = 0; i < mode_page->num_entries; i++) {
7011 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7012 dev_err(&ioa_cfg->pdev->dev,
7013 "Term power is absent on scsi bus %d\n",
7017 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);	/* stride may differ from sizeof */
7022 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7023 * @ioa_cfg: ioa config struct
7025 * Looks through the config table checking for SES devices. If
7026 * the SES device is in the SES table indicating a maximum SCSI
7027 * bus speed, the speed is limited for the bus.
/*
 * ipr_scsi_bus_speed_limit - Clamp each bus's max transfer rate to the
 * limit derived from the SES table for that bus and width.
 */
7032 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7037 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7038 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7039 ioa_cfg->bus_attr[i].bus_width);
7041 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)	/* only ever lower the limit */
7042 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7047 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7048 * @ioa_cfg: ioa config struct
7049 * @mode_pages: mode page 28 buffer
7051 * Updates mode page 28 based on driver configuration
/*
 * ipr_modify_ioafp_mode_page_28 - Write the driver's bus attributes
 * (width, transfer rate, QAS enable/disable, reset delay) into each bus
 * entry of mode page 28 before it is sent back via MODE SELECT.
 */
7056 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7057 struct ipr_mode_pages *mode_pages)
7059 int i, entry_length;
7060 struct ipr_dev_bus_entry *bus;
7061 struct ipr_bus_attributes *bus_attr;
7062 struct ipr_mode_page28 *mode_page;
7064 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7065 sizeof(struct ipr_mode_page28));
7067 entry_length = mode_page->entry_length;
7069 /* Loop for each device bus entry */
7070 for (i = 0, bus = mode_page->bus;
7071 i < mode_page->num_entries;
7072 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7073 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7074 dev_err(&ioa_cfg->pdev->dev,
7075 "Invalid resource address reported: 0x%08X\n",
7076 IPR_GET_PHYS_LOC(bus->res_addr));
/* NOTE(review): indexes bus_attr by loop position i, not by
 * bus->res_addr.bus — assumes entries arrive in bus order; confirm */
7080 bus_attr = &ioa_cfg->bus_attr[i];
7081 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7082 bus->bus_width = bus_attr->bus_width;
7083 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7084 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;	/* clear, then set the chosen QAS state */
7085 if (bus_attr->qas_enabled)
7086 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7088 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7093 * ipr_build_mode_select - Build a mode select command
7094 * @ipr_cmd: ipr command struct
7095 * @res_handle: resource handle to send command to
7096 * @parm: Byte 2 of Mode Sense command
7097 * @dma_addr: DMA buffer address
7098 * @xfer_len: data transfer length
7103 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7104 __be32 res_handle, u8 parm,
7105 dma_addr_t dma_addr, u8 xfer_len)
7107 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7109 ioarcb->res_handle = res_handle;
7110 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7111 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7112 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7113 ioarcb->cmd_pkt.cdb[1] = parm;
7114 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7116 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7120 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7121 * @ipr_cmd: ipr command struct
7123 * This function sets up the SCSI bus attributes and sends
7124 * a Mode Select for Page 28 to activate them.
7129 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7131 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7132 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7136 ipr_scsi_bus_speed_limit(ioa_cfg);
7137 ipr_check_term_power(ioa_cfg, mode_pages);
7138 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7139 length = mode_pages->hdr.length + 1;
7140 mode_pages->hdr.length = 0;
7142 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7143 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7146 ipr_cmd->job_step = ipr_set_supported_devs;
7147 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7148 struct ipr_resource_entry, queue);
7149 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7152 return IPR_RC_JOB_RETURN;
7156 * ipr_build_mode_sense - Builds a mode sense command
7157 * @ipr_cmd: ipr command struct
7158 * @res: resource entry struct
7159 * @parm: Byte 2 of mode sense command
7160 * @dma_addr: DMA address of mode sense buffer
7161 * @xfer_len: Size of DMA buffer
7166 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7168 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7170 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7172 ioarcb->res_handle = res_handle;
7173 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7174 ioarcb->cmd_pkt.cdb[2] = parm;
7175 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7176 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7178 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7182 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7183 * @ipr_cmd: ipr command struct
7185 * This function handles the failure of an IOA bringup command.
7190 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7192 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7193 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7195 dev_err(&ioa_cfg->pdev->dev,
7196 "0x%02X failed with IOASC: 0x%08X\n",
7197 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7199 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7200 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7201 return IPR_RC_JOB_RETURN;
7205 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7206 * @ipr_cmd: ipr command struct
7208 * This function handles the failure of a Mode Sense to the IOAFP.
7209 * Some adapters do not handle all mode pages.
7212 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7214 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7216 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7217 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7219 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7220 ipr_cmd->job_step = ipr_set_supported_devs;
7221 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7222 struct ipr_resource_entry, queue);
7223 return IPR_RC_JOB_CONTINUE;
7226 return ipr_reset_cmd_failed(ipr_cmd);
7230 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7231 * @ipr_cmd: ipr command struct
7233 * This function send a Page 28 mode sense to the IOA to
7234 * retrieve SCSI bus attributes.
7239 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7241 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7244 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7245 0x28, ioa_cfg->vpd_cbs_dma +
7246 offsetof(struct ipr_misc_cbs, mode_pages),
7247 sizeof(struct ipr_mode_pages));
7249 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7250 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7252 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7255 return IPR_RC_JOB_RETURN;
7259 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7260 * @ipr_cmd: ipr command struct
7262 * This function enables dual IOA RAID support if possible.
7267 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7270 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7271 struct ipr_mode_page24 *mode_page;
7275 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7276 sizeof(struct ipr_mode_page24));
7279 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7281 length = mode_pages->hdr.length + 1;
7282 mode_pages->hdr.length = 0;
7284 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7285 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7288 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7289 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7292 return IPR_RC_JOB_RETURN;
7296 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7297 * @ipr_cmd: ipr command struct
7299 * This function handles the failure of a Mode Sense to the IOAFP.
7300 * Some adapters do not handle all mode pages.
7303 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7305 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7307 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7309 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7310 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7311 return IPR_RC_JOB_CONTINUE;
7314 return ipr_reset_cmd_failed(ipr_cmd);
7318 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7319 * @ipr_cmd: ipr command struct
7321 * This function send a mode sense to the IOA to retrieve
7322 * the IOA Advanced Function Control mode page.
7327 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7329 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7332 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7333 0x24, ioa_cfg->vpd_cbs_dma +
7334 offsetof(struct ipr_misc_cbs, mode_pages),
7335 sizeof(struct ipr_mode_pages));
7337 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7338 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7340 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7343 return IPR_RC_JOB_RETURN;
7347 * ipr_init_res_table - Initialize the resource table
7348 * @ipr_cmd: ipr command struct
7350 * This function looks through the existing resource table, comparing
7351 * it with the config table. This function will take care of old/new
7352 * devices and schedule adding/removing them from the mid-layer
7356 * IPR_RC_JOB_CONTINUE
7358 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7360 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7361 struct ipr_resource_entry *res, *temp;
7362 struct ipr_config_table_entry_wrapper cfgtew;
7363 int entries, found, flag, i;
7368 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7370 flag = ioa_cfg->u.cfg_table->hdr.flags;
7372 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7373 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7375 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7376 list_move_tail(&res->queue, &old_res);
7379 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7381 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7383 for (i = 0; i < entries; i++) {
7385 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7387 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7390 list_for_each_entry_safe(res, temp, &old_res, queue) {
7391 if (ipr_is_same_device(res, &cfgtew)) {
7392 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7399 if (list_empty(&ioa_cfg->free_res_q)) {
7400 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7405 res = list_entry(ioa_cfg->free_res_q.next,
7406 struct ipr_resource_entry, queue);
7407 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7408 ipr_init_res_entry(res, &cfgtew);
7410 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7411 res->sdev->allow_restart = 1;
7414 ipr_update_res_entry(res, &cfgtew);
7417 list_for_each_entry_safe(res, temp, &old_res, queue) {
7419 res->del_from_ml = 1;
7420 res->res_handle = IPR_INVALID_RES_HANDLE;
7421 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7425 list_for_each_entry_safe(res, temp, &old_res, queue) {
7426 ipr_clear_res_target(res);
7427 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7430 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7431 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7433 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7436 return IPR_RC_JOB_CONTINUE;
7440 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7441 * @ipr_cmd: ipr command struct
7443 * This function sends a Query IOA Configuration command
7444 * to the adapter to retrieve the IOA configuration table.
7449 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7451 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7452 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7453 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7454 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7457 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7458 ioa_cfg->dual_raid = 1;
7459 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7460 ucode_vpd->major_release, ucode_vpd->card_type,
7461 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7462 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7463 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7465 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7466 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7467 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7468 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7470 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7471 IPR_IOADL_FLAGS_READ_LAST);
7473 ipr_cmd->job_step = ipr_init_res_table;
7475 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7478 return IPR_RC_JOB_RETURN;
7482 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7483 * @ipr_cmd: ipr command struct
7485 * This utility function sends an inquiry to the adapter.
7490 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7491 dma_addr_t dma_addr, u8 xfer_len)
7493 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7496 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7497 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7499 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7500 ioarcb->cmd_pkt.cdb[1] = flags;
7501 ioarcb->cmd_pkt.cdb[2] = page;
7502 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7504 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7506 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7511 * ipr_inquiry_page_supported - Is the given inquiry page supported
7512 * @page0: inquiry page 0 buffer
7515 * This function determines if the specified inquiry page is supported.
7518 * 1 if page is supported / 0 if not
7520 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7524 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7525 if (page0->page[i] == page)
7532 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7533 * @ipr_cmd: ipr command struct
7535 * This function sends a Page 0xD0 inquiry to the adapter
7536 * to retrieve adapter capabilities.
7539 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7541 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7543 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7544 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7545 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7548 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7549 memset(cap, 0, sizeof(*cap));
7551 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7552 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7553 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7554 sizeof(struct ipr_inquiry_cap));
7555 return IPR_RC_JOB_RETURN;
7559 return IPR_RC_JOB_CONTINUE;
7563 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7564 * @ipr_cmd: ipr command struct
7566 * This function sends a Page 3 inquiry to the adapter
7567 * to retrieve software VPD information.
7570 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7572 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7574 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7578 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7580 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7581 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7582 sizeof(struct ipr_inquiry_page3));
7585 return IPR_RC_JOB_RETURN;
7589 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7590 * @ipr_cmd: ipr command struct
7592 * This function sends a Page 0 inquiry to the adapter
7593 * to retrieve supported inquiry pages.
7596 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7598 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7600 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7605 /* Grab the type out of the VPD and store it away */
7606 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7608 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7610 if (ipr_invalid_adapter(ioa_cfg)) {
7611 dev_err(&ioa_cfg->pdev->dev,
7612 "Adapter not supported in this hardware configuration.\n");
7614 if (!ipr_testmode) {
7615 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7616 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7617 list_add_tail(&ipr_cmd->queue,
7618 &ioa_cfg->hrrq->hrrq_free_q);
7619 return IPR_RC_JOB_RETURN;
7623 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7625 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7626 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7627 sizeof(struct ipr_inquiry_page0));
7630 return IPR_RC_JOB_RETURN;
7634 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7635 * @ipr_cmd: ipr command struct
7637 * This function sends a standard inquiry to the adapter.
7642 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7644 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7647 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7649 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7650 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7651 sizeof(struct ipr_ioa_vpd));
7654 return IPR_RC_JOB_RETURN;
7658 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7659 * @ipr_cmd: ipr command struct
7661 * This function send an Identify Host Request Response Queue
7662 * command to establish the HRRQ with the adapter.
7667 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7669 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7670 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7671 struct ipr_hrr_queue *hrrq;
7674 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7675 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7677 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7678 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7680 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7681 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7683 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7685 ioarcb->cmd_pkt.cdb[1] = 0x1;
7687 if (ioa_cfg->nvectors == 1)
7688 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7690 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7692 ioarcb->cmd_pkt.cdb[2] =
7693 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7694 ioarcb->cmd_pkt.cdb[3] =
7695 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7696 ioarcb->cmd_pkt.cdb[4] =
7697 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7698 ioarcb->cmd_pkt.cdb[5] =
7699 ((u64) hrrq->host_rrq_dma) & 0xff;
7700 ioarcb->cmd_pkt.cdb[7] =
7701 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7702 ioarcb->cmd_pkt.cdb[8] =
7703 (sizeof(u32) * hrrq->size) & 0xff;
7705 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7706 ioarcb->cmd_pkt.cdb[9] =
7707 ioa_cfg->identify_hrrq_index;
7709 if (ioa_cfg->sis64) {
7710 ioarcb->cmd_pkt.cdb[10] =
7711 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7712 ioarcb->cmd_pkt.cdb[11] =
7713 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7714 ioarcb->cmd_pkt.cdb[12] =
7715 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7716 ioarcb->cmd_pkt.cdb[13] =
7717 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7720 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7721 ioarcb->cmd_pkt.cdb[14] =
7722 ioa_cfg->identify_hrrq_index;
7724 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7725 IPR_INTERNAL_TIMEOUT);
7727 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7728 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7731 return IPR_RC_JOB_RETURN;
7735 return IPR_RC_JOB_CONTINUE;
7739 * ipr_reset_timer_done - Adapter reset timer function
7740 * @ipr_cmd: ipr command struct
7742 * Description: This function is used in adapter reset processing
7743 * for timing events. If the reset_cmd pointer in the IOA
7744 * config struct is not this adapter's we are doing nested
7745 * resets and fail_all_ops will take care of freeing the
7751 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7753 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7754 unsigned long lock_flags = 0;
7756 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7758 if (ioa_cfg->reset_cmd == ipr_cmd) {
7759 list_del(&ipr_cmd->queue);
7760 ipr_cmd->done(ipr_cmd);
7763 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7767 * ipr_reset_start_timer - Start a timer for adapter reset job
7768 * @ipr_cmd: ipr command struct
7769 * @timeout: timeout value
7771 * Description: This function is used in adapter reset processing
7772 * for timing events. If the reset_cmd pointer in the IOA
7773 * config struct is not this adapter's we are doing nested
7774 * resets and fail_all_ops will take care of freeing the
7780 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7781 unsigned long timeout)
7785 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7786 ipr_cmd->done = ipr_reset_ioa_job;
7788 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7789 ipr_cmd->timer.expires = jiffies + timeout;
7790 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7791 add_timer(&ipr_cmd->timer);
7795 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7796 * @ioa_cfg: ioa cfg struct
7801 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7803 struct ipr_hrr_queue *hrrq;
7805 for_each_hrrq(hrrq, ioa_cfg) {
7806 spin_lock(&hrrq->_lock);
7807 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7809 /* Initialize Host RRQ pointers */
7810 hrrq->hrrq_start = hrrq->host_rrq;
7811 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7812 hrrq->hrrq_curr = hrrq->hrrq_start;
7813 hrrq->toggle_bit = 1;
7814 spin_unlock(&hrrq->_lock);
7818 ioa_cfg->identify_hrrq_index = 0;
7819 if (ioa_cfg->hrrq_num == 1)
7820 atomic_set(&ioa_cfg->hrrq_index, 0);
7822 atomic_set(&ioa_cfg->hrrq_index, 1);
7824 /* Zero out config table */
7825 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7829 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7830 * @ipr_cmd: ipr command struct
7833 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7835 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7837 unsigned long stage, stage_time;
7839 volatile u32 int_reg;
7840 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7843 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7844 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7845 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7847 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7849 /* sanity check the stage_time value */
7850 if (stage_time == 0)
7851 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7852 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7853 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7854 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7855 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7857 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7858 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7859 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7860 stage_time = ioa_cfg->transop_timeout;
7861 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7862 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7863 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7864 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7865 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7866 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7867 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7868 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7869 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7870 return IPR_RC_JOB_CONTINUE;
7874 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7875 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7876 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7877 ipr_cmd->done = ipr_reset_ioa_job;
7878 add_timer(&ipr_cmd->timer);
7880 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7882 return IPR_RC_JOB_RETURN;
7886 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7887 * @ipr_cmd: ipr command struct
7889 * This function reinitializes some control blocks and
7890 * enables destructive diagnostics on the adapter.
7895 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7897 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7898 volatile u32 int_reg;
7899 volatile u64 maskval;
7903 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7904 ipr_init_ioa_mem(ioa_cfg);
7906 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7907 spin_lock(&ioa_cfg->hrrq[i]._lock);
7908 ioa_cfg->hrrq[i].allow_interrupts = 1;
7909 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7912 if (ioa_cfg->sis64) {
7913 /* Set the adapter to the correct endian mode. */
7914 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7915 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7918 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7920 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7921 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7922 ioa_cfg->regs.clr_interrupt_mask_reg32);
7923 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7924 return IPR_RC_JOB_CONTINUE;
7927 /* Enable destructive diagnostics on IOA */
7928 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7930 if (ioa_cfg->sis64) {
7931 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7932 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7933 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7935 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7937 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7939 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7941 if (ioa_cfg->sis64) {
7942 ipr_cmd->job_step = ipr_reset_next_stage;
7943 return IPR_RC_JOB_CONTINUE;
7946 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7947 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7948 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7949 ipr_cmd->done = ipr_reset_ioa_job;
7950 add_timer(&ipr_cmd->timer);
7951 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7954 return IPR_RC_JOB_RETURN;
7958 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7959 * @ipr_cmd: ipr command struct
7961 * This function is invoked when an adapter dump has run out
7962 * of processing time.
7965 * IPR_RC_JOB_CONTINUE
7967 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7969 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7971 if (ioa_cfg->sdt_state == GET_DUMP)
7972 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7973 else if (ioa_cfg->sdt_state == READ_DUMP)
7974 ioa_cfg->sdt_state = ABORT_DUMP;
7976 ioa_cfg->dump_timeout = 1;
7977 ipr_cmd->job_step = ipr_reset_alert;
7979 return IPR_RC_JOB_CONTINUE;
7983 * ipr_unit_check_no_data - Log a unit check/no data error log
7984 * @ioa_cfg: ioa config struct
7986 * Logs an error indicating the adapter unit checked, but for some
7987 * reason, we were unable to fetch the unit check buffer.
7992 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7994 ioa_cfg->errors_logged++;
7995 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7999 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8000 * @ioa_cfg: ioa config struct
8002 * Fetches the unit check buffer from the adapter by clocking the data
8003 * through the mailbox register.
8008 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8010 unsigned long mailbox;
8011 struct ipr_hostrcb *hostrcb;
8012 struct ipr_uc_sdt sdt;
8016 mailbox = readl(ioa_cfg->ioa_mailbox);
8018 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8019 ipr_unit_check_no_data(ioa_cfg);
8023 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8024 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8025 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8027 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8028 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8029 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8030 ipr_unit_check_no_data(ioa_cfg);
8034 /* Find length of the first sdt entry (UC buffer) */
8035 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8036 length = be32_to_cpu(sdt.entry[0].end_token);
8038 length = (be32_to_cpu(sdt.entry[0].end_token) -
8039 be32_to_cpu(sdt.entry[0].start_token)) &
8040 IPR_FMT2_MBX_ADDR_MASK;
8042 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8043 struct ipr_hostrcb, queue);
8044 list_del(&hostrcb->queue);
8045 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8047 rc = ipr_get_ldump_data_section(ioa_cfg,
8048 be32_to_cpu(sdt.entry[0].start_token),
8049 (__be32 *)&hostrcb->hcam,
8050 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8053 ipr_handle_log_data(ioa_cfg, hostrcb);
8054 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8055 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8056 ioa_cfg->sdt_state == GET_DUMP)
8057 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8059 ipr_unit_check_no_data(ioa_cfg);
8061 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8065 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8066 * @ipr_cmd: ipr command struct
8068 * Description: This function will call to get the unit check buffer.
8073 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8075 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8078 ioa_cfg->ioa_unit_checked = 0;
8079 ipr_get_unit_check_buffer(ioa_cfg);
8080 ipr_cmd->job_step = ipr_reset_alert;
8081 ipr_reset_start_timer(ipr_cmd, 0);
8084 return IPR_RC_JOB_RETURN;
8088 * ipr_reset_restore_cfg_space - Restore PCI config space.
8089 * @ipr_cmd: ipr command struct
8091 * Description: This function restores the saved PCI config space of
8092 * the adapter, fails all outstanding ops back to the callers, and
8093 * fetches the dump/unit check if applicable to this reset.
8096 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8098 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8100 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8104 ioa_cfg->pdev->state_saved = true;
8105 pci_restore_state(ioa_cfg->pdev);
8107 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8108 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8109 return IPR_RC_JOB_CONTINUE;
8112 ipr_fail_all_ops(ioa_cfg);
8114 if (ioa_cfg->sis64) {
8115 /* Set the adapter to the correct endian mode. */
8116 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8117 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8120 if (ioa_cfg->ioa_unit_checked) {
8121 if (ioa_cfg->sis64) {
8122 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8123 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8124 return IPR_RC_JOB_RETURN;
8126 ioa_cfg->ioa_unit_checked = 0;
8127 ipr_get_unit_check_buffer(ioa_cfg);
8128 ipr_cmd->job_step = ipr_reset_alert;
8129 ipr_reset_start_timer(ipr_cmd, 0);
8130 return IPR_RC_JOB_RETURN;
8134 if (ioa_cfg->in_ioa_bringdown) {
8135 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8137 ipr_cmd->job_step = ipr_reset_enable_ioa;
8139 if (GET_DUMP == ioa_cfg->sdt_state) {
8140 ioa_cfg->sdt_state = READ_DUMP;
8141 ioa_cfg->dump_timeout = 0;
8143 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8145 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8146 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8147 schedule_work(&ioa_cfg->work_q);
8148 return IPR_RC_JOB_RETURN;
8153 return IPR_RC_JOB_CONTINUE;
8157 * ipr_reset_bist_done - BIST has completed on the adapter.
8158 * @ipr_cmd: ipr command struct
8160 * Description: Unblock config space and resume the reset process.
8163 * IPR_RC_JOB_CONTINUE
8165 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8167 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8170 if (ioa_cfg->cfg_locked)
8171 pci_cfg_access_unlock(ioa_cfg->pdev);
8172 ioa_cfg->cfg_locked = 0;
8173 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8175 return IPR_RC_JOB_CONTINUE;
8179 * ipr_reset_start_bist - Run BIST on the adapter.
8180 * @ipr_cmd: ipr command struct
8182 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8185 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8187 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8189 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8190 int rc = PCIBIOS_SUCCESSFUL;
8193 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8194 writel(IPR_UPROCI_SIS64_START_BIST,
8195 ioa_cfg->regs.set_uproc_interrupt_reg32);
8197 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8199 if (rc == PCIBIOS_SUCCESSFUL) {
8200 ipr_cmd->job_step = ipr_reset_bist_done;
8201 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8202 rc = IPR_RC_JOB_RETURN;
8204 if (ioa_cfg->cfg_locked)
8205 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8206 ioa_cfg->cfg_locked = 0;
8207 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8208 rc = IPR_RC_JOB_CONTINUE;
8216 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8217 * @ipr_cmd: ipr command struct
8219 * Description: This clears PCI reset to the adapter and delays two seconds.
8224 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8227 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8228 ipr_cmd->job_step = ipr_reset_bist_done;
8229 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8231 return IPR_RC_JOB_RETURN;
8235 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8236 * @ipr_cmd: ipr command struct
8238 * Description: This asserts PCI reset to the adapter.
/* Return value: IPR_RC_JOB_RETURN (continues via the reset timer). */
8243 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8245 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8246 struct pci_dev *pdev = ioa_cfg->pdev;
/* Assert a warm PCIe reset; ipr_reset_slot_reset_done de-asserts it later. */
8249 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8250 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8251 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8253 return IPR_RC_JOB_RETURN;
8257 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8258 * @ipr_cmd: ipr command struct
8260 * Description: This attempts to block config access to the IOA.
8263 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/*
 * NOTE(review): line numbers 8273, 8279, 8283+ are absent from this
 * listing — the "} else {" lines separating the three branches below
 * (lock acquired / still time left to retry / timed out) were stripped.
 */
8265 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8267 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8268 int rc = IPR_RC_JOB_CONTINUE;
/* Branch 1: got the config-access lock; proceed to the chip's reset step. */
8270 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8271 ioa_cfg->cfg_locked = 1;
8272 ipr_cmd->job_step = ioa_cfg->reset;
/* Branch 2: lock busy but budget remains; re-arm the timer and retry. */
8274 if (ipr_cmd->u.time_left) {
8275 rc = IPR_RC_JOB_RETURN;
8276 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8277 ipr_reset_start_timer(ipr_cmd,
8278 IPR_CHECK_FOR_RESET_TIMEOUT);
/* Branch 3: budget exhausted; reset anyway without the lock. */
8280 ipr_cmd->job_step = ioa_cfg->reset;
8281 dev_err(&ioa_cfg->pdev->dev,
8282 "Timed out waiting to lock config access. Resetting anyway.\n");
8290 * ipr_reset_block_config_access - Block config access to the IOA
8291 * @ipr_cmd: ipr command struct
8293 * Description: This attempts to block config access to the IOA
8296 * IPR_RC_JOB_CONTINUE
/* Seeds the retry budget, then hands off to the trylock/poll step above. */
8298 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8300 ipr_cmd->ioa_cfg->cfg_locked = 0;
8301 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8302 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8303 return IPR_RC_JOB_CONTINUE;
8307 * ipr_reset_allowed - Query whether or not IOA can be reset
8308 * @ioa_cfg: ioa config struct
8311 * 0 if reset not allowed / non-zero if reset is allowed
/* Reset is permitted only while the adapter is NOT in a critical operation. */
8313 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8315 volatile u32 temp_reg;
8317 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8318 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8322 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8323 * @ipr_cmd: ipr command struct
8325 * Description: This function waits for adapter permission to run BIST,
8326 * then runs BIST. If the adapter does not give permission after a
8327 * reasonable time, we will reset the adapter anyway. The impact of
8328 * resetting the adapter without warning the adapter is the risk of
8329 * losing the persistent error log on the adapter. If the adapter is
8330 * reset while it is writing to the flash on the adapter, the flash
8331 * segment will have bad ECC and be zeroed.
8334 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/* NOTE(review): line 8344 ("} else {") is absent from this listing. */
8336 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8338 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8339 int rc = IPR_RC_JOB_RETURN;
/* Not yet allowed and budget remains: decrement budget, poll again later. */
8341 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8342 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8343 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
/* Allowed (or out of budget): move on to blocking config access. */
8345 ipr_cmd->job_step = ipr_reset_block_config_access;
8346 rc = IPR_RC_JOB_CONTINUE;
8353 * ipr_reset_alert - Alert the adapter of a pending reset
8354 * @ipr_cmd: ipr command struct
8356 * Description: This function alerts the adapter that it will be reset.
8357 * If memory space is not currently enabled, proceed directly
8358 * to running BIST on the adapter. The timer must always be started
8359 * so we guarantee we do not run BIST from ipr_isr.
/* Return value: IPR_RC_JOB_RETURN. */
/* NOTE(review): line 8377 ("} else {") is absent from this listing. */
8364 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8366 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8371 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
/* Memory space enabled: raise the reset-alert doorbell and wait politely. */
8373 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8374 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8375 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8376 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
/* MMIO unusable: skip the alert and go straight to blocking config access. */
8378 ipr_cmd->job_step = ipr_reset_block_config_access;
/* Timer always started so the next step never runs from interrupt context. */
8381 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8382 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8385 return IPR_RC_JOB_RETURN;
8389 * ipr_reset_ucode_download_done - Microcode download completion
8390 * @ipr_cmd: ipr command struct
8392 * Description: This function unmaps the microcode download buffer.
8395 * IPR_RC_JOB_CONTINUE
8397 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8399 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8400 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
/* Unmap the DMA scatterlist used for the WRITE BUFFER download. */
8402 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8403 sglist->num_sg, DMA_TO_DEVICE);
/* Next step in the reset job: alert the adapter of the pending reset. */
8405 ipr_cmd->job_step = ipr_reset_alert;
8406 return IPR_RC_JOB_CONTINUE;
8410 * ipr_reset_ucode_download - Download microcode to the adapter
8411 * @ipr_cmd: ipr command struct
8413 * Description: This function checks to see if it there is microcode
8414 * to download to the adapter. If there is, a download is performed.
8417 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/*
 * NOTE(review): lines 8426-8427 and 8437-8440 are absent — the
 * "if (!sglist)" guard before the early CONTINUE return and the
 * "if (ioa_cfg->sis64) ... else" around the two ioadl builders were
 * stripped from this listing.
 */
8419 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8421 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8422 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
/* No microcode staged: skip straight to the reset-alert step. */
8425 ipr_cmd->job_step = ipr_reset_alert;
8428 return IPR_RC_JOB_CONTINUE;
/* Build a SCSI WRITE BUFFER (download-and-save) CDB addressed to the IOA. */
8430 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8431 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8432 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8433 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
/* 24-bit big-endian buffer length in CDB bytes 6..8. */
8434 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8435 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8436 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8439 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8441 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8442 ipr_cmd->job_step = ipr_reset_ucode_download_done;
/* Issue the request; the job resumes in ipr_reset_ioa_job on completion. */
8444 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8445 IPR_WRITE_BUFFER_TIMEOUT);
8448 return IPR_RC_JOB_RETURN;
8452 * ipr_reset_shutdown_ioa - Shutdown the adapter
8453 * @ipr_cmd: ipr command struct
8455 * Description: This function issues an adapter shutdown of the
8456 * specified type to the specified adapter as part of the
8457 * adapter reset job.
8460 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
/*
 * NOTE(review): lines 8483, 8490 are absent — the "else" before the
 * abbreviated-shutdown timeout and the "} else" selecting
 * ipr_reset_alert were stripped from this listing.
 */
8462 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8464 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8465 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8466 unsigned long timeout;
8467 int rc = IPR_RC_JOB_CONTINUE;
/* Only issue a shutdown if one was requested and the IOA is still alive. */
8470 if (shutdown_type != IPR_SHUTDOWN_NONE &&
8471 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8472 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8473 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8474 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8475 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
/* Timeout depends on the shutdown flavor requested. */
8477 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8478 timeout = IPR_SHUTDOWN_TIMEOUT;
8479 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8480 timeout = IPR_INTERNAL_TIMEOUT;
8481 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8482 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8484 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8486 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8488 rc = IPR_RC_JOB_RETURN;
/* After shutdown completes, next step is the microcode download check. */
8489 ipr_cmd->job_step = ipr_reset_ucode_download;
8491 ipr_cmd->job_step = ipr_reset_alert;
8498 * ipr_reset_ioa_job - Adapter reset job
8499 * @ipr_cmd: ipr command struct
8501 * Description: This function is the job router for the adapter reset job.
/*
 * State machine driver: each job step either CONTINUEs (run the next
 * step immediately, in this loop) or RETURNs (the step re-enters this
 * function later via a timer or command completion).
 * NOTE(review): the "do {" opener and loop-exit "return" lines are
 * absent from this listing (gaps at 8510-8511, 8521-8523, 8527-8529).
 */
8506 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8509 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8512 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* A nested reset superseded this job: recycle the command and bail. */
8514 if (ioa_cfg->reset_cmd != ipr_cmd) {
8516 * We are doing nested adapter resets and this is
8517 * not the current reset job.
8519 list_add_tail(&ipr_cmd->queue,
8520 &ipr_cmd->hrrq->hrrq_free_q);
/* The previous step failed: let its failure handler decide. */
8524 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8525 rc = ipr_cmd->job_step_failed(ipr_cmd);
8526 if (rc == IPR_RC_JOB_RETURN)
/* Reinitialize the command and run steps until one RETURNs. */
8530 ipr_reinit_ipr_cmnd(ipr_cmd);
8531 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8532 rc = ipr_cmd->job_step(ipr_cmd);
8533 } while (rc == IPR_RC_JOB_CONTINUE);
8537 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8538 * @ioa_cfg: ioa config struct
8539 * @job_step: first job step of reset job
8540 * @shutdown_type: shutdown type
8542 * Description: This function will initiate the reset of the given adapter
8543 * starting at the selected job step.
8544 * If the caller needs to wait on the completion of the reset,
8545 * the caller must sleep on the reset_wait_q.
/* Caller context: presumably holds host_lock — TODO confirm from callers. */
8550 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8551 int (*job_step) (struct ipr_cmnd *),
8552 enum ipr_shutdown_type shutdown_type)
8554 struct ipr_cmnd *ipr_cmd;
/* Flag reset-in-progress and stop every HRRQ from accepting new commands. */
8557 ioa_cfg->in_reset_reload = 1;
8558 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8559 spin_lock(&ioa_cfg->hrrq[i]._lock);
8560 ioa_cfg->hrrq[i].allow_cmds = 0;
8561 spin_unlock(&ioa_cfg->hrrq[i]._lock);
/* Hold off the SCSI midlayer unless the adapter is being torn down. */
8564 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8565 scsi_block_requests(ioa_cfg->host);
/* Grab a command block, record it as THE reset command, and kick the job. */
8567 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8568 ioa_cfg->reset_cmd = ipr_cmd;
8569 ipr_cmd->job_step = job_step;
8570 ipr_cmd->u.shutdown_type = shutdown_type;
8572 ipr_reset_ioa_job(ipr_cmd);
8576 * ipr_initiate_ioa_reset - Initiate an adapter reset
8577 * @ioa_cfg: ioa config struct
8578 * @shutdown_type: shutdown type
8580 * Description: This function will initiate the reset of the given adapter.
8581 * If the caller needs to wait on the completion of the reset,
8582 * the caller must sleep on the reset_wait_q.
/*
 * NOTE(review): several structural lines are absent from this listing
 * (gaps at 8589-8591, 8600-8601, 8605, 8611-8613, 8619, 8624-8626,
 * 8629-8631) — early "return"s, "else" branches and closing braces
 * were stripped; the visible statements are not all sequential.
 */
8587 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8588 enum ipr_shutdown_type shutdown_type)
/* Nothing to do for a dead IOA (early return follows in the original). */
8592 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
/* A reset is already running: adjust dump state instead of nesting fully. */
8595 if (ioa_cfg->in_reset_reload) {
8596 if (ioa_cfg->sdt_state == GET_DUMP)
8597 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8598 else if (ioa_cfg->sdt_state == READ_DUMP)
8599 ioa_cfg->sdt_state = ABORT_DUMP;
/* Too many retries: take the adapter offline permanently. */
8602 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8603 dev_err(&ioa_cfg->pdev->dev,
8604 "IOA taken offline - error recovery failed\n");
8606 ioa_cfg->reset_retries = 0;
8607 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8608 spin_lock(&ioa_cfg->hrrq[i]._lock);
8609 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8610 spin_unlock(&ioa_cfg->hrrq[i]._lock);
/* Already in bringdown: fail outstanding ops and wake reset waiters. */
8614 if (ioa_cfg->in_ioa_bringdown) {
8615 ioa_cfg->reset_cmd = NULL;
8616 ioa_cfg->in_reset_reload = 0;
8617 ipr_fail_all_ops(ioa_cfg);
8618 wake_up_all(&ioa_cfg->reset_wait_q);
/* Release the midlayer; host_lock is dropped around the unblock call. */
8620 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8621 spin_unlock_irq(ioa_cfg->host->host_lock);
8622 scsi_unblock_requests(ioa_cfg->host);
8623 spin_lock_irq(ioa_cfg->host->host_lock);
/* Otherwise: begin bringdown with no adapter-cooperative shutdown. */
8627 ioa_cfg->in_ioa_bringdown = 1;
8628 shutdown_type = IPR_SHUTDOWN_NONE;
8632 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8637 * ipr_reset_freeze - Hold off all I/O activity
8638 * @ipr_cmd: ipr command struct
8640 * Description: If the PCI slot is frozen, hold off all I/O
8641 * activity; then, as soon as the slot is available again,
8642 * initiate an adapter reset.
/* Return value: IPR_RC_JOB_RETURN (job resumes when the command completes). */
8644 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8646 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8649 /* Disallow new interrupts, avoid loop */
8650 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8651 spin_lock(&ioa_cfg->hrrq[i]._lock);
8652 ioa_cfg->hrrq[i].allow_interrupts = 0;
8653 spin_unlock(&ioa_cfg->hrrq[i]._lock);
/* Park the reset command on the pending queue until the slot recovers. */
8656 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8657 ipr_cmd->done = ipr_reset_ioa_job;
8658 return IPR_RC_JOB_RETURN;
8662 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8663 * @pdev: PCI device struct
8665 * Description: This routine is called to tell us that the MMIO
8666 * access to the IOA has been restored
/* PCI error-recovery callback; always asks the core for a slot reset. */
8668 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8670 unsigned long flags = 0;
8671 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8673 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* Probe not finished yet: snapshot config space for the later restore. */
8674 if (!ioa_cfg->probe_done)
8675 pci_save_state(pdev);
8676 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8677 return PCI_ERS_RESULT_NEED_RESET;
8681 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8682 * @pdev: PCI device struct
8684 * Description: This routine is called to tell us that the PCI bus
8685 * is down. Can't do anything here, except put the device driver
8686 * into a holding pattern, waiting for the PCI bus to come back.
8688 static void ipr_pci_frozen(struct pci_dev *pdev)
8690 unsigned long flags = 0;
8691 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8693 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* Only freeze I/O if the adapter finished probing; otherwise nothing to do. */
8694 if (ioa_cfg->probe_done)
8695 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8696 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8700 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8701 * @pdev: PCI device struct
8703 * Description: This routine is called by the pci error recovery
8704 * code after the PCI slot has been reset, just before we
8705 * should resume normal operations.
/* NOTE(review): lines 8716 and 8718-8719 ("else"/"} else") are absent. */
8707 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8709 unsigned long flags = 0;
8710 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8712 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8713 if (ioa_cfg->probe_done) {
/* Chips needing a warm reset go through the full shutdown-based path... */
8714 if (ioa_cfg->needs_warm_reset)
8715 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/* ...others restart the job at the restore-config-space step. */
8717 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
/* Wake anyone waiting out PCI error recovery (see eeh_wait_q users). */
8720 wake_up_all(&ioa_cfg->eeh_wait_q);
8721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8722 return PCI_ERS_RESULT_RECOVERED;
8726 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8727 * @pdev: PCI device struct
8729 * Description: This routine is called when the PCI bus has
8730 * permanently failed.
8732 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8734 unsigned long flags = 0;
8735 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8738 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8739 if (ioa_cfg->probe_done) {
/* Abandon any pending dump; the adapter will never come back. */
8740 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8741 ioa_cfg->sdt_state = ABORT_DUMP;
/* Force the retry counter to its last value so the next reset gives up. */
8742 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8743 ioa_cfg->in_ioa_bringdown = 1;
/* Stop every HRRQ from accepting commands before initiating bringdown. */
8744 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8745 spin_lock(&ioa_cfg->hrrq[i]._lock);
8746 ioa_cfg->hrrq[i].allow_cmds = 0;
8747 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8750 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/* Release anyone blocked waiting for error recovery to finish. */
8752 wake_up_all(&ioa_cfg->eeh_wait_q);
8753 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8757 * ipr_pci_error_detected - Called when a PCI error is detected.
8758 * @pdev: PCI device struct
8759 * @state: PCI channel state
8761 * Description: Called when a PCI error is detected.
8764 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
/* NOTE(review): the "switch (state) {" opener line is absent here. */
8766 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8767 pci_channel_state_t state)
8770 case pci_channel_io_frozen:
8771 ipr_pci_frozen(pdev);
8772 return PCI_ERS_RESULT_CAN_RECOVER;
8773 case pci_channel_io_perm_failure:
8774 ipr_pci_perm_failure(pdev);
8775 return PCI_ERS_RESULT_DISCONNECT;
/* Any other channel state: request a reset from the PCI core. */
8780 return PCI_ERS_RESULT_NEED_RESET;
8784 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8785 * @ioa_cfg: ioa cfg struct
8787 * Description: This is the second phase of adapter intialization
8788 * This function takes care of initilizing the adapter to the point
8789 * where it can accept new commands.
8792 * 0 on success / -EIO on failure
/* NOTE(review): the "} else" between the two reset calls is absent here. */
8794 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8797 unsigned long host_lock_flags = 0;
8800 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8801 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8802 ioa_cfg->probe_done = 1;
/* Hard reset path for chips flagged during part 1 of probe... */
8803 if (ioa_cfg->needs_hard_reset) {
8804 ioa_cfg->needs_hard_reset = 0;
8805 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/* ...otherwise go straight to enabling the IOA. */
8807 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8809 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8816 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8817 * @ioa_cfg: ioa config struct
/* Safe to call on a partially-initialized ioa_cfg: every free is guarded. */
8822 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8826 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8827 if (ioa_cfg->ipr_cmnd_list[i])
8828 dma_pool_free(ioa_cfg->ipr_cmd_pool,
8829 ioa_cfg->ipr_cmnd_list[i],
8830 ioa_cfg->ipr_cmnd_list_dma[i]);
8832 ioa_cfg->ipr_cmnd_list[i] = NULL;
8835 if (ioa_cfg->ipr_cmd_pool)
8836 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
/* NULL the pointers so a second call (from error unwinding) is harmless. */
8838 kfree(ioa_cfg->ipr_cmnd_list);
8839 kfree(ioa_cfg->ipr_cmnd_list_dma);
8840 ioa_cfg->ipr_cmnd_list = NULL;
8841 ioa_cfg->ipr_cmnd_list_dma = NULL;
8842 ioa_cfg->ipr_cmd_pool = NULL;
8846 * ipr_free_mem - Frees memory allocated for an adapter
8847 * @ioa_cfg: ioa cfg struct
/* Mirror of ipr_alloc_mem: releases resources in reverse allocation order. */
8852 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8856 kfree(ioa_cfg->res_entries);
8857 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
8858 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8859 ipr_free_cmd_blks(ioa_cfg);
/* Per-HRRQ host response queues. */
8861 for (i = 0; i < ioa_cfg->hrrq_num; i++)
8862 dma_free_coherent(&ioa_cfg->pdev->dev,
8863 sizeof(u32) * ioa_cfg->hrrq[i].size,
8864 ioa_cfg->hrrq[i].host_rrq,
8865 ioa_cfg->hrrq[i].host_rrq_dma);
8867 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
8868 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
/* Host-controlled async message (HCAM) buffers. */
8870 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8871 dma_free_coherent(&ioa_cfg->pdev->dev,
8872 sizeof(struct ipr_hostrcb),
8873 ioa_cfg->hostrcb[i],
8874 ioa_cfg->hostrcb_dma[i]);
8877 ipr_free_dump(ioa_cfg);
8878 kfree(ioa_cfg->trace);
8882 * ipr_free_all_resources - Free all allocated resources for an adapter.
8883 * @ipr_cmd: ipr command struct
8885 * This function frees all allocated resources for the
8886 * specified adapter.
/* NOTE(review): lines 8898, 8901-8902 are absent — the "int i;" and the
 * "} else" selecting the legacy free_irq path were stripped. */
8891 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8893 struct pci_dev *pdev = ioa_cfg->pdev;
/* MSI/MSI-X: one IRQ per vector; legacy: single shared pdev->irq. */
8896 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8897 ioa_cfg->intr_flag == IPR_USE_MSIX) {
8899 for (i = 0; i < ioa_cfg->nvectors; i++)
8900 free_irq(ioa_cfg->vectors_info[i].vec,
8903 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8905 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
8906 pci_disable_msi(pdev);
8907 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8908 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
8909 pci_disable_msix(pdev);
8910 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
/* Unmap registers, release BARs, free driver memory, drop the device. */
8913 iounmap(ioa_cfg->hdw_dma_regs);
8914 pci_release_regions(pdev);
8915 ipr_free_mem(ioa_cfg);
8916 scsi_host_put(ioa_cfg->host);
8917 pci_disable_device(pdev);
8922 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8923 * @ioa_cfg: ioa config struct
8926 * 0 on success / -ENOMEM on allocation failure
/*
 * NOTE(review): several structural lines are absent from this listing
 * (gaps at 8937-8940, 8943, 8946-8948, 8951, 8956-8957, 8966-8967,
 * 8986-8991, 9009, 9015, 9025-9031) — "return -ENOMEM"s, "else" arms
 * and sis64/sis32 selectors were stripped; the visible statements do
 * not all execute sequentially.
 */
8928 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8930 struct ipr_cmnd *ipr_cmd;
8931 struct ipr_ioarcb *ioarcb;
8932 dma_addr_t dma_addr;
8933 int i, entries_each_hrrq, hrrq_id = 0;
/* One DMA pool for all command blocks, 512-byte aligned. */
8935 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
8936 sizeof(struct ipr_cmnd), 512, 0);
8938 if (!ioa_cfg->ipr_cmd_pool)
8941 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8942 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8944 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8945 ipr_free_cmd_blks(ioa_cfg);
/* Partition the command-id space across HRRQs.  With multiple HRRQs,
 * queue 0 holds the internal commands; the rest split the base blocks. */
8949 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8950 if (ioa_cfg->hrrq_num > 1) {
8952 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8953 ioa_cfg->hrrq[i].min_cmd_id = 0;
8954 ioa_cfg->hrrq[i].max_cmd_id =
8955 (entries_each_hrrq - 1);
8958 IPR_NUM_BASE_CMD_BLKS/
8959 (ioa_cfg->hrrq_num - 1);
8960 ioa_cfg->hrrq[i].min_cmd_id =
8961 IPR_NUM_INTERNAL_CMD_BLKS +
8962 (i - 1) * entries_each_hrrq;
8963 ioa_cfg->hrrq[i].max_cmd_id =
8964 (IPR_NUM_INTERNAL_CMD_BLKS +
8965 i * entries_each_hrrq - 1);
/* Single-HRRQ case: the one queue owns every command id. */
8968 entries_each_hrrq = IPR_NUM_CMD_BLKS;
8969 ioa_cfg->hrrq[i].min_cmd_id = 0;
8970 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8972 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8975 BUG_ON(ioa_cfg->hrrq_num == 0);
/* Hand any ids left over from integer division to the last HRRQ. */
8977 i = IPR_NUM_CMD_BLKS -
8978 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8980 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8981 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
/* Allocate and wire up each command block from the DMA pool. */
8984 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8985 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8988 ipr_free_cmd_blks(ioa_cfg);
8992 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8993 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8994 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8996 ioarcb = &ipr_cmd->ioarcb;
8997 ipr_cmd->dma_addr = dma_addr;
/* IOARCB host address: 64-bit form for SIS64, 32-bit otherwise. */
8999 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9001 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9003 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9004 if (ioa_cfg->sis64) {
9005 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9006 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9007 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9008 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9010 ioarcb->write_ioadl_addr =
9011 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9012 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9013 ioarcb->ioasa_host_pci_addr =
9014 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9016 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9017 ipr_cmd->cmd_index = i;
9018 ipr_cmd->ioa_cfg = ioa_cfg;
9019 ipr_cmd->sense_buffer_dma = dma_addr +
9020 offsetof(struct ipr_cmnd, sense_buffer);
/* Attach the block to its owning HRRQ's free list. */
9022 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9023 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9024 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9025 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9033 * ipr_alloc_mem - Allocate memory for an adapter
9034 * @ioa_cfg: ioa config struct
9037 * 0 on success / non-zero for error
/*
 * Allocation order: res_entries -> vpd_cbs -> cmd blocks -> host RRQs
 * -> cfg table -> hostrcbs -> trace.  Failure at any point unwinds via
 * the goto labels at the bottom (standard kernel cleanup pattern).
 * NOTE(review): some label lines and GFP arguments are absent from this
 * listing (gaps at e.g. 9059-9060, 9071-9072, 9112-9117, 9119, 9123,
 * 9126, 9132, 9135, 9140+).
 */
9039 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9041 struct pci_dev *pdev = ioa_cfg->pdev;
9042 int i, rc = -ENOMEM;
9045 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9046 ioa_cfg->max_devs_supported, GFP_KERNEL);
9048 if (!ioa_cfg->res_entries)
/* Every resource entry starts on the free list, pointing back at us. */
9051 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9052 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9053 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9056 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9057 sizeof(struct ipr_misc_cbs),
9058 &ioa_cfg->vpd_cbs_dma,
9061 if (!ioa_cfg->vpd_cbs)
9062 goto out_free_res_entries;
9064 if (ipr_alloc_cmd_blks(ioa_cfg))
9065 goto out_free_vpd_cbs;
/* One coherent host response queue per HRRQ; unwind earlier ones on fail. */
9067 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9068 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9069 sizeof(u32) * ioa_cfg->hrrq[i].size,
9070 &ioa_cfg->hrrq[i].host_rrq_dma,
9073 if (!ioa_cfg->hrrq[i].host_rrq) {
9075 dma_free_coherent(&pdev->dev,
9076 sizeof(u32) * ioa_cfg->hrrq[i].size,
9077 ioa_cfg->hrrq[i].host_rrq,
9078 ioa_cfg->hrrq[i].host_rrq_dma);
9079 goto out_ipr_free_cmd_blocks;
9081 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9084 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9085 ioa_cfg->cfg_table_size,
9086 &ioa_cfg->cfg_table_dma,
9089 if (!ioa_cfg->u.cfg_table)
9090 goto out_free_host_rrq;
/* HCAM buffers for async error/config-change notifications. */
9092 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9093 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9094 sizeof(struct ipr_hostrcb),
9095 &ioa_cfg->hostrcb_dma[i],
9098 if (!ioa_cfg->hostrcb[i])
9099 goto out_free_hostrcb_dma;
9101 ioa_cfg->hostrcb[i]->hostrcb_dma =
9102 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9103 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9104 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9107 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9108 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9110 if (!ioa_cfg->trace)
9111 goto out_free_hostrcb_dma;
/* ---- error unwind, reverse order of allocation ---- */
9118 out_free_hostrcb_dma:
9120 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9121 ioa_cfg->hostrcb[i],
9122 ioa_cfg->hostrcb_dma[i]);
9124 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9125 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9127 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9128 dma_free_coherent(&pdev->dev,
9129 sizeof(u32) * ioa_cfg->hrrq[i].size,
9130 ioa_cfg->hrrq[i].host_rrq,
9131 ioa_cfg->hrrq[i].host_rrq_dma);
9133 out_ipr_free_cmd_blocks:
9134 ipr_free_cmd_blks(ioa_cfg);
9136 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9137 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9138 out_free_res_entries:
9139 kfree(ioa_cfg->res_entries);
9144 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9145 * @ioa_cfg: ioa config struct
9150 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9154 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9155 ioa_cfg->bus_attr[i].bus = i;
9156 ioa_cfg->bus_attr[i].qas_enabled = 0;
9157 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
/* Honor the ipr_max_speed module parameter if it indexes a valid rate. */
9158 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9159 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9161 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9166 * ipr_init_regs - Initialize IOA registers
9167 * @ioa_cfg: ioa config struct
/* Turns per-chip register offsets into absolute ioremapped addresses. */
9172 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9174 const struct ipr_interrupt_offsets *p;
9175 struct ipr_interrupts *t;
/* NOTE(review): the "t = &ioa_cfg->regs;" assignment line (9179) and the
 * declaration of "base" are absent from this listing. */
9178 p = &ioa_cfg->chip_cfg->regs;
9180 base = ioa_cfg->hdw_dma_regs;
9182 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9183 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9184 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9185 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9186 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9187 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9188 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9189 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9190 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9191 t->ioarrin_reg = base + p->ioarrin_reg;
9192 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9193 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9194 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9195 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9196 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9197 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
/* SIS64 chips expose additional dump/feedback/endian registers. */
9199 if (ioa_cfg->sis64) {
9200 t->init_feedback_reg = base + p->init_feedback_reg;
9201 t->dump_addr_reg = base + p->dump_addr_reg;
9202 t->dump_data_reg = base + p->dump_data_reg;
9203 t->endian_swap_reg = base + p->endian_swap_reg;
9208 * ipr_init_ioa_cfg - Initialize IOA config struct
9209 * @ioa_cfg: ioa config struct
9210 * @host: scsi host struct
9211 * @pdev: PCI dev struct
9216 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9217 struct Scsi_Host *host, struct pci_dev *pdev)
9221 ioa_cfg->host = host;
9222 ioa_cfg->pdev = pdev;
9223 ioa_cfg->log_level = ipr_log_level;
9224 ioa_cfg->doorbell = IPR_DOORBELL;
/* Eye-catcher labels make driver structures findable in a memory dump. */
9225 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9226 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9227 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9228 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9229 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9230 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9232 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9233 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9234 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9235 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9236 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9237 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9238 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9239 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9240 ioa_cfg->sdt_state = INACTIVE;
9242 ipr_initialize_bus_attr(ioa_cfg);
9243 ioa_cfg->max_devs_supported = ipr_max_devs;
/* SIS64 vs SIS32: different target/LUN limits and config-table layouts. */
9245 if (ioa_cfg->sis64) {
9246 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9247 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9248 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9249 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9250 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9251 + ((sizeof(struct ipr_config_table_entry64)
9252 * ioa_cfg->max_devs_supported)));
9254 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9255 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9256 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9257 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9258 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9259 + ((sizeof(struct ipr_config_table_entry)
9260 * ioa_cfg->max_devs_supported)));
9263 host->max_channel = IPR_VSET_BUS;
9264 host->unique_id = host->host_no;
9265 host->max_cmd_len = IPR_MAX_CDB_LEN;
9266 host->can_queue = ioa_cfg->max_cmds;
9267 pci_set_drvdata(pdev, ioa_cfg);
/* Per-HRRQ queues and locks; which lock is used depends on a condition
 * whose selector line is absent from this listing (gap at 9273/9275). */
9269 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9270 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9271 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9272 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9274 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9276 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9281 * ipr_get_chip_info - Find adapter chip information
9282 * @dev_id: PCI device id struct
9285 * ptr to chip information on success / NULL on failure
/* Linear scan of the static ipr_chip table by PCI vendor/device id. */
9287 static const struct ipr_chip_t *
9288 ipr_get_chip_info(const struct pci_device_id *dev_id)
9292 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9293 if (ipr_chip[i].vendor == dev_id->vendor &&
9294 ipr_chip[i].device == dev_id->device)
9295 return &ipr_chip[i];
9300 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9302 * @ioa_cfg: ioa config struct
9307 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9309 struct pci_dev *pdev = ioa_cfg->pdev;
/* Sleep (bounded) until the channel comes back, then restore config space.
 * The eeh_wait_q is woken from the PCI error-recovery callbacks above. */
9311 if (pci_channel_offline(pdev)) {
9312 wait_event_timeout(ioa_cfg->eeh_wait_q,
9313 !pci_channel_offline(pdev),
9314 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9315 pci_restore_state(pdev);
/*
 * ipr_enable_msix - Enable 1..ipr_number_of_msix MSI-X vectors.
 * Records each vector in vectors_info[] and the count in nvectors.
 * NOTE(review): the error branch after pci_enable_msix_range() and the
 * final return are absent from this listing (gaps at 9329-9333, 9337+).
 */
9319 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9321 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9324 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9325 entries[i].entry = i;
9327 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9328 entries, 1, ipr_number_of_msix);
/* On failure, wait out any in-flight PCI error recovery before giving up. */
9330 ipr_wait_for_pci_err_recovery(ioa_cfg);
9334 for (i = 0; i < vectors; i++)
9335 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9336 ioa_cfg->nvectors = vectors;
/*
 * ipr_enable_msi - Enable 1..ipr_number_of_msix MSI vectors.
 * MSI vectors are consecutive IRQ numbers starting at pdev->irq.
 * NOTE(review): the error branch and final return are absent from this
 * listing (gaps at 9346-9350, 9354+), same shape as ipr_enable_msix.
 */
9341 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9345 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9347 ipr_wait_for_pci_err_recovery(ioa_cfg);
9351 for (i = 0; i < vectors; i++)
9352 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9353 ioa_cfg->nvectors = vectors;
/*
 * name_msi_vectors - Build a "host<no>-<vec>" description string for each
 * enabled vector, used later as the request_irq() name.
 */
9358 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9360 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9362 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9363 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9364 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
/* Explicitly NUL-terminate at the current length (n excludes the NUL). */
9365 ioa_cfg->vectors_info[vec_idx].
9366 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
/*
 * ipr_request_other_msi_irqs - request_irq() for vectors 1..nvectors-1
 * (vector 0 is requested elsewhere).  On failure, frees the vectors
 * already requested and returns the error.
 * NOTE(review): the request_irq handler/flags arguments, the unwind
 * loop and the returns are absent from this listing (gaps at 9376-9377,
 * 9379-9381, 9383-9389).
 */
9370 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9374 for (i = 1; i < ioa_cfg->nvectors; i++) {
9375 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9378 ioa_cfg->vectors_info[i].desc,
9382 free_irq(ioa_cfg->vectors_info[i].vec,
9391 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9392 * @pdev: PCI device struct
9394 * Description: Simply set the msi_received flag to 1 indicating that
9395 * Message Signaled Interrupts are supported.
9398 * 0 on success / non-zero on failure
9400 static irqreturn_t ipr_test_intr(int irq, void *devp)
9402 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9403 unsigned long lock_flags = 0;
9404 irqreturn_t rc = IRQ_HANDLED;
9406 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9407 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Flag receipt and wake ipr_test_msi(), which sleeps on msi_wait_q. */
9409 ioa_cfg->msi_received = 1;
9410 wake_up(&ioa_cfg->msi_wait_q);
9412 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9417 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9418 * @pdev: PCI device struct
9420 * Description: The return value from pci_enable_msi_range() can not always be
9421 * trusted. This routine sets up and initiates a test interrupt to determine
9422 * if the interrupt is received via the ipr_test_intr() service routine.
9423 * If the tests fails, the driver will fall back to LSI.
9426 * 0 on success / non-zero on failure
/* NOTE(review): the declaration of rc, an early "return rc" after the
 * request_irq failure, and the final return are absent from this
 * listing (gaps at 9429-9430, 9450, 9453, 9459, 9463, 9466, 9468, 9473+). */
9428 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9431 volatile u32 int_reg;
9432 unsigned long lock_flags = 0;
/* Arm the test: clear state, unmask only the IO debug-acknowledge bit. */
9436 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9437 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9438 ioa_cfg->msi_received = 0;
9439 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9440 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9441 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9442 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Install the throwaway test handler on the MSI-X vector or legacy IRQ. */
9444 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9445 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9447 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9449 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9451 } else if (ipr_debug)
9452 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
/* Trigger a debug-acknowledge interrupt and wait up to 1s for delivery. */
9454 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9455 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9456 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9457 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9458 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9460 if (!ioa_cfg->msi_received) {
9461 /* MSI test failed */
9462 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9464 } else if (ipr_debug)
9465 dev_info(&pdev->dev, "MSI test succeeded.\n");
9467 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Tear down the test handler regardless of outcome. */
9469 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9470 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9472 free_irq(pdev->irq, ioa_cfg);
9479 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
9480 * @pdev: PCI device struct
9481 * @dev_id: PCI device id struct
9484 * 0 on success / non-zero on failure
9486 static int ipr_probe_ioa(struct pci_dev *pdev,
9487 const struct pci_device_id *dev_id)
9489 struct ipr_ioa_cfg *ioa_cfg;
9490 struct Scsi_Host *host;
9491 unsigned long ipr_regs_pci;
9492 void __iomem *ipr_regs;
9493 int rc = PCIBIOS_SUCCESSFUL;
9494 volatile u32 mask, uproc, interrupts;
9495 unsigned long lock_flags, driver_lock_flags;
9499 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
/* Allocate the Scsi_Host with our ioa_cfg embedded in hostdata */
9500 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9503 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9508 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9509 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9510 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
/* Identify the chip from the PCI id; bail out if unsupported */
9512 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9514 if (!ioa_cfg->ipr_chip) {
9515 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9516 dev_id->vendor, dev_id->device);
9517 goto out_scsi_host_put;
9520 /* set SIS 32 or SIS 64 */
9521 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9522 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9523 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9524 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
/* Module parameter overrides the per-device transition-to-op timeout */
9526 if (ipr_transop_timeout)
9527 ioa_cfg->transop_timeout = ipr_transop_timeout;
9528 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9529 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9531 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9533 ioa_cfg->revid = pdev->revision;
9535 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
/* Claim BAR 0 (adapter register space) */
9537 ipr_regs_pci = pci_resource_start(pdev, 0);
9539 rc = pci_request_regions(pdev, IPR_NAME);
9542 "Couldn't register memory range of registers\n");
9543 goto out_scsi_host_put;
/* Enable the device, retrying once after EEH recovery if it is offline */
9546 rc = pci_enable_device(pdev);
9548 if (rc || pci_channel_offline(pdev)) {
9549 if (pci_channel_offline(pdev)) {
9550 ipr_wait_for_pci_err_recovery(ioa_cfg);
9551 rc = pci_enable_device(pdev);
9555 dev_err(&pdev->dev, "Cannot enable adapter\n");
9556 ipr_wait_for_pci_err_recovery(ioa_cfg);
9557 goto out_release_regions;
9561 ipr_regs = pci_ioremap_bar(pdev, 0);
9565 "Couldn't map memory range of registers\n");
9570 ioa_cfg->hdw_dma_regs = ipr_regs;
9571 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9572 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9574 ipr_init_regs(ioa_cfg);
/* SIS64 prefers a 64-bit DMA mask, falling back to 32-bit on failure */
9576 if (ioa_cfg->sis64) {
9577 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9579 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9580 rc = dma_set_mask_and_coherent(&pdev->dev,
9584 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9587 dev_err(&pdev->dev, "Failed to set DMA mask\n");
9591 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9592 ioa_cfg->chip_cfg->cache_line_size);
9594 if (rc != PCIBIOS_SUCCESSFUL) {
9595 dev_err(&pdev->dev, "Write of cache line size failed\n");
9596 ipr_wait_for_pci_err_recovery(ioa_cfg);
9601 /* Issue MMIO read to ensure card is not in EEH */
9602 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9603 ipr_wait_for_pci_err_recovery(ioa_cfg);
/* Clamp the user-requested MSI-X vector count to the hardware maximum */
9605 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9606 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9607 IPR_MAX_MSIX_VECTORS);
9608 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
/* Interrupt mode selection: try MSI-X, then MSI, then legacy LSI */
9611 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9612 ipr_enable_msix(ioa_cfg) == 0)
9613 ioa_cfg->intr_flag = IPR_USE_MSIX;
9614 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9615 ipr_enable_msi(ioa_cfg) == 0)
9616 ioa_cfg->intr_flag = IPR_USE_MSI;
9618 ioa_cfg->intr_flag = IPR_USE_LSI;
9619 ioa_cfg->nvectors = 1;
9620 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9623 pci_set_master(pdev);
/* If EEH tripped, wait for recovery and retry enabling bus mastering */
9625 if (pci_channel_offline(pdev)) {
9626 ipr_wait_for_pci_err_recovery(ioa_cfg);
9627 pci_set_master(pdev);
9628 if (pci_channel_offline(pdev)) {
9630 goto out_msi_disable;
/*
 * Verify MSI/MSI-X actually delivers interrupts; on -EOPNOTSUPP fall
 * back to legacy LSI, disabling whichever mode had been enabled.
 */
9634 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9635 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9636 rc = ipr_test_msi(ioa_cfg, pdev);
9637 if (rc == -EOPNOTSUPP) {
9638 ipr_wait_for_pci_err_recovery(ioa_cfg);
9639 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9640 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9641 pci_disable_msi(pdev);
9642 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9643 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9644 pci_disable_msix(pdev);
9647 ioa_cfg->intr_flag = IPR_USE_LSI;
9648 ioa_cfg->nvectors = 1;
9651 goto out_msi_disable;
9653 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9654 dev_info(&pdev->dev,
9655 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9656 ioa_cfg->nvectors, pdev->irq);
9657 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9658 dev_info(&pdev->dev,
9659 "Request for %d MSIXs succeeded.",
/* One HRRQ per vector, bounded by online CPUs and the driver maximum */
9664 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9665 (unsigned int)num_online_cpus(),
9666 (unsigned int)IPR_MAX_HRRQ_NUM);
9668 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9669 goto out_msi_disable;
9671 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9672 goto out_msi_disable;
9674 rc = ipr_alloc_mem(ioa_cfg);
9677 "Couldn't allocate enough memory for device driver!\n");
9678 goto out_msi_disable;
9681 /* Save away PCI config space for use following IOA reset */
9682 rc = pci_save_state(pdev);
9684 if (rc != PCIBIOS_SUCCESSFUL) {
9685 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9691 * If HRRQ updated interrupt is not masked, or reset alert is set,
9692 * the card is in an unknown state and needs a hard reset
9694 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9695 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9696 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9697 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9698 ioa_cfg->needs_hard_reset = 1;
9699 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9700 ioa_cfg->needs_hard_reset = 1;
9701 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9702 ioa_cfg->ioa_unit_checked = 1;
9704 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9705 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9706 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Register the real interrupt handlers: vector 0 then the rest, or LSI */
9708 if (ioa_cfg->intr_flag == IPR_USE_MSI
9709 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9710 name_msi_vectors(ioa_cfg);
9711 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9713 ioa_cfg->vectors_info[0].desc,
9716 rc = ipr_request_other_msi_irqs(ioa_cfg);
9718 rc = request_irq(pdev->irq, ipr_isr,
9720 IPR_NAME, &ioa_cfg->hrrq[0]);
9723 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
/* Some adapters (e.g. rev-0 Obsidian-E) need a PCI warm reset method */
9728 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9729 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9730 ioa_cfg->needs_warm_reset = 1;
9731 ioa_cfg->reset = ipr_reset_slot_reset;
9733 ioa_cfg->reset = ipr_reset_start_bist;
/* Publish the adapter on the global driver list */
9735 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9736 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9737 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
/* Error unwind labels: release resources in reverse acquisition order */
9744 ipr_free_mem(ioa_cfg);
9746 ipr_wait_for_pci_err_recovery(ioa_cfg);
9747 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9748 pci_disable_msi(pdev);
9749 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9750 pci_disable_msix(pdev);
9754 pci_disable_device(pdev);
9755 out_release_regions:
9756 pci_release_regions(pdev);
9758 scsi_host_put(host);
9763 * ipr_initiate_ioa_bringdown - Bring down an adapter
9764 * @ioa_cfg: ioa config struct
9765 * @shutdown_type: shutdown type
9767 * Description: This function will initiate bringing down the adapter.
9768 * This consists of issuing an IOA shutdown to the adapter
9769 * to flush the cache, and running BIST.
9770 * If the caller needs to wait on the completion of the reset,
9771 * the caller must sleep on the reset_wait_q.
9776 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9777 enum ipr_shutdown_type shutdown_type)
/* Abort any dump that is still waiting; the adapter is going away */
9780 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9781 ioa_cfg->sdt_state = ABORT_DUMP;
9782 ioa_cfg->reset_retries = 0;
/* Mark bringdown in progress, then kick off the reset state machine */
9783 ioa_cfg->in_ioa_bringdown = 1;
9784 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9789 * __ipr_remove - Remove a single adapter
9790 * @pdev: pci device struct
9792 * Adapter hot plug remove entry point.
9797 static void __ipr_remove(struct pci_dev *pdev)
9799 unsigned long host_lock_flags = 0;
9800 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9802 unsigned long driver_lock_flags;
9805 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Wait out any reset/reload already in progress before tearing down */
9806 while (ioa_cfg->in_reset_reload) {
9807 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9808 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9809 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Flag every HRRQ so no new commands are accepted during removal */
9812 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9813 spin_lock(&ioa_cfg->hrrq[i]._lock);
9814 ioa_cfg->hrrq[i].removing_ioa = 1;
9815 spin_unlock(&ioa_cfg->hrrq[i]._lock);
/* Flush cache and shut the adapter down, then wait for completion */
9818 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9820 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9821 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9822 flush_work(&ioa_cfg->work_q);
9823 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9824 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Unlink this adapter from the global driver list */
9826 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9827 list_del(&ioa_cfg->queue);
9828 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9830 if (ioa_cfg->sdt_state == ABORT_DUMP)
9831 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9832 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9834 ipr_free_all_resources(ioa_cfg);
9840 * ipr_remove - IOA hot plug remove entry point
9841 * @pdev: pci device struct
9843 * Adapter hot plug remove entry point.
9848 static void ipr_remove(struct pci_dev *pdev)
9850 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Remove the sysfs trace/dump attributes before taking the host away */
9854 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9856 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9858 scsi_remove_host(ioa_cfg->host);
9866 * ipr_probe - Adapter hot plug add entry point
9869 * 0 on success / non-zero on failure
9871 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9873 struct ipr_ioa_cfg *ioa_cfg;
/* Stage 1: allocate and do first-stage adapter initialization */
9876 rc = ipr_probe_ioa(pdev, dev_id);
9881 ioa_cfg = pci_get_drvdata(pdev);
/* Stage 2: bring the adapter operational */
9882 rc = ipr_probe_ioa_part2(ioa_cfg);
9889 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
/* Create sysfs trace/dump attributes; unwind the host on failure */
9896 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9900 scsi_remove_host(ioa_cfg->host);
9905 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9909 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9911 scsi_remove_host(ioa_cfg->host);
9916 scsi_scan_host(ioa_cfg->host);
9917 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
/* Enable blk-iopoll on secondary HRRQs for SIS64 multi-vector setups */
9919 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9920 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9921 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
9922 ioa_cfg->iopoll_weight, ipr_iopoll);
9923 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
9927 schedule_work(&ioa_cfg->work_q);
9932 * ipr_shutdown - Shutdown handler.
9933 * @pdev: pci device struct
9935 * This function is invoked upon system shutdown/reboot. It will issue
9936 * an adapter shutdown to the adapter to flush the write cache.
9941 static void ipr_shutdown(struct pci_dev *pdev)
9943 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9944 unsigned long lock_flags = 0;
9947 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Disable iopoll on secondary HRRQs before initiating bringdown */
9948 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9949 ioa_cfg->iopoll_weight = 0;
9950 for (i = 1; i < ioa_cfg->hrrq_num; i++)
9951 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
/* Wait for any in-flight reset/reload to finish */
9954 while (ioa_cfg->in_reset_reload) {
9955 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9956 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9957 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Issue a normal shutdown (flushes write cache) and wait for completion */
9960 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9961 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9962 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/*
 * PCI device ID table: (vendor, device, subvendor, subdevice) tuples for
 * every supported adapter.  The trailing driver_data field carries IPR_USE_*
 * behavior flags (e.g. IPR_USE_LONG_TRANSOP_TIMEOUT, IPR_USE_PCI_WARM_RESET)
 * consumed by ipr_probe_ioa().
 */
9965 static struct pci_device_id ipr_pci_table[] = {
9966 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9967 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
9968 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9969 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
9970 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9971 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
9972 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9973 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
9974 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9975 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
9976 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9977 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
9978 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9979 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
9980 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9981 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9982 IPR_USE_LONG_TRANSOP_TIMEOUT },
9983 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9984 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9985 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9986 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9987 IPR_USE_LONG_TRANSOP_TIMEOUT },
9988 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9989 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9990 IPR_USE_LONG_TRANSOP_TIMEOUT },
9991 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9992 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9993 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9994 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9995 IPR_USE_LONG_TRANSOP_TIMEOUT},
9996 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9997 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9998 IPR_USE_LONG_TRANSOP_TIMEOUT },
9999 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10000 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10001 IPR_USE_LONG_TRANSOP_TIMEOUT },
10002 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10003 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10004 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10005 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10006 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10007 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10008 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10009 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10010 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10011 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10012 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10013 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10014 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10015 IPR_USE_LONG_TRANSOP_TIMEOUT },
10016 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10017 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10018 IPR_USE_LONG_TRANSOP_TIMEOUT },
10019 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10020 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10021 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10022 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10023 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10024 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10025 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10026 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10027 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10028 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10029 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10030 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10031 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10032 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10033 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10034 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10035 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10036 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10037 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10038 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10039 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10040 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10041 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10042 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10043 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10044 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10045 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10046 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10047 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10048 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10049 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10050 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10051 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10052 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10053 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10054 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10055 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10056 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10057 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10058 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10059 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10060 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10061 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10062 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10063 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10064 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10065 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10066 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10067 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10068 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10069 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10070 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
/* Export the table so module loading tools can match devices */
10073 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/* PCI error recovery (EEH) callbacks for this driver */
10075 static const struct pci_error_handlers ipr_err_handler = {
10076 .error_detected = ipr_pci_error_detected,
10077 .mmio_enabled = ipr_pci_mmio_enabled,
10078 .slot_reset = ipr_pci_slot_reset,
/* PCI driver registration: probe/remove/shutdown plus EEH error handlers */
10081 static struct pci_driver ipr_driver = {
10083 .id_table = ipr_pci_table,
10084 .probe = ipr_probe,
10085 .remove = ipr_remove,
10086 .shutdown = ipr_shutdown,
10087 .err_handler = &ipr_err_handler,
10091 * ipr_halt_done - Shutdown prepare completion
/* Completion callback: just recycle the command block onto the free queue */
10096 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10098 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10102 * ipr_halt - Issue shutdown prepare to all adapters
10105 * NOTIFY_OK on success / NOTIFY_DONE on failure
10107 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10109 struct ipr_cmnd *ipr_cmd;
10110 struct ipr_ioa_cfg *ioa_cfg;
10111 unsigned long flags = 0, driver_lock_flags;
/* Only act on events that actually halt, restart, or power off the system */
10113 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10114 return NOTIFY_DONE;
10116 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10118 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10119 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* Skip adapters that are not currently accepting commands */
10120 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
10121 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
/* Build and send an IOA "shutdown prepare for normal" command */
10125 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10126 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10127 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10128 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10129 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10131 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10132 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10134 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
/* Reboot notifier so ipr_halt() runs on restart/halt/power-off
 * (initializer fields not visible in this excerpt) */
10139 static struct notifier_block ipr_notifier = {
10144 * ipr_init - Module entry point
10147 * 0 on success / negative value on failure
10149 static int __init ipr_init(void)
10151 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10152 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
/* Register for reboot notifications before exposing the PCI driver */
10154 register_reboot_notifier(&ipr_notifier);
10155 return pci_register_driver(&ipr_driver);
10159 * ipr_exit - Module unload
10161 * Module unload entry point.
10166 static void __exit ipr_exit(void)
/* Tear down in reverse order of ipr_init() */
10168 unregister_reboot_notifier(&ipr_notifier);
10169 pci_unregister_driver(&ipr_driver);
/* Standard module entry/exit hookup */
10172 module_init(ipr_init);
10173 module_exit(ipr_exit);