/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- SCSI device hot plug
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.cache_line_size = 0x20,
		.set_interrupt_mask_reg = 0x0022C,
		.clr_interrupt_mask_reg = 0x00230,
		.clr_interrupt_mask_reg32 = 0x00230,
		.sense_interrupt_mask_reg = 0x0022C,
		.sense_interrupt_mask_reg32 = 0x0022C,
		.clr_interrupt_reg = 0x00228,
		.clr_interrupt_reg32 = 0x00228,
		.sense_interrupt_reg = 0x00224,
		.sense_interrupt_reg32 = 0x00224,
		.ioarrin_reg = 0x00404,
		.sense_uproc_interrupt_reg = 0x00214,
		.sense_uproc_interrupt_reg32 = 0x00214,
		.set_uproc_interrupt_reg = 0x00214,
		.set_uproc_interrupt_reg32 = 0x00214,
		.clr_uproc_interrupt_reg = 0x00218,
		.clr_uproc_interrupt_reg32 = 0x00218
	},
	{ /* Snipe and Scamp */
		.cache_line_size = 0x20,
		.set_interrupt_mask_reg = 0x00288,
		.clr_interrupt_mask_reg = 0x0028C,
		.clr_interrupt_mask_reg32 = 0x0028C,
		.sense_interrupt_mask_reg = 0x00288,
		.sense_interrupt_mask_reg32 = 0x00288,
		.clr_interrupt_reg = 0x00284,
		.clr_interrupt_reg32 = 0x00284,
		.sense_interrupt_reg = 0x00280,
		.sense_interrupt_reg32 = 0x00280,
		.ioarrin_reg = 0x00504,
		.sense_uproc_interrupt_reg = 0x00290,
		.sense_uproc_interrupt_reg32 = 0x00290,
		.set_uproc_interrupt_reg = 0x00290,
		.set_uproc_interrupt_reg32 = 0x00290,
		.clr_uproc_interrupt_reg = 0x00294,
		.clr_uproc_interrupt_reg32 = 0x00294
	},
	{ /* CRoC and Crocodile (SIS-64 adapters in ipr_chip[] below) */
		.cache_line_size = 0x20,
		.set_interrupt_mask_reg = 0x00010,
		.clr_interrupt_mask_reg = 0x00018,
		.clr_interrupt_mask_reg32 = 0x0001C,
		.sense_interrupt_mask_reg = 0x00010,
		.sense_interrupt_mask_reg32 = 0x00014,
		.clr_interrupt_reg = 0x00008,
		.clr_interrupt_reg32 = 0x0000C,
		.sense_interrupt_reg = 0x00000,
		.sense_interrupt_reg32 = 0x00004,
		.ioarrin_reg = 0x00070,
		.sense_uproc_interrupt_reg = 0x00020,
		.sense_uproc_interrupt_reg32 = 0x00024,
		.set_uproc_interrupt_reg = 0x00020,
		.set_uproc_interrupt_reg32 = 0x00024,
		.clr_uproc_interrupt_reg = 0x00028,
		.clr_uproc_interrupt_reg32 = 0x0002C,
		.init_feedback_reg = 0x0005C,
		.dump_addr_reg = 0x00064,
		.dump_data_reg = 0x00068,
		.endian_swap_reg = 0x00084
	}
};
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default: 2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
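
/*
 * Example module load (illustrative only; the parameter names come from
 * the module_param_named() declarations above):
 *
 *	modprobe ipr max_speed=2 log_level=2 fastfail=1
 *
 * fastfail and debug are declared S_IRUGO | S_IWUSR, so they can also be
 * changed at runtime through /sys/module/ipr/parameters/.
 */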
/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	"Synchronization required"},
	"Not ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	"ATA device status error"},
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, commands not allowed to this device"},
	"Illegal request, command not allowed to a secondary adapter"},
	"Illegal request, command not allowed to a non-optimized resource"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	"Aborted command, invalid descriptor"},
	"Command terminated by host"}
};
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[atomic_add_return
			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
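
/*
 * Illustrative note (not driver code): the trace is a circular buffer of
 * IPR_NUM_TRACE_ENTRIES slots. atomic_add_return() hands out strictly
 * increasing slot numbers, so concurrent completions on different HRRQs
 * never share an entry; the modulo simply wraps and overwrites the
 * oldest records:
 *
 *	idx = atomic_add_return(1, &ioa_cfg->trace_index)
 *		% IPR_NUM_TRACE_ENTRIES;
 */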
/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}
/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
				     struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
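
/*
 * Note: the trailing readl() of the sense register flushes the posted
 * MMIO writes above. A typical caller masks everything and clears all
 * pending sources in one call (illustrative):
 *
 *	ipr_mask_and_clear_interrupts(ioa_cfg, ~0u);
 */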
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	ata_qc_complete(qc);
}
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					 temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
}
/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then OR the
 * appropriate format bits into the address written to the adapter.
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
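
/*
 * Illustrative note: on SIS-64 the IOARCB is aligned such that the low
 * address bits are unused, so they carry a size hint for the adapter:
 *
 *	bit 0 (0x1) - 256 byte IOARCB (the default)
 *	bit 2 (0x4) - 512 byte IOARCB, set when the ioadl64 list
 *		      exceeds 128 bytes
 */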
/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}
/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}
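
/*
 * Typical single-buffer use, as in ipr_send_hcam() below (the hostrcb is
 * one physically contiguous DMA buffer, so a single descriptor suffices):
 *
 *	ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
 *		       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
 */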
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout value
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
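
/*
 * Illustrative calling pattern (the timeout callback name here is
 * hypothetical); callers hold host_lock, which is dropped only around
 * the sleep itself:
 *
 *	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 *	... set up ipr_cmd->ioarcb ...
 *	ipr_send_blocking_cmd(ipr_cmd, my_timeout_fn, timeout);
 */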
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	if (ioa_cfg->hrrq_num == 1)
		return 0;

	return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}
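
/*
 * HRRQ 0 is reserved for internal/initialization commands (see the
 * IPR_INIT_HRRQ uses above), so the round robin only rotates through
 * queues 1..hrrq_num-1: with hrrq_num == 4, successive calls return
 * 1, 2, 3, 1, 2, 3, ...
 */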
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
		       sizeof(res->res_path));

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}
/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}
/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));
	return buffer;
}
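
/*
 * Example (illustrative values): for host number 2 and a resource path
 * of { 0x00, 0x0A, 0xff, ... }, the formatted buffer is "2/00-0A".
 */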
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			       sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *			  for the resource
 * @res:	resource entry struct
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);
	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;
	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	/* Find the resource entry for this config change, if one exists */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	/* New device notification: pull an entry off the free queue */
	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 *	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
/**
 * ipr_log_vpd_compact - Log the passed VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}
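
/*
 * Example (illustrative values): vendor "IBM", product "ST373455SS" and
 * serial "00528074" compact to a single line:
 *
 *	<prefix> VPID/SN: IBM ST373455SS 00528074
 */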
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}
/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_sis64_config_error - Log a device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err("Device %d : %s", i + 1,
			__ipr_format_res_path(dev_entry->res_path,
					      buffer, sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");
	}
}
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		/* The 18 members are split across two fixed-size arrays */
		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
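
/*
 * Example output (illustrative data): each line is a byte-offset label
 * followed by four big-endian words:
 *
 *	00000000: DEADBEEF 00000001 00000000 0000FFFF
 *	00000010: ...
 */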
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
1924 static const struct {
1927 } path_active_desc[] = {
1928 { IPR_PATH_NO_INFO, "Path" },
1929 { IPR_PATH_ACTIVE, "Active path" },
1930 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1933 static const struct {
1936 } path_state_desc[] = {
1937 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1938 { IPR_PATH_HEALTHY, "is healthy" },
1939 { IPR_PATH_DEGRADED, "is degraded" },
1940 { IPR_PATH_FAILED, "is failed" }
1944 * ipr_log_fabric_path - Log a fabric path error
1945 * @hostrcb: hostrcb struct
1946 * @fabric: fabric descriptor
1951 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1952 struct ipr_hostrcb_fabric_desc *fabric)
1955 u8 path_state = fabric->path_state;
1956 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1957 u8 state = path_state & IPR_PATH_STATE_MASK;
1959 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1960 if (path_active_desc[i].active != active)
1963 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1964 if (path_state_desc[j].state != state)
1967 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1968 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1969 path_active_desc[i].desc, path_state_desc[j].desc,
1971 } else if (fabric->cascaded_expander == 0xff) {
1972 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1973 path_active_desc[i].desc, path_state_desc[j].desc,
1974 fabric->ioa_port, fabric->phy);
1975 } else if (fabric->phy == 0xff) {
1976 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1977 path_active_desc[i].desc, path_state_desc[j].desc,
1978 fabric->ioa_port, fabric->cascaded_expander);
1980 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1981 path_active_desc[i].desc, path_state_desc[j].desc,
1982 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1988 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1989 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1993 * ipr_log64_fabric_path - Log a fabric path error
1994 * @hostrcb: hostrcb struct
1995 * @fabric: fabric descriptor
2000 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2001 struct ipr_hostrcb64_fabric_desc *fabric)
2004 u8 path_state = fabric->path_state;
2005 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2006 u8 state = path_state & IPR_PATH_STATE_MASK;
2007 char buffer[IPR_MAX_RES_PATH_LENGTH];
2009 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2010 if (path_active_desc[i].active != active)
2013 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2014 if (path_state_desc[j].state != state)
2017 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2018 path_active_desc[i].desc, path_state_desc[j].desc,
2019 ipr_format_res_path(hostrcb->ioa_cfg,
2021 buffer, sizeof(buffer)));
2026 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2027 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2028 buffer, sizeof(buffer)));
2031 static const struct {
2034 } path_type_desc[] = {
2035 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2036 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2037 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2038 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2041 static const struct {
2044 } path_status_desc[] = {
2045 { IPR_PATH_CFG_NO_PROB, "Functional" },
2046 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2047 { IPR_PATH_CFG_FAILED, "Failed" },
2048 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2049 { IPR_PATH_NOT_DETECTED, "Missing" },
2050 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2053 static const char *link_rate[] = {
2056 "phy reset problem",
2073 * ipr_log_path_elem - Log a fabric path element.
2074 * @hostrcb: hostrcb struct
2075 * @cfg: fabric path element struct
2080 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2081 struct ipr_hostrcb_config_element *cfg)
2084 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2085 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2087 if (type == IPR_PATH_CFG_NOT_EXIST)
2090 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2091 if (path_type_desc[i].type != type)
2094 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2095 if (path_status_desc[j].status != status)
2098 if (type == IPR_PATH_CFG_IOA_PORT) {
2099 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2100 path_status_desc[j].desc, path_type_desc[i].desc,
2101 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2102 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2104 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2105 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2106 path_status_desc[j].desc, path_type_desc[i].desc,
2107 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2108 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2109 } else if (cfg->cascaded_expander == 0xff) {
2110 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2111 "WWN=%08X%08X\n", path_status_desc[j].desc,
2112 path_type_desc[i].desc, cfg->phy,
2113 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2114 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2115 } else if (cfg->phy == 0xff) {
2116 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2117 "WWN=%08X%08X\n", path_status_desc[j].desc,
2118 path_type_desc[i].desc, cfg->cascaded_expander,
2119 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2120 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2122 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2123 "WWN=%08X%08X\n", path_status_desc[j].desc,
2124 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2125 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2126 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2133 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2134 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2135 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2136 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2140 * ipr_log64_path_elem - Log a fabric path element.
2141 * @hostrcb: hostrcb struct
2142 * @cfg: fabric path element struct
2147 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2148 struct ipr_hostrcb64_config_element *cfg)
2151 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2152 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2153 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2154 char buffer[IPR_MAX_RES_PATH_LENGTH];
2156 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2159 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2160 if (path_type_desc[i].type != type)
2163 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2164 if (path_status_desc[j].status != status)
2167 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2168 path_status_desc[j].desc, path_type_desc[i].desc,
2169 ipr_format_res_path(hostrcb->ioa_cfg,
2170 cfg->res_path, buffer, sizeof(buffer)),
2171 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2172 be32_to_cpu(cfg->wwid[0]),
2173 be32_to_cpu(cfg->wwid[1]));
2177 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2178 "WWN=%08X%08X\n", cfg->type_status,
2179 ipr_format_res_path(hostrcb->ioa_cfg,
2180 cfg->res_path, buffer, sizeof(buffer)),
2181 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2182 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2186 * ipr_log_fabric_error - Log a fabric error.
2187 * @ioa_cfg: ioa config struct
2188 * @hostrcb: hostrcb struct
2193 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2194 struct ipr_hostrcb *hostrcb)
2196 struct ipr_hostrcb_type_20_error *error;
2197 struct ipr_hostrcb_fabric_desc *fabric;
2198 struct ipr_hostrcb_config_element *cfg;
2201 error = &hostrcb->hcam.u.error.u.type_20_error;
2202 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2203 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2205 add_len = be32_to_cpu(hostrcb->hcam.length) -
2206 (offsetof(struct ipr_hostrcb_error, u) +
2207 offsetof(struct ipr_hostrcb_type_20_error, desc));
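/* add_len now covers the fabric descriptors plus any trailing raw data;
 * each iteration below subtracts the descriptor it consumed, and the
 * remainder is hex-dumped */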
2209 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2210 ipr_log_fabric_path(hostrcb, fabric);
2211 for_each_fabric_cfg(fabric, cfg)
2212 ipr_log_path_elem(hostrcb, cfg);
2214 add_len -= be16_to_cpu(fabric->length);
2215 fabric = (struct ipr_hostrcb_fabric_desc *)
2216 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2219 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2223 * ipr_log_sis64_array_error - Log a sis64 array error.
2224 * @ioa_cfg: ioa config struct
2225 * @hostrcb: hostrcb struct
2230 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2231 struct ipr_hostrcb *hostrcb)
2234 struct ipr_hostrcb_type_24_error *error;
2235 struct ipr_hostrcb64_array_data_entry *array_entry;
2236 char buffer[IPR_MAX_RES_PATH_LENGTH];
2237 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2239 error = &hostrcb->hcam.u.error64.u.type_24_error;
2243 ipr_err("RAID %s Array Configuration: %s\n",
2244 error->protection_level,
2245 ipr_format_res_path(ioa_cfg, error->last_res_path,
2246 buffer, sizeof(buffer)));
2250 array_entry = error->array_member;
2251 num_entries = min_t(u32, error->num_entries,
2252 ARRAY_SIZE(error->array_member));
2254 for (i = 0; i < num_entries; i++, array_entry++) {
2256 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2259 if (error->exposed_mode_adn == i)
2260 ipr_err("Exposed Array Member %d:\n", i);
2262 ipr_err("Array Member %d:\n", i);
2265 ipr_log_ext_vpd(&array_entry->vpd);
2266 ipr_err("Current Location: %s\n",
2267 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2268 buffer, sizeof(buffer)));
2269 ipr_err("Expected Location: %s\n",
2270 ipr_format_res_path(ioa_cfg,
2271 array_entry->expected_res_path,
2272 buffer, sizeof(buffer)));
2279 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2280 * @ioa_cfg: ioa config struct
2281 * @hostrcb: hostrcb struct
2286 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2287 struct ipr_hostrcb *hostrcb)
2289 struct ipr_hostrcb_type_30_error *error;
2290 struct ipr_hostrcb64_fabric_desc *fabric;
2291 struct ipr_hostrcb64_config_element *cfg;
2294 error = &hostrcb->hcam.u.error64.u.type_30_error;
2296 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2297 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2299 add_len = be32_to_cpu(hostrcb->hcam.length) -
2300 (offsetof(struct ipr_hostrcb64_error, u) +
2301 offsetof(struct ipr_hostrcb_type_30_error, desc));
2303 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2304 ipr_log64_fabric_path(hostrcb, fabric);
2305 for_each_fabric_cfg(fabric, cfg)
2306 ipr_log64_path_elem(hostrcb, cfg);
2308 add_len -= be16_to_cpu(fabric->length);
2309 fabric = (struct ipr_hostrcb64_fabric_desc *)
2310 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2313 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2317 * ipr_log_generic_error - Log an adapter error.
2318 * @ioa_cfg: ioa config struct
2319 * @hostrcb: hostrcb struct
2324 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2325 struct ipr_hostrcb *hostrcb)
2327 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2328 be32_to_cpu(hostrcb->hcam.length));
2332 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2335 * This function will return the index into the ipr_error_table
2336 * for the specified IOASC. If the IOASC is not in the table,
2337 * 0 will be returned, which points to the entry used for unknown errors.
2340 * index into the ipr_error_table
2342 static u32 ipr_get_error(u32 ioasc)
2346 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2347 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
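/*
 * Usage sketch (this mirrors ipr_handle_log_data() below): the returned
 * index is used directly, and index 0 is the catch-all entry:
 *
 *	error_index = ipr_get_error(ioasc);
 *	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 */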
2354 * ipr_handle_log_data - Log an adapter error.
2355 * @ioa_cfg: ioa config struct
2356 * @hostrcb: hostrcb struct
2358 * This function logs an adapter error to the system.
2363 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2364 struct ipr_hostrcb *hostrcb)
2369 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2372 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2373 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2376 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2378 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2380 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2381 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2382 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2383 scsi_report_bus_reset(ioa_cfg->host,
2384 hostrcb->hcam.u.error.fd_res_addr.bus);
2387 error_index = ipr_get_error(ioasc);
2389 if (!ipr_error_table[error_index].log_hcam)
2392 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2394 /* Set indication we have logged an error */
2395 ioa_cfg->errors_logged++;
2397 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2399 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2400 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
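/* The length clamp above keeps the overlay loggers below from reading
 * past the raw HCAM buffer */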
2402 switch (hostrcb->hcam.overlay_id) {
2403 case IPR_HOST_RCB_OVERLAY_ID_2:
2404 ipr_log_cache_error(ioa_cfg, hostrcb);
2406 case IPR_HOST_RCB_OVERLAY_ID_3:
2407 ipr_log_config_error(ioa_cfg, hostrcb);
2409 case IPR_HOST_RCB_OVERLAY_ID_4:
2410 case IPR_HOST_RCB_OVERLAY_ID_6:
2411 ipr_log_array_error(ioa_cfg, hostrcb);
2413 case IPR_HOST_RCB_OVERLAY_ID_7:
2414 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2416 case IPR_HOST_RCB_OVERLAY_ID_12:
2417 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2419 case IPR_HOST_RCB_OVERLAY_ID_13:
2420 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2422 case IPR_HOST_RCB_OVERLAY_ID_14:
2423 case IPR_HOST_RCB_OVERLAY_ID_16:
2424 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2426 case IPR_HOST_RCB_OVERLAY_ID_17:
2427 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2429 case IPR_HOST_RCB_OVERLAY_ID_20:
2430 ipr_log_fabric_error(ioa_cfg, hostrcb);
2432 case IPR_HOST_RCB_OVERLAY_ID_23:
2433 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2435 case IPR_HOST_RCB_OVERLAY_ID_24:
2436 case IPR_HOST_RCB_OVERLAY_ID_26:
2437 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2439 case IPR_HOST_RCB_OVERLAY_ID_30:
2440 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2442 case IPR_HOST_RCB_OVERLAY_ID_1:
2443 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2445 ipr_log_generic_error(ioa_cfg, hostrcb);
2451 * ipr_process_error - Op done function for an adapter error log.
2452 * @ipr_cmd: ipr command struct
2454 * This function is the op done function for an error log HCAM (host
2455 * controlled async message) from the adapter. It will log the error and
2456 * send the HCAM back to the adapter.
2461 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2463 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2464 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2465 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2469 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2471 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2473 list_del(&hostrcb->queue);
2474 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2477 ipr_handle_log_data(ioa_cfg, hostrcb);
2478 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2479 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2480 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2481 dev_err(&ioa_cfg->pdev->dev,
2482 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2485 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2489 * ipr_timeout - An internally generated op has timed out.
2490 * @ipr_cmd: ipr command struct
2492 * This function blocks host requests and initiates an adapter reset.
2498 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2500 unsigned long lock_flags = 0;
2501 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2504 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2506 ioa_cfg->errors_logged++;
2507 dev_err(&ioa_cfg->pdev->dev,
2508 "Adapter being reset due to command timeout.\n");
2510 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2511 ioa_cfg->sdt_state = GET_DUMP;
2513 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2514 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2516 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2521 * ipr_oper_timeout - Adapter timed out transitioning to operational
2522 * @ipr_cmd: ipr command struct
2524 * This function blocks host requests and initiates an adapter reset.
2530 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2532 unsigned long lock_flags = 0;
2533 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2536 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2538 ioa_cfg->errors_logged++;
2539 dev_err(&ioa_cfg->pdev->dev,
2540 "Adapter timed out transitioning to operational.\n");
2542 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2543 ioa_cfg->sdt_state = GET_DUMP;
2545 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2547 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2548 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2551 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2556 * ipr_find_ses_entry - Find matching SES in SES table
2557 * @res: resource entry struct of SES
2560 * pointer to SES table entry / NULL on failure
2562 static const struct ipr_ses_table_entry *
2563 ipr_find_ses_entry(struct ipr_resource_entry *res)
2566 struct ipr_std_inq_vpids *vpids;
2567 const struct ipr_ses_table_entry *ste = ipr_ses_table;
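/* An 'X' in compare_product_id_byte[] selects that product ID byte for
 * comparison against the SES table entry */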
2569 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2570 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2571 if (ste->compare_product_id_byte[j] == 'X') {
2572 vpids = &res->std_inq_data.vpids;
2573 if (vpids->product_id[j] == ste->product_id[j])
2581 if (matches == IPR_PROD_ID_LEN)
2589 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2590 * @ioa_cfg: ioa config struct
2592 * @bus_width: bus width
2595 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2596 * For a 2-byte wide SCSI bus, the maximum transfer rate in MB/sec is
2597 * twice the clock rate in MHz (e.g. for a wide enabled bus,
2598 * max 160MHz = max 320MB/sec).
2600 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2602 struct ipr_resource_entry *res;
2603 const struct ipr_ses_table_entry *ste;
2604 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2606 /* Loop through each config table entry in the config table buffer */
2607 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2608 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2611 if (bus != res->bus)
2614 if (!(ste = ipr_find_ses_entry(res)))
2617 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2620 return max_xfer_rate;
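/*
 * Worked example (illustrative numbers): if a matching SES entry sets
 * max_bus_speed_limit = 80 and the bus is 16 bits wide, the result is
 * (80 * 10) / (16 / 8) = 400, i.e. 40 MHz in the 100KHz units noted
 * above. With no matching SES entry, IPR_MAX_SCSI_RATE(bus_width) stands.
 */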
2624 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2625 * @ioa_cfg: ioa config struct
2626 * @max_delay: max delay in micro-seconds to wait
2628 * Waits for an IODEBUG ACK from the IOA by busy-waiting.
2631 * 0 on success / other on failure
2633 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2635 volatile u32 pcii_reg;
2638 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2639 while (delay < max_delay) {
2640 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2642 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2645 /* udelay cannot be used if delay is more than a few milliseconds */
2646 if ((delay / 1000) > MAX_UDELAY_MS)
2647 mdelay(delay / 1000);
2657 * ipr_get_sis64_dump_data_section - Dump IOA memory
2658 * @ioa_cfg: ioa config struct
2659 * @start_addr: adapter address to dump
2660 * @dest: destination kernel buffer
2661 * @length_in_words: length to dump in 4-byte words
2666 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2668 __be32 *dest, u32 length_in_words)
2672 for (i = 0; i < length_in_words; i++) {
2673 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2674 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2682 * ipr_get_ldump_data_section - Dump IOA memory
2683 * @ioa_cfg: ioa config struct
2684 * @start_addr: adapter address to dump
2685 * @dest: destination kernel buffer
2686 * @length_in_words: length to dump in 4-byte words
2689 * 0 on success / -EIO on failure
2691 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2693 __be32 *dest, u32 length_in_words)
2695 volatile u32 temp_pcii_reg;
2699 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2700 dest, length_in_words);
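/*
 * Legacy LDUMP handshake in summary (the steps are marked below):
 * raise RESET_ALERT + IO_DEBUG_ALERT, wait for the IO debug ack,
 * write the start address to the mailbox, then read one word per
 * ack from the mailbox until length_in_words is satisfied and the
 * IOA drops RESET_ALERT.
 */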
2702 /* Write IOA interrupt reg starting LDUMP state */
2703 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2704 ioa_cfg->regs.set_uproc_interrupt_reg32);
2706 /* Wait for IO debug acknowledge */
2707 if (ipr_wait_iodbg_ack(ioa_cfg,
2708 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2709 dev_err(&ioa_cfg->pdev->dev,
2710 "IOA dump long data transfer timeout\n");
2714 /* Signal LDUMP interlocked - clear IO debug ack */
2715 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2716 ioa_cfg->regs.clr_interrupt_reg);
2718 /* Write Mailbox with starting address */
2719 writel(start_addr, ioa_cfg->ioa_mailbox);
2721 /* Signal address valid - clear IOA Reset alert */
2722 writel(IPR_UPROCI_RESET_ALERT,
2723 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2725 for (i = 0; i < length_in_words; i++) {
2726 /* Wait for IO debug acknowledge */
2727 if (ipr_wait_iodbg_ack(ioa_cfg,
2728 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2729 dev_err(&ioa_cfg->pdev->dev,
2730 "IOA dump short data transfer timeout\n");
2734 /* Read data from mailbox and increment destination pointer */
2735 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2738 /* For all but the last word of data, signal data received */
2739 if (i < (length_in_words - 1)) {
2740 /* Signal dump data received - Clear IO debug Ack */
2741 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2742 ioa_cfg->regs.clr_interrupt_reg);
2746 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2747 writel(IPR_UPROCI_RESET_ALERT,
2748 ioa_cfg->regs.set_uproc_interrupt_reg32);
2750 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2751 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2753 /* Signal dump data received - Clear IO debug Ack */
2754 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2755 ioa_cfg->regs.clr_interrupt_reg);
2757 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2758 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2760 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2762 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2772 #ifdef CONFIG_SCSI_IPR_DUMP
2774 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2775 * @ioa_cfg: ioa config struct
2776 * @pci_address: adapter address
2777 * @length: length of data to copy
2779 * Copy data from PCI adapter to kernel buffer.
2780 * Note: length MUST be a 4-byte multiple
2782 * 0 on success / other on failure
2784 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2785 unsigned long pci_address, u32 length)
2787 int bytes_copied = 0;
2788 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2790 unsigned long lock_flags = 0;
2791 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2794 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2796 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2798 while (bytes_copied < length &&
2799 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2800 if (ioa_dump->page_offset >= PAGE_SIZE ||
2801 ioa_dump->page_offset == 0) {
2802 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2806 return bytes_copied;
2809 ioa_dump->page_offset = 0;
2810 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2811 ioa_dump->next_page_index++;
2813 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2815 rem_len = length - bytes_copied;
2816 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2817 cur_len = min(rem_len, rem_page_len);
2819 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2820 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2823 rc = ipr_get_ldump_data_section(ioa_cfg,
2824 pci_address + bytes_copied,
2825 &page[ioa_dump->page_offset / 4],
2826 (cur_len / sizeof(u32)));
2828 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2831 ioa_dump->page_offset += cur_len;
2832 bytes_copied += cur_len;
2840 return bytes_copied;
2844 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2845 * @hdr: dump entry header struct
2850 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2852 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2854 hdr->offset = sizeof(*hdr);
2855 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2859 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2860 * @ioa_cfg: ioa config struct
2861 * @driver_dump: driver dump struct
2866 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2867 struct ipr_driver_dump *driver_dump)
2869 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2871 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2872 driver_dump->ioa_type_entry.hdr.len =
2873 sizeof(struct ipr_dump_ioa_type_entry) -
2874 sizeof(struct ipr_dump_entry_header);
2875 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2876 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2877 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2878 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2879 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2880 ucode_vpd->minor_release[1];
2881 driver_dump->hdr.num_entries++;
2885 * ipr_dump_version_data - Fill in the driver version in the dump.
2886 * @ioa_cfg: ioa config struct
2887 * @driver_dump: driver dump struct
2892 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2893 struct ipr_driver_dump *driver_dump)
2895 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2896 driver_dump->version_entry.hdr.len =
2897 sizeof(struct ipr_dump_version_entry) -
2898 sizeof(struct ipr_dump_entry_header);
2899 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2900 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2901 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2902 driver_dump->hdr.num_entries++;
2906 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2907 * @ioa_cfg: ioa config struct
2908 * @driver_dump: driver dump struct
2913 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2914 struct ipr_driver_dump *driver_dump)
2916 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2917 driver_dump->trace_entry.hdr.len =
2918 sizeof(struct ipr_dump_trace_entry) -
2919 sizeof(struct ipr_dump_entry_header);
2920 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2921 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2922 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2923 driver_dump->hdr.num_entries++;
2927 * ipr_dump_location_data - Fill in the IOA location in the dump.
2928 * @ioa_cfg: ioa config struct
2929 * @driver_dump: driver dump struct
2934 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2935 struct ipr_driver_dump *driver_dump)
2937 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2938 driver_dump->location_entry.hdr.len =
2939 sizeof(struct ipr_dump_location_entry) -
2940 sizeof(struct ipr_dump_entry_header);
2941 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2942 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2943 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2944 driver_dump->hdr.num_entries++;
2948 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2949 * @ioa_cfg: ioa config struct
2950 * @dump: dump struct
2955 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2957 unsigned long start_addr, sdt_word;
2958 unsigned long lock_flags = 0;
2959 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2960 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2961 u32 num_entries, max_num_entries, start_off, end_off;
2962 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
2963 struct ipr_sdt *sdt;
2969 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2971 if (ioa_cfg->sdt_state != READ_DUMP) {
2972 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2976 if (ioa_cfg->sis64) {
2977 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2978 ssleep(IPR_DUMP_DELAY_SECONDS);
2979 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2982 start_addr = readl(ioa_cfg->ioa_mailbox);
2984 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2985 dev_err(&ioa_cfg->pdev->dev,
2986 "Invalid dump table format: %lx\n", start_addr);
2987 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2991 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2993 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2995 /* Initialize the overall dump header */
2996 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2997 driver_dump->hdr.num_entries = 1;
2998 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2999 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3000 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3001 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3003 ipr_dump_version_data(ioa_cfg, driver_dump);
3004 ipr_dump_location_data(ioa_cfg, driver_dump);
3005 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3006 ipr_dump_trace_data(ioa_cfg, driver_dump);
3008 /* Update dump_header */
3009 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3011 /* IOA Dump entry */
3012 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3013 ioa_dump->hdr.len = 0;
3014 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3015 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3017 /* The first entries in the sdt are actually a list of dump addresses
3018 and lengths used to gather the real dump data. sdt points to the
3019 IOA-generated dump table. Dump data will be extracted based
3020 on entries in this table. */
3021 sdt = &ioa_dump->sdt;
3023 if (ioa_cfg->sis64) {
3024 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3025 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3027 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3028 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3031 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3032 (max_num_entries * sizeof(struct ipr_sdt_entry));
3033 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3034 bytes_to_copy / sizeof(__be32));
3036 /* Smart Dump table is ready to use and the first entry is valid */
3037 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3038 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3039 dev_err(&ioa_cfg->pdev->dev,
3040 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3041 rc, be32_to_cpu(sdt->hdr.state));
3042 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3043 ioa_cfg->sdt_state = DUMP_OBTAINED;
3044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3048 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3050 if (num_entries > max_num_entries)
3051 num_entries = max_num_entries;
3053 /* Update dump length to the actual data to be copied */
3054 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3056 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3058 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3060 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3062 for (i = 0; i < num_entries; i++) {
3063 if (ioa_dump->hdr.len > max_dump_size) {
3064 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3068 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3069 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3071 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3073 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3074 end_off = be32_to_cpu(sdt->entry[i].end_token);
3076 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3077 bytes_to_copy = end_off - start_off;
3082 if (bytes_to_copy > max_dump_size) {
3083 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3087 /* Copy data from adapter to driver buffers */
3088 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3091 ioa_dump->hdr.len += bytes_copied;
3093 if (bytes_copied != bytes_to_copy) {
3094 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3101 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3103 /* Update dump_header */
3104 driver_dump->hdr.len += ioa_dump->hdr.len;
3106 ioa_cfg->sdt_state = DUMP_OBTAINED;
3111 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3115 * ipr_release_dump - Free adapter dump memory
3116 * @kref: kref struct
3121 static void ipr_release_dump(struct kref *kref)
3123 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3124 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3125 unsigned long lock_flags = 0;
3129 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3130 ioa_cfg->dump = NULL;
3131 ioa_cfg->sdt_state = INACTIVE;
3132 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3134 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3135 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3137 vfree(dump->ioa_dump.ioa_data);
3143 * ipr_worker_thread - Worker thread
3144 * @work: work struct embedded in the ioa config struct
3146 * Called at task level from a work thread. This function takes care
3147 * of adding and removing devices from the mid-layer as configuration
3148 * changes are detected by the adapter.
3153 static void ipr_worker_thread(struct work_struct *work)
3155 unsigned long lock_flags;
3156 struct ipr_resource_entry *res;
3157 struct scsi_device *sdev;
3158 struct ipr_dump *dump;
3159 struct ipr_ioa_cfg *ioa_cfg =
3160 container_of(work, struct ipr_ioa_cfg, work_q);
3161 u8 bus, target, lun;
3165 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3167 if (ioa_cfg->sdt_state == READ_DUMP) {
3168 dump = ioa_cfg->dump;
3170 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3173 kref_get(&dump->kref);
3174 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3175 ipr_get_ioa_dump(ioa_cfg, dump);
3176 kref_put(&dump->kref, ipr_release_dump);
3178 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3179 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3180 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3181 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3188 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3189 !ioa_cfg->allow_ml_add_del) {
3190 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3194 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3195 if (res->del_from_ml && res->sdev) {
3198 if (!scsi_device_get(sdev)) {
3199 if (!res->add_to_ml)
3200 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3202 res->del_from_ml = 0;
3203 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3204 scsi_remove_device(sdev);
3205 scsi_device_put(sdev);
3206 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3213 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3214 if (res->add_to_ml) {
3216 target = res->target;
3219 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3220 scsi_add_device(ioa_cfg->host, bus, target, lun);
3221 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3227 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3231 #ifdef CONFIG_SCSI_IPR_TRACE
3233 * ipr_read_trace - Dump the adapter trace
3234 * @filp: open sysfs file
3235 * @kobj: kobject struct
3236 * @bin_attr: bin_attribute struct
3239 * @count: buffer size
3242 * number of bytes printed to buffer
3244 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3245 struct bin_attribute *bin_attr,
3246 char *buf, loff_t off, size_t count)
3248 struct device *dev = container_of(kobj, struct device, kobj);
3249 struct Scsi_Host *shost = class_to_shost(dev);
3250 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3251 unsigned long lock_flags = 0;
3254 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3255 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3257 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3262 static struct bin_attribute ipr_trace_attr = {
3268 .read = ipr_read_trace,
3273 * ipr_show_fw_version - Show the firmware version
3274 * @dev: class device struct
3278 * number of bytes printed to buffer
3280 static ssize_t ipr_show_fw_version(struct device *dev,
3281 struct device_attribute *attr, char *buf)
3283 struct Scsi_Host *shost = class_to_shost(dev);
3284 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3285 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3286 unsigned long lock_flags = 0;
3289 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3290 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3291 ucode_vpd->major_release, ucode_vpd->card_type,
3292 ucode_vpd->minor_release[0],
3293 ucode_vpd->minor_release[1]);
3294 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3298 static struct device_attribute ipr_fw_version_attr = {
3300 .name = "fw_version",
3303 .show = ipr_show_fw_version,
3307 * ipr_show_log_level - Show the adapter's error logging level
3308 * @dev: class device struct
3312 * number of bytes printed to buffer
3314 static ssize_t ipr_show_log_level(struct device *dev,
3315 struct device_attribute *attr, char *buf)
3317 struct Scsi_Host *shost = class_to_shost(dev);
3318 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3319 unsigned long lock_flags = 0;
3322 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3323 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3324 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3329 * ipr_store_log_level - Change the adapter's error logging level
3330 * @dev: class device struct
3334 * number of bytes printed to buffer
3336 static ssize_t ipr_store_log_level(struct device *dev,
3337 struct device_attribute *attr,
3338 const char *buf, size_t count)
3340 struct Scsi_Host *shost = class_to_shost(dev);
3341 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3342 unsigned long lock_flags = 0;
3344 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3345 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3346 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3350 static struct device_attribute ipr_log_level_attr = {
3352 .name = "log_level",
3353 .mode = S_IRUGO | S_IWUSR,
3355 .show = ipr_show_log_level,
3356 .store = ipr_store_log_level
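/*
 * Illustrative sysfs usage (host number varies by system):
 *
 *	cat /sys/class/scsi_host/host0/log_level
 *	echo 4 > /sys/class/scsi_host/host0/log_level
 */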
3360 * ipr_store_diagnostics - IOA Diagnostics interface
3361 * @dev: device struct
3363 * @count: buffer size
3365 * This function will reset the adapter and wait a reasonable
3366 * amount of time for any errors that the adapter might log.
3369 * count on success / other on failure
3371 static ssize_t ipr_store_diagnostics(struct device *dev,
3372 struct device_attribute *attr,
3373 const char *buf, size_t count)
3375 struct Scsi_Host *shost = class_to_shost(dev);
3376 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3377 unsigned long lock_flags = 0;
3380 if (!capable(CAP_SYS_ADMIN))
3383 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3384 while (ioa_cfg->in_reset_reload) {
3385 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3386 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3387 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3390 ioa_cfg->errors_logged = 0;
3391 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3393 if (ioa_cfg->in_reset_reload) {
3394 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3395 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3397 /* Wait for a second for any errors to be logged */
3400 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3404 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3405 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3407 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3412 static struct device_attribute ipr_diagnostics_attr = {
3414 .name = "run_diagnostics",
3417 .store = ipr_store_diagnostics
3421 * ipr_show_adapter_state - Show the adapter's state
3422 * @dev: device struct
3426 * number of bytes printed to buffer
3428 static ssize_t ipr_show_adapter_state(struct device *dev,
3429 struct device_attribute *attr, char *buf)
3431 struct Scsi_Host *shost = class_to_shost(dev);
3432 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3433 unsigned long lock_flags = 0;
3436 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3437 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3438 len = snprintf(buf, PAGE_SIZE, "offline\n");
3440 len = snprintf(buf, PAGE_SIZE, "online\n");
3441 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3446 * ipr_store_adapter_state - Change adapter state
3447 * @dev: device struct
3449 * @count: buffer size
3451 * This function will change the adapter's state.
3454 * count on success / other on failure
3456 static ssize_t ipr_store_adapter_state(struct device *dev,
3457 struct device_attribute *attr,
3458 const char *buf, size_t count)
3460 struct Scsi_Host *shost = class_to_shost(dev);
3461 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3462 unsigned long lock_flags;
3463 int result = count, i;
3465 if (!capable(CAP_SYS_ADMIN))
3468 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3469 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3470 !strncmp(buf, "online", 6)) {
3471 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3472 spin_lock(&ioa_cfg->hrrq[i]._lock);
3473 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3474 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3477 ioa_cfg->reset_retries = 0;
3478 ioa_cfg->in_ioa_bringdown = 0;
3479 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3481 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3482 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3487 static struct device_attribute ipr_ioa_state_attr = {
3489 .name = "online_state",
3490 .mode = S_IRUGO | S_IWUSR,
3492 .show = ipr_show_adapter_state,
3493 .store = ipr_store_adapter_state
3497 * ipr_store_reset_adapter - Reset the adapter
3498 * @dev: device struct
3500 * @count: buffer size
3502 * This function will reset the adapter.
3505 * count on success / other on failure
3507 static ssize_t ipr_store_reset_adapter(struct device *dev,
3508 struct device_attribute *attr,
3509 const char *buf, size_t count)
3511 struct Scsi_Host *shost = class_to_shost(dev);
3512 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3513 unsigned long lock_flags;
3516 if (!capable(CAP_SYS_ADMIN))
3519 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3520 if (!ioa_cfg->in_reset_reload)
3521 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3522 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3523 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3528 static struct device_attribute ipr_ioa_reset_attr = {
3530 .name = "reset_host",
3533 .store = ipr_store_reset_adapter
3536 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3538 * ipr_show_iopoll_weight - Show ipr polling mode
3539 * @dev: class device struct
3543 * number of bytes printed to buffer
3545 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3546 struct device_attribute *attr, char *buf)
3548 struct Scsi_Host *shost = class_to_shost(dev);
3549 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3550 unsigned long lock_flags = 0;
3553 spin_lock_irqsave(shost->host_lock, lock_flags);
3554 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3555 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3561 * ipr_store_iopoll_weight - Change the adapter's polling mode
3562 * @dev: class device struct
3566 * number of bytes printed to buffer
3568 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3569 struct device_attribute *attr,
3570 const char *buf, size_t count)
3572 struct Scsi_Host *shost = class_to_shost(dev);
3573 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3574 unsigned long user_iopoll_weight;
3575 unsigned long lock_flags = 0;
3578 if (!ioa_cfg->sis64) {
3579 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3582 if (kstrtoul(buf, 10, &user_iopoll_weight))
3585 if (user_iopoll_weight > 256) {
3586 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
3590 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3591 dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
3595 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3596 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3597 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3598 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3601 spin_lock_irqsave(shost->host_lock, lock_flags);
3602 ioa_cfg->iopoll_weight = user_iopoll_weight;
3603 if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3604 ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3605 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3606 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3607 ioa_cfg->iopoll_weight, ipr_iopoll);
3608 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3611 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3616 static struct device_attribute ipr_iopoll_weight_attr = {
3618 .name = "iopoll_weight",
3619 .mode = S_IRUGO | S_IWUSR,
3621 .show = ipr_show_iopoll_weight,
3622 .store = ipr_store_iopoll_weight
3626 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3627 * @buf_len: buffer length
3629 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3630 * list to use for microcode download
3633 * pointer to sglist / NULL on failure
3635 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3637 int sg_size, order, bsize_elem, num_elem, i, j;
3638 struct ipr_sglist *sglist;
3639 struct scatterlist *scatterlist;
3642 /* Get the minimum size per scatter/gather element */
3643 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3645 /* Get the actual size per element */
3646 order = get_order(sg_size);
3648 /* Determine the actual number of bytes per element */
3649 bsize_elem = PAGE_SIZE * (1 << order);
3651 /* Determine the actual number of sg entries needed */
3652 if (buf_len % bsize_elem)
3653 num_elem = (buf_len / bsize_elem) + 1;
3655 num_elem = buf_len / bsize_elem;
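/*
 * Sizing example (illustrative, assuming IPR_MAX_SGLIST were 64 and
 * 4K pages): a 4 MB image gives sg_size of roughly 66 KB, which
 * get_order() rounds up to order 5, so bsize_elem = 128 KB and
 * num_elem = 32.
 */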
3657 /* Allocate a scatter/gather list for the DMA */
3658 sglist = kzalloc(sizeof(struct ipr_sglist) +
3659 (sizeof(struct scatterlist) * (num_elem - 1)),
3662 if (sglist == NULL) {
3667 scatterlist = sglist->scatterlist;
3668 sg_init_table(scatterlist, num_elem);
3670 sglist->order = order;
3671 sglist->num_sg = num_elem;
3673 /* Allocate a bunch of sg elements */
3674 for (i = 0; i < num_elem; i++) {
3675 page = alloc_pages(GFP_KERNEL, order);
3679 /* Free up what we already allocated */
3680 for (j = i - 1; j >= 0; j--)
3681 __free_pages(sg_page(&scatterlist[j]), order);
3686 sg_set_page(&scatterlist[i], page, 0, 0);
3693 * ipr_free_ucode_buffer - Frees a microcode download buffer
3694 * @sglist: scatter/gather list pointer
3696 * Free a DMA'able ucode download buffer previously allocated with
3697 * ipr_alloc_ucode_buffer
3702 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3706 for (i = 0; i < sglist->num_sg; i++)
3707 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3713 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3714 * @sglist: scatter/gather list pointer
3715 * @buffer: buffer pointer
3716 * @len: buffer length
3718 * Copy a microcode image from a user buffer into a buffer allocated by
3719 * ipr_alloc_ucode_buffer
3722 * 0 on success / other on failure
3724 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3725 u8 *buffer, u32 len)
3727 int bsize_elem, i, result = 0;
3728 struct scatterlist *scatterlist;
3731 /* Determine the actual number of bytes per element */
3732 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3734 scatterlist = sglist->scatterlist;
3736 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3737 struct page *page = sg_page(&scatterlist[i]);
3740 memcpy(kaddr, buffer, bsize_elem);
3743 scatterlist[i].length = bsize_elem;
3751 if (len % bsize_elem) {
3752 struct page *page = sg_page(&scatterlist[i]);
3755 memcpy(kaddr, buffer, len % bsize_elem);
3758 scatterlist[i].length = len % bsize_elem;
3761 sglist->buffer_len = len;
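/*
 * Typical lifecycle (see ipr_store_update_fw() below): allocate with
 * ipr_alloc_ucode_buffer(), fill with ipr_copy_ucode_buffer(), hand the
 * sglist to ipr_update_ioa_ucode(), then release it with
 * ipr_free_ucode_buffer().
 */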
3766 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3767 * @ipr_cmd: ipr command struct
3768 * @sglist: scatter/gather list
3770 * Builds a microcode download IOA data list (IOADL).
3773 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3774 struct ipr_sglist *sglist)
3776 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3777 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3778 struct scatterlist *scatterlist = sglist->scatterlist;
3781 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3782 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3783 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3786 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3787 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3788 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3789 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3790 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3793 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
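/* The final descriptor carries IPR_IOADL_FLAGS_LAST to mark the end
 * of the list for the IOA */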
3797 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3798 * @ipr_cmd: ipr command struct
3799 * @sglist: scatter/gather list
3801 * Builds a microcode download IOA data list (IOADL).
3804 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3805 struct ipr_sglist *sglist)
3807 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3808 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3809 struct scatterlist *scatterlist = sglist->scatterlist;
3812 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3813 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3814 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3817 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3819 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3820 ioadl[i].flags_and_data_len =
3821 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3823 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3826 ioadl[i-1].flags_and_data_len |=
3827 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3831 * ipr_update_ioa_ucode - Update IOA's microcode
3832 * @ioa_cfg: ioa config struct
3833 * @sglist: scatter/gather list
3835 * Initiate an adapter reset to update the IOA's microcode
3838 * 0 on success / -EIO on failure
3840 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3841 struct ipr_sglist *sglist)
3843 unsigned long lock_flags;
3845 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3846 while (ioa_cfg->in_reset_reload) {
3847 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3848 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3849 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3852 if (ioa_cfg->ucode_sglist) {
3853 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3854 dev_err(&ioa_cfg->pdev->dev,
3855 "Microcode download already in progress\n");
3859 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3860 sglist->num_sg, DMA_TO_DEVICE);
3862 if (!sglist->num_dma_sg) {
3863 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3864 dev_err(&ioa_cfg->pdev->dev,
3865 "Failed to map microcode download buffer!\n");
3869 ioa_cfg->ucode_sglist = sglist;
3870 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3871 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3872 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3874 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3875 ioa_cfg->ucode_sglist = NULL;
3876 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3881 * ipr_store_update_fw - Update the firmware on the adapter
3882 * @dev: device struct
3884 * @count: buffer size
3886 * This function will update the firmware on the adapter.
3889 * count on success / other on failure
3891 static ssize_t ipr_store_update_fw(struct device *dev,
3892 struct device_attribute *attr,
3893 const char *buf, size_t count)
3895 struct Scsi_Host *shost = class_to_shost(dev);
3896 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3897 struct ipr_ucode_image_header *image_hdr;
3898 const struct firmware *fw_entry;
3899 struct ipr_sglist *sglist;
3902 int len, result, dnld_size;
3904 if (!capable(CAP_SYS_ADMIN))
3907 len = snprintf(fname, 99, "%s", buf);
3908 if (len > 0 && len < 99)
3909 fname[len-1] = '\0';
3910 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3911 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3915 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3917 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3918 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3919 sglist = ipr_alloc_ucode_buffer(dnld_size);
3922 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3923 release_firmware(fw_entry);
3927 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3930 dev_err(&ioa_cfg->pdev->dev,
3931 "Microcode buffer copy to DMA buffer failed\n");
3935 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
3937 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3942 ipr_free_ucode_buffer(sglist);
3943 release_firmware(fw_entry);
3947 static struct device_attribute ipr_update_fw_attr = {
3949 .name = "update_fw",
3952 .store = ipr_store_update_fw
3956 * ipr_show_fw_type - Show the adapter's firmware type.
3957 * @dev: class device struct
3961 * number of bytes printed to buffer
3963 static ssize_t ipr_show_fw_type(struct device *dev,
3964 struct device_attribute *attr, char *buf)
3966 struct Scsi_Host *shost = class_to_shost(dev);
3967 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3968 unsigned long lock_flags = 0;
3971 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3972 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3973 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3977 static struct device_attribute ipr_ioa_fw_type_attr = {
3982 .show = ipr_show_fw_type
3985 static struct device_attribute *ipr_ioa_attrs[] = {
3986 &ipr_fw_version_attr,
3987 &ipr_log_level_attr,
3988 &ipr_diagnostics_attr,
3989 &ipr_ioa_state_attr,
3990 &ipr_ioa_reset_attr,
3991 &ipr_update_fw_attr,
3992 &ipr_ioa_fw_type_attr,
3993 &ipr_iopoll_weight_attr,
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
	else
		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	LEAVE;
	return 0;
}
/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes consumed on success / other on failure
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
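/*
 * Usage sketch (host number illustrative): writing '1' arms dump
 * collection (ipr_alloc_dump() sets WAIT_FOR_DUMP), the dump can be read
 * back once the adapter has produced it (sdt_state == DUMP_OBTAINED),
 * and writing '0' releases the buffers.
 *
 *	# echo 1 > /sys/class/scsi_host/host2/dump
 *	# cat /sys/class/scsi_host/host2/dump > /tmp/ioa.dump
 *	# echo 0 > /sys/class/scsi_host/host2/dump
 */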
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
#endif
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 * @reason:	calling context
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
				  int reason)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
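/*
 * The SCSI midlayer exposes this handler through the per-device sysfs
 * queue_depth attribute, so the depth can be tuned at runtime (device
 * address illustrative). For SATA (GATA) resources the value is clamped
 * to IPR_MAX_CMD_PER_ATA_LUN by the code above.
 *
 *	# echo 16 > /sys/bus/scsi/devices/2:0:1:0/queue_depth
 */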
/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 *	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;
	} else
		tag_type = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = 	"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};
/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       __ipr_format_res_path(res->res_path, buffer,
						     sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name = 	"resource_path",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_path
};
/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name =		"device_id",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_device_id
};
/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name =		"resource_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_type
};
static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	NULL,
};
/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
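/*
 * Worked example (capacity value illustrative): with 128 heads and 32
 * sectors per track, one "cylinder" spans 128 * 32 = 4096 sectors, i.e.
 * 2 MB with 512-byte sectors, so any partition laid out on a cylinder
 * boundary starts on a multiple of 4096 sectors and is 4k aligned as
 * intended. For a 71687372-sector disk:
 *
 *	cylinders = 71687372 / 4096 = 17501	(integer division)
 */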
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {
			return res;
		}
	}

	return NULL;
}
static struct ata_port_info sata_port_info;

/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (!ipr_find_starget(starget)) {
			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->array_ids);
			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->vset_ids);
			else if (starget->channel == 0)
				clear_bit(starget->id, ioa_cfg->target_ids);
		}
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}
/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}
/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		} else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);

		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	ENTER;
	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port) {
		rc = ata_sas_port_init(sata_port->ap);
		if (rc == 0)
			rc = ata_sas_sync_probe(sata_port->ap);
	}

	if (rc)
		ipr_slave_destroy(sdev);

	LEAVE;
	return rc;
}
/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;
	int rc = SUCCESS;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter being reset as a result of error recovery.\n");

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If a host reset arrived while we were already resetting the
	 * adapter for some reason and that reset failed, fail this one too.
	 */
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_trace;
		rc = FAILED;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}
/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	LEAVE;
	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}
/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 * @deadline:	deadline jiffies for the operation
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}
/**
 * __ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				if (ipr_cmd->scsi_cmd)
					ipr_cmd->done = ipr_scsi_eh_done;
				if (ipr_cmd->qc)
					ipr_cmd->done = ipr_sata_eh_done;
				if (ipr_cmd->qc &&
				    !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
				}
			}
		}
		spin_unlock(&hrrq->_lock);
	}
	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		for_each_hrrq(hrrq, ioa_cfg) {
			spin_lock(&hrrq->_lock);
			list_for_each_entry(ipr_cmd,
					    &hrrq->hrrq_pending_q, queue) {
				if (ipr_cmd->ioarcb.res_handle ==
				    res->res_handle) {
					rc = -EIO;
					break;
				}
			}
			spin_unlock(&hrrq->_lock);
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return rc ? FAILED : SUCCESS;
}
static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	LEAVE;
}
/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc, int_reg;
	int op_found = 0;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload ||
	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;
	if (!res)
		return FAILED;

	/*
	 * If we are aborting a timed out op, chances are that the timeout was caused
	 * by a still not detected EEH error. In such cases, reading a register will
	 * trigger the EEH recovery infrastructure.
	 */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (!ipr_is_gscsi(res))
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->scsi_cmd == scsi_cmd) {
				ipr_cmd->done = ipr_scsi_eh_done;
				op_found = 1;
				break;
			}
		}
		spin_unlock(&hrrq->_lock);
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}
/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;

	ENTER;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	LEAVE;
	return rc;
}
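/*
 * Escalation sketch: for a timed-out command, the SCSI error handler
 * tries the entry points this driver registers in order, stopping at the
 * first one that returns SUCCESS:
 *
 *	ipr_eh_abort()     -> ipr_cancel_op()        cancel the single op
 *	ipr_eh_dev_reset() -> __ipr_eh_dev_reset()   LUN/device reset
 *	ipr_eh_host_reset()                          full adapter reset
 */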
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;
	u32 int_mask_reg;

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg &= ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
		if (ioa_cfg->clear_isr) {
			if (ipr_debug && printk_ratelimit())
				dev_err(&ioa_cfg->pdev->dev,
					"Spurious interrupt detected. 0x%08X\n", int_reg);
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			return IRQ_NONE;
		}
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
			dev_err(&ioa_cfg->pdev->dev,
				"No Host RRQ. 0x%08X\n", int_reg);
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}
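/*
 * Decision order implemented above, summarized:
 *
 *	1. no operational bits set  -> SIS64 IPL stage change, else IRQ_NONE
 *	2. IPR_PCII_IOA_TRANS_TO_OPER -> resume the pending reset job
 *	3. only IPR_PCII_HRRQ_UPDATED -> spurious; clear it, return IRQ_NONE
 *	4. anything else (unit check, no host RRQ, permanent failure)
 *	   -> log it, optionally request a dump, and reset the IOA
 */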
/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 * @number:	various meanings depending on the caller/message
 *
 * Return value:
 *	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}
static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
			    struct list_head *doneq)
{
	u32 ioasc;
	u16 cmd_index;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
	int num_hrrq = 0;

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrr_queue->allow_interrupts)
		return 0;

	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       hrr_queue->toggle_bit) {

		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
			     cmd_index < hrr_queue->min_cmd_id)) {
			ipr_isr_eh(ioa_cfg,
				"Invalid response handle from IOA: ",
				cmd_index);
			break;
		}

		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

		list_move_tail(&ipr_cmd->queue, doneq);

		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
			hrr_queue->hrrq_curr++;
		} else {
			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
			hrr_queue->toggle_bit ^= 1u;
		}
		num_hrrq++;
		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	return num_hrrq;
}
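/*
 * A minimal sketch of the toggle-bit protocol consumed above: the IOA
 * writes response entries into the circular host RRQ, and the toggle bit
 * carried in each entry flips every time the queue wraps. An entry is
 * valid for the current lap only while its toggle bit matches
 * hrr_queue->toggle_bit, so an empty-queue test looks like:
 *
 *	u32 entry = be32_to_cpu(*hrr_queue->hrrq_curr);
 *	if ((entry & IPR_HRRQ_TOGGLE_BIT) != hrr_queue->toggle_bit)
 *		return 0;	/ * stale entry from the previous lap * /
 */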
static int ipr_iopoll(struct blk_iopoll *iop, int budget)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_hrr_queue *hrrq;
	struct ipr_cmnd *ipr_cmd, *temp;
	unsigned long hrrq_flags;
	int completed_ops;
	LIST_HEAD(doneq);

	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
	ioa_cfg = hrrq->ioa_cfg;

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);

	if (completed_ops < budget)
		blk_iopoll_complete(iop);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}

	return completed_ops;
}
/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	u32 int_reg = 0;
	int num_hrrq = 0;
	int irq_none = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	while (1) {
		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
			rc = IRQ_HANDLED;

			if (!ioa_cfg->clear_isr)
				break;

			/* Clear the PCI interrupt */
			num_hrrq = 0;
			do {
				writel(IPR_PCII_HRRQ_UPDATED,
				       ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

		} else if (rc == IRQ_NONE && irq_none == 0) {
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			irq_none++;
		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
			   int_reg & IPR_PCII_HRRQ_UPDATED) {
			ipr_isr_eh(ioa_cfg,
				"Error clearing HRRQ: ", num_hrrq);
			rc = IRQ_HANDLED;
			break;
		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}
/**
 * ipr_isr_mhrrq - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
	    ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		    hrrq->toggle_bit) {
			if (!blk_iopoll_sched_prep(&hrrq->iopoll))
				blk_iopoll_sched(&hrrq->iopoll);
			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
			return IRQ_HANDLED;
		}
	} else {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		    hrrq->toggle_bit)

			if (ipr_process_hrrq(hrrq, -1, &doneq))
				rc = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}
/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		if (printk_ratelimit())
			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	ioarcb->data_transfer_length = cpu_to_be32(length);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
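/*
 * Each IOADL64 element built above describes one mapped DMA segment. A
 * two-segment read, sketched with made-up lengths and bus addresses,
 * would come out as:
 *
 *	ioadl64[0]: flags = READ,        data_len = 0x1000, address = 0x80001000
 *	ioadl64[1]: flags = READ | LAST, data_len = 0x0200, address = 0x80003000
 *
 * (Values illustrative; the real fields are big-endian and use the
 * IPR_IOADL_FLAGS_* constants, with LAST or'ed into the final element.)
 */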
/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
					   offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
/**
 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	task attributes
 **/
static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
{
	u8 tag[2];
	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;

	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
		switch (tag[0]) {
		case MSG_SIMPLE_TAG:
			rc = IPR_FLAGS_LO_SIMPLE_TASK;
			break;
		case MSG_HEAD_TAG:
			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
			break;
		case MSG_ORDERED_TAG:
			rc = IPR_FLAGS_LO_ORDERED_TASK;
			break;
		}
	}

	return rc;
}
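/*
 * Mapping at a glance, assuming the SPI tag message codes from
 * <scsi/scsi_tcq.h>:
 *
 *	MSG_SIMPLE_TAG  -> IPR_FLAGS_LO_SIMPLE_TASK
 *	MSG_HEAD_TAG    -> IPR_FLAGS_LO_HEAD_OF_Q_TASK
 *	MSG_ORDERED_TAG -> IPR_FLAGS_LO_ORDERED_TASK
 *	(no tag)        -> IPR_FLAGS_LO_UNTAGGED_TASK
 */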
/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}
}
/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}
/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 *	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->hdr.ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
		data_len = sizeof(struct ipr_ioasa64);
	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
		data_len = sizeof(struct ipr_ioasa);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
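/*
 * Byte-layout reminder for the fixed-format (0x70) path above, using an
 * illustrative medium error at LBA 0x1234 on a DASD resource:
 *
 *	sense_buf[0]    = 0xF0		response code 0x70 with Valid bit
 *	sense_buf[2]    = 0x03		sense key (MEDIUM ERROR)
 *	sense_buf[3..6] = 00 00 12 34	failing LBA, big-endian
 *	sense_buf[7]    = 6		additional sense length
 *	sense_buf[12]   = ASC, sense_buf[13] = ASCQ, from the IOASC
 *
 * The 0x72 path instead emits descriptor-format sense with the 64-bit
 * failing LBA in an information descriptor (bytes 12..19).
 */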
/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			     SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			     SCSI_SENSE_BUFFERSIZE));
	return 1;
}
/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	unsigned long hrrq_flags;

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(scsi_cmd);

		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		scsi_cmd->scsi_done(scsi_cmd);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
	} else {
		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
		ipr_erp_start(ioa_cfg, ipr_cmd);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
	}
}
/**
 * ipr_queuecommand - Queue a mid-layer request
 * @shost:	scsi host struct
 * @scsi_cmd:	scsi command struct
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct Scsi_Host *shost,
			    struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	unsigned long hrrq_flags, lock_flags;
	int rc;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	scsi_cmd->result = (DID_OK << 16);
	res = scsi_cmd->device->hostdata;

	if (ipr_is_gata(res) && res->sata_port) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return rc;
	}

	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
	hrrq = &ioa_cfg->hrrq[hrrq_id];

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		goto err_nodev;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
	if (ipr_cmd == NULL) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
	ioarcb = &ipr_cmd->ioarcb;

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ipr_cmd->done = ipr_scsi_eh_done;

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		if (ipr_is_gscsi(res))
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	}

	if (ioa_cfg->sis64)
		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
	else
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		if (!rc)
			scsi_dma_unmap(scsi_cmd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (unlikely(hrrq->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		scsi_dma_unmap(scsi_cmd);
		goto err_nodev;
	}

	ioarcb->res_handle = res->res_handle;
	if (res->needs_sync_complete) {
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
		res->needs_sync_complete = 0;
	}
	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
	ipr_send_command(ipr_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;

err_nodev:
	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_cmd->result = (DID_NO_CONNECT << 16);
	scsi_cmd->scsi_done(scsi_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;
}
/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}
/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 *	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
};
6274 * ipr_ata_phy_reset - libata phy_reset handler
6275 * @ap: ata port to reset
6278 static void ipr_ata_phy_reset(struct ata_port *ap)
6280 unsigned long flags;
6281 struct ipr_sata_port *sata_port = ap->private_data;
6282 struct ipr_resource_entry *res = sata_port->res;
6283 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6287 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6288 while (ioa_cfg->in_reset_reload) {
6289 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6290 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6291 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6294 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6295 goto out_unlock;
6297 rc = ipr_device_reset(ioa_cfg, res);
6299 if (rc) {
6300 ap->link.device[0].class = ATA_DEV_NONE;
6301 goto out_unlock;
6304 ap->link.device[0].class = res->ata_class;
6305 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6306 ap->link.device[0].class = ATA_DEV_NONE;
6308 out_unlock:
6309 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6314 * ipr_ata_post_internal - Cleanup after an internal command
6315 * @qc: ATA queued command
6320 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6322 struct ipr_sata_port *sata_port = qc->ap->private_data;
6323 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6324 struct ipr_cmnd *ipr_cmd;
6325 struct ipr_hrr_queue *hrrq;
6326 unsigned long flags;
6328 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6329 while (ioa_cfg->in_reset_reload) {
6330 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6331 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6332 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6335 for_each_hrrq(hrrq, ioa_cfg) {
6336 spin_lock(&hrrq->_lock);
6337 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6338 if (ipr_cmd->qc == qc) {
6339 ipr_device_reset(ioa_cfg, sata_port->res);
6340 break;
6343 spin_unlock(&hrrq->_lock);
6345 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6349 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6350 * @regs: destination
6351 * @tf: source ATA taskfile
6356 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6357 struct ata_taskfile *tf)
6359 regs->feature = tf->feature;
6360 regs->nsect = tf->nsect;
6361 regs->lbal = tf->lbal;
6362 regs->lbam = tf->lbam;
6363 regs->lbah = tf->lbah;
6364 regs->device = tf->device;
6365 regs->command = tf->command;
6366 regs->hob_feature = tf->hob_feature;
6367 regs->hob_nsect = tf->hob_nsect;
6368 regs->hob_lbal = tf->hob_lbal;
6369 regs->hob_lbam = tf->hob_lbam;
6370 regs->hob_lbah = tf->hob_lbah;
6371 regs->ctl = tf->ctl;
6375 * ipr_sata_done - done function for SATA commands
6376 * @ipr_cmd: ipr command struct
6378 * This function is invoked by the interrupt handler for
6379 * ops generated by the SCSI mid-layer to SATA devices
6384 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6386 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6387 struct ata_queued_cmd *qc = ipr_cmd->qc;
6388 struct ipr_sata_port *sata_port = qc->ap->private_data;
6389 struct ipr_resource_entry *res = sata_port->res;
6390 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6392 spin_lock(&ipr_cmd->hrrq->_lock);
6393 if (ipr_cmd->ioa_cfg->sis64)
6394 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6395 sizeof(struct ipr_ioasa_gata));
6396 else
6397 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6398 sizeof(struct ipr_ioasa_gata));
6399 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6401 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6402 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6404 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6405 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6406 else
6407 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6408 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6409 spin_unlock(&ipr_cmd->hrrq->_lock);
6410 ata_qc_complete(qc);
6414 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6415 * @ipr_cmd: ipr command struct
6416 * @qc: ATA queued command
6419 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6420 struct ata_queued_cmd *qc)
6422 u32 ioadl_flags = 0;
6423 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6424 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6425 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6426 int len = qc->nbytes;
6427 struct scatterlist *sg;
6428 unsigned int si;
6429 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6434 if (qc->dma_dir == DMA_TO_DEVICE) {
6435 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6436 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6437 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6438 ioadl_flags = IPR_IOADL_FLAGS_READ;
6440 ioarcb->data_transfer_length = cpu_to_be32(len);
6441 ioarcb->ioadl_len =
6442 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6443 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6444 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6446 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6447 ioadl64->flags = cpu_to_be32(ioadl_flags);
6448 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6449 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6451 last_ioadl64 = ioadl64;
6452 ioadl64++;
6455 if (likely(last_ioadl64))
6456 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
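/*
 * Illustrative sketch (not driver code): for a hypothetical two-segment
 * DMA write, the loop above yields an IOADL64 in which only the final
 * descriptor carries IPR_IOADL_FLAGS_LAST:
 *
 *	ioadl64[0] = { .flags = WRITE,        .data_len = 0x1000, .address = seg0 }
 *	ioadl64[1] = { .flags = WRITE | LAST, .data_len = 0x0200, .address = seg1 }
 *
 * All fields are byte-swapped (cpu_to_be32/cpu_to_be64) because the
 * adapter parses the list directly out of host memory in big-endian format.
 */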
6460 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6461 * @ipr_cmd: ipr command struct
6462 * @qc: ATA queued command
6465 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6466 struct ata_queued_cmd *qc)
6468 u32 ioadl_flags = 0;
6469 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6470 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6471 struct ipr_ioadl_desc *last_ioadl = NULL;
6472 int len = qc->nbytes;
6473 struct scatterlist *sg;
6474 unsigned int si;
6479 if (qc->dma_dir == DMA_TO_DEVICE) {
6480 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6481 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6482 ioarcb->data_transfer_length = cpu_to_be32(len);
6483 ioarcb->ioadl_len =
6484 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6485 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6486 ioadl_flags = IPR_IOADL_FLAGS_READ;
6487 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6488 ioarcb->read_ioadl_len =
6489 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6492 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6493 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6494 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6496 last_ioadl = ioadl;
6497 ioadl++;
6500 if (likely(last_ioadl))
6501 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
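/*
 * Note (illustrative sketch): unlike the 64-bit form above, the 32-bit
 * IOADL packs the flags and segment length into a single big-endian word,
 * e.g. for a hypothetical 4KB read segment:
 *
 *	ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_READ | 0x1000);
 *	ioadl->address = cpu_to_be32(sg_dma_address(sg));
 */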
6505 * ipr_qc_defer - Get a free ipr_cmd
6506 * @qc: queued command
6511 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6513 struct ata_port *ap = qc->ap;
6514 struct ipr_sata_port *sata_port = ap->private_data;
6515 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6516 struct ipr_cmnd *ipr_cmd;
6517 struct ipr_hrr_queue *hrrq;
6518 int hrrq_id;
6520 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6521 hrrq = &ioa_cfg->hrrq[hrrq_id];
6523 qc->lldd_task = NULL;
6524 spin_lock(&hrrq->_lock);
6525 if (unlikely(hrrq->ioa_is_dead)) {
6526 spin_unlock(&hrrq->_lock);
6527 return 0;
6530 if (unlikely(!hrrq->allow_cmds)) {
6531 spin_unlock(&hrrq->_lock);
6532 return ATA_DEFER_LINK;
6535 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6536 if (ipr_cmd == NULL) {
6537 spin_unlock(&hrrq->_lock);
6538 return ATA_DEFER_LINK;
6541 qc->lldd_task = ipr_cmd;
6542 spin_unlock(&hrrq->_lock);
6543 return 0;
6547 * ipr_qc_issue - Issue a SATA qc to a device
6548 * @qc: queued command
6553 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6555 struct ata_port *ap = qc->ap;
6556 struct ipr_sata_port *sata_port = ap->private_data;
6557 struct ipr_resource_entry *res = sata_port->res;
6558 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6559 struct ipr_cmnd *ipr_cmd;
6560 struct ipr_ioarcb *ioarcb;
6561 struct ipr_ioarcb_ata_regs *regs;
6563 if (qc->lldd_task == NULL)
6564 ipr_qc_defer(qc);
6566 ipr_cmd = qc->lldd_task;
6567 if (ipr_cmd == NULL)
6568 return AC_ERR_SYSTEM;
6570 qc->lldd_task = NULL;
6571 spin_lock(&ipr_cmd->hrrq->_lock);
6572 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6573 ipr_cmd->hrrq->ioa_is_dead)) {
6574 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6575 spin_unlock(&ipr_cmd->hrrq->_lock);
6576 return AC_ERR_SYSTEM;
6579 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6580 ioarcb = &ipr_cmd->ioarcb;
6582 if (ioa_cfg->sis64) {
6583 regs = &ipr_cmd->i.ata_ioadl.regs;
6584 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6585 } else
6586 regs = &ioarcb->u.add_data.u.regs;
6588 memset(regs, 0, sizeof(*regs));
6589 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6591 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6593 ipr_cmd->done = ipr_sata_done;
6594 ipr_cmd->ioarcb.res_handle = res->res_handle;
6595 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6596 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6597 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6598 ipr_cmd->dma_use_sg = qc->n_elem;
6600 if (ioa_cfg->sis64)
6601 ipr_build_ata_ioadl64(ipr_cmd, qc);
6602 else
6603 ipr_build_ata_ioadl(ipr_cmd, qc);
6605 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6606 ipr_copy_sata_tf(regs, &qc->tf);
6607 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6608 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6610 switch (qc->tf.protocol) {
6611 case ATA_PROT_NODATA:
6612 case ATA_PROT_PIO:
6613 break;
6615 case ATA_PROT_DMA:
6616 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6617 break;
6619 case ATAPI_PROT_PIO:
6620 case ATAPI_PROT_NODATA:
6621 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6622 break;
6624 case ATAPI_PROT_DMA:
6625 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6626 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6627 break;
6629 default:
6630 WARN_ON(1);
6631 spin_unlock(&ipr_cmd->hrrq->_lock);
6632 return AC_ERR_INVALID;
6635 ipr_send_command(ipr_cmd);
6636 spin_unlock(&ipr_cmd->hrrq->_lock);
6638 return 0;
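/*
 * Summary of the qc->tf.protocol dispatch above (illustrative):
 *
 *	ATA_PROT_NODATA / ATA_PROT_PIO     -> no extra flags
 *	ATA_PROT_DMA                       -> XFER_TYPE_DMA
 *	ATAPI_PROT_PIO / ATAPI_PROT_NODATA -> PACKET_CMD
 *	ATAPI_PROT_DMA                     -> PACKET_CMD | XFER_TYPE_DMA
 *	anything else                      -> fail the qc with AC_ERR_INVALID
 */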
6642 * ipr_qc_fill_rtf - Read result TF
6643 * @qc: ATA queued command
6648 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6650 struct ipr_sata_port *sata_port = qc->ap->private_data;
6651 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6652 struct ata_taskfile *tf = &qc->result_tf;
6654 tf->feature = g->error;
6655 tf->nsect = g->nsect;
6656 tf->lbal = g->lbal;
6657 tf->lbam = g->lbam;
6658 tf->lbah = g->lbah;
6659 tf->device = g->device;
6660 tf->command = g->status;
6661 tf->hob_nsect = g->hob_nsect;
6662 tf->hob_lbal = g->hob_lbal;
6663 tf->hob_lbam = g->hob_lbam;
6664 tf->hob_lbah = g->hob_lbah;
6665 tf->ctl = g->alt_status;
6667 return true;
6670 static struct ata_port_operations ipr_sata_ops = {
6671 .phy_reset = ipr_ata_phy_reset,
6672 .hardreset = ipr_sata_reset,
6673 .post_internal_cmd = ipr_ata_post_internal,
6674 .qc_prep = ata_noop_qc_prep,
6675 .qc_defer = ipr_qc_defer,
6676 .qc_issue = ipr_qc_issue,
6677 .qc_fill_rtf = ipr_qc_fill_rtf,
6678 .port_start = ata_sas_port_start,
6679 .port_stop = ata_sas_port_stop
6682 static struct ata_port_info sata_port_info = {
6683 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6684 .pio_mask = ATA_PIO4_ONLY,
6685 .mwdma_mask = ATA_MWDMA2,
6686 .udma_mask = ATA_UDMA6,
6687 .port_ops = &ipr_sata_ops
6690 #ifdef CONFIG_PPC_PSERIES
6691 static const u16 ipr_blocked_processors[] = {
6703 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6704 * @ioa_cfg: ioa cfg struct
6706 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6707 * certain pSeries hardware. This function determines if the given
6708 * adapter is in one of these configurations or not.
6711 * 1 if adapter is not supported / 0 if adapter is supported
6713 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6715 int i;
6717 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6718 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6719 if (pvr_version_is(ipr_blocked_processors[i]))
6720 return 1;
6723 return 0;
6725 #else
6726 #define ipr_invalid_adapter(ioa_cfg) 0
6727 #endif
6730 * ipr_ioa_bringdown_done - IOA bring down completion.
6731 * @ipr_cmd: ipr command struct
6733 * This function processes the completion of an adapter bring down.
6734 * It wakes any reset sleepers.
6739 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6741 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6742 int i;
6745 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6747 spin_unlock_irq(ioa_cfg->host->host_lock);
6748 scsi_unblock_requests(ioa_cfg->host);
6749 spin_lock_irq(ioa_cfg->host->host_lock);
6752 ioa_cfg->in_reset_reload = 0;
6753 ioa_cfg->reset_retries = 0;
6754 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6755 spin_lock(&ioa_cfg->hrrq[i]._lock);
6756 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6757 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6761 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6762 wake_up_all(&ioa_cfg->reset_wait_q);
6765 return IPR_RC_JOB_RETURN;
6769 * ipr_ioa_reset_done - IOA reset completion.
6770 * @ipr_cmd: ipr command struct
6772 * This function processes the completion of an adapter reset.
6773 * It schedules any necessary mid-layer add/removes and
6774 * wakes any reset sleepers.
6779 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6781 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6782 struct ipr_resource_entry *res;
6783 struct ipr_hostrcb *hostrcb, *temp;
6784 int i = 0, j;
6787 ioa_cfg->in_reset_reload = 0;
6788 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6789 spin_lock(&ioa_cfg->hrrq[j]._lock);
6790 ioa_cfg->hrrq[j].allow_cmds = 1;
6791 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6794 ioa_cfg->reset_cmd = NULL;
6795 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6797 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6798 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6799 ipr_trace;
6800 break;
6803 schedule_work(&ioa_cfg->work_q);
6805 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6806 list_del(&hostrcb->queue);
6807 if (i++ < IPR_NUM_LOG_HCAMS)
6808 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6809 else
6810 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6813 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6814 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6816 ioa_cfg->reset_retries = 0;
6817 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6818 wake_up_all(&ioa_cfg->reset_wait_q);
6820 spin_unlock(ioa_cfg->host->host_lock);
6821 scsi_unblock_requests(ioa_cfg->host);
6822 spin_lock(ioa_cfg->host->host_lock);
6824 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6825 scsi_block_requests(ioa_cfg->host);
6828 return IPR_RC_JOB_RETURN;
6832 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6833 * @supported_dev: supported device struct
6834 * @vpids: vendor product id struct
6839 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6840 struct ipr_std_inq_vpids *vpids)
6842 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6843 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6844 supported_dev->num_records = 1;
6845 supported_dev->data_length =
6846 cpu_to_be16(sizeof(struct ipr_supported_device));
6847 supported_dev->reserved = 0;
6851 * ipr_set_supported_devs - Send Set Supported Devices for a device
6852 * @ipr_cmd: ipr command struct
6854 * This function sends a Set Supported Devices to the adapter
6857 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6859 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6861 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6862 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6863 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6864 struct ipr_resource_entry *res = ipr_cmd->u.res;
6866 ipr_cmd->job_step = ipr_ioa_reset_done;
6868 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6869 if (!ipr_is_scsi_disk(res))
6870 continue;
6872 ipr_cmd->u.res = res;
6873 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6875 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6876 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6877 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6879 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6880 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6881 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6882 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6884 ipr_init_ioadl(ipr_cmd,
6885 ioa_cfg->vpd_cbs_dma +
6886 offsetof(struct ipr_misc_cbs, supp_dev),
6887 sizeof(struct ipr_supported_device),
6888 IPR_IOADL_FLAGS_WRITE_LAST);
6890 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6891 IPR_SET_SUP_DEVICE_TIMEOUT);
6893 if (!ioa_cfg->sis64)
6894 ipr_cmd->job_step = ipr_set_supported_devs;
6896 return IPR_RC_JOB_RETURN;
6900 return IPR_RC_JOB_CONTINUE;
6904 * ipr_get_mode_page - Locate specified mode page
6905 * @mode_pages: mode page buffer
6906 * @page_code: page code to find
6907 * @len: minimum required length for mode page
6910 * pointer to mode page / NULL on failure
6912 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6913 u32 page_code, u32 len)
6915 struct ipr_mode_page_hdr *mode_hdr;
6916 u32 page_length;
6917 u32 length;
6919 if (!mode_pages || (mode_pages->hdr.length == 0))
6920 return NULL;
6922 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6923 mode_hdr = (struct ipr_mode_page_hdr *)
6924 (mode_pages->data + mode_pages->hdr.block_desc_len);
6926 while (length) {
6927 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6928 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6929 return mode_hdr;
6930 break;
6931 } else {
6932 page_length = (sizeof(struct ipr_mode_page_hdr) +
6933 mode_hdr->page_length);
6934 length -= page_length;
6935 mode_hdr = (struct ipr_mode_page_hdr *)
6936 ((unsigned long)mode_hdr + page_length);
6940 return NULL;
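/*
 * Usage sketch (mirrors the page 0x28 callers below); a defensive caller
 * should treat a NULL return as "page not present or shorter than needed":
 *
 *	struct ipr_mode_page28 *page;
 *
 *	page = ipr_get_mode_page(mode_pages, 0x28, sizeof(*page));
 *	if (!page)
 *		return;
 */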
6943 * ipr_check_term_power - Check for term power errors
6944 * @ioa_cfg: ioa config struct
6945 * @mode_pages: IOAFP mode pages buffer
6947 * Check the IOAFP's mode page 28 for term power errors
6952 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6953 struct ipr_mode_pages *mode_pages)
6955 int i;
6956 int entry_length;
6957 struct ipr_dev_bus_entry *bus;
6958 struct ipr_mode_page28 *mode_page;
6960 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6961 sizeof(struct ipr_mode_page28));
6963 entry_length = mode_page->entry_length;
6965 bus = mode_page->bus;
6967 for (i = 0; i < mode_page->num_entries; i++) {
6968 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6969 dev_err(&ioa_cfg->pdev->dev,
6970 "Term power is absent on scsi bus %d\n",
6974 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6979 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6980 * @ioa_cfg: ioa config struct
6982 * Looks through the config table checking for SES devices. If
6983 * an SES device appears in the SES table with a maximum SCSI
6984 * bus speed, that bus's speed is limited accordingly.
6989 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6991 u32 max_xfer_rate;
6992 int i;
6994 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6995 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6996 ioa_cfg->bus_attr[i].bus_width);
6998 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6999 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7004 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7005 * @ioa_cfg: ioa config struct
7006 * @mode_pages: mode page 28 buffer
7008 * Updates mode page 28 based on driver configuration
7013 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7014 struct ipr_mode_pages *mode_pages)
7016 int i, entry_length;
7017 struct ipr_dev_bus_entry *bus;
7018 struct ipr_bus_attributes *bus_attr;
7019 struct ipr_mode_page28 *mode_page;
7021 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7022 sizeof(struct ipr_mode_page28));
7024 entry_length = mode_page->entry_length;
7026 /* Loop for each device bus entry */
7027 for (i = 0, bus = mode_page->bus;
7028 i < mode_page->num_entries;
7029 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7030 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7031 dev_err(&ioa_cfg->pdev->dev,
7032 "Invalid resource address reported: 0x%08X\n",
7033 IPR_GET_PHYS_LOC(bus->res_addr));
7034 continue;
7037 bus_attr = &ioa_cfg->bus_attr[i];
7038 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7039 bus->bus_width = bus_attr->bus_width;
7040 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7041 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7042 if (bus_attr->qas_enabled)
7043 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7045 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7050 * ipr_build_mode_select - Build a mode select command
7051 * @ipr_cmd: ipr command struct
7052 * @res_handle: resource handle to send command to
7053 * @parm: Byte 1 of Mode Select command
7054 * @dma_addr: DMA buffer address
7055 * @xfer_len: data transfer length
7060 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7061 __be32 res_handle, u8 parm,
7062 dma_addr_t dma_addr, u8 xfer_len)
7064 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7066 ioarcb->res_handle = res_handle;
7067 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7068 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7069 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7070 ioarcb->cmd_pkt.cdb[1] = parm;
7071 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7073 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
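/*
 * Resulting 6-byte MODE SELECT CDB (illustrative; 0x11 is the @parm value
 * the callers below pass, i.e. the PF and SP bits set):
 *
 *	cdb[0] = MODE_SELECT (0x15)
 *	cdb[1] = parm (0x11 = PF | SP)
 *	cdb[4] = xfer_len
 */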
7077 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7078 * @ipr_cmd: ipr command struct
7080 * This function sets up the SCSI bus attributes and sends
7081 * a Mode Select for Page 28 to activate them.
7086 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7088 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7089 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7090 int length;
7093 ipr_scsi_bus_speed_limit(ioa_cfg);
7094 ipr_check_term_power(ioa_cfg, mode_pages);
7095 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7096 length = mode_pages->hdr.length + 1;
7097 mode_pages->hdr.length = 0;
7099 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7100 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7101 length);
7103 ipr_cmd->job_step = ipr_set_supported_devs;
7104 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7105 struct ipr_resource_entry, queue);
7106 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7109 return IPR_RC_JOB_RETURN;
7113 * ipr_build_mode_sense - Builds a mode sense command
7114 * @ipr_cmd: ipr command struct
7115 * @res_handle: resource handle to send command to
7116 * @parm: Byte 2 of mode sense command
7117 * @dma_addr: DMA address of mode sense buffer
7118 * @xfer_len: Size of DMA buffer
7123 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7124 __be32 res_handle,
7125 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7127 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7129 ioarcb->res_handle = res_handle;
7130 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7131 ioarcb->cmd_pkt.cdb[2] = parm;
7132 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7133 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7135 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7139 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7140 * @ipr_cmd: ipr command struct
7142 * This function handles the failure of an IOA bringup command.
7147 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7149 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7150 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7152 dev_err(&ioa_cfg->pdev->dev,
7153 "0x%02X failed with IOASC: 0x%08X\n",
7154 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7156 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7157 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7158 return IPR_RC_JOB_RETURN;
7162 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7163 * @ipr_cmd: ipr command struct
7165 * This function handles the failure of a Mode Sense to the IOAFP.
7166 * Some adapters do not handle all mode pages.
7169 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7171 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7173 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7174 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7176 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7177 ipr_cmd->job_step = ipr_set_supported_devs;
7178 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7179 struct ipr_resource_entry, queue);
7180 return IPR_RC_JOB_CONTINUE;
7183 return ipr_reset_cmd_failed(ipr_cmd);
7187 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7188 * @ipr_cmd: ipr command struct
7190 * This function sends a Page 28 mode sense to the IOA to
7191 * retrieve SCSI bus attributes.
7196 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7198 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7201 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7202 0x28, ioa_cfg->vpd_cbs_dma +
7203 offsetof(struct ipr_misc_cbs, mode_pages),
7204 sizeof(struct ipr_mode_pages));
7206 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7207 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7209 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7212 return IPR_RC_JOB_RETURN;
7216 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7217 * @ipr_cmd: ipr command struct
7219 * This function enables dual IOA RAID support if possible.
7224 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7226 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7227 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7228 struct ipr_mode_page24 *mode_page;
7229 int length;
7232 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7233 sizeof(struct ipr_mode_page24));
7235 if (mode_page)
7236 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7238 length = mode_pages->hdr.length + 1;
7239 mode_pages->hdr.length = 0;
7241 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7242 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7243 length);
7245 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7246 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7249 return IPR_RC_JOB_RETURN;
7253 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7254 * @ipr_cmd: ipr command struct
7256 * This function handles the failure of a Mode Sense to the IOAFP.
7257 * Some adapters do not handle all mode pages.
7260 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7262 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7264 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7266 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7267 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7268 return IPR_RC_JOB_CONTINUE;
7271 return ipr_reset_cmd_failed(ipr_cmd);
7275 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7276 * @ipr_cmd: ipr command struct
7278 * This function sends a mode sense to the IOA to retrieve
7279 * the IOA Advanced Function Control mode page.
7284 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7286 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7289 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7290 0x24, ioa_cfg->vpd_cbs_dma +
7291 offsetof(struct ipr_misc_cbs, mode_pages),
7292 sizeof(struct ipr_mode_pages));
7294 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7295 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7297 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7300 return IPR_RC_JOB_RETURN;
7304 * ipr_init_res_table - Initialize the resource table
7305 * @ipr_cmd: ipr command struct
7307 * This function looks through the existing resource table, comparing
7308 * it with the config table. It takes care of old/new devices and
7309 * schedules adding/removing them from the mid-layer as appropriate.
7313 * IPR_RC_JOB_CONTINUE
7315 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7317 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7318 struct ipr_resource_entry *res, *temp;
7319 struct ipr_config_table_entry_wrapper cfgtew;
7320 int entries, found, flag, i;
7321 LIST_HEAD(old_res);
7324 if (ioa_cfg->sis64)
7325 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7326 else
7327 flag = ioa_cfg->u.cfg_table->hdr.flags;
7329 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7330 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7332 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7333 list_move_tail(&res->queue, &old_res);
7335 if (ioa_cfg->sis64)
7336 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7337 else
7338 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7340 for (i = 0; i < entries; i++) {
7341 if (ioa_cfg->sis64)
7342 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7343 else
7344 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7346 found = 0;
7347 list_for_each_entry_safe(res, temp, &old_res, queue) {
7348 if (ipr_is_same_device(res, &cfgtew)) {
7349 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7350 found = 1;
7351 break;
7355 if (!found) {
7356 if (list_empty(&ioa_cfg->free_res_q)) {
7357 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7362 res = list_entry(ioa_cfg->free_res_q.next,
7363 struct ipr_resource_entry, queue);
7364 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7365 ipr_init_res_entry(res, &cfgtew);
7367 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7368 res->sdev->allow_restart = 1;
7371 ipr_update_res_entry(res, &cfgtew);
7374 list_for_each_entry_safe(res, temp, &old_res, queue) {
7375 if (res->sdev) {
7376 res->del_from_ml = 1;
7377 res->res_handle = IPR_INVALID_RES_HANDLE;
7378 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7382 list_for_each_entry_safe(res, temp, &old_res, queue) {
7383 ipr_clear_res_target(res);
7384 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7387 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7388 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7389 else
7390 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7393 return IPR_RC_JOB_CONTINUE;
7397 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7398 * @ipr_cmd: ipr command struct
7400 * This function sends a Query IOA Configuration command
7401 * to the adapter to retrieve the IOA configuration table.
7406 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7408 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7409 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7410 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7411 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7414 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7415 ioa_cfg->dual_raid = 1;
7416 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7417 ucode_vpd->major_release, ucode_vpd->card_type,
7418 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7419 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7420 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7422 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7423 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7424 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7425 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7427 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7428 IPR_IOADL_FLAGS_READ_LAST);
7430 ipr_cmd->job_step = ipr_init_res_table;
7432 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7435 return IPR_RC_JOB_RETURN;
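/*
 * Sketch of the allocation length encoding above, assuming a hypothetical
 * cfg_table_size of 0x012345:
 *
 *	cdb[6] = 0x01;	bits 23:16
 *	cdb[7] = 0x23;	bits 15:8
 *	cdb[8] = 0x45;	bits 7:0
 */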
7439 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7440 * @ipr_cmd: ipr command struct
7442 * This utility function sends an inquiry to the adapter.
7447 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7448 dma_addr_t dma_addr, u8 xfer_len)
7450 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7453 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7454 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7456 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7457 ioarcb->cmd_pkt.cdb[1] = flags;
7458 ioarcb->cmd_pkt.cdb[2] = page;
7459 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7461 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7463 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7468 * ipr_inquiry_page_supported - Is the given inquiry page supported
7469 * @page0: inquiry page 0 buffer
7472 * This function determines if the specified inquiry page is supported.
7475 * 1 if page is supported / 0 if not
7477 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7479 int i;
7481 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7482 if (page0->page[i] == page)
7483 return 1;
7486 return 0;
7489 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7490 * @ipr_cmd: ipr command struct
7492 * This function sends a Page 0xD0 inquiry to the adapter
7493 * to retrieve adapter capabilities.
7496 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7498 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7500 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7501 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7502 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7505 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7506 memset(cap, 0, sizeof(*cap));
7508 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7509 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7510 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7511 sizeof(struct ipr_inquiry_cap));
7512 return IPR_RC_JOB_RETURN;
7516 return IPR_RC_JOB_CONTINUE;
7520 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7521 * @ipr_cmd: ipr command struct
7523 * This function sends a Page 3 inquiry to the adapter
7524 * to retrieve software VPD information.
7527 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7529 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7531 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7535 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7537 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7538 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7539 sizeof(struct ipr_inquiry_page3));
7542 return IPR_RC_JOB_RETURN;
7546 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7547 * @ipr_cmd: ipr command struct
7549 * This function sends a Page 0 inquiry to the adapter
7550 * to retrieve supported inquiry pages.
7553 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7555 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7557 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7558 char type[5];
7562 /* Grab the type out of the VPD and store it away */
7563 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7564 type[4] = '\0';
7565 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7567 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7569 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7570 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7571 sizeof(struct ipr_inquiry_page0));
7574 return IPR_RC_JOB_RETURN;
7578 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7579 * @ipr_cmd: ipr command struct
7581 * This function sends a standard inquiry to the adapter.
7586 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7588 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7591 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7593 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7594 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7595 sizeof(struct ipr_ioa_vpd));
7598 return IPR_RC_JOB_RETURN;
7602 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7603 * @ipr_cmd: ipr command struct
7605 * This function sends an Identify Host Request Response Queue
7606 * command to establish the HRRQ with the adapter.
7611 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7613 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7614 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7615 struct ipr_hrr_queue *hrrq;
7618 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7619 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7621 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7622 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7624 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7625 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7627 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7629 ioarcb->cmd_pkt.cdb[1] = 0x1;
7631 if (ioa_cfg->nvectors == 1)
7632 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7633 else
7634 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7636 ioarcb->cmd_pkt.cdb[2] =
7637 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7638 ioarcb->cmd_pkt.cdb[3] =
7639 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7640 ioarcb->cmd_pkt.cdb[4] =
7641 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7642 ioarcb->cmd_pkt.cdb[5] =
7643 ((u64) hrrq->host_rrq_dma) & 0xff;
7644 ioarcb->cmd_pkt.cdb[7] =
7645 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7646 ioarcb->cmd_pkt.cdb[8] =
7647 (sizeof(u32) * hrrq->size) & 0xff;
7649 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7650 ioarcb->cmd_pkt.cdb[9] =
7651 ioa_cfg->identify_hrrq_index;
7653 if (ioa_cfg->sis64) {
7654 ioarcb->cmd_pkt.cdb[10] =
7655 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7656 ioarcb->cmd_pkt.cdb[11] =
7657 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7658 ioarcb->cmd_pkt.cdb[12] =
7659 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7660 ioarcb->cmd_pkt.cdb[13] =
7661 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7664 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7665 ioarcb->cmd_pkt.cdb[14] =
7666 ioa_cfg->identify_hrrq_index;
7668 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7669 IPR_INTERNAL_TIMEOUT);
7671 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7672 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7675 return IPR_RC_JOB_RETURN;
7679 return IPR_RC_JOB_CONTINUE;
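/*
 * Sketch of how a hypothetical 64-bit host_rrq_dma of 0x0123456789ABCDEF
 * is scattered into the Identify HRRQ CDB built above: the low word goes
 * in cdb[2..5] and, on SIS-64 adapters, the high word in cdb[10..13]:
 *
 *	cdb[2..5]   = 0x89 0xAB 0xCD 0xEF
 *	cdb[10..13] = 0x01 0x23 0x45 0x67
 */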
7683 * ipr_reset_timer_done - Adapter reset timer function
7684 * @ipr_cmd: ipr command struct
7686 * Description: This function is used in adapter reset processing
7687 * for timing events. If the reset_cmd pointer in the IOA
7688 * config struct is not this adapter's, we are doing nested
7689 * resets and fail_all_ops will take care of freeing the
7690 * command block.
7695 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7697 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7698 unsigned long lock_flags = 0;
7700 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7702 if (ioa_cfg->reset_cmd == ipr_cmd) {
7703 list_del(&ipr_cmd->queue);
7704 ipr_cmd->done(ipr_cmd);
7707 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7711 * ipr_reset_start_timer - Start a timer for adapter reset job
7712 * @ipr_cmd: ipr command struct
7713 * @timeout: timeout value
7715 * Description: This function is used in adapter reset processing
7716 * for timing events. If the reset_cmd pointer in the IOA
7717 * config struct is not this adapter's, we are doing nested
7718 * resets and fail_all_ops will take care of freeing the
7719 * command block.
7724 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7725 unsigned long timeout)
7729 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7730 ipr_cmd->done = ipr_reset_ioa_job;
7732 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7733 ipr_cmd->timer.expires = jiffies + timeout;
7734 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7735 add_timer(&ipr_cmd->timer);
7739 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7740 * @ioa_cfg: ioa cfg struct
7745 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7747 struct ipr_hrr_queue *hrrq;
7749 for_each_hrrq(hrrq, ioa_cfg) {
7750 spin_lock(&hrrq->_lock);
7751 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7753 /* Initialize Host RRQ pointers */
7754 hrrq->hrrq_start = hrrq->host_rrq;
7755 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7756 hrrq->hrrq_curr = hrrq->hrrq_start;
7757 hrrq->toggle_bit = 1;
7758 spin_unlock(&hrrq->_lock);
7762 ioa_cfg->identify_hrrq_index = 0;
7763 if (ioa_cfg->hrrq_num == 1)
7764 atomic_set(&ioa_cfg->hrrq_index, 0);
7765 else
7766 atomic_set(&ioa_cfg->hrrq_index, 1);
7768 /* Zero out config table */
7769 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
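/*
 * The toggle bit set above is how the response path detects valid entries
 * after the circular queue wraps: the adapter flips the bit it writes each
 * time it cycles past hrrq_end, so an entry is new only while its toggle
 * bit matches hrrq->toggle_bit. A sketch of the consumer side:
 *
 *	if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
 *	    hrrq->toggle_bit) {
 *		... process the response, advance hrrq_curr,
 *		    and flip toggle_bit on wrap ...
 *	}
 */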
7773 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7774 * @ipr_cmd: ipr command struct
7777 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7779 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7781 unsigned long stage, stage_time;
7782 u32 feedback;
7783 volatile u32 int_reg;
7784 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7785 u64 maskval = 0;
7787 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7788 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7789 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7791 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7793 /* sanity check the stage_time value */
7794 if (stage_time == 0)
7795 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7796 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7797 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7798 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7799 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7801 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7802 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7803 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7804 stage_time = ioa_cfg->transop_timeout;
7805 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7806 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7807 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7808 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7809 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7810 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7811 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7812 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7813 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7814 return IPR_RC_JOB_CONTINUE;
7818 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7819 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7820 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7821 ipr_cmd->done = ipr_reset_ioa_job;
7822 add_timer(&ipr_cmd->timer);
7824 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7826 return IPR_RC_JOB_RETURN;
7830 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7831 * @ipr_cmd: ipr command struct
7833 * This function reinitializes some control blocks and
7834 * enables destructive diagnostics on the adapter.
7839 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7841 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7842 volatile u32 int_reg;
7843 volatile u64 maskval;
7844 int i;
7847 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7848 ipr_init_ioa_mem(ioa_cfg);
7850 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7851 spin_lock(&ioa_cfg->hrrq[i]._lock);
7852 ioa_cfg->hrrq[i].allow_interrupts = 1;
7853 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7856 if (ioa_cfg->sis64) {
7857 /* Set the adapter to the correct endian mode. */
7858 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7859 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7862 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7864 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7865 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7866 ioa_cfg->regs.clr_interrupt_mask_reg32);
7867 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7868 return IPR_RC_JOB_CONTINUE;
7871 /* Enable destructive diagnostics on IOA */
7872 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7874 if (ioa_cfg->sis64) {
7875 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7876 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7877 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7879 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7881 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7883 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7885 if (ioa_cfg->sis64) {
7886 ipr_cmd->job_step = ipr_reset_next_stage;
7887 return IPR_RC_JOB_CONTINUE;
7890 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7891 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7892 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7893 ipr_cmd->done = ipr_reset_ioa_job;
7894 add_timer(&ipr_cmd->timer);
7895 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7898 return IPR_RC_JOB_RETURN;
7902 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7903 * @ipr_cmd: ipr command struct
7905 * This function is invoked when an adapter dump has run out
7906 * of processing time.
7909 * IPR_RC_JOB_CONTINUE
7911 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7913 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7915 if (ioa_cfg->sdt_state == GET_DUMP)
7916 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7917 else if (ioa_cfg->sdt_state == READ_DUMP)
7918 ioa_cfg->sdt_state = ABORT_DUMP;
7920 ioa_cfg->dump_timeout = 1;
7921 ipr_cmd->job_step = ipr_reset_alert;
7923 return IPR_RC_JOB_CONTINUE;
7927 * ipr_unit_check_no_data - Log a unit check/no data error log
7928 * @ioa_cfg: ioa config struct
7930 * Logs an error indicating the adapter unit checked, but for some
7931 * reason, we were unable to fetch the unit check buffer.
7936 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7938 ioa_cfg->errors_logged++;
7939 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7943 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7944 * @ioa_cfg: ioa config struct
7946 * Fetches the unit check buffer from the adapter by clocking the data
7947 * through the mailbox register.
7952 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7954 unsigned long mailbox;
7955 struct ipr_hostrcb *hostrcb;
7956 struct ipr_uc_sdt sdt;
7957 int rc, length;
7958 u32 ioasc;
7960 mailbox = readl(ioa_cfg->ioa_mailbox);
7962 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7963 ipr_unit_check_no_data(ioa_cfg);
7967 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7968 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7969 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7971 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7972 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7973 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7974 ipr_unit_check_no_data(ioa_cfg);
7978 /* Find length of the first sdt entry (UC buffer) */
7979 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7980 length = be32_to_cpu(sdt.entry[0].end_token);
7981 else
7982 length = (be32_to_cpu(sdt.entry[0].end_token) -
7983 be32_to_cpu(sdt.entry[0].start_token)) &
7984 IPR_FMT2_MBX_ADDR_MASK;
7986 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7987 struct ipr_hostrcb, queue);
7988 list_del(&hostrcb->queue);
7989 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7991 rc = ipr_get_ldump_data_section(ioa_cfg,
7992 be32_to_cpu(sdt.entry[0].start_token),
7993 (__be32 *)&hostrcb->hcam,
7994 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7996 if (!rc) {
7997 ipr_handle_log_data(ioa_cfg, hostrcb);
7998 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7999 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8000 ioa_cfg->sdt_state == GET_DUMP)
8001 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8002 } else
8003 ipr_unit_check_no_data(ioa_cfg);
8005 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8009 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8010 * @ipr_cmd: ipr command struct
8012 * Description: This function retrieves the unit check buffer from the adapter.
8017 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8019 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8022 ioa_cfg->ioa_unit_checked = 0;
8023 ipr_get_unit_check_buffer(ioa_cfg);
8024 ipr_cmd->job_step = ipr_reset_alert;
8025 ipr_reset_start_timer(ipr_cmd, 0);
8028 return IPR_RC_JOB_RETURN;
8032 * ipr_reset_restore_cfg_space - Restore PCI config space.
8033 * @ipr_cmd: ipr command struct
8035 * Description: This function restores the saved PCI config space of
8036 * the adapter, fails all outstanding ops back to the callers, and
8037 * fetches the dump/unit check if applicable to this reset.
8040 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8042 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8044 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8045 volatile u32 int_reg;
8048 ioa_cfg->pdev->state_saved = true;
8049 pci_restore_state(ioa_cfg->pdev);
8051 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8052 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8053 return IPR_RC_JOB_CONTINUE;
8056 ipr_fail_all_ops(ioa_cfg);
8058 if (ioa_cfg->sis64) {
8059 /* Set the adapter to the correct endian mode. */
8060 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8061 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8064 if (ioa_cfg->ioa_unit_checked) {
8065 if (ioa_cfg->sis64) {
8066 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8067 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8068 return IPR_RC_JOB_RETURN;
8070 ioa_cfg->ioa_unit_checked = 0;
8071 ipr_get_unit_check_buffer(ioa_cfg);
8072 ipr_cmd->job_step = ipr_reset_alert;
8073 ipr_reset_start_timer(ipr_cmd, 0);
8074 return IPR_RC_JOB_RETURN;
8078 if (ioa_cfg->in_ioa_bringdown) {
8079 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8080 } else {
8081 ipr_cmd->job_step = ipr_reset_enable_ioa;
8083 if (GET_DUMP == ioa_cfg->sdt_state) {
8084 ioa_cfg->sdt_state = READ_DUMP;
8085 ioa_cfg->dump_timeout = 0;
8086 if (ioa_cfg->sis64)
8087 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8088 else
8089 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8090 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8091 schedule_work(&ioa_cfg->work_q);
8092 return IPR_RC_JOB_RETURN;
8097 return IPR_RC_JOB_CONTINUE;
8101 * ipr_reset_bist_done - BIST has completed on the adapter.
8102 * @ipr_cmd: ipr command struct
8104 * Description: Unblock config space and resume the reset process.
8107 * IPR_RC_JOB_CONTINUE
8109 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8111 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8114 if (ioa_cfg->cfg_locked)
8115 pci_cfg_access_unlock(ioa_cfg->pdev);
8116 ioa_cfg->cfg_locked = 0;
8117 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8119 return IPR_RC_JOB_CONTINUE;
8123 * ipr_reset_start_bist - Run BIST on the adapter.
8124 * @ipr_cmd: ipr command struct
8126 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8129 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8131 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8133 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8134 int rc = PCIBIOS_SUCCESSFUL;
8137 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8138 writel(IPR_UPROCI_SIS64_START_BIST,
8139 ioa_cfg->regs.set_uproc_interrupt_reg32);
8140 else
8141 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8143 if (rc == PCIBIOS_SUCCESSFUL) {
8144 ipr_cmd->job_step = ipr_reset_bist_done;
8145 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8146 rc = IPR_RC_JOB_RETURN;
8147 } else {
8148 if (ioa_cfg->cfg_locked)
8149 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8150 ioa_cfg->cfg_locked = 0;
8151 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8152 rc = IPR_RC_JOB_CONTINUE;
8156 return rc;
8160 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8161 * @ipr_cmd: ipr command struct
8163 * Description: This clears PCI reset to the adapter and delays two seconds.
8168 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8171 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8172 ipr_cmd->job_step = ipr_reset_bist_done;
8173 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8175 return IPR_RC_JOB_RETURN;
8179 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8180 * @ipr_cmd: ipr command struct
8182 * Description: This asserts PCI reset to the adapter.
8187 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8189 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8190 struct pci_dev *pdev = ioa_cfg->pdev;
8193 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8194 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8195 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8197 return IPR_RC_JOB_RETURN;
8201 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8202 * @ipr_cmd: ipr command struct
8204 * Description: This attempts to block config access to the IOA.
8207 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8209 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8211 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8212 int rc = IPR_RC_JOB_CONTINUE;
8214 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8215 ioa_cfg->cfg_locked = 1;
8216 ipr_cmd->job_step = ioa_cfg->reset;
8217 } else {
8218 if (ipr_cmd->u.time_left) {
8219 rc = IPR_RC_JOB_RETURN;
8220 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8221 ipr_reset_start_timer(ipr_cmd,
8222 IPR_CHECK_FOR_RESET_TIMEOUT);
8223 } else {
8224 ipr_cmd->job_step = ioa_cfg->reset;
8225 dev_err(&ioa_cfg->pdev->dev,
8226 "Timed out waiting to lock config access. Resetting anyway.\n");
8234 * ipr_reset_block_config_access - Block config access to the IOA
8235 * @ipr_cmd: ipr command struct
8237 * Description: This attempts to block config access to the IOA.
8240 * IPR_RC_JOB_CONTINUE
8242 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8244 ipr_cmd->ioa_cfg->cfg_locked = 0;
8245 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8246 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8247 return IPR_RC_JOB_CONTINUE;
8251 * ipr_reset_allowed - Query whether or not IOA can be reset
8252 * @ioa_cfg: ioa config struct
8255 * 0 if reset not allowed / non-zero if reset is allowed
8257 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8259 volatile u32 temp_reg;
8261 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8262 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8266 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8267 * @ipr_cmd: ipr command struct
8269 * Description: This function waits for adapter permission to run BIST,
8270 * then runs BIST. If the adapter does not give permission after a
8271 * reasonable time, we will reset the adapter anyway. The impact of
8272 * resetting the adapter without warning the adapter is the risk of
8273 * losing the persistent error log on the adapter. If the adapter is
8274 * reset while it is writing to the flash on the adapter, the flash
8275 * segment will have bad ECC and be zeroed.
8278 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8280 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8282 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8283 int rc = IPR_RC_JOB_RETURN;
8285 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8286 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8287 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8288 } else {
8289 ipr_cmd->job_step = ipr_reset_block_config_access;
8290 rc = IPR_RC_JOB_CONTINUE;
8293 return rc;
8297 * ipr_reset_alert - Alert the adapter of a pending reset
8298 * @ipr_cmd: ipr command struct
8300 * Description: This function alerts the adapter that it will be reset.
8301 * If memory space is not currently enabled, proceed directly
8302 * to running BIST on the adapter. The timer must always be started
8303 * so we guarantee we do not run BIST from ipr_isr.
8308 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8310 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8311 u16 cmd_reg;
8312 int rc;
8315 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8317 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8318 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8319 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8320 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8322 ipr_cmd->job_step = ipr_reset_block_config_access;
8325 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8326 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8329 return IPR_RC_JOB_RETURN;
8333 * ipr_reset_ucode_download_done - Microcode download completion
8334 * @ipr_cmd: ipr command struct
8336 * Description: This function unmaps the microcode download buffer.
8339 * IPR_RC_JOB_CONTINUE
8341 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8343 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8344 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8346 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8347 sglist->num_sg, DMA_TO_DEVICE);
8349 ipr_cmd->job_step = ipr_reset_alert;
8350 return IPR_RC_JOB_CONTINUE;
8354 * ipr_reset_ucode_download - Download microcode to the adapter
8355 * @ipr_cmd: ipr command struct
8357 * Description: This function checks to see if there is microcode
8358 * to download to the adapter. If there is, a download is performed.
8361 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8363 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8365 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8366 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8369 ipr_cmd->job_step = ipr_reset_alert;
8371 if (!sglist)
8372 return IPR_RC_JOB_CONTINUE;
8374 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8375 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8376 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8377 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8378 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8379 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8380 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8382 if (ioa_cfg->sis64)
8383 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8384 else
8385 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8386 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8388 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8389 IPR_WRITE_BUFFER_TIMEOUT);
8392 return IPR_RC_JOB_RETURN;
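/*
 * Resulting WRITE BUFFER CDB (illustrative), assuming a hypothetical
 * 192KB (0x030000) microcode image:
 *
 *	cdb[0]    = WRITE_BUFFER (0x3B)
 *	cdb[1]    = IPR_WR_BUF_DOWNLOAD_AND_SAVE (download microcode and save)
 *	cdb[6..8] = 0x03 0x00 0x00 (24-bit parameter list length)
 */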
/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type != IPR_SHUTDOWN_NONE &&
	    !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}
/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue,
				      &ipr_cmd->hrrq->hrrq_free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}
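/*
 * The reset job is a simple state machine: each step installs its
 * successor in ipr_cmd->job_step and then either returns
 * IPR_RC_JOB_CONTINUE, in which case the router above immediately runs
 * the next step, or kicks off an asynchronous operation and returns
 * IPR_RC_JOB_RETURN, in which case the completion path re-enters
 * ipr_reset_ioa_job().  A step written against that contract looks
 * like the following sketch (step names and predicate are hypothetical,
 * for illustration only):
 *
 *	static int ipr_reset_example_step(struct ipr_cmnd *ipr_cmd)
 *	{
 *		ipr_cmd->job_step = ipr_reset_example_next_step;
 *
 *		if (example_step_needs_async_work(ipr_cmd)) {
 *			ipr_reset_start_timer(ipr_cmd,
 *					      IPR_CHECK_FOR_RESET_TIMEOUT);
 *			return IPR_RC_JOB_RETURN;
 *		}
 *		return IPR_RC_JOB_CONTINUE;
 *	}
 */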
/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;
	int i;

	ioa_cfg->in_reset_reload = 1;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
		scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}
/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	int i;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload) {
		if (ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		else if (ioa_cfg->sdt_state == READ_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
	}

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 1;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		wmb();

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
				spin_unlock_irq(ioa_cfg->host->host_lock);
				scsi_unblock_requests(ioa_cfg->host);
				spin_lock_irq(ioa_cfg->host->host_lock);
			}
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
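/*
 * Callers that need the reset to complete follow the pattern used
 * elsewhere in this file: initiate under the host lock, drop the lock,
 * then sleep on reset_wait_q until in_reset_reload clears.
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 *	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */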
/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 **/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	/* Disallow new interrupts, avoid loop */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 **/
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 **/
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->needs_warm_reset)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 **/
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
	ioa_cfg->in_ioa_bringdown = 1;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 *	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 **/
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
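/*
 * End-to-end recovery flow, as driven by the PCI error recovery core:
 * a frozen channel reaches ipr_pci_error_detected(), which parks all
 * I/O via ipr_pci_frozen() and asks for a slot reset; after the
 * platform resets the slot, ipr_pci_slot_reset() restarts the adapter,
 * restoring config space first unless a warm reset is required.  A
 * permanently failed channel instead takes the ipr_pci_perm_failure()
 * path, which marks the adapter dead and fails outstanding ops.
 */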
/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}
/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	/* Guard against a partially failed ipr_alloc_cmd_blks() */
	if (ioa_cfg->ipr_cmnd_list) {
		for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
			if (ioa_cfg->ipr_cmnd_list[i])
				pci_pool_free(ioa_cfg->ipr_cmd_pool,
					      ioa_cfg->ipr_cmnd_list[i],
					      ioa_cfg->ipr_cmnd_list_dma[i]);

			ioa_cfg->ipr_cmnd_list[i] = NULL;
		}
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	kfree(ioa_cfg->ipr_cmnd_list);
	kfree(ioa_cfg->ipr_cmnd_list_dma);
	ioa_cfg->ipr_cmnd_list = NULL;
	ioa_cfg->ipr_cmnd_list_dma = NULL;
	ioa_cfg->ipr_cmd_pool = NULL;
}
/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 *	none
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++)
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(u32) * ioa_cfg->hrrq[i].size,
				    ioa_cfg->hrrq[i].host_rrq,
				    ioa_cfg->hrrq[i].host_rrq_dma);

	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}
/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		int i;
		for (i = 0; i < ioa_cfg->nvectors; i++)
			free_irq(ioa_cfg->vectors_info[i].vec,
				 &ioa_cfg->hrrq[i]);
	} else
		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);

	if (ioa_cfg->intr_flag == IPR_USE_MSI) {
		pci_disable_msi(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSI;
	} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
		pci_disable_msix(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
	}

	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}
/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -ENOMEM on allocation failure
 **/
static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i, entries_each_hrrq, hrrq_id = 0;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 512, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);

	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
		ipr_free_cmd_blks(ioa_cfg);
		return -ENOMEM;
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		if (ioa_cfg->hrrq_num > 1) {
			if (i == 0) {
				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
				ioa_cfg->hrrq[i].min_cmd_id = 0;
				ioa_cfg->hrrq[i].max_cmd_id =
					(entries_each_hrrq - 1);
			} else {
				entries_each_hrrq =
					IPR_NUM_BASE_CMD_BLKS /
					(ioa_cfg->hrrq_num - 1);
				ioa_cfg->hrrq[i].min_cmd_id =
					IPR_NUM_INTERNAL_CMD_BLKS +
					(i - 1) * entries_each_hrrq;
				ioa_cfg->hrrq[i].max_cmd_id =
					(IPR_NUM_INTERNAL_CMD_BLKS +
					i * entries_each_hrrq - 1);
			}
		} else {
			entries_each_hrrq = IPR_NUM_CMD_BLKS;
			ioa_cfg->hrrq[i].min_cmd_id = 0;
			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
		}
		ioa_cfg->hrrq[i].size = entries_each_hrrq;
	}

	BUG_ON(ioa_cfg->hrrq_num == 0);

	i = IPR_NUM_CMD_BLKS -
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
	if (i > 0) {
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
	}

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
			hrrq_id++;
	}

	return 0;
}
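/*
 * Worked example of the command-id carve-up above, with illustrative
 * values (the real counts come from IPR_NUM_INTERNAL_CMD_BLKS and
 * IPR_NUM_BASE_CMD_BLKS): with 5 internal blocks, 100 base blocks and
 * hrrq_num == 3, hrrq[0] owns ids 0-4 for internal commands, each
 * remaining queue gets 100 / (3 - 1) == 50 ids, so hrrq[1] owns 5-54
 * and hrrq[2] owns 55-104; any ids left over because of the integer
 * division are folded into the last queue.
 */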
/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / non-zero for error
 **/
static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	if (ioa_cfg->sis64) {
		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);

		if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
		    || !ioa_cfg->vset_ids)
			goto out_free_res_entries;
	}

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
		if (i == 0)
			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
		else
			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
	}

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					&ioa_cfg->hrrq[i].host_rrq_dma);

		if (!ioa_cfg->hrrq[i].host_rrq) {
			while (--i > 0)
				pci_free_consistent(pdev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					ioa_cfg->hrrq[i].host_rrq,
					ioa_cfg->hrrq[i].host_rrq_dma);
			goto out_ipr_free_cmd_blocks;
		}
		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						    ioa_cfg->cfg_table_size,
						    &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		pci_free_consistent(pdev,
				    sizeof(u32) * ioa_cfg->hrrq[i].size,
				    ioa_cfg->hrrq[i].host_rrq,
				    ioa_cfg->hrrq[i].host_rrq_dma);
	}
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	kfree(ioa_cfg->target_ids);
	kfree(ioa_cfg->array_ids);
	kfree(ioa_cfg->vset_ids);
	goto out;
}
/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}
/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
	}
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}
/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}
static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
{
	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
	int i, err, vectors;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	vectors = ipr_number_of_msix;

	while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
		vectors = err;

	if (err < 0) {
		pci_disable_msix(ioa_cfg->pdev);
		return err;
	}

	if (!err) {
		for (i = 0; i < vectors; i++)
			ioa_cfg->vectors_info[i].vec = entries[i].vector;
		ioa_cfg->nvectors = vectors;
	}

	return err;
}
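/*
 * pci_enable_msix() here follows the old tri-state convention: a
 * positive return is the number of vectors that could actually be
 * allocated, so the loop above retries with that smaller count until
 * the call either succeeds (0) or fails outright (< 0).
 * ipr_enable_msi() below applies the same convention to
 * pci_enable_msi_block().
 */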
static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, err, vectors;

	vectors = ipr_number_of_msix;

	while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
		vectors = err;

	if (err < 0) {
		pci_disable_msi(ioa_cfg->pdev);
		return err;
	}

	if (!err) {
		for (i = 0; i < vectors; i++)
			ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
		ioa_cfg->nvectors = vectors;
	}

	return err;
}
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
		ioa_cfg->vectors_info[vec_idx].
			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
	}
}
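/*
 * For a host with host_no == 2 and nvectors == 3 this names the
 * vectors "host2-0", "host2-1" and "host2-2", which is what shows up
 * in /proc/interrupts once the IRQs are requested with these
 * descriptions.
 */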
static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, rc;

	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(ioa_cfg->vectors_info[i].vec,
				 ipr_isr_mhrrq,
				 0,
				 ioa_cfg->vectors_info[i].desc,
				 &ioa_cfg->hrrq[i]);
		if (rc) {
			while (--i >= 0)
				free_irq(ioa_cfg->vectors_info[i].vec,
					 &ioa_cfg->hrrq[i]);
			return rc;
		}
	}
	return 0;
}
/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:	interrupt number
 * @devp:	ipr config struct pointer
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *	IRQ_HANDLED
 **/
static irqreturn_t ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: The return value from pci_enable_msi() cannot always be
 * trusted. This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	else
		rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
	else
		free_irq(pdev->irq, ioa_cfg);

	LEAVE;

	return rc;
}
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	if (ioa_cfg->sis64) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		}
	} else
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
	    ipr_enable_msix(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSIX;
	else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
		 ipr_enable_msi(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSI;
	else {
		ioa_cfg->intr_flag = IPR_USE_LSI;
		ioa_cfg->nvectors = 1;
		dev_info(&pdev->dev, "Cannot enable MSI.\n");
	}

	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP) {
			if (ioa_cfg->intr_flag == IPR_USE_MSI) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
				pci_disable_msi(pdev);
			} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
				pci_disable_msix(pdev);
			}

			ioa_cfg->intr_flag = IPR_USE_LSI;
			ioa_cfg->nvectors = 1;
		} else if (rc)
			goto out_msi_disable;
	}

	if (ioa_cfg->intr_flag == IPR_USE_MSI)
		dev_info(&pdev->dev,
			 "Request for %d MSIs succeeded with starting IRQ: %d\n",
			 ioa_cfg->nvectors, pdev->irq);
	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		dev_info(&pdev->dev,
			 "Request for %d MSIXs succeeded.",
			 ioa_cfg->nvectors);

	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				 (unsigned int)num_online_cpus(),
				 (unsigned int)IPR_MAX_HRRQ_NUM);

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto out_msi_disable;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if (ioa_cfg->sis64)
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
				+ ((sizeof(struct ipr_config_table_entry64)
				* ioa_cfg->max_devs_supported)));
	else
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
				+ ((sizeof(struct ipr_config_table_entry)
				* ioa_cfg->max_devs_supported)));

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSI
	    || ioa_cfg->intr_flag == IPR_USE_MSIX) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
				 0,
				 ioa_cfg->vectors_info[0].desc,
				 &ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
				 IRQF_SHARED,
				 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	if (ioa_cfg->intr_flag == IPR_USE_MSI)
		pci_disable_msi(pdev);
	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		pci_disable_msix(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}
/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 *	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
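/*
 * Example of why the explicit scan matters: a VSET at target 3 with a
 * single LUN 1 and no LUN 0 would be missed by the midlayer's usual
 * LUN-0-anchored scan, so the nested loop above offers every
 * (target, lun) pair on IPR_VSET_BUS to scsi_add_device(), which is
 * simply a no-op for addresses that do not respond.
 */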
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
	    ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
		}
	}

	schedule_work(&ioa_cfg->work_q);
	return 0;
}
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
	    ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}
static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};
static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}
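/*
 * ipr_halt() runs off the reboot notifier registered in ipr_init()
 * below: on restart, halt or power-off it sends each live adapter an
 * IPR_SHUTDOWN_PREPARE_FOR_NORMAL so the write cache can start
 * flushing early, ahead of the final per-device shutdown issued by
 * ipr_shutdown().
 */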
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};
/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}
/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);