2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <scsi/scsi_tcq.h>
13 #include <scsi/scsi_bsg_fc.h>
14 #include <scsi/scsi_eh.h>
16 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
17 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
18 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
19 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
23 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
25 * @dev_id: SCSI driver HA context
27 * Called by system whenever the host adapter generates an interrupt.
29 * Returns handled flag.
32 qla2100_intr_handler(int irq, void *dev_id)
35 struct qla_hw_data *ha;
36 struct device_reg_2xxx __iomem *reg;
44 rsp = (struct rsp_que *) dev_id;
46 ql_log(ql_log_info, NULL, 0x505d,
47 "%s: NULL response queue pointer.\n", __func__);
52 reg = &ha->iobase->isp;
55 spin_lock_irqsave(&ha->hardware_lock, flags);
56 vha = pci_get_drvdata(ha->pdev);
57 for (iter = 50; iter--; ) {
58 hccr = RD_REG_WORD(&reg->hccr);
59 if (hccr & HCCR_RISC_PAUSE) {
60 if (pci_channel_offline(ha->pdev))
64 * Issue a "HARD" reset in order for the RISC interrupt
65 * bit to be cleared. Schedule a big hammer to get
66 * out of the RISC PAUSED state.
68 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
69 RD_REG_WORD(&reg->hccr);
71 ha->isp_ops->fw_dump(vha, 1);
72 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
74 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
77 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
78 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
79 RD_REG_WORD(&reg->hccr);
81 /* Get mailbox data. */
82 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
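/* Mailbox0 values 0x4000-0x7fff indicate a mailbox command completion;
 * 0x8000-0xbfff indicate an asynchronous event. */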
83 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
84 qla2x00_mbx_completion(vha, mb[0]);
85 status |= MBX_INTERRUPT;
86 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
87 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
88 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
89 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
90 qla2x00_async_event(vha, rsp, mb);
93 ql_dbg(ql_dbg_async, vha, 0x5025,
94 "Unrecognized interrupt type (%d).\n",
97 /* Release mailbox registers. */
98 WRT_REG_WORD(&reg->semaphore, 0);
99 RD_REG_WORD(&reg->semaphore);
101 qla2x00_process_response_queue(rsp);
103 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
104 RD_REG_WORD(&reg->hccr);
107 spin_unlock_irqrestore(&ha->hardware_lock, flags);
109 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
110 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
111 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
112 complete(&ha->mbx_intr_comp);
115 return (IRQ_HANDLED);
119 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
121 * @dev_id: SCSI driver HA context
123 * Called by system whenever the host adapter generates an interrupt.
125 * Returns handled flag.
128 qla2300_intr_handler(int irq, void *dev_id)
130 scsi_qla_host_t *vha;
131 struct device_reg_2xxx __iomem *reg;
138 struct qla_hw_data *ha;
141 rsp = (struct rsp_que *) dev_id;
143 ql_log(ql_log_info, NULL, 0x5058,
144 "%s: NULL response queue pointer.\n", __func__);
149 reg = &ha->iobase->isp;
152 spin_lock_irqsave(&ha->hardware_lock, flags);
153 vha = pci_get_drvdata(ha->pdev);
154 for (iter = 50; iter--; ) {
155 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
156 if (stat & HSR_RISC_PAUSED) {
157 if (unlikely(pci_channel_offline(ha->pdev)))
160 hccr = RD_REG_WORD(&reg->hccr);
161 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
162 ql_log(ql_log_warn, vha, 0x5026,
163 "Parity error -- HCCR=%x, Dumping "
164 "firmware.\n", hccr);
166 ql_log(ql_log_warn, vha, 0x5027,
167 "RISC paused -- HCCR=%x, Dumping "
168 "firmware.\n", hccr);
171 * Issue a "HARD" reset in order for the RISC
172 * interrupt bit to be cleared. Schedule a big
173 * hammer to get out of the RISC PAUSED state.
175 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
176 RD_REG_WORD(&reg->hccr);
178 ha->isp_ops->fw_dump(vha, 1);
179 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
181 } else if ((stat & HSR_RISC_INT) == 0)
184 switch (stat & 0xff) {
189 qla2x00_mbx_completion(vha, MSW(stat));
190 status |= MBX_INTERRUPT;
192 /* Release mailbox registers. */
193 WRT_REG_WORD(&reg->semaphore, 0);
197 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
198 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
199 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
200 qla2x00_async_event(vha, rsp, mb);
203 qla2x00_process_response_queue(rsp);
206 mb[0] = MBA_CMPLT_1_16BIT;
208 qla2x00_async_event(vha, rsp, mb);
211 mb[0] = MBA_SCSI_COMPLETION;
213 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
214 qla2x00_async_event(vha, rsp, mb);
217 ql_dbg(ql_dbg_async, vha, 0x5028,
218 "Unrecognized interrupt type (%d).\n", stat & 0xff);
221 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
222 RD_REG_WORD_RELAXED(&reg->hccr);
224 spin_unlock_irqrestore(&ha->hardware_lock, flags);
226 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
227 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
228 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
229 complete(&ha->mbx_intr_comp);
232 return (IRQ_HANDLED);
236 * qla2x00_mbx_completion() - Process mailbox command completions.
237 * @ha: SCSI driver HA context
238 * @mb0: Mailbox0 register
241 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
245 uint16_t __iomem *wptr;
246 struct qla_hw_data *ha = vha->hw;
247 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
249 /* Read all mbox registers? */
250 mboxes = (1 << ha->mbx_count) - 1;
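/* mboxes is a bitmask of mailbox registers to read back: default to all
 * of them, then narrow to the in_mb set of the active mailbox command
 * (ha->mcp) when one is pending. */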
252 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
254 mboxes = ha->mcp->in_mb;
256 /* Load return mailbox registers. */
257 ha->flags.mbox_int = 1;
258 ha->mailbox_out[0] = mb0;
260 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
262 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
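/* On ISP2200 the register pointer is re-seeded at mailbox register 8,
 * which does not follow registers 0-7 contiguously. */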
263 if (IS_QLA2200(ha) && cnt == 8)
264 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
265 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
266 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
267 else if (mboxes & BIT_0)
268 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
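/*
 * qla81xx_idc_event() - Handle an Inter-Driver Communication (IDC)
 * asynchronous event: capture the mailbox1-7 payload, complete a pending
 * DCBX wait on IDC-complete, and post an ACK when an IDC-notify carries
 * a non-zero timeout.
 */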
276 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
278 static char *event[] =
279 { "Complete", "Request Notification", "Time Extension" };
281 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
282 uint16_t __iomem *wptr;
283 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
285 /* Seed data -- mailbox1 -> mailbox7. */
286 wptr = (uint16_t __iomem *)&reg24->mailbox1;
287 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
288 mb[cnt] = RD_REG_WORD(wptr);
290 ql_dbg(ql_dbg_async, vha, 0x5021,
291 "Inter-Driver Communication %s -- "
292 "%04x %04x %04x %04x %04x %04x %04x.\n",
293 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
294 mb[4], mb[5], mb[6]);
295 if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) {
296 vha->hw->flags.idc_compl_status = 1;
297 if (vha->hw->notify_dcbx_comp)
298 complete(&vha->hw->dcbx_comp);
301 /* Acknowledgement needed? [Notify && non-zero timeout]. */
302 timeout = (descr >> 8) & 0xf;
303 if (aen != MBA_IDC_NOTIFY || !timeout)
306 ql_dbg(ql_dbg_async, vha, 0x5022,
307 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
308 vha->host_no, event[aen & 0xff], timeout);
310 rval = qla2x00_post_idc_ack_work(vha, mb);
311 if (rval != QLA_SUCCESS)
312 ql_log(ql_log_warn, vha, 0x5023,
313 "IDC failed to post ACK.\n");
318 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
320 static const char * const link_speeds[] = {
321 "1", "2", "?", "4", "8", "16", "10"
324 if (IS_QLA2100(ha) || IS_QLA2200(ha))
325 return link_speeds[0];
326 else if (speed == 0x13)
327 return link_speeds[6];
329 return link_speeds[speed];
331 return link_speeds[LS_UNKNOWN];
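/*
 * qla83xx_handle_8200_aen() - Decode an ISP83xx 8200 AEN: log the
 * Peg-Halt status, device-state and driver-presence registers and
 * schedule NIC-core reset or unrecoverable-state work as indicated by
 * the reason code in mb[1].
 */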
335 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
337 struct qla_hw_data *ha = vha->hw;
340 * 8200 AEN Interpretation:
342 * mb[1] = AEN Reason code
343 * mb[2] = LSW of Peg-Halt Status-1 Register
344 * mb[6] = MSW of Peg-Halt Status-1 Register
345 * mb[3] = LSW of Peg-Halt Status-2 register
346 * mb[7] = MSW of Peg-Halt Status-2 register
347 * mb[4] = IDC Device-State Register value
348 * mb[5] = IDC Driver-Presence Register value
350 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
351 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
352 mb[0], mb[1], mb[2], mb[6]);
353 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
354 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
355 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
357 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
358 IDC_HEARTBEAT_FAILURE)) {
359 ha->flags.nic_core_hung = 1;
360 ql_log(ql_log_warn, vha, 0x5060,
361 "83XX: F/W Error Reported: Check if reset required.\n");
363 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
364 uint32_t protocol_engine_id, fw_err_code, err_level;
367 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
368 * - PEG-Halt Status-1 Register:
369 * (LSW = mb[2], MSW = mb[6])
370 * Bits 0-7 = protocol-engine ID
371 * Bits 8-28 = f/w error code
372 * Bits 29-31 = Error-level
373 * Error-level 0x1 = Non-Fatal error
374 * Error-level 0x2 = Recoverable Fatal error
375 * Error-level 0x4 = UnRecoverable Fatal error
376 * - PEG-Halt Status-2 Register:
377 * (LSW = mb[3], MSW = mb[7])
379 protocol_engine_id = (mb[2] & 0xff);
380 fw_err_code = (((mb[2] & 0xff00) >> 8) |
381 ((mb[6] & 0x1fff) << 8));
382 err_level = ((mb[6] & 0xe000) >> 13);
383 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
384 "Register: protocol_engine_id=0x%x "
385 "fw_err_code=0x%x err_level=0x%x.\n",
386 protocol_engine_id, fw_err_code, err_level);
387 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
388 "Register: 0x%x%x.\n", mb[7], mb[3]);
389 if (err_level == ERR_LEVEL_NON_FATAL) {
390 ql_log(ql_log_warn, vha, 0x5063,
391 "Not a fatal error, f/w has recovered "
393 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
394 ql_log(ql_log_fatal, vha, 0x5064,
395 "Recoverable Fatal error: Chip reset "
397 qla83xx_schedule_work(vha,
398 QLA83XX_NIC_CORE_RESET);
399 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
400 ql_log(ql_log_fatal, vha, 0x5065,
401 "Unrecoverable Fatal error: Set FAILED "
402 "state, reboot required.\n");
403 qla83xx_schedule_work(vha,
404 QLA83XX_NIC_CORE_UNRECOVERABLE);
408 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
409 uint16_t peg_fw_state, nw_interface_link_up;
410 uint16_t nw_interface_signal_detect, sfp_status;
411 uint16_t htbt_counter, htbt_monitor_enable;
412 uint16_t sfp_additonal_info, sfp_multirate;
413 uint16_t sfp_tx_fault, link_speed, dcbx_status;
416 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
417 * - PEG-to-FC Status Register:
418 * (LSW = mb[2], MSW = mb[6])
419 * Bits 0-7 = Peg-Firmware state
420 * Bit 8 = N/W Interface Link-up
421 * Bit 9 = N/W Interface signal detected
422 * Bits 10-11 = SFP Status
423 * SFP Status 0x0 = SFP+ transceiver not expected
424 * SFP Status 0x1 = SFP+ transceiver not present
425 * SFP Status 0x2 = SFP+ transceiver invalid
426 * SFP Status 0x3 = SFP+ transceiver present and
428 * Bits 12-14 = Heartbeat Counter
429 * Bit 15 = Heartbeat Monitor Enable
430 * Bits 16-17 = SFP Additional Info
431 * SFP info 0x0 = Unrecognized transceiver for
433 * SFP info 0x1 = SFP+ brand validation failed
434 * SFP info 0x2 = SFP+ speed validation failed
435 * SFP info 0x3 = SFP+ access error
436 * Bit 18 = SFP Multirate
437 * Bit 19 = SFP Tx Fault
438 * Bits 20-22 = Link Speed
439 * Bits 23-27 = Reserved
440 * Bits 28-30 = DCBX Status
441 * DCBX Status 0x0 = DCBX Disabled
442 * DCBX Status 0x1 = DCBX Enabled
443 * DCBX Status 0x2 = DCBX Exchange error
446 peg_fw_state = (mb[2] & 0x00ff);
447 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
448 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
449 sfp_status = ((mb[2] & 0x0c00) >> 10);
450 htbt_counter = ((mb[2] & 0x7000) >> 12);
451 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
452 sfp_additonal_info = (mb[6] & 0x0003);
453 sfp_multirate = ((mb[6] & 0x0004) >> 2);
454 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
455 link_speed = ((mb[6] & 0x0070) >> 4);
456 dcbx_status = ((mb[6] & 0x7000) >> 12);
458 ql_log(ql_log_warn, vha, 0x5066,
459 "Peg-to-Fc Status Register:\n"
460 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
461 "nw_interface_signal_detect=0x%x"
462 "\nsfp_statis=0x%x.\n ", peg_fw_state,
463 nw_interface_link_up, nw_interface_signal_detect,
465 ql_log(ql_log_warn, vha, 0x5067,
466 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
467 "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
468 htbt_counter, htbt_monitor_enable,
469 sfp_additonal_info, sfp_multirate);
470 ql_log(ql_log_warn, vha, 0x5068,
471 "sfp_tx_fault=0x%x, link_state=0x%x, "
472 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
475 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
478 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
479 ql_log(ql_log_warn, vha, 0x5069,
480 "Heartbeat Failure encountered, chip reset "
483 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
487 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
488 ql_log(ql_log_info, vha, 0x506a,
489 "IDC Device-State changed = 0x%x.\n", mb[4]);
490 if (ha->flags.nic_core_reset_owner)
492 qla83xx_schedule_work(vha, MBA_IDC_AEN);
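/*
 * qla2x00_is_a_vp_did() - Check whether an RSCN port ID matches the
 * D_ID of one of this adapter's virtual ports.
 */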
497 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
499 struct qla_hw_data *ha = vha->hw;
508 spin_lock_irqsave(&ha->vport_slock, flags);
509 list_for_each_entry(vp, &ha->vp_list, list) {
510 vp_did = vp->d_id.b24;
511 if (vp_did == rscn_entry) {
516 spin_unlock_irqrestore(&ha->vport_slock, flags);
522 * qla2x00_async_event() - Process asynchronous events.
523 * @ha: SCSI driver HA context
524 * @mb: Mailbox registers (0 - 3)
527 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
532 struct qla_hw_data *ha = vha->hw;
533 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
534 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
535 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
536 uint32_t rscn_entry, host_pid;
539 /* Setup to process RIO completion. */
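/* Fast-post (RIO) events pack one or more 16-bit or 32-bit command
 * handles into the mailbox registers; collect them into handles[] and
 * funnel everything through the MBA_SCSI_COMPLETION path below. */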
541 if (IS_CNA_CAPABLE(ha))
544 case MBA_SCSI_COMPLETION:
545 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
548 case MBA_CMPLT_1_16BIT:
551 mb[0] = MBA_SCSI_COMPLETION;
553 case MBA_CMPLT_2_16BIT:
557 mb[0] = MBA_SCSI_COMPLETION;
559 case MBA_CMPLT_3_16BIT:
564 mb[0] = MBA_SCSI_COMPLETION;
566 case MBA_CMPLT_4_16BIT:
570 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
572 mb[0] = MBA_SCSI_COMPLETION;
574 case MBA_CMPLT_5_16BIT:
578 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
579 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
581 mb[0] = MBA_SCSI_COMPLETION;
583 case MBA_CMPLT_2_32BIT:
584 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
585 handles[1] = le32_to_cpu(
586 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
587 RD_MAILBOX_REG(ha, reg, 6));
589 mb[0] = MBA_SCSI_COMPLETION;
596 case MBA_SCSI_COMPLETION: /* Fast Post */
597 if (!vha->flags.online)
600 for (cnt = 0; cnt < handle_cnt; cnt++)
601 qla2x00_process_completed_request(vha, rsp->req,
605 case MBA_RESET: /* Reset */
606 ql_dbg(ql_dbg_async, vha, 0x5002,
607 "Asynchronous RESET.\n");
609 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
612 case MBA_SYSTEM_ERR: /* System Error */
613 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
614 RD_REG_WORD(&reg24->mailbox7) : 0;
615 ql_log(ql_log_warn, vha, 0x5003,
616 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
617 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
619 ha->isp_ops->fw_dump(vha, 1);
621 if (IS_FWI2_CAPABLE(ha)) {
622 if (mb[1] == 0 && mb[2] == 0) {
623 ql_log(ql_log_fatal, vha, 0x5004,
624 "Unrecoverable Hardware Error: adapter "
625 "marked OFFLINE!\n");
626 vha->flags.online = 0;
627 vha->device_flags |= DFLG_DEV_FAILED;
629 /* Check to see if MPI timeout occurred */
630 if ((mbx & MBX_3) && (ha->flags.port0))
631 set_bit(MPI_RESET_NEEDED,
634 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
636 } else if (mb[1] == 0) {
637 ql_log(ql_log_fatal, vha, 0x5005,
638 "Unrecoverable Hardware Error: adapter marked "
640 vha->flags.online = 0;
641 vha->device_flags |= DFLG_DEV_FAILED;
643 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
646 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
647 ql_log(ql_log_warn, vha, 0x5006,
648 "ISP Request Transfer Error (%x).\n", mb[1]);
650 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
653 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
654 ql_log(ql_log_warn, vha, 0x5007,
655 "ISP Response Transfer Error.\n");
657 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
660 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
661 ql_dbg(ql_dbg_async, vha, 0x5008,
662 "Asynchronous WAKEUP_THRES.\n");
665 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
666 ql_dbg(ql_dbg_async, vha, 0x5009,
667 "LIP occurred (%x).\n", mb[1]);
669 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
670 atomic_set(&vha->loop_state, LOOP_DOWN);
671 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
672 qla2x00_mark_all_devices_lost(vha, 1);
676 atomic_set(&vha->vp_state, VP_FAILED);
677 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
680 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
681 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
683 vha->flags.management_server_logged_in = 0;
684 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
687 case MBA_LOOP_UP: /* Loop Up Event */
688 if (IS_QLA2100(ha) || IS_QLA2200(ha))
689 ha->link_data_rate = PORT_SPEED_1GB;
691 ha->link_data_rate = mb[1];
693 ql_dbg(ql_dbg_async, vha, 0x500a,
694 "LOOP UP detected (%s Gbps).\n",
695 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
697 vha->flags.management_server_logged_in = 0;
698 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
701 case MBA_LOOP_DOWN: /* Loop Down Event */
702 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
703 ? RD_REG_WORD(&reg24->mailbox4) : 0;
704 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
705 ql_dbg(ql_dbg_async, vha, 0x500b,
706 "LOOP DOWN detected (%x %x %x %x).\n",
707 mb[1], mb[2], mb[3], mbx);
709 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
710 atomic_set(&vha->loop_state, LOOP_DOWN);
711 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
712 vha->device_flags |= DFLG_NO_CABLE;
713 qla2x00_mark_all_devices_lost(vha, 1);
717 atomic_set(&vha->vp_state, VP_FAILED);
718 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
721 vha->flags.management_server_logged_in = 0;
722 ha->link_data_rate = PORT_SPEED_UNKNOWN;
723 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
726 case MBA_LIP_RESET: /* LIP reset occurred */
727 ql_dbg(ql_dbg_async, vha, 0x500c,
728 "LIP reset occurred (%x).\n", mb[1]);
730 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
731 atomic_set(&vha->loop_state, LOOP_DOWN);
732 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
733 qla2x00_mark_all_devices_lost(vha, 1);
737 atomic_set(&vha->vp_state, VP_FAILED);
738 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
741 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
743 ha->operating_mode = LOOP;
744 vha->flags.management_server_logged_in = 0;
745 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
748 /* case MBA_DCBX_COMPLETE: */
749 case MBA_POINT_TO_POINT: /* Point-to-Point */
753 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
754 ql_dbg(ql_dbg_async, vha, 0x500d,
755 "DCBX Completed -- %04x %04x %04x.\n",
756 mb[1], mb[2], mb[3]);
757 if (ha->notify_dcbx_comp)
758 complete(&ha->dcbx_comp);
761 ql_dbg(ql_dbg_async, vha, 0x500e,
762 "Asynchronous P2P MODE received.\n");
765 * Until there's a transition from loop down to loop up, treat
766 * this as loop down only.
768 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
769 atomic_set(&vha->loop_state, LOOP_DOWN);
770 if (!atomic_read(&vha->loop_down_timer))
771 atomic_set(&vha->loop_down_timer,
773 qla2x00_mark_all_devices_lost(vha, 1);
777 atomic_set(&vha->vp_state, VP_FAILED);
778 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
781 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
782 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
784 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
785 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
787 ha->flags.gpsc_supported = 1;
788 vha->flags.management_server_logged_in = 0;
791 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
795 ql_dbg(ql_dbg_async, vha, 0x500f,
796 "Configuration change detected: value=%x.\n", mb[1]);
798 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
799 atomic_set(&vha->loop_state, LOOP_DOWN);
800 if (!atomic_read(&vha->loop_down_timer))
801 atomic_set(&vha->loop_down_timer,
803 qla2x00_mark_all_devices_lost(vha, 1);
807 atomic_set(&vha->vp_state, VP_FAILED);
808 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
811 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
812 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
815 case MBA_PORT_UPDATE: /* Port database update */
817 * Handle only global and vn-port update events
820 * mb[1] = N_Port handle of changed port
821 * OR 0xffff for global event
822 * mb[2] = New login state
823 * 7 = Port logged out
824 * mb[3] = LSB is vp_idx, 0xff = all vps
826 * Skip processing if:
827 * Event is global, vp_idx is NOT all vps,
828 * vp_idx does not match
829 * Event is not global, vp_idx does not match
831 if (IS_QLA2XXX_MIDTYPE(ha) &&
832 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
833 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
836 /* Global event -- port logout or port unavailable. */
837 if (mb[1] == 0xffff && mb[2] == 0x7) {
838 ql_dbg(ql_dbg_async, vha, 0x5010,
839 "Port unavailable %04x %04x %04x.\n",
840 mb[1], mb[2], mb[3]);
841 ql_log(ql_log_warn, vha, 0x505e,
842 "Link is offline.\n");
844 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
845 atomic_set(&vha->loop_state, LOOP_DOWN);
846 atomic_set(&vha->loop_down_timer,
848 vha->device_flags |= DFLG_NO_CABLE;
849 qla2x00_mark_all_devices_lost(vha, 1);
853 atomic_set(&vha->vp_state, VP_FAILED);
854 fc_vport_set_state(vha->fc_vport,
856 qla2x00_mark_all_devices_lost(vha, 1);
859 vha->flags.management_server_logged_in = 0;
860 ha->link_data_rate = PORT_SPEED_UNKNOWN;
865 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
866 * event etc. earlier indicating loop is down) then process
867 * it. Otherwise ignore it and wait for RSCN to come in.
869 atomic_set(&vha->loop_down_timer, 0);
870 if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
871 ql_dbg(ql_dbg_async, vha, 0x5011,
872 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
873 mb[1], mb[2], mb[3]);
875 qlt_async_event(mb[0], vha, mb);
879 ql_dbg(ql_dbg_async, vha, 0x5012,
880 "Port database changed %04x %04x %04x.\n",
881 mb[1], mb[2], mb[3]);
882 ql_log(ql_log_warn, vha, 0x505f,
883 "Link is operational (%s Gbps).\n",
884 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
887 * Mark all devices as missing so we will login again.
889 atomic_set(&vha->loop_state, LOOP_UP);
891 qla2x00_mark_all_devices_lost(vha, 1);
893 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
894 set_bit(SCR_PENDING, &vha->dpc_flags);
896 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
897 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
899 qlt_async_event(mb[0], vha, mb);
902 case MBA_RSCN_UPDATE: /* State Change Registration */
903 /* Check if the Vport has issued a SCR */
904 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
906 /* Only handle SCNs for our Vport index. */
907 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
910 ql_dbg(ql_dbg_async, vha, 0x5013,
911 "RSCN database changed -- %04x %04x %04x.\n",
912 mb[1], mb[2], mb[3]);
914 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
915 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
917 if (rscn_entry == host_pid) {
918 ql_dbg(ql_dbg_async, vha, 0x5014,
919 "Ignoring RSCN update to local host "
920 "port ID (%06x).\n", host_pid);
924 /* Ignore reserved bits from RSCN-payload. */
925 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
927 /* Skip RSCNs for virtual ports on the same physical port */
928 if (qla2x00_is_a_vp_did(vha, rscn_entry))
931 atomic_set(&vha->loop_down_timer, 0);
932 vha->flags.management_server_logged_in = 0;
934 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
935 set_bit(RSCN_UPDATE, &vha->dpc_flags);
936 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
939 /* case MBA_RIO_RESPONSE: */
940 case MBA_ZIO_RESPONSE:
941 ql_dbg(ql_dbg_async, vha, 0x5015,
942 "[R|Z]IO update completion.\n");
944 if (IS_FWI2_CAPABLE(ha))
945 qla24xx_process_response_queue(vha, rsp);
947 qla2x00_process_response_queue(rsp);
950 case MBA_DISCARD_RND_FRAME:
951 ql_dbg(ql_dbg_async, vha, 0x5016,
952 "Discard RND Frame -- %04x %04x %04x.\n",
953 mb[1], mb[2], mb[3]);
956 case MBA_TRACE_NOTIFICATION:
957 ql_dbg(ql_dbg_async, vha, 0x5017,
958 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
961 case MBA_ISP84XX_ALERT:
962 ql_dbg(ql_dbg_async, vha, 0x5018,
963 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
964 mb[1], mb[2], mb[3]);
966 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
968 case A84_PANIC_RECOVERY:
969 ql_log(ql_log_info, vha, 0x5019,
970 "Alert 84XX: panic recovery %04x %04x.\n",
973 case A84_OP_LOGIN_COMPLETE:
974 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
975 ql_log(ql_log_info, vha, 0x501a,
976 "Alert 84XX: firmware version %x.\n",
977 ha->cs84xx->op_fw_version);
979 case A84_DIAG_LOGIN_COMPLETE:
980 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
981 ql_log(ql_log_info, vha, 0x501b,
982 "Alert 84XX: diagnostic firmware version %x.\n",
983 ha->cs84xx->diag_fw_version);
985 case A84_GOLD_LOGIN_COMPLETE:
986 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
987 ha->cs84xx->fw_update = 1;
988 ql_log(ql_log_info, vha, 0x501c,
989 "Alert 84XX: gold firmware version %x.\n",
990 ha->cs84xx->gold_fw_version);
993 ql_log(ql_log_warn, vha, 0x501d,
994 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
995 mb[1], mb[2], mb[3]);
997 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1000 ql_dbg(ql_dbg_async, vha, 0x501e,
1001 "DCBX Started -- %04x %04x %04x.\n",
1002 mb[1], mb[2], mb[3]);
1004 case MBA_DCBX_PARAM_UPDATE:
1005 ql_dbg(ql_dbg_async, vha, 0x501f,
1006 "DCBX Parameters Updated -- %04x %04x %04x.\n",
1007 mb[1], mb[2], mb[3]);
1009 case MBA_FCF_CONF_ERR:
1010 ql_dbg(ql_dbg_async, vha, 0x5020,
1011 "FCF Configuration Error -- %04x %04x %04x.\n",
1012 mb[1], mb[2], mb[3]);
1014 case MBA_IDC_NOTIFY:
1015 if (IS_QLA8031(vha->hw)) {
1016 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1017 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1018 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1019 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1020 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1022 * Extend loop down timer since port is active.
1024 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1025 atomic_set(&vha->loop_down_timer,
1027 qla2xxx_wake_dpc(vha);
1030 case MBA_IDC_COMPLETE:
1031 if (ha->notify_lb_portup_comp)
1032 complete(&ha->lb_portup_comp);
1034 case MBA_IDC_TIME_EXT:
1035 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
1036 qla81xx_idc_event(vha, mb[0], mb[1]);
1040 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1041 mb[5] = RD_REG_WORD(&reg24->mailbox5);
1042 mb[6] = RD_REG_WORD(&reg24->mailbox6);
1043 mb[7] = RD_REG_WORD(&reg24->mailbox7);
1044 qla83xx_handle_8200_aen(vha, mb);
1048 ql_dbg(ql_dbg_async, vha, 0x5057,
1049 "Unknown AEN:%04x %04x %04x %04x\n",
1050 mb[0], mb[1], mb[2], mb[3]);
1053 qlt_async_event(mb[0], vha, mb);
1055 if (!vha->vp_idx && ha->num_vhosts)
1056 qla2x00_alert_all_vps(rsp, mb);
1060 * qla2x00_process_completed_request() - Process a Fast Post response.
1061 * @ha: SCSI driver HA context
1065 qla2x00_process_completed_request(struct scsi_qla_host *vha,
1066 struct req_que *req, uint32_t index)
1069 struct qla_hw_data *ha = vha->hw;
1071 /* Validate handle. */
1072 if (index >= req->num_outstanding_cmds) {
1073 ql_log(ql_log_warn, vha, 0x3014,
1074 "Invalid SCSI command index (%x).\n", index);
1077 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1079 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1083 sp = req->outstanding_cmds[index];
1085 /* Free outstanding command slot. */
1086 req->outstanding_cmds[index] = NULL;
1088 /* Save ISP completion status */
1089 sp->done(ha, sp, DID_OK << 16);
1091 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1094 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1096 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
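/*
 * qla2x00_get_sp_from_handle() - Validate the handle carried in a
 * response IOCB and return the matching outstanding SRB, clearing its
 * request-queue slot.  An out-of-range index schedules an ISP abort;
 * a missing or mismatched SRB is only logged.
 */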
1101 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1102 struct req_que *req, void *iocb)
1104 struct qla_hw_data *ha = vha->hw;
1105 sts_entry_t *pkt = iocb;
1109 index = LSW(pkt->handle);
1110 if (index >= req->num_outstanding_cmds) {
1111 ql_log(ql_log_warn, vha, 0x5031,
1112 "Invalid command index (%x).\n", index);
1114 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1116 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1119 sp = req->outstanding_cmds[index];
1121 ql_log(ql_log_warn, vha, 0x5032,
1122 "Invalid completion handle (%x) -- timed-out.\n", index);
1125 if (sp->handle != index) {
1126 ql_log(ql_log_warn, vha, 0x5033,
1127 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
1131 req->outstanding_cmds[index] = NULL;
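/*
 * qla2x00_mbx_iocb_entry() - Complete a mailbox IOCB used for login and
 * logout style SRBs, translating the returned mailbox registers into the
 * logio data passed back through sp->done().
 */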
1138 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1139 struct mbx_entry *mbx)
1141 const char func[] = "MBX-IOCB";
1145 struct srb_iocb *lio;
1149 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1153 lio = &sp->u.iocb_cmd;
1155 fcport = sp->fcport;
1156 data = lio->u.logio.data;
1158 data[0] = MBS_COMMAND_ERROR;
1159 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1160 QLA_LOGIO_LOGIN_RETRIED : 0;
1161 if (mbx->entry_status) {
1162 ql_dbg(ql_dbg_async, vha, 0x5043,
1163 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
1164 "entry-status=%x status=%x state-flag=%x "
1165 "status-flags=%x.\n", type, sp->handle,
1166 fcport->d_id.b.domain, fcport->d_id.b.area,
1167 fcport->d_id.b.al_pa, mbx->entry_status,
1168 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
1169 le16_to_cpu(mbx->status_flags));
1171 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
1172 (uint8_t *)mbx, sizeof(*mbx));
1177 status = le16_to_cpu(mbx->status);
1178 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
1179 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1181 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1182 ql_dbg(ql_dbg_async, vha, 0x5045,
1183 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1184 type, sp->handle, fcport->d_id.b.domain,
1185 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1186 le16_to_cpu(mbx->mb1));
1188 data[0] = MBS_COMMAND_COMPLETE;
1189 if (sp->type == SRB_LOGIN_CMD) {
1190 fcport->port_type = FCT_TARGET;
1191 if (le16_to_cpu(mbx->mb1) & BIT_0)
1192 fcport->port_type = FCT_INITIATOR;
1193 else if (le16_to_cpu(mbx->mb1) & BIT_1)
1194 fcport->flags |= FCF_FCP2_DEVICE;
1199 data[0] = le16_to_cpu(mbx->mb0);
1201 case MBS_PORT_ID_USED:
1202 data[1] = le16_to_cpu(mbx->mb1);
1204 case MBS_LOOP_ID_USED:
1207 data[0] = MBS_COMMAND_ERROR;
1211 ql_log(ql_log_warn, vha, 0x5046,
1212 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1213 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1214 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1215 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1216 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1217 le16_to_cpu(mbx->mb7));
1220 sp->done(vha, sp, 0);
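/*
 * qla2x00_ct_entry() - Complete a CT pass-through bsg request, mapping
 * the firmware completion status onto the fc_bsg reply before calling
 * sp->done().
 */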
1224 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1225 sts_entry_t *pkt, int iocb_type)
1227 const char func[] = "CT_IOCB";
1230 struct fc_bsg_job *bsg_job;
1231 uint16_t comp_status;
1234 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1238 bsg_job = sp->u.bsg_job;
1240 type = "ct pass-through";
1242 comp_status = le16_to_cpu(pkt->comp_status);
1244 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1245 * fc payload to the caller
1247 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1248 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1250 if (comp_status != CS_COMPLETE) {
1251 if (comp_status == CS_DATA_UNDERRUN) {
1253 bsg_job->reply->reply_payload_rcv_len =
1254 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1256 ql_log(ql_log_warn, vha, 0x5048,
1257 "CT pass-through-%s error "
1258 "comp_status-status=0x%x total_byte = 0x%x.\n",
1260 bsg_job->reply->reply_payload_rcv_len);
1262 ql_log(ql_log_warn, vha, 0x5049,
1263 "CT pass-through-%s error "
1264 "comp_status-status=0x%x.\n", type, comp_status);
1265 res = DID_ERROR << 16;
1266 bsg_job->reply->reply_payload_rcv_len = 0;
1268 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1269 (uint8_t *)pkt, sizeof(*pkt));
1272 bsg_job->reply->reply_payload_rcv_len =
1273 bsg_job->reply_payload.payload_len;
1274 bsg_job->reply_len = 0;
1277 sp->done(vha, sp, res);
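/*
 * qla24xx_els_ct_entry() - Complete an ELS or CT pass-through bsg
 * request on ISP24xx-class adapters, returning the completion status
 * and ELS error subcodes to the caller.
 */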
1281 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1282 struct sts_entry_24xx *pkt, int iocb_type)
1284 const char func[] = "ELS_CT_IOCB";
1287 struct fc_bsg_job *bsg_job;
1288 uint16_t comp_status;
1289 uint32_t fw_status[3];
1290 uint8_t* fw_sts_ptr;
1293 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1296 bsg_job = sp->u.bsg_job;
1300 case SRB_ELS_CMD_RPT:
1301 case SRB_ELS_CMD_HST:
1305 type = "ct pass-through";
1308 ql_dbg(ql_dbg_user, vha, 0x503e,
1309 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
1313 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1314 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1315 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1317 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1318 * fc payload to the caller
1320 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1321 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
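/* On error, fw_status[] (completion status plus the two ELS error
 * subcodes) is copied just past the fc_bsg_reply in the request's sense
 * buffer so the caller can retrieve the firmware status words. */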
1323 if (comp_status != CS_COMPLETE) {
1324 if (comp_status == CS_DATA_UNDERRUN) {
1326 bsg_job->reply->reply_payload_rcv_len =
1327 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
1329 ql_dbg(ql_dbg_user, vha, 0x503f,
1330 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1331 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1332 type, sp->handle, comp_status, fw_status[1], fw_status[2],
1333 le16_to_cpu(((struct els_sts_entry_24xx *)
1334 pkt)->total_byte_count));
1335 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1336 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1339 ql_dbg(ql_dbg_user, vha, 0x5040,
1340 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1341 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1342 type, sp->handle, comp_status,
1343 le16_to_cpu(((struct els_sts_entry_24xx *)
1344 pkt)->error_subcode_1),
1345 le16_to_cpu(((struct els_sts_entry_24xx *)
1346 pkt)->error_subcode_2));
1347 res = DID_ERROR << 16;
1348 bsg_job->reply->reply_payload_rcv_len = 0;
1349 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1350 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1352 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
1353 (uint8_t *)pkt, sizeof(*pkt));
1357 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1358 bsg_job->reply_len = 0;
1361 sp->done(vha, sp, res);
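/*
 * qla24xx_logio_entry() - Complete an ISP24xx login/logout IOCB,
 * decoding the completion status and I/O parameters into the logio data
 * (port type, FCP-2 support, class of service) for the caller.
 */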
1365 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1366 struct logio_entry_24xx *logio)
1368 const char func[] = "LOGIO-IOCB";
1372 struct srb_iocb *lio;
1376 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1380 lio = &sp->u.iocb_cmd;
1382 fcport = sp->fcport;
1383 data = lio->u.logio.data;
1385 data[0] = MBS_COMMAND_ERROR;
1386 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1387 QLA_LOGIO_LOGIN_RETRIED : 0;
1388 if (logio->entry_status) {
1389 ql_log(ql_log_warn, fcport->vha, 0x5034,
1390 "Async-%s error entry - hdl=%x"
1391 "portid=%02x%02x%02x entry-status=%x.\n",
1392 type, sp->handle, fcport->d_id.b.domain,
1393 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1394 logio->entry_status);
1395 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
1396 (uint8_t *)logio, sizeof(*logio));
1401 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1402 ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
1403 "Async-%s complete - hdl=%x portid=%02x%02x%02x "
1404 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1405 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1406 le32_to_cpu(logio->io_parameter[0]));
1408 data[0] = MBS_COMMAND_COMPLETE;
1409 if (sp->type != SRB_LOGIN_CMD)
1412 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1413 if (iop[0] & BIT_4) {
1414 fcport->port_type = FCT_TARGET;
1416 fcport->flags |= FCF_FCP2_DEVICE;
1417 } else if (iop[0] & BIT_5)
1418 fcport->port_type = FCT_INITIATOR;
1421 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1423 if (logio->io_parameter[7] || logio->io_parameter[8])
1424 fcport->supported_classes |= FC_COS_CLASS2;
1425 if (logio->io_parameter[9] || logio->io_parameter[10])
1426 fcport->supported_classes |= FC_COS_CLASS3;
1431 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1432 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1434 case LSC_SCODE_PORTID_USED:
1435 data[0] = MBS_PORT_ID_USED;
1436 data[1] = LSW(iop[1]);
1438 case LSC_SCODE_NPORT_USED:
1439 data[0] = MBS_LOOP_ID_USED;
1442 data[0] = MBS_COMMAND_ERROR;
1446 ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
1447 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
1448 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1449 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1450 le16_to_cpu(logio->comp_status),
1451 le32_to_cpu(logio->io_parameter[0]),
1452 le32_to_cpu(logio->io_parameter[1]));
1455 sp->done(vha, sp, 0);
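/*
 * qla24xx_tm_iocb_entry() - Complete a task-management IOCB, validating
 * the status entry and FCP response code before reporting the result in
 * iocb->u.tmf.data.
 */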
1459 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1460 struct tsk_mgmt_entry *tsk)
1462 const char func[] = "TMF-IOCB";
1466 struct srb_iocb *iocb;
1467 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1470 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1474 iocb = &sp->u.iocb_cmd;
1476 fcport = sp->fcport;
1478 if (sts->entry_status) {
1479 ql_log(ql_log_warn, fcport->vha, 0x5038,
1480 "Async-%s error - hdl=%x entry-status(%x).\n",
1481 type, sp->handle, sts->entry_status);
1482 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1483 ql_log(ql_log_warn, fcport->vha, 0x5039,
1484 "Async-%s error - hdl=%x completion status(%x).\n",
1485 type, sp->handle, sts->comp_status);
1486 } else if (!(le16_to_cpu(sts->scsi_status) &
1487 SS_RESPONSE_INFO_LEN_VALID)) {
1488 ql_log(ql_log_warn, fcport->vha, 0x503a,
1489 "Async-%s error - hdl=%x no response info(%x).\n",
1490 type, sp->handle, sts->scsi_status);
1491 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1492 ql_log(ql_log_warn, fcport->vha, 0x503b,
1493 "Async-%s error - hdl=%x not enough response(%d).\n",
1494 type, sp->handle, sts->rsp_data_len);
1495 } else if (sts->data[3]) {
1496 ql_log(ql_log_warn, fcport->vha, 0x503c,
1497 "Async-%s error - hdl=%x response(%x).\n",
1498 type, sp->handle, sts->data[3]);
1504 iocb->u.tmf.data = error;
1505 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1506 (uint8_t *)sts, sizeof(*sts));
1509 sp->done(vha, sp, 0);
1513 * qla2x00_process_response_queue() - Process response queue entries.
1514 * @ha: SCSI driver HA context
1517 qla2x00_process_response_queue(struct rsp_que *rsp)
1519 struct scsi_qla_host *vha;
1520 struct qla_hw_data *ha = rsp->hw;
1521 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1523 uint16_t handle_cnt;
1526 vha = pci_get_drvdata(ha->pdev);
1528 if (!vha->flags.online)
1531 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1532 pkt = (sts_entry_t *)rsp->ring_ptr;
1535 if (rsp->ring_index == rsp->length) {
1536 rsp->ring_index = 0;
1537 rsp->ring_ptr = rsp->ring;
1542 if (pkt->entry_status != 0) {
1543 qla2x00_error_entry(vha, rsp, pkt);
1544 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1549 switch (pkt->entry_type) {
1551 qla2x00_status_entry(vha, rsp, pkt);
1553 case STATUS_TYPE_21:
1554 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1555 for (cnt = 0; cnt < handle_cnt; cnt++) {
1556 qla2x00_process_completed_request(vha, rsp->req,
1557 ((sts21_entry_t *)pkt)->handle[cnt]);
1560 case STATUS_TYPE_22:
1561 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1562 for (cnt = 0; cnt < handle_cnt; cnt++) {
1563 qla2x00_process_completed_request(vha, rsp->req,
1564 ((sts22_entry_t *)pkt)->handle[cnt]);
1567 case STATUS_CONT_TYPE:
1568 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1571 qla2x00_mbx_iocb_entry(vha, rsp->req,
1572 (struct mbx_entry *)pkt);
1575 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1578 /* Type Not Supported. */
1579 ql_log(ql_log_warn, vha, 0x504a,
1580 "Received unknown response pkt type %x "
1581 "entry status=%x.\n",
1582 pkt->entry_type, pkt->entry_status);
1585 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1589 /* Adjust ring index */
1590 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
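/*
 * qla2x00_handle_sense() - Copy sense data into the midlayer's sense
 * buffer; any sense bytes that do not fit in this IOCB are tracked via
 * rsp->status_srb and gathered from status-continuation entries.
 */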
1594 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1595 uint32_t sense_len, struct rsp_que *rsp, int res)
1597 struct scsi_qla_host *vha = sp->fcport->vha;
1598 struct scsi_cmnd *cp = GET_CMD_SP(sp);
1599 uint32_t track_sense_len;
1601 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1602 sense_len = SCSI_SENSE_BUFFERSIZE;
1604 SET_CMD_SENSE_LEN(sp, sense_len);
1605 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
1606 track_sense_len = sense_len;
1608 if (sense_len > par_sense_len)
1609 sense_len = par_sense_len;
1611 memcpy(cp->sense_buffer, sense_data, sense_len);
1613 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
1614 track_sense_len -= sense_len;
1615 SET_CMD_SENSE_LEN(sp, track_sense_len);
1617 if (track_sense_len != 0) {
1618 rsp->status_srb = sp;
1623 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
1624 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
1625 sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
1627 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1628 cp->sense_buffer, sense_len);
1632 struct scsi_dif_tuple {
1633 __be16 guard; /* Checksum */
1634 __be16 app_tag; /* APPL identifier */
1635 __be32 ref_tag; /* Target LBA or indirect LBA */
1639 * Checks the guard or meta-data for the type of error
1640 * detected by the HBA. In case of errors, we set the
1641 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
1642 * to indicate to the kernel that the HBA detected an error.
1645 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1647 struct scsi_qla_host *vha = sp->fcport->vha;
1648 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1649 uint8_t *ap = &sts24->data[12];
1650 uint8_t *ep = &sts24->data[20];
1651 uint32_t e_ref_tag, a_ref_tag;
1652 uint16_t e_app_tag, a_app_tag;
1653 uint16_t e_guard, a_guard;
1656 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
1657 * would make guard field appear at offset 2
1659 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
1660 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
1661 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
1662 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
1663 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
1664 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
1666 ql_dbg(ql_dbg_io, vha, 0x3023,
1667 "iocb(s) %p Returned STATUS.\n", sts24);
1669 ql_dbg(ql_dbg_io, vha, 0x3024,
1670 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1671 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1672 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
1673 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1674 a_app_tag, e_app_tag, a_guard, e_guard);
1678 * For type 3: ref & app tag is all 'f's
1679 * For type 0,1,2: app tag is all 'f's
1681 if ((a_app_tag == 0xffff) &&
1682 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
1683 (a_ref_tag == 0xffffffff))) {
1684 uint32_t blocks_done, resid;
1685 sector_t lba_s = scsi_get_lba(cmd);
1687 /* 2TB boundary case covered automatically with this */
1688 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
1690 resid = scsi_bufflen(cmd) - (blocks_done *
1691 cmd->device->sector_size);
1693 scsi_set_resid(cmd, resid);
1694 cmd->result = DID_OK << 16;
1696 /* Update protection tag */
1697 if (scsi_prot_sg_count(cmd)) {
1698 uint32_t i, j = 0, k = 0, num_ent;
1699 struct scatterlist *sg;
1700 struct sd_dif_tuple *spt;
1702 /* Patch the corresponding protection tags */
1703 scsi_for_each_prot_sg(cmd, sg,
1704 scsi_prot_sg_count(cmd), i) {
1705 num_ent = sg_dma_len(sg) / 8;
1706 if (k + num_ent < blocks_done) {
1710 j = blocks_done - k - 1;
1715 if (k != blocks_done) {
1716 ql_log(ql_log_warn, vha, 0x302f,
1717 "unexpected tag values tag:lba=%x:%llx)\n",
1718 e_ref_tag, (unsigned long long)lba_s);
1722 spt = page_address(sg_page(sg)) + sg->offset;
1725 spt->app_tag = 0xffff;
1726 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
1727 spt->ref_tag = 0xffffffff;
1734 if (e_guard != a_guard) {
1735 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1737 set_driver_byte(cmd, DRIVER_SENSE);
1738 set_host_byte(cmd, DID_ABORT);
1739 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1744 if (e_ref_tag != a_ref_tag) {
1745 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1747 set_driver_byte(cmd, DRIVER_SENSE);
1748 set_host_byte(cmd, DID_ABORT);
1749 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1753 /* check appl tag */
1754 if (e_app_tag != a_app_tag) {
1755 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1757 set_driver_byte(cmd, DRIVER_SENSE);
1758 set_host_byte(cmd, DID_ABORT);
1759 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
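/*
 * qla25xx_process_bidir_status_iocb() - Complete a bidirectional bsg
 * command, translating the firmware completion status into an
 * EXT_STATUS_* code returned in the vendor-specific reply.
 */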
1767 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1768 struct req_que *req, uint32_t index)
1770 struct qla_hw_data *ha = vha->hw;
1772 uint16_t comp_status;
1773 uint16_t scsi_status;
1775 uint32_t rval = EXT_STATUS_OK;
1776 struct fc_bsg_job *bsg_job = NULL;
1778 struct sts_entry_24xx *sts24;
1779 sts = (sts_entry_t *) pkt;
1780 sts24 = (struct sts_entry_24xx *) pkt;
1782 /* Validate handle. */
1783 if (index >= req->num_outstanding_cmds) {
1784 ql_log(ql_log_warn, vha, 0x70af,
1785 "Invalid SCSI completion handle 0x%x.\n", index);
1786 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1790 sp = req->outstanding_cmds[index];
1792 /* Free outstanding command slot. */
1793 req->outstanding_cmds[index] = NULL;
1794 bsg_job = sp->u.bsg_job;
1796 ql_log(ql_log_warn, vha, 0x70b0,
1797 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
1800 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1804 if (IS_FWI2_CAPABLE(ha)) {
1805 comp_status = le16_to_cpu(sts24->comp_status);
1806 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1808 comp_status = le16_to_cpu(sts->comp_status);
1809 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1812 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1813 switch (comp_status) {
1815 if (scsi_status == 0) {
1816 bsg_job->reply->reply_payload_rcv_len =
1817 bsg_job->reply_payload.payload_len;
1818 rval = EXT_STATUS_OK;
1822 case CS_DATA_OVERRUN:
1823 ql_dbg(ql_dbg_user, vha, 0x70b1,
1824 "Command completed with date overrun thread_id=%d\n",
1826 rval = EXT_STATUS_DATA_OVERRUN;
1829 case CS_DATA_UNDERRUN:
1830 ql_dbg(ql_dbg_user, vha, 0x70b2,
1831 "Command completed with date underrun thread_id=%d\n",
1833 rval = EXT_STATUS_DATA_UNDERRUN;
1835 case CS_BIDIR_RD_OVERRUN:
1836 ql_dbg(ql_dbg_user, vha, 0x70b3,
1837 "Command completed with read data overrun thread_id=%d\n",
1839 rval = EXT_STATUS_DATA_OVERRUN;
1842 case CS_BIDIR_RD_WR_OVERRUN:
1843 ql_dbg(ql_dbg_user, vha, 0x70b4,
1844 "Command completed with read and write data overrun "
1845 "thread_id=%d\n", thread_id);
1846 rval = EXT_STATUS_DATA_OVERRUN;
1849 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
1850 ql_dbg(ql_dbg_user, vha, 0x70b5,
1851 "Command completed with read data over and write data "
1852 "underrun thread_id=%d\n", thread_id);
1853 rval = EXT_STATUS_DATA_OVERRUN;
1856 case CS_BIDIR_RD_UNDERRUN:
1857 ql_dbg(ql_dbg_user, vha, 0x70b6,
1858 "Command completed with read data data underrun "
1859 "thread_id=%d\n", thread_id);
1860 rval = EXT_STATUS_DATA_UNDERRUN;
1863 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
1864 ql_dbg(ql_dbg_user, vha, 0x70b7,
1865 "Command completed with read data under and write data "
1866 "overrun thread_id=%d\n", thread_id);
1867 rval = EXT_STATUS_DATA_UNDERRUN;
1870 case CS_BIDIR_RD_WR_UNDERRUN:
1871 ql_dbg(ql_dbg_user, vha, 0x70b8,
1872 "Command completed with read and write data underrun "
1873 "thread_id=%d\n", thread_id);
1874 rval = EXT_STATUS_DATA_UNDERRUN;
1878 ql_dbg(ql_dbg_user, vha, 0x70b9,
1879 "Command completed with data DMA error thread_id=%d\n",
1881 rval = EXT_STATUS_DMA_ERR;
1885 ql_dbg(ql_dbg_user, vha, 0x70ba,
1886 "Command completed with timeout thread_id=%d\n",
1888 rval = EXT_STATUS_TIMEOUT;
1891 ql_dbg(ql_dbg_user, vha, 0x70bb,
1892 "Command completed with completion status=0x%x "
1893 "thread_id=%d\n", comp_status, thread_id);
1894 rval = EXT_STATUS_ERR;
1897 bsg_job->reply->reply_payload_rcv_len = 0;
1900 /* Return the vendor specific reply to API */
1901 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1902 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1903 /* Always return DID_OK, bsg will send the vendor specific response
1904 * in this case only */
1905 sp->done(vha, sp, (DID_OK << 16));
1910 * qla2x00_status_entry() - Process a Status IOCB entry.
1911 * @ha: SCSI driver HA context
1912 * @pkt: Entry pointer
1915 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1919 struct scsi_cmnd *cp;
1921 struct sts_entry_24xx *sts24;
1922 uint16_t comp_status;
1923 uint16_t scsi_status;
1925 uint8_t lscsi_status;
1927 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1929 uint8_t *rsp_info, *sense_data;
1930 struct qla_hw_data *ha = vha->hw;
1933 struct req_que *req;
1936 uint16_t state_flags = 0;
1938 sts = (sts_entry_t *) pkt;
1939 sts24 = (struct sts_entry_24xx *) pkt;
1940 if (IS_FWI2_CAPABLE(ha)) {
1941 comp_status = le16_to_cpu(sts24->comp_status);
1942 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1943 state_flags = le16_to_cpu(sts24->state_flags);
1945 comp_status = le16_to_cpu(sts->comp_status);
1946 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1948 handle = (uint32_t) LSW(sts->handle);
1949 que = MSW(sts->handle);
1950 req = ha->req_q_map[que];
1952 /* Validate handle. */
1953 if (handle < req->num_outstanding_cmds)
1954 sp = req->outstanding_cmds[handle];
1959 ql_dbg(ql_dbg_io, vha, 0x3017,
1960 "Invalid status handle (0x%x).\n", sts->handle);
1963 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1965 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1966 qla2xxx_wake_dpc(vha);
1970 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
1971 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
1975 /* Fast path completion. */
1976 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1977 qla2x00_do_host_ramp_up(vha);
1978 qla2x00_process_completed_request(vha, req, handle);
1983 req->outstanding_cmds[handle] = NULL;
1984 cp = GET_CMD_SP(sp);
1986 ql_dbg(ql_dbg_io, vha, 0x3018,
1987 "Command already returned (0x%x/%p).\n",
1993 lscsi_status = scsi_status & STATUS_MASK;
1995 fcport = sp->fcport;
1998 sense_len = par_sense_len = rsp_info_len = resid_len =
2000 if (IS_FWI2_CAPABLE(ha)) {
2001 if (scsi_status & SS_SENSE_LEN_VALID)
2002 sense_len = le32_to_cpu(sts24->sense_len);
2003 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2004 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2005 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2006 resid_len = le32_to_cpu(sts24->rsp_residual_count);
2007 if (comp_status == CS_DATA_UNDERRUN)
2008 fw_resid_len = le32_to_cpu(sts24->residual_len);
2009 rsp_info = sts24->data;
2010 sense_data = sts24->data;
2011 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2012 ox_id = le16_to_cpu(sts24->ox_id);
2013 par_sense_len = sizeof(sts24->data);
2015 if (scsi_status & SS_SENSE_LEN_VALID)
2016 sense_len = le16_to_cpu(sts->req_sense_length);
2017 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2018 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2019 resid_len = le32_to_cpu(sts->residual_length);
2020 rsp_info = sts->rsp_info;
2021 sense_data = sts->req_sense_data;
2022 par_sense_len = sizeof(sts->req_sense_data);
2025 /* Check for any FCP transport errors. */
2026 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
2027 /* Sense data lies beyond any FCP RESPONSE data. */
2028 if (IS_FWI2_CAPABLE(ha)) {
2029 sense_data += rsp_info_len;
2030 par_sense_len -= rsp_info_len;
2032 if (rsp_info_len > 3 && rsp_info[3]) {
2033 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
2034 "FCP I/O protocol failure (0x%x/0x%x).\n",
2035 rsp_info_len, rsp_info[3]);
2037 res = DID_BUS_BUSY << 16;
2042 /* Check for overrun. */
2043 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2044 scsi_status & SS_RESIDUAL_OVER)
2045 comp_status = CS_DATA_OVERRUN;
2048 * Based on Host and scsi status generate status code for Linux
2050 switch (comp_status) {
2053 if (scsi_status == 0) {
2057 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2059 scsi_set_resid(cp, resid);
2061 if (!lscsi_status &&
2062 ((unsigned)(scsi_bufflen(cp) - resid) <
2064 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
2065 "Mid-layer underflow "
2066 "detected (0x%x of 0x%x bytes).\n",
2067 resid, scsi_bufflen(cp));
2069 res = DID_ERROR << 16;
2073 res = DID_OK << 16 | lscsi_status;
2075 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2076 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
2077 "QUEUE FULL detected.\n");
2081 if (lscsi_status != SS_CHECK_CONDITION)
2084 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2085 if (!(scsi_status & SS_SENSE_LEN_VALID))
2088 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2092 case CS_DATA_UNDERRUN:
2093 /* Use F/W calculated residual length. */
2094 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2095 scsi_set_resid(cp, resid);
2096 if (scsi_status & SS_RESIDUAL_UNDER) {
2097 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
2098 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
2099 "Dropped frame(s) detected "
2100 "(0x%x of 0x%x bytes).\n",
2101 resid, scsi_bufflen(cp));
2103 res = DID_ERROR << 16 | lscsi_status;
2104 goto check_scsi_status;
2107 if (!lscsi_status &&
2108 ((unsigned)(scsi_bufflen(cp) - resid) <
2110 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
2111 "Mid-layer underflow "
2112 "detected (0x%x of 0x%x bytes).\n",
2113 resid, scsi_bufflen(cp));
2115 res = DID_ERROR << 16;
2118 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2119 lscsi_status != SAM_STAT_BUSY) {
2121 * A SCSI status of task-set-full or busy is considered to
2122 * mean the task was not completed.
2125 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
2126 "Dropped frame(s) detected (0x%x "
2127 "of 0x%x bytes).\n", resid,
2130 res = DID_ERROR << 16 | lscsi_status;
2131 goto check_scsi_status;
2133 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2134 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2135 scsi_status, lscsi_status);
2138 res = DID_OK << 16 | lscsi_status;
2143 * Check to see if SCSI Status is non zero. If so report SCSI
2146 if (lscsi_status != 0) {
2147 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2148 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2149 "QUEUE FULL detected.\n");
2153 if (lscsi_status != SS_CHECK_CONDITION)
2156 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2157 if (!(scsi_status & SS_SENSE_LEN_VALID))
2160 qla2x00_handle_sense(sp, sense_data, par_sense_len,
2161 sense_len, rsp, res);
2165 case CS_PORT_LOGGED_OUT:
2166 case CS_PORT_CONFIG_CHG:
2169 case CS_PORT_UNAVAILABLE:
2174 * We are going to have the fc class block the rport
2175 * while we try to recover so instruct the mid layer
2176 * to requeue until the class decides how to handle this.
2178 res = DID_TRANSPORT_DISRUPTED << 16;
2180 if (comp_status == CS_TIMEOUT) {
2181 if (IS_FWI2_CAPABLE(ha))
2183 else if ((le16_to_cpu(sts->status_flags) &
2184 SF_LOGOUT_SENT) == 0)
2188 ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
2189 "Port down status: port-state=0x%x.\n",
2190 atomic_read(&fcport->state));
2192 if (atomic_read(&fcport->state) == FCS_ONLINE)
2193 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2197 res = DID_RESET << 16;
2201 logit = qla2x00_handle_dif_error(sp, sts24);
2206 res = DID_ERROR << 16;
2208 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2211 if (state_flags & BIT_4)
2212 scmd_printk(KERN_WARNING, cp,
2213 "Unsupported device '%s' found.\n",
2214 cp->device->vendor);
2218 res = DID_ERROR << 16;
2224 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
2225 "FCP command status: 0x%x-0x%x (0x%x) "
2226 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
2227 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
2228 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
2229 comp_status, scsi_status, res, vha->host_no,
2230 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2231 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2232 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
2233 cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
2234 cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
2235 resid_len, fw_resid_len);
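/* Allow the host queue depth to ramp back up after I/O completes cleanly. */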
2238 qla2x00_do_host_ramp_up(vha);
2240 if (rsp->status_srb == NULL)
2241 sp->done(ha, sp, res);
2245 * qla2x00_status_cont_entry() - Process a Status Continuation entry.
2246 * @rsp: response queue
2247 * @pkt: Entry pointer
2249 * Extended sense data.
2252 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2254 uint8_t sense_sz = 0;
2255 struct qla_hw_data *ha = rsp->hw;
2256 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2257 srb_t *sp = rsp->status_srb;
2258 struct scsi_cmnd *cp;
2262 if (!sp || !GET_CMD_SENSE_LEN(sp))
2265 sense_len = GET_CMD_SENSE_LEN(sp);
2266 sense_ptr = GET_CMD_SENSE_PTR(sp);
2268 cp = GET_CMD_SP(sp);
2270 ql_log(ql_log_warn, vha, 0x3025,
2271 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2273 rsp->status_srb = NULL;
2277 if (sense_len > sizeof(pkt->data))
2278 sense_sz = sizeof(pkt->data);
2280 sense_sz = sense_len;
2282 /* Move sense data. */
2283 if (IS_FWI2_CAPABLE(ha))
2284 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2285 memcpy(sense_ptr, pkt->data, sense_sz);
2286 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2287 sense_ptr, sense_sz);
2289 sense_len -= sense_sz;
2290 sense_ptr += sense_sz;
2292 SET_CMD_SENSE_PTR(sp, sense_ptr);
2293 SET_CMD_SENSE_LEN(sp, sense_len);
2295 /* Place command on done queue. */
2296 if (sense_len == 0) {
2297 rsp->status_srb = NULL;
2298 sp->done(ha, sp, cp->result);
2303 * qla2x00_error_entry() - Process an error entry.
2304 * @vha: SCSI driver HA context
2305 * @pkt: Entry pointer
2308 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2311 struct qla_hw_data *ha = vha->hw;
2312 const char func[] = "ERROR-IOCB";
2313 uint16_t que = MSW(pkt->handle);
2314 struct req_que *req = NULL;
2315 int res = DID_ERROR << 16;
2317 ql_dbg(ql_dbg_async, vha, 0x502a,
2318 "type of error status in response: 0x%x\n", pkt->entry_status);
2320 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2323 req = ha->req_q_map[que];
2325 if (pkt->entry_status & RF_BUSY)
2326 res = DID_BUS_BUSY << 16;
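/* Look up the outstanding srb for this handle and complete it with the
 * error status. */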
2328 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2330 sp->done(ha, sp, res);
2334 ql_log(ql_log_warn, vha, 0x5030,
2335 "Error entry - invalid handle/queue.\n");
2338 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2340 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2341 qla2xxx_wake_dpc(vha);
2345 * qla24xx_mbx_completion() - Process mailbox command completions.
2346 * @vha: SCSI driver HA context
2347 * @mb0: Mailbox0 register
2350 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2354 uint16_t __iomem *wptr;
2355 struct qla_hw_data *ha = vha->hw;
2356 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2358 /* Read all mbox registers? */
2359 mboxes = (1 << ha->mbx_count) - 1;
2361 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2363 mboxes = ha->mcp->in_mb;
2365 /* Load return mailbox registers. */
2366 ha->flags.mbox_int = 1;
2367 ha->mailbox_out[0] = mb0;
2369 wptr = (uint16_t __iomem *)&reg->mailbox1;
2371 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2373 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2381 * qla24xx_process_response_queue() - Process response queue entries.
2382 * @vha: SCSI driver HA context
2384 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2385 struct rsp_que *rsp)
2387 struct sts_entry_24xx *pkt;
2388 struct qla_hw_data *ha = vha->hw;
2390 if (!vha->flags.online)
2393 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2394 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2397 if (rsp->ring_index == rsp->length) {
2398 rsp->ring_index = 0;
2399 rsp->ring_ptr = rsp->ring;
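/* Entries flagged with a non-zero entry_status are malformed; hand them
 * to the error path (and to the target-mode error handler). */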
2404 if (pkt->entry_status != 0) {
2405 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2407 (void)qlt_24xx_process_response_error(vha, pkt);
2409 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2414 switch (pkt->entry_type) {
2416 qla2x00_status_entry(vha, rsp, pkt);
2418 case STATUS_CONT_TYPE:
2419 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2421 case VP_RPT_ID_IOCB_TYPE:
2422 qla24xx_report_id_acquisition(vha,
2423 (struct vp_rpt_id_entry_24xx *)pkt);
2425 case LOGINOUT_PORT_IOCB_TYPE:
2426 qla24xx_logio_entry(vha, rsp->req,
2427 (struct logio_entry_24xx *)pkt);
2429 case TSK_MGMT_IOCB_TYPE:
2430 qla24xx_tm_iocb_entry(vha, rsp->req,
2431 (struct tsk_mgmt_entry *)pkt);
2434 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2437 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2439 case ABTS_RECV_24XX:
2440 /* ensure that the ATIO queue is empty */
2441 qlt_24xx_process_atio_queue(vha);
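/* Intentional fall-through: ABTS packets share the target-mode
 * handling below. */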
2442 case ABTS_RESP_24XX:
2444 case NOTIFY_ACK_TYPE:
2445 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2448 /* Do nothing in this case; the check only prevents it
2449 * from falling into the default case.
2453 /* Type Not Supported. */
2454 ql_dbg(ql_dbg_async, vha, 0x5042,
2455 "Received unknown response pkt type %x "
2456 "entry status=%x.\n",
2457 pkt->entry_type, pkt->entry_status);
2460 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2464 /* Adjust ring index */
2465 if (IS_QLA82XX(ha)) {
2466 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2467 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2469 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
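/* Inspect RISC diagnostic registers through the I/O base window after a
 * reported RISC pause. */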
2473 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2477 struct qla_hw_data *ha = vha->hw;
2478 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2480 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
2484 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2485 RD_REG_DWORD(&reg->iobase_addr);
2486 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2487 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2488 rval == QLA_SUCCESS; cnt--) {
2490 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2493 rval = QLA_FUNCTION_TIMEOUT;
2495 if (rval == QLA_SUCCESS)
2498 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2499 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2500 rval == QLA_SUCCESS; cnt--) {
2502 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2505 rval = QLA_FUNCTION_TIMEOUT;
2507 if (rval != QLA_SUCCESS)
2511 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2512 ql_log(ql_log_info, vha, 0x504c,
2513 "Additional code -- 0x55AA.\n");
2516 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2517 RD_REG_DWORD(&reg->iobase_window);
2521 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
2523 * @dev_id: SCSI driver HA context
2525 * Called by system whenever the host adapter generates an interrupt.
2527 * Returns handled flag.
2530 qla24xx_intr_handler(int irq, void *dev_id)
2532 scsi_qla_host_t *vha;
2533 struct qla_hw_data *ha;
2534 struct device_reg_24xx __iomem *reg;
2540 struct rsp_que *rsp;
2541 unsigned long flags;
2543 rsp = (struct rsp_que *) dev_id;
2545 ql_log(ql_log_info, NULL, 0x5059,
2546 "%s: NULL response queue pointer.\n", __func__);
2551 reg = &ha->iobase->isp24;
2554 if (unlikely(pci_channel_offline(ha->pdev)))
2557 spin_lock_irqsave(&ha->hardware_lock, flags);
2558 vha = pci_get_drvdata(ha->pdev);
2559 for (iter = 50; iter--; ) {
2560 stat = RD_REG_DWORD(&reg->host_status);
2561 if (stat & HSRX_RISC_PAUSED) {
2562 if (unlikely(pci_channel_offline(ha->pdev)))
2565 hccr = RD_REG_DWORD(&reg->hccr);
2567 ql_log(ql_log_warn, vha, 0x504b,
2568 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2571 qla2xxx_check_risc_status(vha);
2573 ha->isp_ops->fw_dump(vha, 1);
2574 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2576 } else if ((stat & HSRX_RISC_INT) == 0)
2579 switch (stat & 0xff) {
2580 case INTR_ROM_MB_SUCCESS:
2581 case INTR_ROM_MB_FAILED:
2582 case INTR_MB_SUCCESS:
2583 case INTR_MB_FAILED:
2584 qla24xx_mbx_completion(vha, MSW(stat));
2585 status |= MBX_INTERRUPT;
2588 case INTR_ASYNC_EVENT:
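/* Asynchronous event: gather the remaining mailbox registers and
 * dispatch the event. */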
2590 mb[1] = RD_REG_WORD(&reg->mailbox1);
2591 mb[2] = RD_REG_WORD(&reg->mailbox2);
2592 mb[3] = RD_REG_WORD(&reg->mailbox3);
2593 qla2x00_async_event(vha, rsp, mb);
2595 case INTR_RSP_QUE_UPDATE:
2596 case INTR_RSP_QUE_UPDATE_83XX:
2597 qla24xx_process_response_queue(vha, rsp);
2599 case INTR_ATIO_QUE_UPDATE:
2600 qlt_24xx_process_atio_queue(vha);
2602 case INTR_ATIO_RSP_QUE_UPDATE:
2603 qlt_24xx_process_atio_queue(vha);
2604 qla24xx_process_response_queue(vha, rsp);
2607 ql_dbg(ql_dbg_async, vha, 0x504f,
2608 "Unrecognized interrupt type (%d).\n", stat * 0xff);
2611 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2612 RD_REG_DWORD_RELAXED(&reg->hccr);
2613 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2616 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2618 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2619 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2620 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2621 complete(&ha->mbx_intr_comp);
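/* MSI-X handler: base response-queue update vector. */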
2628 qla24xx_msix_rsp_q(int irq, void *dev_id)
2630 struct qla_hw_data *ha;
2631 struct rsp_que *rsp;
2632 struct device_reg_24xx __iomem *reg;
2633 struct scsi_qla_host *vha;
2634 unsigned long flags;
2636 rsp = (struct rsp_que *) dev_id;
2638 ql_log(ql_log_info, NULL, 0x505a,
2639 "%s: NULL response queue pointer.\n", __func__);
2643 reg = &ha->iobase->isp24;
2645 spin_lock_irqsave(&ha->hardware_lock, flags);
2647 vha = pci_get_drvdata(ha->pdev);
2648 qla24xx_process_response_queue(vha, rsp);
2649 if (!ha->flags.disable_msix_handshake) {
2650 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2651 RD_REG_DWORD_RELAXED(&reg->hccr);
2653 spin_unlock_irqrestore(&ha->hardware_lock, flags);
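/* MSI-X handler: extra (multiqueue) response queue; processing is
 * deferred to the queue's work item on its bound CPU. */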
2659 qla25xx_msix_rsp_q(int irq, void *dev_id)
2661 struct qla_hw_data *ha;
2662 struct rsp_que *rsp;
2663 struct device_reg_24xx __iomem *reg;
2664 unsigned long flags;
2666 rsp = (struct rsp_que *) dev_id;
2668 ql_log(ql_log_info, NULL, 0x505b,
2669 "%s: NULL response queue pointer.\n", __func__);
2674 /* Clear the interrupt, if enabled, for this response queue */
2675 if (!ha->flags.disable_msix_handshake) {
2676 reg = &ha->iobase->isp24;
2677 spin_lock_irqsave(&ha->hardware_lock, flags);
2678 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2679 RD_REG_DWORD_RELAXED(&reg->hccr);
2680 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2682 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
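/* MSI-X handler: default vector -- mailbox completions, async events,
 * and base response/ATIO queue processing. */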
2688 qla24xx_msix_default(int irq, void *dev_id)
2690 scsi_qla_host_t *vha;
2691 struct qla_hw_data *ha;
2692 struct rsp_que *rsp;
2693 struct device_reg_24xx __iomem *reg;
2698 unsigned long flags;
2700 rsp = (struct rsp_que *) dev_id;
2702 ql_log(ql_log_info, NULL, 0x505c,
2703 "%s: NULL response queue pointer.\n", __func__);
2707 reg = &ha->iobase->isp24;
2710 spin_lock_irqsave(&ha->hardware_lock, flags);
2711 vha = pci_get_drvdata(ha->pdev);
2713 stat = RD_REG_DWORD(&reg->host_status);
2714 if (stat & HSRX_RISC_PAUSED) {
2715 if (unlikely(pci_channel_offline(ha->pdev)))
2718 hccr = RD_REG_DWORD(&reg->hccr);
2720 ql_log(ql_log_info, vha, 0x5050,
2721 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2724 qla2xxx_check_risc_status(vha);
2726 ha->isp_ops->fw_dump(vha, 1);
2727 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2729 } else if ((stat & HSRX_RISC_INT) == 0)
2732 switch (stat & 0xff) {
2733 case INTR_ROM_MB_SUCCESS:
2734 case INTR_ROM_MB_FAILED:
2735 case INTR_MB_SUCCESS:
2736 case INTR_MB_FAILED:
2737 qla24xx_mbx_completion(vha, MSW(stat));
2738 status |= MBX_INTERRUPT;
2741 case INTR_ASYNC_EVENT:
2743 mb[1] = RD_REG_WORD(&reg->mailbox1);
2744 mb[2] = RD_REG_WORD(&reg->mailbox2);
2745 mb[3] = RD_REG_WORD(&reg->mailbox3);
2746 qla2x00_async_event(vha, rsp, mb);
2748 case INTR_RSP_QUE_UPDATE:
2749 case INTR_RSP_QUE_UPDATE_83XX:
2750 qla24xx_process_response_queue(vha, rsp);
2752 case INTR_ATIO_QUE_UPDATE:
2753 qlt_24xx_process_atio_queue(vha);
2755 case INTR_ATIO_RSP_QUE_UPDATE:
2756 qlt_24xx_process_atio_queue(vha);
2757 qla24xx_process_response_queue(vha, rsp);
2760 ql_dbg(ql_dbg_async, vha, 0x5051,
2761 "Unrecognized interrupt type (%d).\n", stat & 0xff);
2764 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2766 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2768 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2769 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2770 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2771 complete(&ha->mbx_intr_comp);
2776 /* Interrupt handling helpers. */
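/* Name/handler pair used to request each MSI-X vector below. */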
2778 struct qla_init_msix_entry {
2780 irq_handler_t handler;
2783 static struct qla_init_msix_entry msix_entries[3] = {
2784 { "qla2xxx (default)", qla24xx_msix_default },
2785 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2786 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
2789 static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2790 { "qla2xxx (default)", qla82xx_msix_default },
2791 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2794 static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
2795 { "qla2xxx (default)", qla24xx_msix_default },
2796 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2797 { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
2801 qla24xx_disable_msix(struct qla_hw_data *ha)
2804 struct qla_msix_entry *qentry;
2805 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2807 for (i = 0; i < ha->msix_count; i++) {
2808 qentry = &ha->msix_entries[i];
2809 if (qentry->have_irq)
2810 free_irq(qentry->vector, qentry->rsp);
2812 pci_disable_msix(ha->pdev);
2813 kfree(ha->msix_entries);
2814 ha->msix_entries = NULL;
2815 ha->flags.msix_enabled = 0;
2816 ql_dbg(ql_dbg_init, vha, 0x0042,
2817 "Disabled the MSI.\n");
2821 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2823 #define MIN_MSIX_COUNT 2
2825 struct msix_entry *entries;
2826 struct qla_msix_entry *qentry;
2827 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2829 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2832 ql_log(ql_log_warn, vha, 0x00bc,
2833 "Failed to allocate memory for msix_entry.\n");
2837 for (i = 0; i < ha->msix_count; i++)
2838 entries[i].entry = i;
2840 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2842 if (ret < MIN_MSIX_COUNT)
2845 ql_log(ql_log_warn, vha, 0x00c6,
2846 "MSI-X: Failed to enable support "
2847 "-- %d/%d\n Retry with %d vectors.\n",
2848 ha->msix_count, ret, ret);
2849 ha->msix_count = ret;
2850 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2853 ql_log(ql_log_fatal, vha, 0x00c7,
2854 "MSI-X: Failed to enable support, "
2855 "giving up -- %d/%d.\n",
2856 ha->msix_count, ret);
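/* One vector serves the default queue; the remainder can back
 * additional response queues. */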
2859 ha->max_rsp_queues = ha->msix_count - 1;
2861 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2862 ha->msix_count, GFP_KERNEL);
2863 if (!ha->msix_entries) {
2864 ql_log(ql_log_fatal, vha, 0x00c8,
2865 "Failed to allocate memory for ha->msix_entries.\n");
2869 ha->flags.msix_enabled = 1;
2871 for (i = 0; i < ha->msix_count; i++) {
2872 qentry = &ha->msix_entries[i];
2873 qentry->vector = entries[i].vector;
2874 qentry->entry = entries[i].entry;
2875 qentry->have_irq = 0;
2879 /* Enable MSI-X vectors for the base queue */
2880 for (i = 0; i < ha->msix_count; i++) {
2881 qentry = &ha->msix_entries[i];
2882 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
2883 ret = request_irq(qentry->vector,
2884 qla83xx_msix_entries[i].handler,
2885 0, qla83xx_msix_entries[i].name, rsp);
2886 } else if (IS_QLA82XX(ha)) {
2887 ret = request_irq(qentry->vector,
2888 qla82xx_msix_entries[i].handler,
2889 0, qla82xx_msix_entries[i].name, rsp);
2891 ret = request_irq(qentry->vector,
2892 msix_entries[i].handler,
2893 0, msix_entries[i].name, rsp);
2896 ql_log(ql_log_fatal, vha, 0x00cb,
2897 "MSI-X: unable to register handler -- %x/%d.\n",
2898 qentry->vector, ret);
2899 qla24xx_disable_msix(ha);
2903 qentry->have_irq = 1;
2908 /* Enable MSI-X vector for response queue update for queue 0 */
2909 if (IS_QLA83XX(ha)) {
2910 if (ha->msixbase && ha->mqiobase &&
2911 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2915 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2917 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2918 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2919 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2920 ql_dbg(ql_dbg_init, vha, 0x0055,
2921 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2922 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
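/* Top-level interrupt setup: prefer MSI-X, fall back to MSI, then to
 * legacy INTx. */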
2930 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2933 device_reg_t __iomem *reg = ha->iobase;
2934 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2936 /* If possible, enable MSI-X. */
2937 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2938 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
2941 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2942 (ha->pdev->subsystem_device == 0x7040 ||
2943 ha->pdev->subsystem_device == 0x7041 ||
2944 ha->pdev->subsystem_device == 0x1705)) {
2945 ql_log(ql_log_warn, vha, 0x0034,
2946 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
2947 ha->pdev->subsystem_vendor,
2948 ha->pdev->subsystem_device);
2952 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
2953 ql_log(ql_log_warn, vha, 0x0035,
2954 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
2955 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
2959 ret = qla24xx_enable_msix(ha, rsp);
2961 ql_dbg(ql_dbg_init, vha, 0x0036,
2962 "MSI-X: Enabled (0x%X, 0x%X).\n",
2963 ha->chip_revision, ha->fw_attributes);
2964 goto clear_risc_ints;
2966 ql_log(ql_log_info, vha, 0x0037,
2967 "MSI-X Falling back-to MSI mode -%d.\n", ret);
2970 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2971 !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
2974 ret = pci_enable_msi(ha->pdev);
2976 ql_dbg(ql_dbg_init, vha, 0x0038,
2978 ha->flags.msi_enabled = 1;
2980 ql_log(ql_log_warn, vha, 0x0039,
2981 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
2983 /* Skip INTx on ISP82xx. */
2984 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
2985 return QLA_FUNCTION_FAILED;
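/* Register a single interrupt line (MSI or legacy INTx); INTx must be
 * requested as shareable. */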
2989 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2990 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2991 QLA2XXX_DRIVER_NAME, rsp);
2993 ql_log(ql_log_warn, vha, 0x003a,
2994 "Failed to reserve interrupt %d already in use.\n",
2997 } else if (!ha->flags.msi_enabled) {
2998 ql_dbg(ql_dbg_init, vha, 0x0125,
2999 "INTa mode: Enabled.\n");
3000 ha->flags.mr_intr_valid = 1;
3005 spin_lock_irq(&ha->hardware_lock);
3006 if (!IS_FWI2_CAPABLE(ha))
3007 WRT_REG_WORD(&reg->isp.semaphore, 0);
3008 spin_unlock_irq(&ha->hardware_lock);
3015 qla2x00_free_irqs(scsi_qla_host_t *vha)
3017 struct qla_hw_data *ha = vha->hw;
3018 struct rsp_que *rsp;
3021 * We need to check that ha->rsp_q_map is valid in case we are called
3022 * from a probe failure context.
3024 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3026 rsp = ha->rsp_q_map[0];
3028 if (ha->flags.msix_enabled)
3029 qla24xx_disable_msix(ha);
3030 else if (ha->flags.msi_enabled) {
3031 free_irq(ha->pdev->irq, rsp);
3032 pci_disable_msi(ha->pdev);
3034 free_irq(ha->pdev->irq, rsp);
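/* Request the MSI-X vector that backs an additional response queue
 * (the multiq entry in the table above). */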
3038 int qla25xx_request_irq(struct rsp_que *rsp)
3040 struct qla_hw_data *ha = rsp->hw;
3041 struct qla_init_msix_entry *intr = &msix_entries[2];
3042 struct qla_msix_entry *msix = rsp->msix;
3043 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3046 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
3048 ql_log(ql_log_fatal, vha, 0x00e6,
3049 "MSI-X: Unable to register handler -- %x/%d.\n",