/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <scsi/scsi_tcq.h>
#include <linux/utsname.h>
/* QLAFX00 specific Mailbox implementation functions */

/*
 * qlafx00_mailbox_command
 *	Issues a mailbox command and waits for its completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS (command performed successfully)
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 */
static int
qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
{
	int rval = QLA_SUCCESS;
	unsigned long flags = 0;
	device_reg_t __iomem *reg;
	uint8_t abort_active;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint32_t *iptr, *iptr2;
	uint32_t __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	if (ha->pdev->error_state > pci_channel_io_frozen) {
		ql_log(ql_log_warn, vha, 0x115c,
		    "error_state is greater than pci_channel_io_frozen, "
		    "exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x115f,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1175,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1176,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		rval = QLA_FUNCTION_FAILED;
		goto premature_exit;
	}
	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0x1177,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 1;
	/* Save mailbox command for debug */
	ha->mcp32 = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1178,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Load mailbox registers. */
	optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;
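	/*
	 * out_mb is a bitmask of the mailbox registers to load: only
	 * registers whose bit is set are written out to the chip below.
	 */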
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			WRT_REG_DWORD(optr, *iptr);

		mboxes >>= 1;
		optr++;
		iptr++;
	}
	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
	    (uint8_t *)mcp->mb, 16);
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
	    ((uint8_t *)mcp->mb + 0x10), 16);
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
	    ((uint8_t *)mcp->mb + 0x20), 8);
	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x1179,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
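		/*
		 * Ring the host-to-firmware doorbell with the interrupt
		 * code the firmware handed us at init time, then sleep
		 * until the ISR signals mbx_intr_comp (or we time out).
		 */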
		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x112c,
		    "Cmd=%x Polling Mode.\n", command);

		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		wait_time = jiffies + mcp->tov * HZ;	/* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (time_after(jiffies, wait_time))
				break;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				usleep_range(10000, 11000);
		}
		ql_dbg(ql_dbg_mbx, vha, 0x112d,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}
	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		ql_dbg(ql_dbg_mbx, vha, 0x112e,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
			rval = QLA_FUNCTION_FAILED;

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint32_t *)&ha->mailbox_out32[0];
		mboxes = mcp->in_mb;
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0)
				*iptr2 = *iptr;

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {
		rval = QLA_FUNCTION_TIMEOUT;
	}
	ha->flags.mbox_busy = 0;

	/* Clean up */
	ha->mcp32 = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x113a,
		    "checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}
	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x115d,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

				ql_log(ql_log_info, base_vha, 0x115e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (!abort_active) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x1160,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

				ql_log(ql_log_info, base_vha, 0x1161,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort.\n",
				    command, mcp->mb[0]);

				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x1162,
				    "Finished abort_isp.\n");
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

	if (rval) {
		ql_log(ql_log_warn, base_vha, 0x1163,
		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
		    "mb[3]=%x, cmd=%x ****.\n",
		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
	}

	return rval;
}
/*
 * qlafx00_driver_shutdown
 *	Indicate a driver shutdown to firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	local function return status code.
 */
int
qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qlafx00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1167,
		    "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
		    "Done %s.\n", __func__);
	}

	return rval;
}
/*
 * qlafx00_get_firmware_state
 *	Get adapter firmware state.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 */
static int
qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qlafx00_mailbox_command(vha, mcp);

	/* Return firmware states. */
	states[0] = mcp->mb[1];

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x116a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
		    "Done %s.\n", __func__);
	}

	return rval;
}
/*
 * qlafx00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 */
int
qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSD(ha->init_cb_dma);
	mcp->mb[3] = LSD(ha->init_cb_dma);
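	/*
	 * The init control block is handed to the firmware by physical
	 * address, split across two mailboxes: MSD()/LSD() carry the high
	 * and low 32 bits of the 64-bit DMA handle.
	 */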
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qlafx00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x116d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
		    "Done %s.\n", __func__);
	}

	return rval;
}
/*
 * qlafx00_mbx_reg_test
 *	Mailbox register wrap test.
 */
static int
qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->mb[8] = 0xBBBB;
	mcp->mb[9] = 0x6666;
	mcp->mb[10] = 0xBB66;
	mcp->mb[11] = 0x66BB;
	mcp->mb[12] = 0xB6B6;
	mcp->mb[13] = 0x6B6B;
	mcp->mb[14] = 0x3636;
	mcp->mb[15] = 0xCCCC;
	mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qlafx00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS) {
		if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
		    mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
		    mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
		    mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
		    mcp->mb[31] != 0xCCCC)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1170,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
		    "Done %s.\n", __func__);
	}

	return rval;
}
/**
 * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
		pcie_set_readrq(ha->pdev, 2048);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}
/*
 * qlafx00_soc_cpu_reset() - Perform warm reset of the iSA (the CPUs on
 * the SOC are reset).
 *
 * @vha: HA context
 */
static inline void
qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
{
	int i, core;
	uint32_t cnt;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;

	/* Set all 4 cores in reset */
	for (i = 0; i < 4; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
	}

	/* Set all 4 core Clock gating control */
	for (i = 0; i < 4; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
	}

	/* Reset all units in Fabric */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101));

	/* Reset all interrupt control registers */
	for (i = 0; i < 115; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
	}

	/* Reset Timers control registers. per core */
	for (core = 0; core < 4; core++)
		for (i = 0; i < 8; i++)
			QLAFX00_SET_HBA_SOC_REG(ha,
			    (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));

	/* Reset per core IRQ ack register */
	for (core = 0; core < 4; core++)
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));

	/* Set Fabric control and config to defaults */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Kick in Fabric units */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));

	/* Kick in Core0 to start boot process */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Wait 10secs for soft-reset to complete. */
	for (cnt = 10; cnt; cnt--) {
		msleep(1000);
		barrier();
	}
}
/**
 * qlafx00_soft_reset() - Soft Reset ISPFx00.
 * @vha: HA context
 */
void
qlafx00_soft_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev) &&
	    ha->flags.pci_channel_io_perm_failure))
		return;

	ha->isp_ops->disable_intrs(ha);
	qlafx00_soc_cpu_reset(vha);
	ha->isp_ops->enable_intrs(ha);
}
/**
 * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_chip_diag(scsi_qla_host_t *vha)
{
	int rval = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

	rval = qlafx00_mbx_reg_test(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x1165,
		    "Failed mailbox send register test\n");
	} else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}
	return rval;
}
void
qlafx00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	struct init_cb_fx *icb;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_fx *)ha->init_cb;
	icb->request_q_outpointer = __constant_cpu_to_le16(0);
	icb->response_q_inpointer = __constant_cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	WRT_REG_DWORD(&reg->req_q_in, 0);
	WRT_REG_DWORD(&reg->req_q_out, 0);

	WRT_REG_DWORD(&reg->rsp_q_in, 0);
	WRT_REG_DWORD(&reg->rsp_q_out, 0);

	/* PCI posting */
	RD_REG_DWORD(&reg->rsp_q_out);
}
char *
qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;
	int pcie_reg;

	pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
	if (pcie_reg)
		strcpy(str, "PCIe iSA");
	return str;
}

char *
qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;

	sprintf(str, "%s", ha->mr.fw_version);
	return str;
}
void
qlafx00_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	QLAFX00_ENABLE_ICNTRL_REG(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

void
qlafx00_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	QLAFX00_DISABLE_ICNTRL_REG(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
qlafx00_tmf_iocb_timeout(void *data)
{
	srb_t *sp = (srb_t *)data;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	tmf->u.tmf.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
	complete(&tmf->u.tmf.comp);
}

static void
qlafx00_tmf_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	complete(&tmf->u.tmf.comp);
}
static int
qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags,
    uint32_t lun, uint32_t tag)
{
	scsi_qla_host_t *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qlafx00_tmf_sp_done;
	tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507b,
	    "Task management command issued target_id=%x\n",
	    fcport->tgt_id);

	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}
int
qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
{
	return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
}

int
qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
{
	return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
}
int
qlafx00_loop_reset(scsi_qla_host_t *vha)
{
	int ret;
	struct fc_port *fcport;
	struct qla_hw_data *ha = vha->hw;

	if (ql2xtargetreset) {
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->port_type != FCT_TARGET)
				continue;

			ret = ha->isp_ops->target_reset(fcport, 0, 0);
			if (ret != QLA_SUCCESS) {
				ql_dbg(ql_dbg_taskm, vha, 0x803d,
				    "Bus Reset failed: Reset=%d "
				    "d_id=%x.\n", ret, fcport->d_id.b24);
			}
		}
	}
	return QLA_SUCCESS;
}
int
qlafx00_iospace_config(struct qla_hw_data *ha)
{
	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
		    "Invalid pci I/O region size (%s).\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
		    "Invalid PCI mem BAR0 region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
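	/*
	 * BAR0 carries the SOC control registers (mapped at ha->cregbase);
	 * BAR2 carries the device registers and queue window (ha->iobase).
	 * Both are device registers, so they are mapped uncached.
	 */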
	ha->cregbase =
	    ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
	if (!ha->cregbase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
		    "region #2 not an MMIO resource (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
		    "Invalid PCI mem BAR2 region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase =
	    ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Determine queue resources */
	ha->max_req_queues = ha->max_rsp_queues = 1;

	ql_log_pci(ql_log_info, ha->pdev, 0x012c,
	    "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
	    ha->bars, ha->cregbase, ha->iobase);

	return 0;

iospace_error_exit:
	return -ENOMEM;
}
static void
qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	req->length_fx00 = req->length;
	req->ring_fx00 = req->ring;
	req->dma_fx00 = req->dma;

	rsp->length_fx00 = rsp->length;
	rsp->ring_fx00 = rsp->ring;
	rsp->dma_fx00 = rsp->dma;

	ql_dbg(ql_dbg_init, vha, 0x012d,
	    "req: %p, ring_fx00: %p, length_fx00: 0x%x, "
	    "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
	    req->length_fx00, (u64)req->dma_fx00);

	ql_dbg(ql_dbg_init, vha, 0x012e,
	    "rsp: %p, ring_fx00: %p, length_fx00: 0x%x, "
	    "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
	    rsp->length_fx00, (u64)rsp->dma_fx00);
}
static int
qlafx00_config_queues(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
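	/*
	 * Unlike other ISPs, the Fx00 request/response rings live in the
	 * adapter's BAR2 MMIO window: each ring address is the BAR2 bus
	 * address plus the queue offset reported by the firmware.
	 */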
	req->length = ha->req_que_len;
	req->ring = (void *)ha->iobase + ha->req_que_off;
	req->dma = bar2_hdl + ha->req_que_off;
	if ((!req->ring) || (req->length == 0)) {
		ql_log_pci(ql_log_info, ha->pdev, 0x012f,
		    "Unable to allocate memory for req_ring\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0130,
	    "req: %p req_ring pointer %p req len 0x%x "
	    "req off 0x%x, req->dma: 0x%llx\n",
	    req, req->ring, req->length,
	    ha->req_que_off, (u64)req->dma);

	rsp->length = ha->rsp_que_len;
	rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
	rsp->dma = bar2_hdl + ha->rsp_que_off;
	if ((!rsp->ring) || (rsp->length == 0)) {
		ql_log_pci(ql_log_info, ha->pdev, 0x0131,
		    "Unable to allocate memory for rsp_ring\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0132,
	    "rsp: %p rsp_ring pointer %p rsp len 0x%x "
	    "rsp off 0x%x, rsp->dma: 0x%llx\n",
	    rsp, rsp->ring, rsp->length,
	    ha->rsp_que_off, (u64)rsp->dma);

	return QLA_SUCCESS;
}
static int
qlafx00_init_fw_ready(scsi_qla_host_t *vha)
{
	int rval = 0;
	unsigned long wtime;
	uint16_t wait_time;	/* Wait time */
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t aenmbx, aenmbx7 = 0;
	uint32_t state[5];
	bool done = false;

	/* 30 seconds wait - Adjust if required */
	wait_time = 30;

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);
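	/*
	 * Poll AEN mailbox 0: the firmware posts its boot/recovery
	 * progress codes there until the 8060 restart-complete AEN.
	 */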
	do {
		aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
		barrier();
		ql_dbg(ql_dbg_mbx, vha, 0x0133,
		    "aenmbx: 0x%x\n", aenmbx);

		switch (aenmbx) {
		case MBA_FW_NOT_STARTED:
		case MBA_FW_STARTING:
			break;

		case MBA_SYSTEM_ERR:
		case MBA_REQ_TRANSFER_ERR:
		case MBA_RSP_TRANSFER_ERR:
		case MBA_FW_INIT_FAILURE:
			qlafx00_soft_reset(vha);
			break;

		case MBA_FW_RESTART_CMPLT:
			/* Set the mbx and rqstq intr code */
			aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
			ha->mbx_intr_code = MSW(aenmbx7);
			ha->rqstq_intr_code = LSW(aenmbx7);
			ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
			ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
			ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
			ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
			WRT_REG_DWORD(&reg->aenmailbox0, 0);
			RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
			ql_dbg(ql_dbg_init, vha, 0x0134,
			    "f/w returned mbx_intr_code: 0x%x, "
			    "rqstq_intr_code: 0x%x\n",
			    ha->mbx_intr_code, ha->rqstq_intr_code);
			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
			done = true;
			break;

		default:
			/* The fw is apparently not ready. In order to
			 * continue, we might need to issue a Mbox cmd,
			 * but the problem is that the DoorBell vector
			 * values that come with the 8060 AEN are most
			 * likely gone by now (and thus no bell would be
			 * rung on the fw side when the mbox cmd is
			 * issued). We therefore have to grab the 8060
			 * AEN shadow regs (filled in by FW when the last
			 * 8060 AEN was being posted).
			 * Do the following to determine what is needed in
			 * order to get the FW ready:
			 * 1. reload the 8060 AEN values from the shadow regs
			 * 2. clear int status to get rid of possible pending
			 *    interrupts
			 * 3. issue Get FW State Mbox cmd to determine fw state
			 * Set the mbx and rqstq intr code from Shadow Regs.
			 */
			aenmbx7 = RD_REG_DWORD(&reg->initval7);
			ha->mbx_intr_code = MSW(aenmbx7);
			ha->rqstq_intr_code = LSW(aenmbx7);
			ha->req_que_off = RD_REG_DWORD(&reg->initval1);
			ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
			ha->req_que_len = RD_REG_DWORD(&reg->initval5);
			ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
			ql_dbg(ql_dbg_init, vha, 0x0135,
			    "f/w returned mbx_intr_code: 0x%x, "
			    "rqstq_intr_code: 0x%x\n",
			    ha->mbx_intr_code, ha->rqstq_intr_code);
			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

			/* Get the FW state */
			rval = qlafx00_get_firmware_state(vha, state);
			if (rval != QLA_SUCCESS) {
				/* Retry if timer has not expired */
				break;
			}

			if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
				/* Firmware is waiting to be
				 * initialized by driver
				 */
				rval = QLA_SUCCESS;
				done = true;
				break;
			}

			/* Issue driver shutdown and wait until f/w recovers.
			 * Driver should continue to poll until 8060 AEN is
			 * received indicating firmware recovery.
			 */
			ql_dbg(ql_dbg_init, vha, 0x0136,
			    "Sending Driver shutdown fw_state 0x%x\n",
			    state[0]);

			rval = qlafx00_driver_shutdown(vha, 10);
			if (rval != QLA_SUCCESS) {
				rval = QLA_FUNCTION_FAILED;
				break;
			}

			wtime = jiffies + (wait_time * HZ);
			break;
		}

		if (!done) {
			if (time_after_eq(jiffies, wtime)) {
				ql_dbg(ql_dbg_init, vha, 0x0137,
				    "Init f/w failed: aen[7]: 0x%x\n",
				    RD_REG_DWORD(&reg->aenmailbox7));
				rval = QLA_FUNCTION_FAILED;
				break;
			}
			/* Delay for a while */
			msleep(500);
		}
	} while (!done);

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x0138,
		    "%s **** FAILED ****.\n", __func__);
	else
		ql_dbg(ql_dbg_init, vha, 0x0139,
		    "%s **** SUCCESS ****.\n", __func__);

	return rval;
}
/*
 * qlafx00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime;
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint32_t state[5];

	rval = QLA_SUCCESS;

	wait_time = 10;

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish init */
	if (!vha->flags.init_done)
		ql_dbg(ql_dbg_init, vha, 0x013a,
		    "Waiting for init to complete...\n");

	do {
		rval = qlafx00_get_firmware_state(vha, state);

		if (rval == QLA_SUCCESS) {
			if (state[0] == FSTATE_FX00_INITIALIZED) {
				ql_dbg(ql_dbg_init, vha, 0x013b,
				    "fw_state=%x\n", state[0]);
				break;
			}
		}
		rval = QLA_FUNCTION_FAILED;

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);

		ql_dbg(ql_dbg_init, vha, 0x013c,
		    "fw_state=%x curr time=%lx.\n", state[0], jiffies);
	} while (1);

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x013d,
		    "Firmware ready **** FAILED ****.\n");
	else
		ql_dbg(ql_dbg_init, vha, 0x013e,
		    "Firmware ready **** SUCCESS ****.\n");

	return rval;
}
static int
qlafx00_find_all_targets(scsi_qla_host_t *vha,
	struct list_head *new_fcports)
{
	int rval;
	uint16_t tgt_id;
	fc_port_t *fcport, *new_fcport;
	int found;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
		return QLA_FUNCTION_FAILED;

	if ((atomic_read(&vha->loop_down_timer) ||
	    STATE_TRANSITION(vha))) {
		atomic_set(&vha->loop_down_timer, 0);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
	    "Listing Target bit map...\n");
	ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
	    0x2089, (uint8_t *)ha->gid_list, 32);

	/* Allocate temporary rmtport for any new rmtports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL)
		return QLA_MEMORY_ALLOC_FAILED;

	for_each_set_bit(tgt_id, (void *)ha->gid_list,
	    QLAFX00_TGT_NODE_LIST_SIZE) {
		/* Send get target node info */
		new_fcport->tgt_id = tgt_id;
		rval = qlafx00_fx_disc(vha, new_fcport,
		    FXDISC_GET_TGT_NODE_INFO);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x208a,
			    "Target info scan failed -- assuming zero-entry "
			    "result.\n");
			continue;
		}

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name,
			    fcport->port_name, WWN_SIZE))
				continue;

			found++;

			/*
			 * If tgt_id is same and state FCS_ONLINE, nothing
			 * changed.
			 */
			if (fcport->tgt_id == new_fcport->tgt_id &&
			    atomic_read(&fcport->state) == FCS_ONLINE)
				break;

			/*
			 * Tgt ID changed or device was marked to be updated.
			 */
			ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
			    "TGT-ID Change(%s): Present tgt id: "
			    "0x%x state: 0x%x "
			    "wwnn = %llx wwpn = %llx.\n",
			    __func__, fcport->tgt_id,
			    atomic_read(&fcport->state),
			    (unsigned long long)wwn_to_u64(fcport->node_name),
			    (unsigned long long)wwn_to_u64(fcport->port_name));

			ql_log(ql_log_info, vha, 0x208c,
			    "TGT-ID Announce(%s): Discovered tgt "
			    "id 0x%x wwnn = %llx "
			    "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
			    (unsigned long long)
			    wwn_to_u64(new_fcport->node_name),
			    (unsigned long long)
			    wwn_to_u64(new_fcport->port_name));

			if (atomic_read(&fcport->state) != FCS_ONLINE) {
				fcport->old_tgt_id = fcport->tgt_id;
				fcport->tgt_id = new_fcport->tgt_id;
				ql_log(ql_log_info, vha, 0x208d,
				    "TGT-ID: New fcport Added: %p\n", fcport);
				qla2x00_update_fcport(vha, fcport);
			} else {
				ql_log(ql_log_info, vha, 0x208e,
				    "Existing TGT-ID %x did not get "
				    "offline event from firmware.\n",
				    fcport->old_tgt_id);
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			}
			break;
		}

		if (found)
			continue;

		/* If device was not in our fcports list, then add it. */
		list_add_tail(&new_fcport->list, new_fcports);

		/* Allocate a new replacement fcport. */
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL)
			return QLA_MEMORY_ALLOC_FAILED;
	}

	kfree(new_fcport);
	return rval;
}
/*
 * qlafx00_configure_all_targets
 *	Setup target devices with node ID's.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = error
 */
static int
qlafx00_configure_all_targets(scsi_qla_host_t *vha)
{
	int rval;
	fc_port_t *fcport, *rmptemp;
	LIST_HEAD(new_fcports);

	rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
	    FXDISC_GET_TGT_NODE_LIST);
	if (rval != QLA_SUCCESS) {
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return rval;
	}

	rval = qlafx00_find_all_targets(vha, &new_fcports);
	if (rval != QLA_SUCCESS) {
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return rval;
	}

	/*
	 * Delete all previous devices marked lost.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
			if (fcport->port_type != FCT_INITIATOR)
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
		}
	}

	/*
	 * Add the new devices to our devices list.
	 */
	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		qla2x00_update_fcport(vha, fcport);
		list_move_tail(&fcport->list, &vha->vp_fcports);
		ql_log(ql_log_info, vha, 0x208f,
		    "Attach new target id 0x%x wwnn = %llx "
		    "wwpn = %llx.\n", fcport->tgt_id,
		    (unsigned long long)wwn_to_u64(fcport->node_name),
		    (unsigned long long)wwn_to_u64(fcport->port_name));
	}

	/* Free all new device structures not processed. */
	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
		list_del(&fcport->list);
		kfree(fcport);
	}

	return rval;
}
/*
 * qlafx00_configure_devices
 *	Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	1 = error.
 *	2 = database was full and device was not configured.
 */
int
qlafx00_configure_devices(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;
	rval = QLA_SUCCESS;

	save_flags = flags = vha->dpc_flags;

	ql_dbg(ql_dbg_disc, vha, 0x2090,
	    "Configure devices -- dpc flags = 0x%lx\n", flags);

	rval = qlafx00_configure_all_targets(vha);

	if (rval == QLA_SUCCESS) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_log(ql_log_info, vha, 0x2091,
			    "Device Ready\n");
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x2092,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2093,
		    "%s: exiting normally.\n", __func__);
	}
	return rval;
}
static void
qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
{
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport;

	vha->flags.online = 0;
	ha->mr.fw_hbt_en = 0;

	if (!critemp) {
		ha->flags.chip_reset_done = 0;
		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		vha->qla_stats.total_isp_aborts++;
		ql_log(ql_log_info, vha, 0x013f,
		    "Performing ISP error recovery - ha = %p.\n", ha);
		ha->isp_ops->reset_chip(vha);
	}

	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer,
		    QLAFX00_LOOP_DOWN_TIME);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    QLAFX00_LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->flags = 0;
		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
	}

	if (!ha->flags.eeh_busy) {
		if (critemp) {
			qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
		} else {
			/* Requeue all commands in outstanding command list. */
			qla2x00_abort_all_cmds(vha, DID_RESET << 16);
		}
	}

	qla2x00_free_irqs(vha);
	if (critemp)
		set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
	else
		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);

	/* Clear the Interrupts */
	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

	ql_log(ql_log_info, vha, 0x0140,
	    "%s Done - ha=%p.\n", __func__, ha);
}
/*
 * qlafx00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 */
void
qlafx00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	rsp->ring_ptr = rsp->ring;
	rsp->ring_index = 0;
	rsp->status_srb = NULL;
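	/*
	 * Pre-mark every ring entry as already processed so that stale
	 * entries left over from a previous run are never mistaken for
	 * new completions.
	 */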
	pkt = rsp->ring_ptr;
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		WRT_REG_DWORD((void __iomem *)&pkt->signature,
		    RESPONSE_PROCESSED);
		pkt++;
	}
}
int
qlafx00_rescan_isp(scsi_qla_host_t *vha)
{
	uint32_t status = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t aenmbx7;

	qla2x00_request_irqs(ha, ha->rsp_q_map[0]);

	aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
	ha->mbx_intr_code = MSW(aenmbx7);
	ha->rqstq_intr_code = LSW(aenmbx7);
	ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
	ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
	ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
	ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);

	ql_dbg(ql_dbg_disc, vha, 0x2094,
	    "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
	    "Req que offset 0x%x Rsp que offset 0x%x\n",
	    ha->mbx_intr_code, ha->rqstq_intr_code,
	    ha->req_que_off, ha->rsp_que_off);

	/* Clear the Interrupts */
	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

	status = qla2x00_init_rings(vha);
	if (!status) {
		vha->flags.online = 1;

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;

		/* Register system information */
		if (qlafx00_fx_disc(vha,
		    &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
			ql_dbg(ql_dbg_disc, vha, 0x2095,
			    "failed to register host info\n");
	}
	scsi_unblock_requests(vha->host);
	return status;
}
void
qlafx00_timer_routine(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t fw_heart_beat;
	uint32_t aenmbx0;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t tempc;

	/* Check firmware health */
	if (ha->mr.fw_hbt_cnt)
		ha->mr.fw_hbt_cnt--;
	else {
		if ((!ha->flags.mr_reset_hdlr_active) &&
		    (!test_bit(UNLOADING, &vha->dpc_flags)) &&
		    (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
		    (ha->mr.fw_hbt_en)) {
			fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
			if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
				ha->mr.old_fw_hbt_cnt = fw_heart_beat;
				ha->mr.fw_hbt_miss_cnt = 0;
			} else {
				ha->mr.fw_hbt_miss_cnt++;
				if (ha->mr.fw_hbt_miss_cnt ==
				    QLAFX00_HEARTBEAT_MISS_CNT) {
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
					qla2xxx_wake_dpc(vha);
					ha->mr.fw_hbt_miss_cnt = 0;
				}
			}
		}
		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
	}

	if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
		/* Reset recovery to be performed in timer routine */
		aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
		if (ha->mr.fw_reset_timer_exp) {
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			ha->mr.fw_reset_timer_exp = 0;
		} else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
			/* Wake up DPC to rescan the targets */
			set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
			clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		} else if ((aenmbx0 == MBA_FW_STARTING) &&
		    (!ha->mr.fw_hbt_en)) {
			ha->mr.fw_hbt_en = 1;
		} else if (!ha->mr.fw_reset_timer_tick) {
			if (aenmbx0 == ha->mr.old_aenmbx0_state)
				ha->mr.fw_reset_timer_exp = 1;
			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		} else if (aenmbx0 == 0xFFFFFFFF) {
			uint32_t data0, data1;
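			/*
			 * Reading all-ones from the AEN mailbox suggests
			 * the PCIe register window is no longer aimed at
			 * the SOC; re-seed the PEX0 window base from BAR1
			 * (high half) and the current window offset (low
			 * half).
			 */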
			data0 = QLAFX00_RD_REG(ha,
			    QLAFX00_BAR1_BASE_ADDR_REG);
			data1 = QLAFX00_RD_REG(ha,
			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG);

			data0 &= 0xffff0000;
			data1 &= 0x0000ffff;

			QLAFX00_WR_REG(ha,
			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
			    (data0 | data1));
		} else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
			ha->mr.fw_reset_timer_tick =
			    QLAFX00_MAX_RESET_INTERVAL;
		} else if (aenmbx0 == MBA_FW_RESET_FCT) {
			ha->mr.fw_reset_timer_tick =
			    QLAFX00_MAX_RESET_INTERVAL;
		}
		ha->mr.old_aenmbx0_state = aenmbx0;
		ha->mr.fw_reset_timer_tick--;
	}

	if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
		/*
		 * Critical temperature recovery to be
		 * performed in timer routine
		 */
		if (ha->mr.fw_critemp_timer_tick == 0) {
			tempc = QLAFX00_GET_TEMPERATURE(ha);
			ql_dbg(ql_dbg_timer, vha, 0x6012,
			    "ISPFx00(%s): Critical temp timer, "
			    "current SOC temperature: %d\n",
			    __func__, tempc);
			if (tempc < ha->mr.critical_temperature) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				clear_bit(FX00_CRITEMP_RECOVERY,
				    &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
			ha->mr.fw_critemp_timer_tick =
			    QLAFX00_CRITEMP_INTERVAL;
		} else {
			ha->mr.fw_critemp_timer_tick--;
		}
	}
}
/*
 * qlafx00_reset_initialize
 *	Re-initialize after an iSA device reset.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_reset_initialize(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_dbg(ql_dbg_init, vha, 0x0142,
		    "Device in failed state\n");
		return QLA_SUCCESS;
	}

	ha->flags.mr_reset_hdlr_active = 1;

	if (vha->flags.online) {
		scsi_block_requests(vha->host);
		qlafx00_abort_isp_cleanup(vha, false);
	}

	ql_log(ql_log_info, vha, 0x0143,
	    "(%s): succeeded.\n", __func__);
	ha->flags.mr_reset_hdlr_active = 0;

	return QLA_SUCCESS;
}
/*
 * qlafx00_abort_isp
 *	Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_abort_isp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->flags.online) {
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			return QLA_SUCCESS;
		}

		scsi_block_requests(vha->host);
		qlafx00_abort_isp_cleanup(vha, false);
	} else {
		scsi_block_requests(vha->host);
		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		vha->qla_stats.total_isp_aborts++;
		ha->isp_ops->reset_chip(vha);
		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
		/* Clear the Interrupts */
		QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
	}

	ql_log(ql_log_info, vha, 0x0145,
	    "(%s): succeeded.\n", __func__);

	return QLA_SUCCESS;
}
static inline fc_port_t*
qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
{
	fc_port_t *fcport;

	/* Check for matching device in remote port list. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->tgt_id == tgt_id) {
			ql_dbg(ql_dbg_async, vha, 0x5072,
			    "Matching fcport(%p) found with TGT-ID: 0x%x "
			    "and Remote TGT_ID: 0x%x\n",
			    fcport, fcport->tgt_id, tgt_id);
			return fcport;
		}
	}

	return NULL;
}
static void
qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
{
	fc_port_t *fcport;

	ql_log(ql_log_info, vha, 0x5073,
	    "Detach TGT-ID: 0x%x\n", tgt_id);

	fcport = qlafx00_get_fcport(vha, tgt_id);
	if (!fcport)
		return;

	qla2x00_mark_device_lost(vha, fcport, 0, 0);
}
int
qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
{
	int rval = 0;
	uint32_t aen_code, aen_data;

	aen_code = FCH_EVT_VENDOR_UNIQUE;
	aen_data = evt->u.aenfx.evtcode;

	switch (evt->u.aenfx.evtcode) {
	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
		if (evt->u.aenfx.mbx[1] == 0) {
			if (evt->u.aenfx.mbx[2] == 1) {
				if (!vha->flags.fw_tgt_reported)
					vha->flags.fw_tgt_reported = 1;
				atomic_set(&vha->loop_down_timer, 0);
				atomic_set(&vha->loop_state, LOOP_UP);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			} else if (evt->u.aenfx.mbx[2] == 2) {
				qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
			}
		} else if (evt->u.aenfx.mbx[1] == 0xffff) {
			if (evt->u.aenfx.mbx[2] == 1) {
				if (!vha->flags.fw_tgt_reported)
					vha->flags.fw_tgt_reported = 1;
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			} else if (evt->u.aenfx.mbx[2] == 2) {
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}
		}
		break;
	case QLAFX00_MBA_LINK_UP:
		aen_code = FCH_EVT_LINKUP;
		aen_data = 0;
		break;
	case QLAFX00_MBA_LINK_DOWN:
		aen_code = FCH_EVT_LINKDOWN;
		aen_data = 0;
		break;
	case QLAFX00_MBA_TEMP_OVER:
	case QLAFX00_MBA_TEMP_CRIT:	/* Critical temperature event */
		ql_log(ql_log_info, vha, 0x5082,
		    "Process critical temperature event "
		    "aenmb[0]: %x\n",
		    evt->u.aenfx.evtcode);
		scsi_block_requests(vha->host);
		qlafx00_abort_isp_cleanup(vha, true);
		scsi_unblock_requests(vha->host);
		break;
	}

	fc_host_post_event(vha->host, fc_get_event_number(),
	    aen_code, aen_data);

	return rval;
}
static void
qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
{
	u64 port_name = 0, node_name = 0;

	port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
	node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);

	fc_host_node_name(vha->host) = node_name;
	fc_host_port_name(vha->host) = port_name;
	if (!pinfo->port_type)
		vha->hw->current_topology = ISP_CFG_F;
	if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
		atomic_set(&vha->loop_state, LOOP_READY);
	else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
		atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
}
static void
qla2x00_fxdisc_iocb_timeout(void *data)
{
	srb_t *sp = (srb_t *)data;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	complete(&lio->u.fxiocb.fxiocb_comp);
}

static void
qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	complete(&lio->u.fxiocb.fxiocb_comp);
}
int
qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
{
	srb_t *sp;
	struct srb_iocb *fdisc;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = vha->hw;
	struct host_system_info *phost_info;
	struct register_host_info *preg_hsi;
	struct new_utsname *p_sysid = NULL;
	struct timeval tv;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fdisc = &sp->u.iocb_cmd;
	switch (fx_type) {
	case FXDISC_GET_CONFIG_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID;
		fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
		break;
	case FXDISC_GET_PORT_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
		fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id);
		break;
	case FXDISC_GET_TGT_NODE_INFO:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
		fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id);
		break;
	case FXDISC_GET_TGT_NODE_LIST:
		fdisc->u.fxiocb.flags =
		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
		break;
	case FXDISC_REG_HOST_INFO:
		fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
		fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
		p_sysid = utsname();
		if (!p_sysid) {
			ql_log(ql_log_warn, vha, 0x303c,
			    "Not able to get the system information\n");
			goto done_free_sp;
		}
		break;
	default:
		break;
	}
	if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
		fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
		    fdisc->u.fxiocb.req_len,
		    &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
		if (!fdisc->u.fxiocb.req_addr)
			goto done_free_sp;

		if (fx_type == FXDISC_REG_HOST_INFO) {
			preg_hsi = (struct register_host_info *)
			    fdisc->u.fxiocb.req_addr;
			phost_info = &preg_hsi->hsi;
			memset(preg_hsi, 0, sizeof(struct register_host_info));
			phost_info->os_type = OS_TYPE_LINUX;
			strncpy(phost_info->sysname,
			    p_sysid->sysname, SYSNAME_LENGTH);
			strncpy(phost_info->nodename,
			    p_sysid->nodename, NODENAME_LENGTH);
			strncpy(phost_info->release,
			    p_sysid->release, RELEASE_LENGTH);
			strncpy(phost_info->version,
			    p_sysid->version, VERSION_LENGTH);
			strncpy(phost_info->machine,
			    p_sysid->machine, MACHINE_LENGTH);
			strncpy(phost_info->domainname,
			    p_sysid->domainname, DOMNAME_LENGTH);
			strncpy(phost_info->hostdriver,
			    QLA2XXX_VERSION, VERSION_LENGTH);
			do_gettimeofday(&tv);
			preg_hsi->utc = (uint64_t)tv.tv_sec;
			ql_dbg(ql_dbg_init, vha, 0x0149,
			    "ISP%04X: Host registration with firmware\n",
			    ha->pdev->device);
			ql_dbg(ql_dbg_init, vha, 0x014a,
			    "os_type = '%d', sysname = '%s', nodename = '%s'\n",
			    phost_info->os_type,
			    phost_info->sysname,
			    phost_info->nodename);
			ql_dbg(ql_dbg_init, vha, 0x014b,
			    "release = '%s', version = '%s'\n",
			    phost_info->release,
			    phost_info->version);
			ql_dbg(ql_dbg_init, vha, 0x014c,
			    "machine = '%s', "
			    "domainname = '%s', hostdriver = '%s'\n",
			    phost_info->machine,
			    phost_info->domainname,
			    phost_info->hostdriver);
			ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
			    (uint8_t *)phost_info,
			    sizeof(struct host_system_info));
		}
	}
	if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
		fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
		    fdisc->u.fxiocb.rsp_len,
		    &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
		if (!fdisc->u.fxiocb.rsp_addr)
			goto done_unmap_req;
	}

	sp->type = SRB_FXIOCB_DCMD;
	sp->name = "fxdisc";
	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
	fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
	fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
	sp->done = qla2x00_fxdisc_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_unmap_dma;

	wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);

	if (fx_type == FXDISC_GET_CONFIG_INFO) {
		struct config_info_data *pinfo =
		    (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
		memcpy(&vha->hw->mr.product_name, pinfo->product_name,
		    sizeof(vha->hw->mr.product_name));
		memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
		    sizeof(vha->hw->mr.symbolic_name));
		memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
		    sizeof(vha->hw->mr.serial_num));
		memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
		    sizeof(vha->hw->mr.hw_version));
		memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
		    sizeof(vha->hw->mr.fw_version));
		strim(vha->hw->mr.fw_version);
		memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
		    sizeof(vha->hw->mr.uboot_version));
		memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
		    sizeof(vha->hw->mr.fru_serial_num));
		vha->hw->mr.critical_temperature = pinfo->nominal_temp_value;
		ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
		    QLAFX00_EXTENDED_IO_EN_MASK) != 0;
	} else if (fx_type == FXDISC_GET_PORT_INFO) {
		struct port_info_data *pinfo =
		    (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
		memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
		memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
		vha->d_id.b.domain = pinfo->port_id[0];
		vha->d_id.b.area = pinfo->port_id[1];
		vha->d_id.b.al_pa = pinfo->port_id[2];
		qlafx00_update_host_attr(vha, pinfo);
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
		    (uint8_t *)pinfo, 16);
	} else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
		struct qlafx00_tgt_node_info *pinfo =
		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
		memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
		memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
		fcport->port_type = FCT_TARGET;
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
		    (uint8_t *)pinfo, 16);
	} else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
		struct qlafx00_tgt_node_info *pinfo =
		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
		    (uint8_t *)pinfo, 16);
		memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
	}
	rval = le32_to_cpu(fdisc->u.fxiocb.result);

done_unmap_dma:
	if (fdisc->u.fxiocb.rsp_addr)
		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
		    fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);

done_unmap_req:
	if (fdisc->u.fxiocb.req_addr)
		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
		    fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);

done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}
static void
qlafx00_abort_iocb_timeout(void *data)
{
	srb_t *sp = (srb_t *)data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	abt->u.abt.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
	complete(&abt->u.abt.comp);
}

static void
qlafx00_abort_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	complete(&abt->u.abt.comp);
}
static int
qlafx00_async_abt_cmd(srb_t *cmd_sp)
{
	scsi_qla_host_t *vha = cmd_sp->fcport->vha;
	fc_port_t *fcport = cmd_sp->fcport;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	sp->done = qlafx00_abort_sp_done;
	abt_iocb->timeout = qlafx00_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, target_id=%x\n",
	    cmd_sp->handle, fcport->tgt_id);

	wait_for_completion(&abt_iocb->u.abt.comp);

	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}
int
qlafx00_abort_command(srb_t *sp)
{
	unsigned long flags = 0;
	uint32_t handle;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;
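	/*
	 * Find the firmware handle for this srb by scanning the
	 * outstanding-command table under the hardware lock; the handle
	 * is what the abort IOCB uses to name the command.
	 */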
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (handle == DEFAULT_OUTSTANDING_COMMANDS) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}
	return qlafx00_async_abt_cmd(sp);
}
/*
 * qlafx00_initialize_adapter
 *	Initialize board.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_initialize_adapter(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint32_t tempc;

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0147,
	    "Configuring PCI space...\n");

	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0148,
		    "Unable to configure PCI space.\n");
		return rval;
	}

	rval = qlafx00_init_fw_ready(vha);
	if (rval != QLA_SUCCESS)
		return rval;

	qlafx00_save_queue_ptrs(vha);

	rval = qlafx00_config_queues(vha);
	if (rval != QLA_SUCCESS)
		return rval;

	/*
	 * Allocate the array of outstanding commands
	 * now that we know the firmware resources.
	 */
	rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
	if (rval != QLA_SUCCESS)
		return rval;

	rval = qla2x00_init_rings(vha);
	ha->flags.chip_reset_done = 1;

	tempc = QLAFX00_GET_TEMPERATURE(ha);
	ql_dbg(ql_dbg_init, vha, 0x0152,
	    "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n",
	    __func__, tempc);

	return rval;
}
uint32_t
qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int rval = QLA_FUNCTION_FAILED;
	uint32_t state[1];

	if (qla2x00_reset_active(vha))
		ql_log(ql_log_warn, vha, 0x70ce,
		    "ISP reset active.\n");
	else if (!vha->hw->flags.eeh_busy) {
		rval = qlafx00_get_firmware_state(vha, state);
	}
	if (rval != QLA_SUCCESS)
		memset(state, -1, sizeof(state));

	return state[0];
}
void
qlafx00_get_host_speed(struct Scsi_Host *shost)
{
	struct qla_hw_data *ha = ((struct scsi_qla_host *)
	    (shost_priv(shost)))->hw;
	u32 speed = FC_PORTSPEED_UNKNOWN;

	switch (ha->link_data_rate) {
	case QLAFX00_PORT_SPEED_2G:
		speed = FC_PORTSPEED_2GBIT;
		break;
	case QLAFX00_PORT_SPEED_4G:
		speed = FC_PORTSPEED_4GBIT;
		break;
	case QLAFX00_PORT_SPEED_8G:
		speed = FC_PORTSPEED_8GBIT;
		break;
	case QLAFX00_PORT_SPEED_10G:
		speed = FC_PORTSPEED_10GBIT;
		break;
	}
	fc_host_speed(shost) = speed;
}
/* QLAFX00 specific ISR implementation functions */

static inline void
qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	SET_FW_SENSE_LEN(sp, sense_len);
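	/*
	 * The firmware may return more sense data than the midlayer
	 * buffer can hold; anything beyond SCSI_SENSE_BUFFERSIZE (and
	 * beyond this IOCB's payload, par_sense_len) is picked up later
	 * from continuation entries via rsp->status_srb.
	 */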
	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	ql_dbg(ql_dbg_io, vha, 0x304d,
	    "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
	    sense_len, par_sense_len, track_sense_len);

	if (GET_FW_SENSE_LEN(sp) > 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
		    cp->sense_buffer, sense_len);
	}
}
static void
qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
    __le16 sstatus, __le16 cpstatus)
{
	struct srb_iocb *tmf;

	tmf = &sp->u.iocb_cmd;
	if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) ||
	    (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID)))
		cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE);
	tmf->u.tmf.comp_status = cpstatus;
	sp->done(vha, sp, 0);
}

static void
qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct abort_iocb_entry_fx00 *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	abt = &sp->u.iocb_cmd;
	abt->u.abt.comp_status = pkt->tgt_id_sts;
	sp->done(vha, sp, 0);
}
2287 qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
2288 struct ioctl_iocb_entry_fx00 *pkt)
2290 const char func[] = "IOSB_IOCB";
2292 struct fc_bsg_job *bsg_job;
2293 struct srb_iocb *iocb_job;
2295 struct qla_mt_iocb_rsp_fx00 fstatus;
2296 uint8_t *fw_sts_ptr;
2298 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2302 if (sp->type == SRB_FXIOCB_DCMD) {
2303 iocb_job = &sp->u.iocb_cmd;
2304 iocb_job->u.fxiocb.seq_number = pkt->seq_no;
2305 iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags;
2306 iocb_job->u.fxiocb.result = pkt->status;
2307 if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
2308 iocb_job->u.fxiocb.req_data =
2311 bsg_job = sp->u.bsg_job;
2313 memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
2315 fstatus.reserved_1 = pkt->reserved_0;
2316 fstatus.func_type = pkt->comp_func_num;
2317 fstatus.ioctl_flags = pkt->fw_iotcl_flags;
2318 fstatus.ioctl_data = pkt->dataword_r;
2319 fstatus.adapid = pkt->adapid;
2320 fstatus.adapid_hi = pkt->adapid_hi;
2321 fstatus.reserved_2 = pkt->reserved_1;
2322 fstatus.res_count = pkt->residuallen;
2323 fstatus.status = pkt->status;
2324 fstatus.seq_number = pkt->seq_no;
2325 memcpy(fstatus.reserved_3,
2326 pkt->reserved_2, 20 * sizeof(uint8_t));
2328 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
2329 sizeof(struct fc_bsg_reply);
2331 memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
2332 sizeof(struct qla_mt_iocb_rsp_fx00));
2333 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
2334 sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
2336 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
2337 sp->fcport->vha, 0x5080,
2338 (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
2340 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
2341 sp->fcport->vha, 0x5074,
2342 (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
2344 res = bsg_job->reply->result = DID_OK << 16;
2345 bsg_job->reply->reply_payload_rcv_len =
2346 bsg_job->reply_payload.payload_len;
2348 sp->done(vha, sp, res);
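
/*
 * Illustrative sketch (hypothetical helper, not used above): for BSG
 * requests the firmware status block is stashed directly behind
 * struct fc_bsg_reply inside the midlayer sense buffer, which is why
 * reply_len above is the size of both structures plus one pad byte.
 */
static inline uint8_t *
qlafx00_example_fw_sts_ptr(struct fc_bsg_job *bsg_job)
{
	/* Firmware status lives right after the generic BSG reply. */
	return (uint8_t *)bsg_job->req->sense + sizeof(struct fc_bsg_reply);
}
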
/**
 * qlafx00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
	struct sts_entry_fx00 *sts;
	__le16		comp_status;
	__le16		scsi_status;
	__le16		lscsi_status;
	int32_t		resid;
	uint32_t	sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t		*rsp_info = NULL, *sense_data = NULL;
	struct qla_hw_data *ha = vha->hw;
	uint32_t hindex, handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;

	sts = (struct sts_entry_fx00 *) pkt;

	comp_status = sts->comp_status;
	scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK);
	hindex = sts->handle;
	handle = LSW(hindex);
	que = MSW(hindex);
	req = ha->req_q_map[que];

	/* Validate handle. */
	if (handle < req->num_outstanding_cmds)
		sp = req->outstanding_cmds[handle];
	else
		sp = NULL;

	if (sp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3034,
		    "Invalid status handle (0x%x).\n", handle);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	if (sp->type == SRB_TM_CMD) {
		req->outstanding_cmds[handle] = NULL;
		qlafx00_tm_iocb_entry(vha, req, pkt, sp,
		    scsi_status, comp_status);
		return;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_do_host_ramp_up(vha);
		qla2x00_process_completed_request(vha, req, handle);
		return;
	}

	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3048,
		    "Command already returned (0x%x/%p).\n",
		    handle, sp);

		return;
	}

	lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK);

	fcport = sp->fcport;

	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
		sense_len = sts->sense_len;
	if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
	    | (uint16_t)SS_RESIDUAL_OVER)))
		resid_len = le32_to_cpu(sts->residual_len);
	if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN))
		fw_resid_len = le32_to_cpu(sts->residual_len);
	rsp_info = sense_data = sts->data;
	par_sense_len = sizeof(sts->data);

	/* Check for overrun. */
	if (comp_status == CS_COMPLETE &&
	    scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER))
		comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN);

	/*
	 * Based on the host and SCSI status, build the Linux result code.
	 */
	switch (le16_to_cpu(comp_status)) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
		    | (uint16_t)SS_RESIDUAL_OVER))) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | le16_to_cpu(lscsi_status);

		if (lscsi_status ==
		    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
			break;

		qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
			resid = fw_resid_len;
		else
			resid = resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) {
			if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
			    && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 |
				    le16_to_cpu(lscsi_status);
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes, "
				    "cp->underflow: 0x%x).\n",
				    resid, scsi_bufflen(cp), cp->underflow);

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status !=
		    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) &&
		    lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) {
			/*
			 * SCSI statuses of TASK SET FULL and BUSY count as
			 * "task not completed"; anything else here means
			 * dropped frames.
			 */
			ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | le16_to_cpu(lscsi_status);
			goto check_scsi_status;
		}
		ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
		    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
		    scsi_status, lscsi_status);

		res = DID_OK << 16 | le16_to_cpu(lscsi_status);

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non-zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status ==
			    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
				    "QUEUE FULL detected.\n");
				break;
			}
			if (lscsi_status !=
			    cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status &
			    cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
				break;

			qlafx00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
		    "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
		    "par_sense_len=0x%x, rsp_info_len=0x%x\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->tgt_id,
		    lscsi_status, cp->cmnd, scsi_bufflen(cp),
		    rsp_info_len, resid_len, fw_resid_len, sense_len,
		    par_sense_len, rsp_info_len);

	if (!res)
		qla2x00_do_host_ramp_up(vha);

	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}
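
/*
 * Illustrative sketch (hypothetical helpers, not used above): the
 * 32-bit completion handle packs the request-queue id in the high
 * word and the outstanding_cmds[] index in the low word, matching
 * the MAKE_HANDLE()/MSW()/LSW() usage throughout this file.
 */
static inline uint16_t qlafx00_example_handle_to_que(uint32_t hindex)
{
	return MSW(hindex);	/* high word: request queue id */
}

static inline uint16_t qlafx00_example_handle_to_index(uint32_t hindex)
{
	return LSW(hindex);	/* low word: outstanding command slot */
}
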
/**
 * qlafx00_status_cont_entry() - Process a Status Continuation entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp) {
		ql_dbg(ql_dbg_io, vha, 0x3037,
		    "no SP, sp = %p\n", sp);
		return;
	}

	if (!GET_FW_SENSE_LEN(sp)) {
		ql_dbg(ql_dbg_io, vha, 0x304b,
		    "no fw sense data, sp = %p\n", sp);
		return;
	}
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x303b,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (!GET_CMD_SENSE_LEN(sp)) {
		ql_dbg(ql_dbg_io, vha, 0x304c,
		    "no sense data, sp = %p\n", sp);
	} else {
		sense_len = GET_CMD_SENSE_LEN(sp);
		sense_ptr = GET_CMD_SENSE_PTR(sp);
		ql_dbg(ql_dbg_io, vha, 0x304f,
		    "sp=%p sense_len=0x%x sense_ptr=%p.\n",
		    sp, sense_len, sense_ptr);

		if (sense_len > sizeof(pkt->data))
			sense_sz = sizeof(pkt->data);
		else
			sense_sz = sense_len;

		/* Move sense data. */
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
		    (uint8_t *)pkt, sizeof(sts_cont_entry_t));
		memcpy(sense_ptr, pkt->data, sense_sz);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
		    sense_ptr, sense_sz);

		sense_len -= sense_sz;
		sense_ptr += sense_sz;

		SET_CMD_SENSE_PTR(sp, sense_ptr);
		SET_CMD_SENSE_LEN(sp, sense_len);
	}
	sense_len = GET_FW_SENSE_LEN(sp);
	sense_len = (sense_len > sizeof(pkt->data)) ?
	    (sense_len - sizeof(pkt->data)) : 0;
	SET_FW_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}
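
/*
 * Illustrative sketch (hypothetical helper, not used above): each
 * continuation entry carries at most sizeof(pkt->data) sense bytes,
 * so the firmware sense count is decremented by one full packet per
 * entry until it reaches zero and the command is completed.
 */
static inline uint32_t
qlafx00_example_fw_sense_left(uint32_t fw_sense_len, size_t pkt_data_size)
{
	return (fw_sense_len > pkt_data_size) ?
	    fw_sense_len - (uint32_t)pkt_data_size : 0;
}
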
/**
 * qlafx00_multistatus_entry() - Process Multi response queue entries.
 * @vha: SCSI driver HA context
 */
static void
qlafx00_multistatus_entry(struct scsi_qla_host *vha,
	struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	struct multi_sts_entry_fx00 *stsmfx;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle, hindex, handle_count, i;
	uint16_t que;
	struct req_que *req;
	__le32 *handle_ptr;

	stsmfx = (struct multi_sts_entry_fx00 *) pkt;

	handle_count = stsmfx->handle_count;

	if (handle_count > MAX_HANDLE_COUNT) {
		ql_dbg(ql_dbg_io, vha, 0x3035,
		    "Invalid handle count (0x%x).\n", handle_count);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	handle_ptr = &stsmfx->handles[0];

	for (i = 0; i < handle_count; i++) {
		hindex = le32_to_cpu(*handle_ptr);
		handle = LSW(hindex);
		que = MSW(hindex);
		req = ha->req_q_map[que];

		/* Validate handle. */
		if (handle < req->num_outstanding_cmds)
			sp = req->outstanding_cmds[handle];
		else
			sp = NULL;

		if (sp == NULL) {
			ql_dbg(ql_dbg_io, vha, 0x3044,
			    "Invalid status handle (0x%x).\n", handle);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			return;
		}
		qla2x00_process_completed_request(vha, req, handle);
		handle_ptr++;
	}
}
/**
 * qlafx00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
    struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x507f,
	    "type of error status in response: 0x%x\n", estatus);

	req = ha->req_q_map[que];

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}
/**
 * qlafx00_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
static void
qlafx00_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_fx00 *pkt;
	response_t *lptr;

	while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) !=
	    RESPONSE_PROCESSED) {
		lptr = rsp->ring_ptr;
		memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
		    sizeof(rsp->rsp_pkt));
		pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0 &&
		    pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
			qlafx00_error_entry(vha, rsp,
			    (struct sts_entry_fx00 *)pkt, pkt->entry_status,
			    pkt->entry_type);
			goto next_iter;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE_FX00:
			qlafx00_status_entry(vha, rsp, pkt);
			break;

		case STATUS_CONT_TYPE_FX00:
			qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;

		case MULTI_STATUS_TYPE_FX00:
			qlafx00_multistatus_entry(vha, rsp, pkt);
			break;

		case ABORT_IOCB_TYPE_FX00:
			qlafx00_abort_iocb_entry(vha, rsp->req,
			    (struct abort_iocb_entry_fx00 *)pkt);
			break;

		case IOCTL_IOSB_TYPE_FX00:
			qlafx00_ioctl_iosb_entry(vha, rsp->req,
			    (struct ioctl_iocb_entry_fx00 *)pkt);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5081,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
next_iter:
		WRT_REG_DWORD((void __iomem *)&lptr->signature,
		    RESPONSE_PROCESSED);
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
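
/*
 * Illustrative sketch (hypothetical helper, not used above): every
 * ring walk in this file advances the index the same way, wrapping
 * back to slot zero when it reaches the ring length and rewinding
 * the ring pointer with it.
 */
static inline uint16_t
qlafx00_example_ring_advance(uint16_t ring_index, uint16_t length)
{
	return (ring_index + 1 == length) ? 0 : ring_index + 1;
}
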
/**
 * qlafx00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 */
static void
qlafx00_async_event(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg;
	int data_size = 1;

	reg = &ha->iobase->ispfx00;
	/* Setup to process RIO completion. */
	switch (ha->aenmb[0]) {
	case QLAFX00_MBA_SYSTEM_ERR:		/* System Error */
		ql_log(ql_log_warn, vha, 0x5079,
		    "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		break;

	case QLAFX00_MBA_SHUTDOWN_RQSTD:	/* Shutdown requested */
		ql_dbg(ql_dbg_async, vha, 0x5076,
		    "Asynchronous FW shutdown requested.\n");
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		break;

	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
		ql_dbg(ql_dbg_async, vha, 0x5077,
		    "Asynchronous port Update received "
		    "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
		data_size = 4;
		break;

	case QLAFX00_MBA_TEMP_OVER:	/* Over temperature event */
	case QLAFX00_MBA_TEMP_CRIT:	/* Critical temperature event */
		ql_log(ql_log_info, vha, 0x5083,
		    "Asynchronous over/critical temperature event received "
		    "aenmb[0]: %x\n",
		    ha->aenmb[0]);
		qlafx00_post_aenfx_work(vha, ha->aenmb[0],
		    (uint32_t *)ha->aenmb, 1);
		return;

	default:
		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
		ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
		ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
		ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
		ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
		ql_dbg(ql_dbg_async, vha, 0x5078,
		    "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
		    ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
		break;
	}
	qlafx00_post_aenfx_work(vha, ha->aenmb[0],
	    (uint32_t *)ha->aenmb, data_size);
}
/**
 * qlafx00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: first mailbox word (read from the mailbox16 register)
 */
static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
{
	uint16_t	cnt;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;

	if (!ha->mcp32)
		ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out32[0] = mb0;
	wptr = (uint16_t __iomem *)&reg->mailbox17;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out32[cnt] = RD_REG_WORD(wptr);
		wptr++;
	}
}
/**
 * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
 * @irq: interrupt number
 * @dev_id: response queue pointer
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qlafx00_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_fx00 __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	mb[8];
	struct rsp_que *rsp;
	unsigned long	flags;
	uint32_t	clr_intr = 0;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x507d,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->ispfx00;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; clr_intr = 0) {
		stat = QLAFX00_RD_INTR_REG(ha);
		if ((stat & QLAFX00_HST_INT_STS_BITS) == 0)
			break;

		switch (stat & QLAFX00_HST_INT_STS_BITS) {
		case QLAFX00_INTR_MB_CMPLT:
		case QLAFX00_INTR_MB_RSP_CMPLT:
		case QLAFX00_INTR_MB_ASYNC_CMPLT:
		case QLAFX00_INTR_ALL_CMPLT:
			mb[0] = RD_REG_WORD(&reg->mailbox16);
			qlafx00_mbx_completion(vha, mb[0]);
			status |= MBX_INTERRUPT;
			clr_intr |= QLAFX00_INTR_MB_CMPLT;
			break;
		case QLAFX00_INTR_ASYNC_CMPLT:
		case QLAFX00_INTR_RSP_ASYNC_CMPLT:
			ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
			qlafx00_async_event(vha);
			clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
			break;
		case QLAFX00_INTR_RSP_CMPLT:
			qlafx00_process_response_queue(vha, rsp);
			clr_intr |= QLAFX00_INTR_RSP_CMPLT;
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x507a,
			    "Unrecognized interrupt type (%d).\n", stat);
			break;
		}
		QLAFX00_CLR_INTR_REG(ha, clr_intr);
		QLAFX00_RD_INTR_REG(ha);
	}

	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/* QLAFX00 specific IOCB implementation functions */

static inline cont_a64_entry_t *
qlafx00_prep_cont_type1_iocb(struct req_que *req,
	cont_a64_entry_t *lcont_pkt)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00;

	return cont_pkt;
}
static inline void
qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
    uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
{
	uint16_t avail_dsds;
	__le32 *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i, cont;
	struct req_que *req;
	cont_a64_entry_t lcont_pkt;
	cont_a64_entry_t *cont_pkt;

	vha = sp->fcport->vha;
	req = vha->req;

	cmd = GET_CMD_SP(sp);
	cont = 0;
	cont_pkt = NULL;

	/* Update entry type to indicate Command Type 7 IOCB */
	lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7;

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		lcmd_pkt->cntrl_flags = TMF_READ_DATA;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
			cont_pkt =
			    qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
			cur_dsd = (__le32 *)lcont_pkt.dseg_0_address;
			avail_dsds = 5;
			cont = 1;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		if (avail_dsds == 0 && cont == 1) {
			cont = 0;
			memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
			    REQUEST_ENTRY_SIZE);
		}
	}
	if (avail_dsds != 0 && cont == 1) {
		memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
		    REQUEST_ENTRY_SIZE);
	}
}
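
/*
 * Illustrative sketch (hypothetical helper, not used above): every
 * data segment descriptor written by the loops in this file is three
 * little-endian dwords -- DMA address low, DMA address high, length.
 */
static inline __le32 *
qlafx00_example_load_dsd(__le32 *cur_dsd, dma_addr_t sle_dma, uint32_t len)
{
	*cur_dsd++ = cpu_to_le32(LSD(sle_dma));	/* address bits 31:0 */
	*cur_dsd++ = cpu_to_le32(MSD(sle_dma));	/* address bits 63:32 */
	*cur_dsd++ = cpu_to_le32(len);		/* segment byte count */
	return cur_dsd;
}
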
/**
 * qlafx00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qlafx00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	uint32_t	index;
	uint32_t	handle;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_7_fx00 *cmd_pkt;
	struct cmd_type_7_fx00 lcmd_pkt;
	struct scsi_lun llun;
	char		tag[2];

	/* Setup device pointers. */
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Forcing marker needed for now */
	vha->marker_needed = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;

	memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);

	lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
	lcmd_pkt.handle_hi = 0;
	lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
	lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);

	int_to_scsilun(cmd->device->lun, &llun);
	host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
	    sizeof(lcmd_pkt.lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			lcmd_pkt.task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			lcmd_pkt.task = TSK_ORDERED;
			break;
		}
	}

	/* Load SCSI command packet. */
	host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
	lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);

	/* Set total data segment count. */
	lcmd_pkt.entry_count = (uint8_t)req_cnt;

	/* Specify response queue number where completion should happen */
	lcmd_pkt.entry_status = (uint8_t) rsp->id;

	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
	    (uint8_t *)cmd->cmnd, cmd->cmd_len);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
	    (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);

	memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
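
/*
 * Illustrative sketch (hypothetical helper, not used above): the
 * free-slot computation behind the room check in qlafx00_start_scsi().
 * When the producer index sits below the chip's consumer index the
 * free count is the simple difference; otherwise it is the remainder
 * of the ring past the wrap point.
 */
static inline uint16_t
qlafx00_example_ring_room(uint16_t ring_index, uint16_t out, uint16_t length)
{
	if (ring_index < out)
		return out - ring_index;	/* no wrap between them */
	return length - (ring_index - out);	/* wrapped region free */
}
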
void
qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;
	struct tsk_mgmt_entry_fx00 tm_iocb;
	struct scsi_lun llun;

	memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
	tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
	tm_iocb.entry_count = 1;
	tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	tm_iocb.handle_hi = 0;
	tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
	tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
	tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
	if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
		int_to_scsilun(fxio->u.tmf.lun, &llun);
		host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
		    sizeof(struct scsi_lun));
	}

	memcpy((void *)ptm_iocb, &tm_iocb,
	    sizeof(struct tsk_mgmt_entry_fx00));
	wmb();
}

void
qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;
	struct abort_iocb_entry_fx00 abt_iocb;

	memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
	abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
	abt_iocb.entry_count = 1;
	abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb.abort_handle =
	    cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
	abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
	abt_iocb.req_que_no = cpu_to_le16(req->id);

	memcpy((void *)pabt_iocb, &abt_iocb,
	    sizeof(struct abort_iocb_entry_fx00));
	wmb();
}
void
qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	struct fc_bsg_job *bsg_job;
	struct fxdisc_entry_fx00 fx_iocb;
	uint8_t entry_cnt = 1;

	memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
	fx_iocb.entry_type = FX00_IOCB_TYPE;
	fx_iocb.handle = cpu_to_le32(sp->handle);
	fx_iocb.entry_count = entry_cnt;

	if (sp->type == SRB_FXIOCB_DCMD) {
		fx_iocb.func_num =
		    sp->u.iocb_cmd.u.fxiocb.req_func_type;
		fx_iocb.adapid = fxio->u.fxiocb.adapter_id;
		fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi;
		fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0;
		fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1;
		fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra;

		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
			fx_iocb.req_dsdcnt = cpu_to_le16(1);
			fx_iocb.req_xfrcnt =
			    cpu_to_le16(fxio->u.fxiocb.req_len);
			fx_iocb.dseg_rq_address[0] =
			    cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
			fx_iocb.dseg_rq_address[1] =
			    cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
			fx_iocb.dseg_rq_len =
			    cpu_to_le32(fxio->u.fxiocb.req_len);
		}

		if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
			fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
			fx_iocb.rsp_xfrcnt =
			    cpu_to_le16(fxio->u.fxiocb.rsp_len);
			fx_iocb.dseg_rsp_address[0] =
			    cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
			fx_iocb.dseg_rsp_address[1] =
			    cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
			fx_iocb.dseg_rsp_len =
			    cpu_to_le32(fxio->u.fxiocb.rsp_len);
		}

		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) {
			fx_iocb.dataword = fxio->u.fxiocb.req_data;
		}
		fx_iocb.flags = fxio->u.fxiocb.flags;
	} else {
		struct scatterlist *sg;
		bsg_job = sp->u.bsg_job;
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

		fx_iocb.func_num = piocb_rqst->func_type;
		fx_iocb.adapid = piocb_rqst->adapid;
		fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
		fx_iocb.reserved_0 = piocb_rqst->reserved_0;
		fx_iocb.reserved_1 = piocb_rqst->reserved_1;
		fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
		fx_iocb.dataword = piocb_rqst->dataword;
		fx_iocb.req_xfrcnt = piocb_rqst->req_len;
		fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len;

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
			int avail_dsds, tot_dsds;
			cont_a64_entry_t lcont_pkt;
			cont_a64_entry_t *cont_pkt = NULL;
			__le32 *cur_dsd;
			int index = 0, cont = 0;

			fx_iocb.req_dsdcnt =
			    cpu_to_le16(bsg_job->request_payload.sg_cnt);
			tot_dsds =
			    bsg_job->request_payload.sg_cnt;
			cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0];
			avail_dsds = 1;
			for_each_sg(bsg_job->request_payload.sg_list, sg,
			    tot_dsds, index) {
				dma_addr_t sle_dma;

				/* Allocate additional continuation packets? */
				if (avail_dsds == 0) {
					/*
					 * Five DSDs are available in the Cont.
					 * Type 1 IOCB.
					 */
					memset(&lcont_pkt, 0,
					    REQUEST_ENTRY_SIZE);
					cont_pkt =
					    qlafx00_prep_cont_type1_iocb(
						sp->fcport->vha->req,
						&lcont_pkt);
					cur_dsd = (__le32 *)
					    lcont_pkt.dseg_0_address;
					avail_dsds = 5;
					cont = 1;
					entry_cnt++;
				}

				sle_dma = sg_dma_address(sg);
				*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
				avail_dsds--;

				if (avail_dsds == 0 && cont == 1) {
					cont = 0;
					memcpy_toio(
					    (void __iomem *)cont_pkt,
					    &lcont_pkt, REQUEST_ENTRY_SIZE);
					ql_dump_buffer(
					    ql_dbg_user + ql_dbg_verbose,
					    sp->fcport->vha, 0x3042,
					    (uint8_t *)&lcont_pkt,
					    REQUEST_ENTRY_SIZE);
				}
			}
			if (avail_dsds != 0 && cont == 1) {
				memcpy_toio((void __iomem *)cont_pkt,
				    &lcont_pkt, REQUEST_ENTRY_SIZE);
				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
				    sp->fcport->vha, 0x3043,
				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
			}
		}

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
			int avail_dsds, tot_dsds;
			cont_a64_entry_t lcont_pkt;
			cont_a64_entry_t *cont_pkt = NULL;
			__le32 *cur_dsd;
			int index = 0, cont = 0;

			fx_iocb.rsp_dsdcnt =
			    cpu_to_le16(bsg_job->reply_payload.sg_cnt);
			tot_dsds = bsg_job->reply_payload.sg_cnt;
			cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0];
			avail_dsds = 1;

			for_each_sg(bsg_job->reply_payload.sg_list, sg,
			    tot_dsds, index) {
				dma_addr_t sle_dma;

				/* Allocate additional continuation packets? */
				if (avail_dsds == 0) {
					/*
					 * Five DSDs are available in the Cont.
					 * Type 1 IOCB.
					 */
					memset(&lcont_pkt, 0,
					    REQUEST_ENTRY_SIZE);
					cont_pkt =
					    qlafx00_prep_cont_type1_iocb(
						sp->fcport->vha->req,
						&lcont_pkt);
					cur_dsd = (__le32 *)
					    lcont_pkt.dseg_0_address;
					avail_dsds = 5;
					cont = 1;
					entry_cnt++;
				}

				sle_dma = sg_dma_address(sg);
				*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
				avail_dsds--;

				if (avail_dsds == 0 && cont == 1) {
					cont = 0;
					memcpy_toio((void __iomem *)cont_pkt,
					    &lcont_pkt,
					    REQUEST_ENTRY_SIZE);
					ql_dump_buffer(
					    ql_dbg_user + ql_dbg_verbose,
					    sp->fcport->vha, 0x3045,
					    (uint8_t *)&lcont_pkt,
					    REQUEST_ENTRY_SIZE);
				}
			}
			if (avail_dsds != 0 && cont == 1) {
				memcpy_toio((void __iomem *)cont_pkt,
				    &lcont_pkt, REQUEST_ENTRY_SIZE);
				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
				    sp->fcport->vha, 0x3046,
				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
			}
		}

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
			fx_iocb.dataword = piocb_rqst->dataword;
		fx_iocb.flags = piocb_rqst->flags;
		fx_iocb.entry_count = entry_cnt;
	}

	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
	    sp->fcport->vha, 0x3047,
	    (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));

	memcpy((void *)pfxiocb, &fx_iocb,
	    sizeof(struct fxdisc_entry_fx00));
}