/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2012 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

/* BSG support for ELS/CT pass through */
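/*
 * qla2x00_bsg_job_done() - srb completion callback for BSG pass-through
 * commands: store the completion status in the fc_bsg_job reply, signal
 * the block layer that the job is done, and release the srb.
 */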
void
qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	bsg_job->reply->result = res;
	bsg_job->job_done(bsg_job);
	sp->free(vha, sp);
}

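/*
 * qla2x00_bsg_sp_free() - release the DMA mappings and resources tied to
 * a BSG srb. For ISPFX00 pass-through commands the request/reply payloads
 * are unmapped only if the IOCB flagged them as DMA-valid; all other
 * command types unmap both payloads unconditionally. The dummy fcport
 * allocated for host-based commands is freed here as well.
 */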
void
qla2x00_bsg_sp_free(void *data, void *ptr)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	struct qla_hw_data *ha = vha->hw;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST)
		kfree(sp->fcport);
	qla2x00_rel_sp(vha, sp);
}

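/*
 * qla24xx_fcp_prio_cfg_valid() - sanity-check FCP priority configuration
 * data read from flash: an all-ones signature means no data at all, the
 * first four bytes must spell the "HQOS" header, and, when @flag is set,
 * at least one entry must carry FCP_PRIO_ENTRY_TAG_VALID.
 * Returns 1 if the configuration is usable, 0 otherwise.
 */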
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		ret = 0;
		goto exit;
	}

	if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
			bcode[3] != 'S') {
		/* Invalid FCP priority data header*/
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		ret = 0;
		goto exit;
	}
	if (flag != 1)
		goto exit;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}
exit:
	return ret;
}

static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_job->reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_job->reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_job->reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_job->reply->result = DID_OK;
		bsg_job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_job->reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer is invalid, the
			 * fcp_prio_cfg data is of no use.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_job->reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}

exit_fcp_prio_cfg:
	if (!ret)
		bsg_job->job_done(bsg_job);
	return ret;
}

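/*
 * qla2x00_process_els() - issue an ELS pass-through IOCB, either through
 * an existing rport (FC_BSG_RPT_ELS) or from the host itself with no
 * prior login (FC_BSG_HST_ELS_NOLOGIN). Completion is asynchronous via
 * qla2x00_bsg_job_done().
 */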
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DRIVER_ERROR << 16);
	uint16_t nextlid = 0;

	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
			bsg_job->request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
			bsg_job->request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
			bsg_job->request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
			(fcport->d_id.b.al_pa == 0xFD) ?
			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
		(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
		SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
		(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
		"bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(vha, sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	/* Only free the dummy fcport allocated above; an rport-based ELS
	 * borrows the rport's fcport, which must not be freed here.
	 */
	if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
		kfree(fcport);
done:
	return rval;
}

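/*
 * qla24xx_calc_ct_iocbs() - number of IOCBs needed for a CT command:
 * the first IOCB holds two data segments, each continuation IOCB
 * holds five more.
 */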
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

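/*
 * qla2x00_process_ct() - issue a CT pass-through command (FC_BSG_HST_CT)
 * to either the SNS (0xFC) or the management server (0xFA), using a
 * dummy fcport for the port-specific fields.
 */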
static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
	srb_t *sp;
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
		(bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
			>> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = cpu_to_le16(NPH_SNS);
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(vha, sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
			    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			ha->isp_ops->fw_dump(vha, 0);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

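/*
 * qla2x00_process_loopback() - run an echo or loopback diagnostic.
 * An external loopback request on a switch-attached port (or a CNA port
 * carrying an ELS frame) is run as an ECHO test; otherwise the port is
 * reconfigured for internal/external loopback, the test is run, and the
 * original port configuration is restored. The mailbox status registers
 * and the command actually sent are returned after the fc_bsg_reply in
 * the request's sense buffer.
 */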
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
		&req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
		&rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    (ha->current_topology == ISP_CFG_F ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
	    elreq.options == EXTERNAL_LOOPBACK) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					ha->isp_ops->fw_dump(vha, 0);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
		bsg_job->reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_job->reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, rsp_data,
			rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
	    sizeof(struct fc_bsg_reply);
	memcpy(fw_sts_ptr, response, sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
		rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
		req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job->job_done(bsg_job);
	return rval;
}

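/*
 * qla84xx_reset() - reset the ISP84xx chip; A84_ISSUE_RESET_DIAG_FW
 * selects a reset into the diagnostic firmware.
 */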
static int
qla84xx_reset(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_job->reply->result = DID_OK;
		bsg_job->job_done(bsg_job);
	}

	return rval;
}

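/*
 * qla84xx_updatefw() - download a new ISP84xx firmware image supplied in
 * the request payload and hand it to the chip with a Verify Chip IOCB.
 */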
static int
qla84xx_updatefw(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
		&fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
	mn->dseg_length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job->job_done(bsg_job);
	return rval;
}

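/*
 * qla84xx_mgmt_cmd() - ISP84xx management commands (read/write memory,
 * get info, change config) issued through an Access Chip IOCB. DMA
 * buffers are set up only for the data-carrying sub-commands.
 */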
static int
qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
			&mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_job->reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
				bsg_job->reply_payload.sg_cnt, mgmt_b,
				data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job->job_done(bsg_job);
	return rval;
}

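/*
 * qla24xx_iidma() - get or set the iIDMA speed of a logged-in target
 * port identified by WWPN; on a get, the updated qla_port_param is
 * returned after the fc_bsg_reply.
 */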
static int
qla24xx_iidma(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
			fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
			port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
			&port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iIDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
				sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_job->reply) +
				sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
				sizeof(struct qla_port_param));
		}

		bsg_job->reply->result = DID_OK;
		bsg_job->job_done(bsg_job);
	}

	return rval;
}

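/*
 * qla2x00_optrom_setup() - validate an option-ROM read/update request
 * and stage ha->optrom_buffer for it. Returns 0 on success with
 * optrom_state left at QLA_SREADING or QLA_SWRITING.
 */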
static int
qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vmalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	memset(ha->optrom_buffer, 0, ha->optrom_region_size);
	return 0;
}

static int
qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval)
		return rval;

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_job->reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	bsg_job->job_done(bsg_job);
	return rval;
}

static int
qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval)
		return rval;

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	bsg_job->reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	bsg_job->job_done(bsg_job);
	return rval;
}

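/*
 * The FRU and I2C helpers below share a pattern: stage the caller's
 * structure in an on-stack buffer, bounce the data through a small
 * buffer from the s_dma_pool, and report status via vendor_rsp[0].
 */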
static int
qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}

static int
qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}

static int
qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}

static int
qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}

static int
qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}

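/*
 * qla24xx_process_bidir_cmd() - run a bidirectional diagnostic IOCB
 * against the port's own self-login session. All failures are reported
 * through the vendor-specific status word; the job itself always
 * completes with DID_OK.
 */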
static int
qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
			"This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
			"Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
			"Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
			"Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2P\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/* Assign the lengths before comparing them, otherwise both are
	 * still zero and the check can never fail.
	 */
	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/*Populate srb->ctx with bidir ctx*/
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->reply_payload_rcv_len = 0;
	bsg_job->reply->result = (DID_OK) << 16;
	bsg_job->job_done(bsg_job);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}

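/*
 * qlafx00_mgmt_cmd() - pass an ISPFX00 management IOCB from user space
 * to the firmware, mapping the request/reply payloads only when the
 * IOCB flags mark them as DMA-valid.
 */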
static int
qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
	    (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = piocb_rqst->dataword;

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}

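/*
 * qla26xx_serdes_op() - read or write a SerDes register on behalf of
 * user space (QL_VND_SERDES_OP).
 */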
static int
qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
		bsg_job->reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_log(ql_log_warn, vha, 0x708c,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}

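/* Dispatch a FC_BSG_HST_VENDOR request to its QL_VND_* handler. */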
static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	default:
		return -ENOSYS;
	}
}

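/*
 * qla24xx_bsg_request() - entry point for all BSG requests routed to
 * this driver by the FC transport.
 */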
int
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
	int ret = -EINVAL;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_job->reply->reply_payload_rcv_len = 0;

	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
	}

	if (qla2x00_reset_active(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_job->request->msgcode);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);

	switch (bsg_job->request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}

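/*
 * qla24xx_bsg_timeout() - abort the outstanding srb that carries the
 * timed-out BSG job, searching every request queue for it.
 */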
int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				if (((sp->type == SRB_CT_CMD) ||
					(sp->type == SRB_ELS_CMD_HST) ||
					(sp->type == SRB_FXIOCB_BCMD))
					&& (sp->u.bsg_job == bsg_job)) {
					req->outstanding_cmds[cnt] = NULL;
					spin_unlock_irqrestore(&ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command "
						    "failed.\n");
						bsg_job->req->errors =
						bsg_job->reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command "
						    "success.\n");
						bsg_job->req->errors =
						bsg_job->reply->result = 0;
					}
					spin_lock_irqsave(&ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return 0;
}