 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_reg.h"

BFA_TRC_FILE(HAL, CORE);
/*
 * BFA module list terminated by NULL
 */
static struct bfa_module_s *hal_mods[] = {
/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_fcdiag_intr,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};
/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,			/* BFI_MC_IOC */
	NULL,			/* BFI_MC_DIAG */
	NULL,			/* BFI_MC_FLASH */
	NULL,			/* BFI_MC_CEE */
	NULL,			/* BFI_MC_PORT */
	bfa_iocfc_isr,		/* BFI_MC_IOCFC */
};
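/*
 * Note: this table is handed to the IOC layer through
 * bfa_ioc_mbox_register() in bfa_iocfc_attach() below. Only the IOCFC
 * class installs a handler here; mailbox messages for the other classes
 * are presumably consumed by the IOC layer itself.
 */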
static void
bfa_com_port_attach(struct bfa_s *bfa)
{
	struct bfa_port_s *port = &bfa->modules.port;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);

	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
}

static void
bfa_com_ablk_attach(struct bfa_s *bfa)
{
	struct bfa_ablk_s *ablk = &bfa->modules.ablk;
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);

	bfa_ablk_attach(ablk, &bfa->ioc);
	bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
}

static void
bfa_com_cee_attach(struct bfa_s *bfa)
{
	struct bfa_cee_s *cee = &bfa->modules.cee;
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);

	cee->trcmod = bfa->trcmod;
	bfa_cee_attach(cee, &bfa->ioc, bfa);
	bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
}

static void
bfa_com_sfp_attach(struct bfa_s *bfa)
{
	struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);

	bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
	bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
}

static void
bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_flash_s *flash = BFA_FLASH(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);

	bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_flash_memclaim(flash, flash_dma->kva_curp,
			   flash_dma->dma_curp, mincfg);
}

static void
bfa_com_diag_attach(struct bfa_s *bfa)
{
	struct bfa_diag_s *diag = BFA_DIAG_MOD(bfa);
	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);

	bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
	bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
}

static void
bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_phy_s *phy = BFA_PHY(bfa);
	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);

	bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
}
/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */
enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
	BFA_IOCFC_ACT_ENABLE	= 4,
};
#define DEF_CFG_NUM_FABRICS		1
#define DEF_CFG_NUM_LPORTS		256
#define DEF_CFG_NUM_CQS			4
#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS		128
#define DEF_CFG_NUM_FCXP_REQS		64
#define DEF_CFG_NUM_UF_BUFS		64
#define DEF_CFG_NUM_RPORTS		1024
#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS		256

#define DEF_CFG_NUM_SGPGS		2048
#define DEF_CFG_NUM_REQQ_ELEMS		256
#define DEF_CFG_NUM_RSPQ_ELEMS		64
#define DEF_CFG_NUM_SBOOT_TGTS		16
#define DEF_CFG_NUM_SBOOT_LUNS		16
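/*
 * These DEF_CFG_* values seed bfa_cfg_get_default() at the bottom of
 * this file; bfa_cfg_get_min() then shrinks a subset of them to the
 * BFA_*_MIN floors for memory-constrained (min_cfg) operation.
 */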
/*
 * Forward declarations for IOC FC functions
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
/*
 * BFA Interrupt handling functions
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in the request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}
static void
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	bfa_isr_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
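/*
 * Bookkeeping sketch for the drain loop above: ci is the host-side
 * consumer index and pi the firmware-side producer index of the
 * response queue; CQ_INCR() presumably advances ci modulo
 * num_rspq_elems. Once ci catches up with pi, publishing pi through
 * rme_q_ci hands the consumed slots back to the firmware.
 */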
static void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	bfa_isr_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}

	bfa_msix_lpu_err(bfa, intr);
}
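/*
 * bfa_intx() below is the legacy INTx path: unlike bfa_msix_all(), it
 * must first acknowledge the combined RME/CPE interrupt status before
 * walking the same per-queue handlers.
 */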
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
	if (qintr)
		writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
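/*
 * Register semantics assumed above: writing a bit to intr_status clears
 * that pending interrupt, while a set bit in intr_mask masks the
 * corresponding source; writing ~umsk therefore unmasks exactly the
 * per-function and error interrupts selected in umsk.
 */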
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}
void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}

void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	bfa_trc_stop(bfa->trcmod);
}

void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}
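/*
 * MSI-X vectors are laid out linearly: cpe_vec_q0/rme_vec_q0 (assigned
 * in bfa_iocfc_init_mem() below) hold the first vector of each group,
 * so "vec - *_vec_q0" recovers the queue id in the two handlers above.
 */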
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				  __HFN_INT_MBOX_LPU1_CT2);
		intr &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = intr & __HFN_INT_LL_HALT;
		pss_isr = intr & __HFN_INT_ERR_PSS;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (halt_isr) {
		/*
		 * If the LL_HALT bit is set, the FW Init Halt LL Port
		 * register needs to be cleared as well so that the
		 * Interrupt Status register will be cleared.
		 */
		curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
		curr_value &= ~__FW_INIT_HALT_P;
		writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
	}

	if (pss_isr) {
		/*
		 * The ERR_PSS bit needs to be cleared as well in case
		 * interrupts are shared, so the driver's interrupt handler
		 * is still called even though it is already masked out.
		 */
		curr_value = readl(bfa->ioc.ioc_regs.pss_err_status_reg);
		writel(curr_value, bfa->ioc.ioc_regs.pss_err_status_reg);
	}

	writel(intr, bfa->iocfc.bfa_regs.intr_status);
	bfa_ioc_error_isr(&bfa->ioc);
}
/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */
/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->single_msix_vec = 0;
	if (bfa->msix.nvecs == 1)
		cfg_info->single_msix_vec = 1;
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);

	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is the driver init path,
	 * and not the IOC disable/enable path.
	 */
	cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_fn_lpu(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
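/*
 * The mailbox request above is small; the bulk of the configuration
 * (cfg_info) stays in host memory and only its DMA address is sent, so
 * the firmware fetches the page by DMA and answers with
 * BFI_IOCFC_I2H_CFG_REPLY, handled in bfa_iocfc_cfgrsp() below.
 */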
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->action = BFA_IOCFC_ACT_NONE;

	/*
	 * Initialize chip-specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = NULL;
		iocfc->hwif.hw_rspq_ack = NULL;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
		iocfc->hwif.hw_rspq_ack = NULL;
	}

	iocfc->hwif.hw_reginit(bfa);
}
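/*
 * The hwif table above is the single point where the rest of this file
 * sees ASIC differences: the bfa_hwcb_* and bfa_hwct_* families each
 * plug in their own register-init, queue-ack and MSI-X helpers, and
 * CT2 parts additionally override reginit while dropping the rspq-ack
 * and isr-mode-set hooks.
 */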
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
	u8 *dm_kva = NULL;
	u64 dm_pa = 0;
	int i, per_reqq_sz, per_rspq_sz, dbgsz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_dma_s *reqq_dma, *rspq_dma;

	/* First allocate dma memory for IOC */
	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
			  bfa_mem_dma_phys(ioc_dma));

	/* Claim DMA-able memory for the request/response queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
	}

	/* Claim IOCFC dma memory - for shadow CI/PI */
	dm_kva = bfa_mem_dma_virt(iocfc_dma);
	dm_pa = bfa_mem_dma_phys(iocfc_dma);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/* Claim IOCFC dma memory - for the config info page */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/* Claim IOCFC dma memory - for the config response */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	/* Claim IOCFC kva memory */
	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
		bfa_mem_kva_curp(iocfc) += dbgsz;
	}
}
/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int i;

	bfa->queue_process = BFA_TRUE;
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		bfa_isr_rspq_ack(bfa, i);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);
}
static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;

	if (bfa->iocfc.cfgdone)
		bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
	else
		bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);

	if (bfa->iocfc.cfgdone)
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
	else
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

static void
bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->enable_comp);
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}
/*
 * Configure queue registers from the firmware response
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int i;
	struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}
static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
	bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}
/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;

	/* num_cqs is a single byte and needs no endian conversion */
	fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);

	iocfc->cfgdone = BFA_TRUE;

	/*
	 * configure queue register offsets as learnt from firmware
	 */
	bfa_iocfc_qreg(bfa, &cfgrsp->qreg);

	/*
	 * Re-configure resources as learnt from firmware
	 */
	bfa_iocfc_res_recfg(bfa, fwcfg);

	/*
	 * Install MSIX queue handlers
	 */
	bfa_msix_queue_install(bfa);

	/*
	 * Configuration is complete - initialize/start submodules
	 */
	bfa_fcport_init(bfa);

	if (iocfc->action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
	else {
		if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
				     bfa_iocfc_enable_cb, bfa);
		bfa_iocfc_start_submod(bfa);
	}
}
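/*
 * All multi-byte fwcfg fields arrive big-endian from the firmware,
 * hence the be16_to_cpu() fixups at the top of bfa_iocfc_cfgrsp().
 */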
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}
/* Fabric Assigned Address specific functions */

/*
 * Check whether IOC is ready before sending command down
 */
static bfa_status_t
bfa_faa_validate_request(struct bfa_s *bfa)
{
	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
	u32 card_type = bfa->ioc.attr->card_type;

	if (bfa_ioc_is_operational(&bfa->ioc)) {
		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
	} else {
		if (!bfa_ioc_is_acq_addr(&bfa->ioc))
			return BFA_STATUS_IOC_NON_OP;
	}

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_en_dis_s faa_enable_req;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	bfa_status_t status;

	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
		return BFA_STATUS_FAA_ENABLED;

	if (bfa_fcport_is_trunk_enabled(bfa))
		return BFA_STATUS_ERROR_TRUNK_ENABLED;

	bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
	iocfc->faa_args.busy = BFA_TRUE;

	memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
	bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
			  sizeof(struct bfi_faa_en_dis_s));

	return BFA_STATUS_OK;
}
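/*
 * FAA requests are strictly serialized: faa_args.busy is set here (and
 * in the disable/query paths below) and cleared only in the matching
 * *_reply() handler once the firmware answers, which is why concurrent
 * callers see BFA_STATUS_DEVBUSY.
 */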
bfa_status_t
bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
		void *cbarg)
{
	struct bfi_faa_en_dis_s faa_disable_req;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	bfa_status_t status;

	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
		return BFA_STATUS_FAA_DISABLED;

	bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
	iocfc->faa_args.busy = BFA_TRUE;

	memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
	bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
			  sizeof(struct bfi_faa_en_dis_s));

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
	      bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_query_s faa_attr_req;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	bfa_status_t status;

	iocfc->faa_args.faa_attr = attr;
	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	iocfc->faa_args.busy = BFA_TRUE;
	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
			  sizeof(struct bfi_faa_query_s));

	return BFA_STATUS_OK;
}
/*
 * FAA enable response
 */
static void
bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
		     struct bfi_faa_en_dis_rsp_s *rsp)
{
	void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
	bfa_status_t status = rsp->status;

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
	iocfc->faa_args.busy = BFA_FALSE;
}

/*
 * FAA disable response
 */
static void
bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
		      struct bfi_faa_en_dis_rsp_s *rsp)
{
	void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
	bfa_status_t status = rsp->status;

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
	iocfc->faa_args.busy = BFA_FALSE;
}

/*
 * FAA query response
 */
static void
bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
		    bfi_faa_query_rsp_t *rsp)
{
	void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;

	if (iocfc->faa_args.faa_attr) {
		iocfc->faa_args.faa_attr->faa = rsp->faa;
		iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
		iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
	}

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
	iocfc->faa_args.busy = BFA_FALSE;
}
/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s *bfa = bfa_arg;

	if (status == BFA_STATUS_FAA_ACQ_ADDR) {
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
			     bfa_iocfc_init_cb, bfa);
		return;
	}

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
				     bfa_iocfc_enable_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
}
/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			     bfa);
	else {
		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			     bfa);
	}
}
/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			     bfa);
}

/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
/*
 * Query IOC memory requirement information.
 */
static void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		  struct bfa_s *bfa)
{
	int q, per_reqq_sz, per_rspq_sz;
	u32 dm_len = 0;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);

	/* dma memory setup for IOC */
	bfa_mem_dma_setup(meminfo, ioc_dma,
		BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));

	/* dma memory setup for REQ/RSP queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
		bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
				  per_reqq_sz);
		bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
				  per_rspq_sz);
	}

	/* IOCFC dma memory - calculate Shadow CI/PI size */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++)
		dm_len += (2 * BFA_CACHELINE_SZ);

	/* IOCFC dma memory - calculate config info / rsp size */
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);

	/* dma memory setup for IOCFC */
	bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);

	/* kva memory setup for IOCFC */
	bfa_mem_kva_setup(meminfo, iocfc_kva,
			  ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
}
/*
 * Attach the IOCFC module: set up IOC callbacks, claim memory,
 * initialize queues, and register the mailbox message handlers.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
/*
 * Start firmware initialization by enabling the IOC.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}
/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	if (bfa->iocfc.cfgdone)
		bfa_iocfc_start_submod(bfa);
}

/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
		bfa_faa_enable_reply(iocfc,
				     (struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
		bfa_faa_disable_reply(iocfc,
				      (struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
		break;
	}
}
void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
		be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
		be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
		be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
		be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

	attr->config = iocfc->cfg;
}
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_fn_lpu(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
	return BFA_STATUS_OK;
}
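/*
 * Note the request-queue pattern used above (and throughout the
 * driver): bfa_reqq_next() returns the next free message slot in the
 * request queue, or NULL when the queue is full (hence
 * BFA_STATUS_DEVBUSY), and bfa_reqq_produce() posts the built message
 * to the firmware.
 */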
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
}
/*
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
	bfa_ioc_enable(&bfa->ioc);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}
/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}
int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}
/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to the
 * pre-defined values within the BFA library.
 *
 * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using bfa_cfg_get_default() API.
 *
 *			If cap's boundary check fails, the library will use
 *			the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *			indicates the memory type (see bfa_mem_type_t) and
 *			amount of memory required.
 *
 *			Driver should allocate the memory, populate the
 *			starting address for each block and provide the same
 *			structure as input parameter to bfa_attach() call.
 *
 * @param[in] bfa -	pointer to the bfa structure, used while fetching the
 *			dma, kva memory information of the bfa sub-modules.
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		    struct bfa_s *bfa)
{
	int i;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));

	/* Initialize the DMA & KVA meminfo queues */
	INIT_LIST_HEAD(&meminfo->dma_info.qe);
	INIT_LIST_HEAD(&meminfo->kva_info.qe);

	bfa_iocfc_meminfo(cfg, meminfo, bfa);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, meminfo, bfa);

	/* dma info setup */
	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
	bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
	bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
	bfa_mem_dma_setup(meminfo, flash_dma,
			  bfa_flash_meminfo(cfg->drvcfg.min_cfg));
	bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
	bfa_mem_dma_setup(meminfo, phy_dma,
			  bfa_phy_meminfo(cfg->drvcfg.min_cfg));
}
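/*
 * Typical driver-side bring-up sequence, as described in the comment
 * blocks above and below (a sketch only; allocating the meminfo
 * elements is driver specific and not shown here):
 *
 *	bfa_cfg_get_default(&cfg);	[or bfa_cfg_get_min()]
 *	bfa_cfg_get_meminfo(&cfg, &meminfo, &bfa);
 *	... allocate every DMA/KVA element queued on meminfo ...
 *	bfa_attach(&bfa, bfad, &cfg, &meminfo, &pcidev);
 *	bfa_init(&bfa);			[triggers HW initialization]
 */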
/*
 * Use this function to attach the driver instance to the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in the bfa_init() call).
 *
 * This call will fail if the cap is out of range compared to
 * the pre-defined values within the BFA library.
 *
 * @param[out] bfa	Pointer to bfa_t.
 * @param[in] bfad	Opaque handle back to the driver's IOC structure
 * @param[in] cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in] meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in] pcidev	pointer to struct bfa_pcidev_s
 *
 * Special Considerations:
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/* Initialize memory pointers for iterative allocation */
	dma_info = &meminfo->dma_info;
	dma_info->kva_curp = dma_info->kva;
	dma_info->dma_curp = dma_info->dma;

	kva_info = &meminfo->kva_info;
	kva_info->kva_curp = kva_info->kva;

	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva_curp = dma_elem->kva;
		dma_elem->dma_curp = dma_elem->dma;
	}

	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva_curp = kva_elem->kva;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, pcidev);

	bfa_com_port_attach(bfa);
	bfa_com_ablk_attach(bfa);
	bfa_com_cee_attach(bfa);
	bfa_com_sfp_attach(bfa);
	bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
	bfa_com_diag_attach(bfa);
	bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
}
/*
 * Use this function to delete a BFA IOC. IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * Special Considerations:
 */
void
bfa_detach(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->detach(bfa);
	bfa_ioc_detach(&bfa->ioc);
}
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}

void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct list_head *qen;
	struct bfa_cb_qe_s *hcb_qe;

	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}

void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}
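/*
 * The two drain helpers above differ only in the completion flag:
 * callbacks dequeued by bfa_comp_process() run with BFA_TRUE (normal
 * completion), while bfa_comp_free() runs them with BFA_FALSE to
 * release resources without completing.
 */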
/*
 * Return the list of PCI vendor/device id lists supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
	static struct bfa_pciid_s __pciids[] = {
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
	};

	*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
	*pciids = __pciids;
}
/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * Special Considerations:
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}