/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfa_modules.h"

BFA_TRC_FILE(HAL, CORE);

/*
 * BFA module list terminated by NULL
 */
static struct bfa_module_s *hal_mods[] = {
/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_fcdiag_intr,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};
/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,			/* NONE */
	NULL,			/* BFI_MC_IOC */
	NULL,			/* BFI_MC_DIAG */
	NULL,			/* BFI_MC_FLASH */
	NULL,			/* BFI_MC_CEE */
	NULL,			/* BFI_MC_PORT */
	bfa_iocfc_isr,		/* BFI_MC_IOCFC */
};
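/*
 * bfa_com_*_attach(): attach the common (port, ablk, cee, sfp, flash,
 * diag, phy) sub-modules and claim the DMA memory reserved for each.
 */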
93 bfa_com_port_attach(struct bfa_s *bfa)
95 struct bfa_port_s *port = &bfa->modules.port;
96 struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
98 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
99 bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
106 bfa_com_ablk_attach(struct bfa_s *bfa)
108 struct bfa_ablk_s *ablk = &bfa->modules.ablk;
109 struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
111 bfa_ablk_attach(ablk, &bfa->ioc);
112 bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
116 bfa_com_cee_attach(struct bfa_s *bfa)
118 struct bfa_cee_s *cee = &bfa->modules.cee;
119 struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
121 cee->trcmod = bfa->trcmod;
122 bfa_cee_attach(cee, &bfa->ioc, bfa);
123 bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
127 bfa_com_sfp_attach(struct bfa_s *bfa)
129 struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa);
130 struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
132 bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
133 bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
137 bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
139 struct bfa_flash_s *flash = BFA_FLASH(bfa);
140 struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
142 bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
143 bfa_flash_memclaim(flash, flash_dma->kva_curp,
144 flash_dma->dma_curp, mincfg);
148 bfa_com_diag_attach(struct bfa_s *bfa)
150 struct bfa_diag_s *diag = BFA_DIAG_MOD(bfa);
151 struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
153 bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
154 bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
158 bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
160 struct bfa_phy_s *phy = BFA_PHY(bfa);
161 struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
163 bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
164 bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
/*
 *  BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
	BFA_IOCFC_ACT_ENABLE	= 4,
};
184 #define DEF_CFG_NUM_FABRICS 1
185 #define DEF_CFG_NUM_LPORTS 256
186 #define DEF_CFG_NUM_CQS 4
187 #define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX)
188 #define DEF_CFG_NUM_TSKIM_REQS 128
189 #define DEF_CFG_NUM_FCXP_REQS 64
190 #define DEF_CFG_NUM_UF_BUFS 64
191 #define DEF_CFG_NUM_RPORTS 1024
192 #define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS)
193 #define DEF_CFG_NUM_TINS 256
195 #define DEF_CFG_NUM_SGPGS 2048
196 #define DEF_CFG_NUM_REQQ_ELEMS 256
197 #define DEF_CFG_NUM_RSPQ_ELEMS 64
198 #define DEF_CFG_NUM_SBOOT_TGTS 16
199 #define DEF_CFG_NUM_SBOOT_LUNS 16
202 * forward declaration for IOC FC functions
204 static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
205 static void bfa_iocfc_disable_cbfn(void *bfa_arg);
206 static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
207 static void bfa_iocfc_reset_cbfn(void *bfa_arg);
208 static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
211 * BFA Interrupt handling functions
214 bfa_reqq_resume(struct bfa_s *bfa, int qid)
216 struct list_head *waitq, *qe, *qen;
217 struct bfa_reqq_wait_s *wqe;
219 waitq = bfa_reqq(bfa, qid);
220 list_for_each_safe(qe, qen, waitq) {
222 * Callback only as long as there is room in request queue
224 if (bfa_reqq_full(bfa, qid))
228 wqe = (struct bfa_reqq_wait_s *) qe;
229 wqe->qresume(wqe->cbarg);
234 bfa_isr_rspq(struct bfa_s *bfa, int qid)
238 struct list_head *waitq;
240 ci = bfa_rspq_ci(bfa, qid);
241 pi = bfa_rspq_pi(bfa, qid);
244 m = bfa_rspq_elem(bfa, qid, ci);
245 WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);
247 bfa_isrs[m->mhdr.msg_class] (bfa, m);
248 CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
252 * acknowledge RME completions and update CI
254 bfa_isr_rspq_ack(bfa, qid, ci);
257 * Resume any pending requests in the corresponding reqq.
259 waitq = bfa_reqq(bfa, qid);
260 if (!list_empty(waitq))
261 bfa_reqq_resume(bfa, qid);
265 bfa_isr_reqq(struct bfa_s *bfa, int qid)
267 struct list_head *waitq;
269 bfa_isr_reqq_ack(bfa, qid);
272 * Resume any pending requests in the corresponding reqq.
274 waitq = bfa_reqq(bfa, qid);
275 if (!list_empty(waitq))
276 bfa_reqq_resume(bfa, qid);
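/*
 * Combined MSIX handler: services the RME (response) and CPE (request)
 * queue interrupts from a single vector, then handles LPU/error bits.
 */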
280 bfa_msix_all(struct bfa_s *bfa, int vec)
285 intr = readl(bfa->iocfc.bfa_regs.intr_status);
290 * RME completion queue interrupt
292 qintr = intr & __HFN_INT_RME_MASK;
293 if (qintr && bfa->queue_process) {
294 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
295 bfa_isr_rspq(bfa, queue);
303 * CPE completion queue interrupt
305 qintr = intr & __HFN_INT_CPE_MASK;
306 if (qintr && bfa->queue_process) {
307 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
308 bfa_isr_reqq(bfa, queue);
314 bfa_msix_lpu_err(bfa, intr);
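/*
 * INTx (line interrupt) handler: acknowledge and service the RME/CPE
 * queue interrupts, then let bfa_msix_lpu_err() handle any remaining
 * mailbox/error bits.
 */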
318 bfa_intx(struct bfa_s *bfa)
323 intr = readl(bfa->iocfc.bfa_regs.intr_status);
325 qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
327 writel(qintr, bfa->iocfc.bfa_regs.intr_status);
330 * Unconditional RME completion queue interrupt
332 if (bfa->queue_process) {
333 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
334 bfa_isr_rspq(bfa, queue);
341 * CPE completion queue interrupt
343 qintr = intr & __HFN_INT_CPE_MASK;
344 if (qintr && bfa->queue_process) {
345 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
346 bfa_isr_reqq(bfa, queue);
352 bfa_msix_lpu_err(bfa, intr);
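/*
 * Install the control vector handler and unmask the error and
 * function-specific interrupt bits for this PCI function.
 */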
358 bfa_isr_enable(struct bfa_s *bfa)
361 int pci_func = bfa_ioc_pcifn(&bfa->ioc);
363 bfa_trc(bfa, pci_func);
365 bfa_msix_ctrl_install(bfa);
367 if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
368 umsk = __HFN_INT_ERR_MASK_CT2;
369 umsk |= pci_func == 0 ?
370 __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
372 umsk = __HFN_INT_ERR_MASK;
373 umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
376 writel(umsk, bfa->iocfc.bfa_regs.intr_status);
377 writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
378 bfa->iocfc.intr_mask = ~umsk;
379 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
383 bfa_isr_disable(struct bfa_s *bfa)
385 bfa_isr_mode_set(bfa, BFA_FALSE);
386 writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
387 bfa_msix_uninstall(bfa);
391 bfa_msix_reqq(struct bfa_s *bfa, int vec)
393 bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
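/*
 * Catch-all handler for messages received on an unexpected message
 * class: trace the message header and stop tracing.
 */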
397 bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
399 bfa_trc(bfa, m->mhdr.msg_class);
400 bfa_trc(bfa, m->mhdr.msg_id);
401 bfa_trc(bfa, m->mhdr.mtag.i2htok);
403 bfa_trc_stop(bfa->trcmod);
407 bfa_msix_rspq(struct bfa_s *bfa, int vec)
409 bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
413 bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
415 u32 intr, curr_value;
416 bfa_boolean_t lpu_isr, halt_isr, pss_isr;
418 intr = readl(bfa->iocfc.bfa_regs.intr_status);
420 if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
421 halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
422 pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
423 lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
424 __HFN_INT_MBOX_LPU1_CT2);
425 intr &= __HFN_INT_ERR_MASK_CT2;
427 halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
428 (intr & __HFN_INT_LL_HALT) : 0;
429 pss_isr = intr & __HFN_INT_ERR_PSS;
430 lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
431 intr &= __HFN_INT_ERR_MASK;
	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared, so the driver's interrupt
			 * handler is still called even though it is already
			 * masked out.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value, bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}
467 * BFA IOC FC related functions
471 * BFA IOC private functions
475 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
478 bfa_iocfc_send_cfg(void *bfa_arg)
480 struct bfa_s *bfa = bfa_arg;
481 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
482 struct bfi_iocfc_cfg_req_s cfg_req;
483 struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
484 struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
487 WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
488 bfa_trc(bfa, cfg->fwcfg.num_cqs);
490 bfa_iocfc_reset_queues(bfa);
493 * initialize IOC configuration info
495 cfg_info->single_msix_vec = 0;
496 if (bfa->msix.nvecs == 1)
497 cfg_info->single_msix_vec = 1;
498 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
499 cfg_info->num_cqs = cfg->fwcfg.num_cqs;
500 cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
501 cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);
503 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
505 * dma map REQ and RSP circular queues and shadow pointers
507 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
508 bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
509 iocfc->req_cq_ba[i].pa);
510 bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
511 iocfc->req_cq_shadow_ci[i].pa);
512 cfg_info->req_cq_elems[i] =
513 cpu_to_be16(cfg->drvcfg.num_reqq_elems);
515 bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
516 iocfc->rsp_cq_ba[i].pa);
517 bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
518 iocfc->rsp_cq_shadow_pi[i].pa);
519 cfg_info->rsp_cq_elems[i] =
520 cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	 * Enable interrupt coalescing only on the driver init path,
	 * not on the ioc disable/enable path.
	 */
	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		cfg_info->intr_attr.coalesce = BFA_TRUE;
530 iocfc->cfgdone = BFA_FALSE;
533 * dma map IOC configuration itself
535 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
537 bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
539 bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
540 sizeof(struct bfi_iocfc_cfg_req_s));
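/*
 * Set up the chip-specific hardware interface callbacks (register init,
 * queue acks, MSIX install/uninstall) and the base MSIX vector numbers
 * for the request/response queues.
 */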
544 bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
545 struct bfa_pcidev_s *pcidev)
547 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
551 iocfc->action = BFA_IOCFC_ACT_NONE;
556 * Initialize chip specific handlers.
558 if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
559 iocfc->hwif.hw_reginit = bfa_hwct_reginit;
560 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
561 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
562 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
563 iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
564 iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
565 iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
566 iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
567 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
568 iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
569 iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
570 iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
572 iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
573 iocfc->hwif.hw_reqq_ack = NULL;
574 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
575 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
576 iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
577 iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
578 iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
579 iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
580 iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
581 iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
582 iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
583 bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
584 iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
585 bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
588 if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
589 iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
590 iocfc->hwif.hw_isr_mode_set = NULL;
591 iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
594 iocfc->hwif.hw_reginit(bfa);
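/*
 * Carve up the DMA and KVA memory claimed for IOCFC: IOC attributes,
 * request/response rings, shadow CI/PI, config info/response buffers
 * and the optional firmware trace area.
 */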
599 bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
603 int i, per_reqq_sz, per_rspq_sz, dbgsz;
604 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
605 struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
606 struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
607 struct bfa_mem_dma_s *reqq_dma, *rspq_dma;
609 /* First allocate dma memory for IOC */
610 bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
611 bfa_mem_dma_phys(ioc_dma));
613 /* Claim DMA-able memory for the request/response queues */
614 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
616 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
619 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
620 reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
621 iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
622 iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
623 memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);
625 rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
626 iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
627 iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
628 memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
631 /* Claim IOCFC dma memory - for shadow CI/PI */
632 dm_kva = bfa_mem_dma_virt(iocfc_dma);
633 dm_pa = bfa_mem_dma_phys(iocfc_dma);
635 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
636 iocfc->req_cq_shadow_ci[i].kva = dm_kva;
637 iocfc->req_cq_shadow_ci[i].pa = dm_pa;
638 dm_kva += BFA_CACHELINE_SZ;
639 dm_pa += BFA_CACHELINE_SZ;
641 iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
642 iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
643 dm_kva += BFA_CACHELINE_SZ;
644 dm_pa += BFA_CACHELINE_SZ;
647 /* Claim IOCFC dma memory - for the config info page */
648 bfa->iocfc.cfg_info.kva = dm_kva;
649 bfa->iocfc.cfg_info.pa = dm_pa;
650 bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
651 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
652 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
654 /* Claim IOCFC dma memory - for the config response */
655 bfa->iocfc.cfgrsp_dma.kva = dm_kva;
656 bfa->iocfc.cfgrsp_dma.pa = dm_pa;
657 bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
658 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
660 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
663 /* Claim IOCFC kva memory */
664 dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
666 bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
667 bfa_mem_kva_curp(iocfc) += dbgsz;
672 * Start BFA submodules.
675 bfa_iocfc_start_submod(struct bfa_s *bfa)
679 bfa->queue_process = BFA_TRUE;
680 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
681 bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));
683 for (i = 0; hal_mods[i]; i++)
684 hal_mods[i]->start(bfa);
688 * Disable BFA submodules.
691 bfa_iocfc_disable_submod(struct bfa_s *bfa)
695 for (i = 0; hal_mods[i]; i++)
696 hal_mods[i]->iocdisable(bfa);
700 bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
702 struct bfa_s *bfa = bfa_arg;
705 if (bfa->iocfc.cfgdone)
706 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
708 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
710 if (bfa->iocfc.cfgdone)
711 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
716 bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
718 struct bfa_s *bfa = bfa_arg;
719 struct bfad_s *bfad = bfa->bfad;
722 complete(&bfad->comp);
724 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
728 bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
730 struct bfa_s *bfa = bfa_arg;
731 struct bfad_s *bfad = bfa->bfad;
734 complete(&bfad->enable_comp);
738 bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
740 struct bfa_s *bfa = bfa_arg;
741 struct bfad_s *bfad = bfa->bfad;
744 complete(&bfad->disable_comp);
748 * configure queue registers from firmware response
751 bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
754 struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
755 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
757 for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
758 bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
759 r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
760 r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
761 r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
762 r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
763 r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
764 r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
769 bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
771 bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
772 bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
773 bfa_rport_res_recfg(bfa, fwcfg->num_rports);
774 bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
775 bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
779 * Update BFA configuration from firmware configuration.
782 bfa_iocfc_cfgrsp(struct bfa_s *bfa)
784 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
785 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
786 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
	fwcfg->num_cqs = fwcfg->num_cqs;	/* single byte; no byte swap needed */
789 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
790 fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
791 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
792 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
793 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
794 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
796 iocfc->cfgdone = BFA_TRUE;
799 * configure queue register offsets as learnt from firmware
801 bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
804 * Re-configure resources as learnt from Firmware
806 bfa_iocfc_res_recfg(bfa, fwcfg);
809 * Install MSIX queue handlers
811 bfa_msix_queue_install(bfa);
814 * Configuration is complete - initialize/start submodules
816 bfa_fcport_init(bfa);
818 if (iocfc->action == BFA_IOCFC_ACT_INIT)
819 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
821 if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
822 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
823 bfa_iocfc_enable_cb, bfa);
824 bfa_iocfc_start_submod(bfa);
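/*
 * Reset the producer/consumer indices of all request/response queues.
 */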
828 bfa_iocfc_reset_queues(struct bfa_s *bfa)
832 for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
833 bfa_reqq_ci(bfa, q) = 0;
834 bfa_reqq_pi(bfa, q) = 0;
835 bfa_rspq_ci(bfa, q) = 0;
836 bfa_rspq_pi(bfa, q) = 0;
840 /* Fabric Assigned Address specific functions */
843 * Check whether IOC is ready before sending command down
846 bfa_faa_validate_request(struct bfa_s *bfa)
848 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
849 u32 card_type = bfa->ioc.attr->card_type;
851 if (bfa_ioc_is_operational(&bfa->ioc)) {
852 if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
853 return BFA_STATUS_FEATURE_NOT_SUPPORTED;
855 if (!bfa_ioc_is_acq_addr(&bfa->ioc))
856 return BFA_STATUS_IOC_NON_OP;
859 return BFA_STATUS_OK;
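/*
 * Ask the firmware to enable Fabric Assigned Address (FAA). Fails if a
 * FAA request is already in progress, FAA is already enabled, or
 * trunking is enabled on the port.
 */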
863 bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
865 struct bfi_faa_en_dis_s faa_enable_req;
866 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
869 iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
870 iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
872 status = bfa_faa_validate_request(bfa);
873 if (status != BFA_STATUS_OK)
876 if (iocfc->faa_args.busy == BFA_TRUE)
877 return BFA_STATUS_DEVBUSY;
879 if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
880 return BFA_STATUS_FAA_ENABLED;
882 if (bfa_fcport_is_trunk_enabled(bfa))
883 return BFA_STATUS_ERROR_TRUNK_ENABLED;
885 bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
886 iocfc->faa_args.busy = BFA_TRUE;
888 memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
889 bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
890 BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));
892 bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
893 sizeof(struct bfi_faa_en_dis_s));
895 return BFA_STATUS_OK;
899 bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
902 struct bfi_faa_en_dis_s faa_disable_req;
903 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
906 iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
907 iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
909 status = bfa_faa_validate_request(bfa);
910 if (status != BFA_STATUS_OK)
913 if (iocfc->faa_args.busy == BFA_TRUE)
914 return BFA_STATUS_DEVBUSY;
916 if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
917 return BFA_STATUS_FAA_DISABLED;
919 bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
920 iocfc->faa_args.busy = BFA_TRUE;
922 memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
923 bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
924 BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));
926 bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
927 sizeof(struct bfi_faa_en_dis_s));
929 return BFA_STATUS_OK;
933 bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
934 bfa_cb_iocfc_t cbfn, void *cbarg)
936 struct bfi_faa_query_s faa_attr_req;
937 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
940 iocfc->faa_args.faa_attr = attr;
941 iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
942 iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
944 status = bfa_faa_validate_request(bfa);
945 if (status != BFA_STATUS_OK)
948 if (iocfc->faa_args.busy == BFA_TRUE)
949 return BFA_STATUS_DEVBUSY;
951 iocfc->faa_args.busy = BFA_TRUE;
952 memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
953 bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
954 BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));
956 bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
957 sizeof(struct bfi_faa_query_s));
959 return BFA_STATUS_OK;
963 * FAA enable response
966 bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
967 struct bfi_faa_en_dis_rsp_s *rsp)
969 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
970 bfa_status_t status = rsp->status;
972 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
974 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
975 iocfc->faa_args.busy = BFA_FALSE;
979 * FAA disable response
982 bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
983 struct bfi_faa_en_dis_rsp_s *rsp)
985 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
986 bfa_status_t status = rsp->status;
988 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
990 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
991 iocfc->faa_args.busy = BFA_FALSE;
998 bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
999 bfi_faa_query_rsp_t *rsp)
1001 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
1003 if (iocfc->faa_args.faa_attr) {
1004 iocfc->faa_args.faa_attr->faa = rsp->faa;
1005 iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
1006 iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
1009 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
1011 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
1012 iocfc->faa_args.busy = BFA_FALSE;
1016 * IOC enable request is complete
1019 bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
1021 struct bfa_s *bfa = bfa_arg;
1023 if (status == BFA_STATUS_FAA_ACQ_ADDR) {
1024 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
1025 bfa_iocfc_init_cb, bfa);
1029 if (status != BFA_STATUS_OK) {
1030 bfa_isr_disable(bfa);
1031 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
1032 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
1033 bfa_iocfc_init_cb, bfa);
1034 else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
1035 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
1036 bfa_iocfc_enable_cb, bfa);
1040 bfa_iocfc_send_cfg(bfa);
1044 * IOC disable request is complete
1047 bfa_iocfc_disable_cbfn(void *bfa_arg)
1049 struct bfa_s *bfa = bfa_arg;
1051 bfa_isr_disable(bfa);
1052 bfa_iocfc_disable_submod(bfa);
1054 if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
1055 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
1058 WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
1059 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
1065 * Notify sub-modules of hardware failure.
1068 bfa_iocfc_hbfail_cbfn(void *bfa_arg)
1070 struct bfa_s *bfa = bfa_arg;
1072 bfa->queue_process = BFA_FALSE;
1074 bfa_isr_disable(bfa);
1075 bfa_iocfc_disable_submod(bfa);
1077 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
1078 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
1083 * Actions on chip-reset completion.
1086 bfa_iocfc_reset_cbfn(void *bfa_arg)
1088 struct bfa_s *bfa = bfa_arg;
1090 bfa_iocfc_reset_queues(bfa);
1091 bfa_isr_enable(bfa);
1096 * Query IOC memory requirement information.
1099 bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
1102 int q, per_reqq_sz, per_rspq_sz;
1103 struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
1104 struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
1105 struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
1108 /* dma memory setup for IOC */
1109 bfa_mem_dma_setup(meminfo, ioc_dma,
1110 BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));
1112 /* dma memory setup for REQ/RSP queues */
1113 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
1115 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
1118 for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
1119 bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
1121 bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
1125 /* IOCFC dma memory - calculate Shadow CI/PI size */
1126 for (q = 0; q < cfg->fwcfg.num_cqs; q++)
1127 dm_len += (2 * BFA_CACHELINE_SZ);
1129 /* IOCFC dma memory - calculate config info / rsp size */
1130 dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
1131 dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
1134 /* dma memory setup for IOCFC */
1135 bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);
1137 /* kva memory setup for IOCFC */
1138 bfa_mem_kva_setup(meminfo, iocfc_kva,
1139 ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
 * Attach and initialize the IOC FC sub-module: set up IOC callbacks,
 * claim memory and initialize the queues.
1146 bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1147 struct bfa_pcidev_s *pcidev)
1150 struct bfa_ioc_s *ioc = &bfa->ioc;
1152 bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
1153 bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
1154 bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
1155 bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;
1157 ioc->trcmod = bfa->trcmod;
1158 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
1160 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
1161 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
1163 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
1164 bfa_iocfc_mem_claim(bfa, cfg);
1165 INIT_LIST_HEAD(&bfa->timer_mod.timer_q);
1167 INIT_LIST_HEAD(&bfa->comp_q);
1168 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
1169 INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
 * Kick off IOC FC initialization by enabling the IOC.
1176 bfa_iocfc_init(struct bfa_s *bfa)
1178 bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
1179 bfa_ioc_enable(&bfa->ioc);
1183 * IOC start called from bfa_start(). Called to start IOC operations
1184 * at driver instantiation for this instance.
1187 bfa_iocfc_start(struct bfa_s *bfa)
1189 if (bfa->iocfc.cfgdone)
1190 bfa_iocfc_start_submod(bfa);
1194 * IOC stop called from bfa_stop(). Called only when driver is unloaded
1195 * for this instance.
1198 bfa_iocfc_stop(struct bfa_s *bfa)
1200 bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
1202 bfa->queue_process = BFA_FALSE;
1203 bfa_ioc_disable(&bfa->ioc);
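/*
 * IOCFC mailbox message handler: dispatch config, updateq and FAA
 * responses received from the firmware.
 */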
1207 bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
1209 struct bfa_s *bfa = bfaarg;
1210 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1211 union bfi_iocfc_i2h_msg_u *msg;
1213 msg = (union bfi_iocfc_i2h_msg_u *) m;
1214 bfa_trc(bfa, msg->mh.msg_id);
1216 switch (msg->mh.msg_id) {
1217 case BFI_IOCFC_I2H_CFG_REPLY:
1218 bfa_iocfc_cfgrsp(bfa);
1220 case BFI_IOCFC_I2H_UPDATEQ_RSP:
1221 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
1223 case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
1224 bfa_faa_enable_reply(iocfc,
1225 (struct bfi_faa_en_dis_rsp_s *)msg);
1227 case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
1228 bfa_faa_disable_reply(iocfc,
1229 (struct bfi_faa_en_dis_rsp_s *)msg);
1231 case BFI_IOCFC_I2H_FAA_QUERY_RSP:
1232 bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
1240 bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
1242 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1244 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
1246 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
1247 be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
1248 be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
1250 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
1251 be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
1252 be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
1254 attr->config = iocfc->cfg;
1258 bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
1260 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1261 struct bfi_iocfc_set_intr_req_s *m;
1263 iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
1264 iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
1265 iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);
1267 if (!bfa_iocfc_is_operational(bfa))
1268 return BFA_STATUS_OK;
1270 m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
1272 return BFA_STATUS_DEVBUSY;
1274 bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
1276 m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
1277 m->delay = iocfc->cfginfo->intr_attr.delay;
1278 m->latency = iocfc->cfginfo->intr_attr.latency;
1280 bfa_trc(bfa, attr->delay);
1281 bfa_trc(bfa, attr->latency);
1283 bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
1284 return BFA_STATUS_OK;
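/*
 * Program the SCSI sense buffer base address for the given IO segment.
 */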
1288 bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
1290 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1292 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
1293 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
1296 * Enable IOC after it is disabled.
1299 bfa_iocfc_enable(struct bfa_s *bfa)
1301 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
1303 bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
1304 bfa_ioc_enable(&bfa->ioc);
1308 bfa_iocfc_disable(struct bfa_s *bfa)
1310 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
1312 bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
1314 bfa->queue_process = BFA_FALSE;
1315 bfa_ioc_disable(&bfa->ioc);
1320 bfa_iocfc_is_operational(struct bfa_s *bfa)
1322 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
1326 * Return boot target port wwns -- read from boot information in flash.
1329 bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
1331 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1332 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1335 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
1336 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
1337 *nwwns = cfgrsp->pbc_cfg.nbluns;
1338 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
1339 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
1344 *nwwns = cfgrsp->bootwwns.nwwns;
1345 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
1349 bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
1351 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1352 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1354 memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
1355 return cfgrsp->pbc_cfg.nvports;
 * Use this function to query the memory requirements of the BFA library.
 * It must be called before bfa_attach() to determine the memory required
 * by the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to the pre-defined
 * values within the BFA library.
1367 * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate
1368 * its configuration in this structure.
1369 * The default values for struct bfa_iocfc_cfg_s can be
1370 * fetched using bfa_cfg_get_default() API.
1372 * If cap's boundary check fails, the library will use
1373 * the default bfa_cap_t values (and log a warning msg).
1375 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
1376 * indicates the memory type (see bfa_mem_type_t) and
1377 * amount of memory required.
1379 * Driver should allocate the memory, populate the
1380 * starting address for each block and provide the same
1381 * structure as input parameter to bfa_attach() call.
1383 * @param[in] bfa - pointer to the bfa structure, used while fetching the
1384 * dma, kva memory information of the bfa sub-modules.
1388 * Special Considerations: @note
1391 bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
1395 struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
1396 struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
1397 struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
1398 struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
1399 struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
1400 struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
1401 struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
1403 WARN_ON((cfg == NULL) || (meminfo == NULL));
1405 memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
1407 /* Initialize the DMA & KVA meminfo queues */
1408 INIT_LIST_HEAD(&meminfo->dma_info.qe);
1409 INIT_LIST_HEAD(&meminfo->kva_info.qe);
1411 bfa_iocfc_meminfo(cfg, meminfo, bfa);
1413 for (i = 0; hal_mods[i]; i++)
1414 hal_mods[i]->meminfo(cfg, meminfo, bfa);
1416 /* dma info setup */
1417 bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
1418 bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
1419 bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
1420 bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
1421 bfa_mem_dma_setup(meminfo, flash_dma,
1422 bfa_flash_meminfo(cfg->drvcfg.min_cfg));
1423 bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
1424 bfa_mem_dma_setup(meminfo, phy_dma,
1425 bfa_phy_meminfo(cfg->drvcfg.min_cfg));
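/*
 * Typical driver-side call sequence (illustrative sketch only; how the
 * queued DMA/KVA elements are allocated is driver specific):
 *
 *	bfa_cfg_get_default(&cfg);
 *	bfa_cfg_get_meminfo(&cfg, &meminfo, bfa);
 *	(allocate every element queued on meminfo.dma_info.qe and
 *	 meminfo.kva_info.qe, filling in the kva/dma start addresses)
 *	bfa_attach(bfa, bfad, &cfg, &meminfo, pcidev);
 *	bfa_init(bfa);
 */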
 * Use this function to attach the driver instance to the BFA
1430 * library. This function will not trigger any HW initialization
1431 * process (which will be done in bfa_init() call)
 * This call will fail if the cap is out of range compared to
1434 * pre-defined values within the BFA library
1436 * @param[out] bfa Pointer to bfa_t.
1437 * @param[in] bfad Opaque handle back to the driver's IOC structure
1438 * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure
1439 * that was used in bfa_cfg_get_meminfo().
1440 * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should
1441 * use the bfa_cfg_get_meminfo() call to
1442 * find the memory blocks required, allocate the
1443 * required memory and provide the starting addresses.
1444 * @param[in] pcidev pointer to struct bfa_pcidev_s
1449 * Special Considerations:
1455 bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1456 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1459 struct bfa_mem_dma_s *dma_info, *dma_elem;
1460 struct bfa_mem_kva_s *kva_info, *kva_elem;
1461 struct list_head *dm_qe, *km_qe;
1463 bfa->fcs = BFA_FALSE;
1465 WARN_ON((cfg == NULL) || (meminfo == NULL));
1467 /* Initialize memory pointers for iterative allocation */
1468 dma_info = &meminfo->dma_info;
1469 dma_info->kva_curp = dma_info->kva;
1470 dma_info->dma_curp = dma_info->dma;
1472 kva_info = &meminfo->kva_info;
1473 kva_info->kva_curp = kva_info->kva;
1475 list_for_each(dm_qe, &dma_info->qe) {
1476 dma_elem = (struct bfa_mem_dma_s *) dm_qe;
1477 dma_elem->kva_curp = dma_elem->kva;
1478 dma_elem->dma_curp = dma_elem->dma;
1481 list_for_each(km_qe, &kva_info->qe) {
1482 kva_elem = (struct bfa_mem_kva_s *) km_qe;
1483 kva_elem->kva_curp = kva_elem->kva;
1486 bfa_iocfc_attach(bfa, bfad, cfg, pcidev);
1488 for (i = 0; hal_mods[i]; i++)
1489 hal_mods[i]->attach(bfa, bfad, cfg, pcidev);
1491 bfa_com_port_attach(bfa);
1492 bfa_com_ablk_attach(bfa);
1493 bfa_com_cee_attach(bfa);
1494 bfa_com_sfp_attach(bfa);
1495 bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
1496 bfa_com_diag_attach(bfa);
1497 bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
1501 * Use this function to delete a BFA IOC. IOC should be stopped (by
1502 * calling bfa_stop()) before this function call.
1504 * @param[in] bfa - pointer to bfa_t.
1509 * Special Considerations:
1514 bfa_detach(struct bfa_s *bfa)
1518 for (i = 0; hal_mods[i]; i++)
1519 hal_mods[i]->detach(bfa);
1520 bfa_ioc_detach(&bfa->ioc);
1524 bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
1526 INIT_LIST_HEAD(comp_q);
1527 list_splice_tail_init(&bfa->comp_q, comp_q);
1531 bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
1533 struct list_head *qe;
1534 struct list_head *qen;
1535 struct bfa_cb_qe_s *hcb_qe;
1537 list_for_each_safe(qe, qen, comp_q) {
1538 hcb_qe = (struct bfa_cb_qe_s *) qe;
1539 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
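/*
 * Flush queued completions: each callback is invoked with BFA_FALSE to
 * indicate the completion is being cancelled rather than executed.
 */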
1544 bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
1546 struct list_head *qe;
1547 struct bfa_cb_qe_s *hcb_qe;
1549 while (!list_empty(comp_q)) {
1550 bfa_q_deq(comp_q, &qe);
1551 hcb_qe = (struct bfa_cb_qe_s *) qe;
1552 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
 * Return the list of PCI vendor/device ids supported by this BFA instance.
1562 bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
1564 static struct bfa_pciid_s __pciids[] = {
1565 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
1566 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
1567 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
1568 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
1571 *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
 * Use this function to query the default struct bfa_iocfc_cfg_s value (compiled
1577 * into BFA layer). The OS driver can then turn back and overwrite entries that
1578 * have been configured by the user.
1580 * @param[in] cfg - pointer to bfa_ioc_cfg_t
1585 * Special Considerations:
1589 bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
1591 cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
1592 cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
1593 cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
1594 cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
1595 cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
1596 cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
1597 cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
1598 cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
1599 cfg->fwcfg.num_fwtio_reqs = 0;
1601 cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
1602 cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
1603 cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
1604 cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
1605 cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
1606 cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
1607 cfg->drvcfg.ioc_recover = BFA_FALSE;
1608 cfg->drvcfg.delay_comp = BFA_FALSE;
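/*
 * Populate cfg with the minimum (memory constrained) configuration.
 */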
1613 bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
1615 bfa_cfg_get_default(cfg);
1616 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
1617 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
1618 cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
1619 cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
1620 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
1621 cfg->fwcfg.num_fwtio_reqs = 0;
1623 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
1624 cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
1625 cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
1626 cfg->drvcfg.min_cfg = BFA_TRUE;