2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
23 /* IOC local definitions */
25 /* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
27 #define bfa_ioc_firmware_lock(__ioc) \
28 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
29 #define bfa_ioc_firmware_unlock(__ioc) \
30 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
31 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
32 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
33 #define bfa_ioc_notify_fail(__ioc) \
34 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
35 #define bfa_ioc_sync_start(__ioc) \
36 ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
37 #define bfa_ioc_sync_join(__ioc) \
38 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
39 #define bfa_ioc_sync_leave(__ioc) \
40 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
41 #define bfa_ioc_sync_ack(__ioc) \
42 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
43 #define bfa_ioc_sync_complete(__ioc) \
44 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
46 #define bfa_ioc_mbox_cmd_pending(__ioc) \
47 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
48 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
50 static bool bfa_nw_auto_recover = true;
53 * forward declarations
55 static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
56 static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
57 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
58 static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
59 static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
60 static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
61 static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
62 static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
63 static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
64 static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
65 static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
66 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
67 static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
68 static void bfa_ioc_recover(struct bfa_ioc *ioc);
69 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
70 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
71 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
72 static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
73 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
74 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
75 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
76 static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
77 static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
78 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
79 static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
81 static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
82 static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
84 static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
86 static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
88 static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
90 static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
92 static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
93 static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
95 /* IOC state machine definitions/declarations */
97 IOC_E_RESET = 1, /*!< IOC reset request */
98 IOC_E_ENABLE = 2, /*!< IOC enable request */
99 IOC_E_DISABLE = 3, /*!< IOC disable request */
100 IOC_E_DETACH = 4, /*!< driver detach cleanup */
101 IOC_E_ENABLED = 5, /*!< f/w enabled */
102 IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
103 IOC_E_DISABLED = 7, /*!< f/w disabled */
104 IOC_E_PFFAILED = 8, /*!< failure notice by iocpf sm */
105 IOC_E_HBFAIL = 9, /*!< heartbeat failure */
106 IOC_E_HWERROR = 10, /*!< hardware error interrupt */
107 IOC_E_TIMEOUT = 11, /*!< timeout */
108 IOC_E_HWFAILED = 12, /*!< PCI mapping failure notice */
111 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
112 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
113 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
114 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
115 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
116 bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
117 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
118 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
119 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
120 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
122 static struct bfa_sm_table ioc_sm_table[] = {
123 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
124 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
125 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
126 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
127 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
128 {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
129 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
130 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
131 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
132 {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
136 * Forward declareations for iocpf state machine
138 static void bfa_iocpf_enable(struct bfa_ioc *ioc);
139 static void bfa_iocpf_disable(struct bfa_ioc *ioc);
140 static void bfa_iocpf_fail(struct bfa_ioc *ioc);
141 static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
142 static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
143 static void bfa_iocpf_stop(struct bfa_ioc *ioc);
145 /* IOCPF state machine events */
147 IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
148 IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
149 IOCPF_E_STOP = 3, /*!< stop on driver detach */
150 IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
151 IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */
152 IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */
153 IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */
154 IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */
155 IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */
156 IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
157 IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */
158 IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */
162 enum bfa_iocpf_state {
163 BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
164 BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
165 BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */
166 BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */
167 BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */
168 BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */
169 BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */
170 BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */
171 BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */
174 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
175 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
176 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
177 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
178 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
179 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
180 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
181 bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
183 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
184 bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
185 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
186 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
187 bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
189 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
191 static struct bfa_sm_table iocpf_sm_table[] = {
192 {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
193 {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
194 {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
195 {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
196 {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
197 {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
198 {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
199 {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
200 {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
201 {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
202 {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
203 {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
204 {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
205 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
208 /* IOC State Machine */
210 /* Beginning state. IOC uninit state. */
212 bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
216 /* IOC is in uninit state. */
218 bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
222 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
230 /* Reset entry actions -- initialize state machine */
232 bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
234 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
237 /* IOC is in reset state. */
239 bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
243 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
247 bfa_ioc_disable_comp(ioc);
251 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
260 bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
262 bfa_iocpf_enable(ioc);
265 /* Host IOC function is being enabled, awaiting response from firmware.
266 * Semaphore is acquired.
269 bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
273 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
277 /* !!! fall through !!! */
279 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
280 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
281 if (event != IOC_E_PFFAILED)
282 bfa_iocpf_initfail(ioc);
286 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
287 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
291 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
295 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
307 /* Semaphore should be acquired for version check. */
309 bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
311 mod_timer(&ioc->ioc_timer, jiffies +
312 msecs_to_jiffies(BFA_IOC_TOV));
313 bfa_ioc_send_getattr(ioc);
316 /* IOC configuration in progress. Timer is active. */
318 bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
321 case IOC_E_FWRSP_GETATTR:
322 del_timer(&ioc->ioc_timer);
323 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
328 del_timer(&ioc->ioc_timer);
331 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
332 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
333 if (event != IOC_E_PFFAILED)
334 bfa_iocpf_getattrfail(ioc);
338 del_timer(&ioc->ioc_timer);
339 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
351 bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
353 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
354 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
355 bfa_ioc_hb_monitor(ioc);
359 bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
366 bfa_ioc_hb_stop(ioc);
367 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
372 bfa_ioc_hb_stop(ioc);
373 /* !!! fall through !!! */
375 if (ioc->iocpf.auto_recover)
376 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
378 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
380 bfa_ioc_fail_notify(ioc);
382 if (event != IOC_E_PFFAILED)
392 bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
394 bfa_iocpf_disable(ioc);
397 /* IOC is being disabled */
399 bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
403 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
408 * No state change. Will move to disabled state
409 * after iocpf sm completes failure processing and
410 * moves to disabled state.
416 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
417 bfa_ioc_disable_comp(ioc);
425 /* IOC disable completion entry. */
427 bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
429 bfa_ioc_disable_comp(ioc);
433 bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
437 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
441 ioc->cbfn->disable_cbfn(ioc->bfa);
445 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
455 bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
459 /* Hardware initialization retry. */
461 bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
465 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
471 * Initialization retry failed.
473 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
474 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
475 if (event != IOC_E_PFFAILED)
476 bfa_iocpf_initfail(ioc);
480 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
481 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
488 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
492 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
502 bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
508 bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
512 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
516 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
520 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
525 /* HB failure notification, ignore. */
534 bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
540 bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
545 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
549 ioc->cbfn->disable_cbfn(ioc->bfa);
553 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
561 /* IOCPF State Machine */
563 /* Reset entry actions -- initialize state machine */
565 bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
567 iocpf->fw_mismatch_notified = false;
568 iocpf->auto_recover = bfa_nw_auto_recover;
571 /* Beginning state. IOC is in reset state. */
573 bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
577 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
588 /* Semaphore should be acquired for version check. */
590 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
592 bfa_ioc_hw_sem_init(iocpf->ioc);
593 bfa_ioc_hw_sem_get(iocpf->ioc);
596 /* Awaiting h/w semaphore to continue with version check. */
598 bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
600 struct bfa_ioc *ioc = iocpf->ioc;
603 case IOCPF_E_SEMLOCKED:
604 if (bfa_ioc_firmware_lock(ioc)) {
605 if (bfa_ioc_sync_start(ioc)) {
606 bfa_ioc_sync_join(ioc);
607 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
609 bfa_ioc_firmware_unlock(ioc);
610 bfa_nw_ioc_hw_sem_release(ioc);
611 mod_timer(&ioc->sem_timer, jiffies +
612 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
615 bfa_nw_ioc_hw_sem_release(ioc);
616 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
620 case IOCPF_E_SEM_ERROR:
621 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
622 bfa_ioc_pf_hwfailed(ioc);
625 case IOCPF_E_DISABLE:
626 bfa_ioc_hw_sem_get_cancel(ioc);
627 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
628 bfa_ioc_pf_disabled(ioc);
632 bfa_ioc_hw_sem_get_cancel(ioc);
633 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
641 /* Notify enable completion callback */
643 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
645 /* Call only the first time sm enters fwmismatch state. */
646 if (!iocpf->fw_mismatch_notified)
647 bfa_ioc_pf_fwmismatch(iocpf->ioc);
649 iocpf->fw_mismatch_notified = true;
650 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
651 msecs_to_jiffies(BFA_IOC_TOV));
654 /* Awaiting firmware version match. */
656 bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
658 struct bfa_ioc *ioc = iocpf->ioc;
661 case IOCPF_E_TIMEOUT:
662 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
665 case IOCPF_E_DISABLE:
666 del_timer(&ioc->iocpf_timer);
667 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
668 bfa_ioc_pf_disabled(ioc);
672 del_timer(&ioc->iocpf_timer);
673 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
681 /* Request for semaphore. */
683 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
685 bfa_ioc_hw_sem_get(iocpf->ioc);
688 /* Awaiting semaphore for h/w initialzation. */
690 bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
692 struct bfa_ioc *ioc = iocpf->ioc;
695 case IOCPF_E_SEMLOCKED:
696 if (bfa_ioc_sync_complete(ioc)) {
697 bfa_ioc_sync_join(ioc);
698 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
700 bfa_nw_ioc_hw_sem_release(ioc);
701 mod_timer(&ioc->sem_timer, jiffies +
702 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
706 case IOCPF_E_SEM_ERROR:
707 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
708 bfa_ioc_pf_hwfailed(ioc);
711 case IOCPF_E_DISABLE:
712 bfa_ioc_hw_sem_get_cancel(ioc);
713 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
722 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
724 iocpf->poll_time = 0;
725 bfa_ioc_reset(iocpf->ioc, false);
728 /* Hardware is being initialized. Interrupts are enabled.
729 * Holding hardware semaphore lock.
732 bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
734 struct bfa_ioc *ioc = iocpf->ioc;
737 case IOCPF_E_FWREADY:
738 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
741 case IOCPF_E_TIMEOUT:
742 bfa_nw_ioc_hw_sem_release(ioc);
743 bfa_ioc_pf_failed(ioc);
744 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
747 case IOCPF_E_DISABLE:
748 del_timer(&ioc->iocpf_timer);
749 bfa_ioc_sync_leave(ioc);
750 bfa_nw_ioc_hw_sem_release(ioc);
751 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
760 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
762 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
763 msecs_to_jiffies(BFA_IOC_TOV));
765 * Enable Interrupts before sending fw IOC ENABLE cmd.
767 iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
768 bfa_ioc_send_enable(iocpf->ioc);
771 /* Host IOC function is being enabled, awaiting response from firmware.
772 * Semaphore is acquired.
775 bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
777 struct bfa_ioc *ioc = iocpf->ioc;
780 case IOCPF_E_FWRSP_ENABLE:
781 del_timer(&ioc->iocpf_timer);
782 bfa_nw_ioc_hw_sem_release(ioc);
783 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
786 case IOCPF_E_INITFAIL:
787 del_timer(&ioc->iocpf_timer);
789 * !!! fall through !!!
791 case IOCPF_E_TIMEOUT:
792 bfa_nw_ioc_hw_sem_release(ioc);
793 if (event == IOCPF_E_TIMEOUT)
794 bfa_ioc_pf_failed(ioc);
795 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
798 case IOCPF_E_DISABLE:
799 del_timer(&ioc->iocpf_timer);
800 bfa_nw_ioc_hw_sem_release(ioc);
801 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
810 bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
812 bfa_ioc_pf_enabled(iocpf->ioc);
816 bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
819 case IOCPF_E_DISABLE:
820 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
823 case IOCPF_E_GETATTRFAIL:
824 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
828 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
837 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
839 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
840 msecs_to_jiffies(BFA_IOC_TOV));
841 bfa_ioc_send_disable(iocpf->ioc);
844 /* IOC is being disabled */
846 bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
848 struct bfa_ioc *ioc = iocpf->ioc;
851 case IOCPF_E_FWRSP_DISABLE:
852 del_timer(&ioc->iocpf_timer);
853 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
857 del_timer(&ioc->iocpf_timer);
859 * !!! fall through !!!
862 case IOCPF_E_TIMEOUT:
863 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
864 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
867 case IOCPF_E_FWRSP_ENABLE:
876 bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
878 bfa_ioc_hw_sem_get(iocpf->ioc);
881 /* IOC hb ack request is being removed. */
883 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
885 struct bfa_ioc *ioc = iocpf->ioc;
888 case IOCPF_E_SEMLOCKED:
889 bfa_ioc_sync_leave(ioc);
890 bfa_nw_ioc_hw_sem_release(ioc);
891 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
894 case IOCPF_E_SEM_ERROR:
895 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
896 bfa_ioc_pf_hwfailed(ioc);
907 /* IOC disable completion entry. */
909 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
911 bfa_ioc_mbox_flush(iocpf->ioc);
912 bfa_ioc_pf_disabled(iocpf->ioc);
916 bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
918 struct bfa_ioc *ioc = iocpf->ioc;
922 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
926 bfa_ioc_firmware_unlock(ioc);
927 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
936 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
938 bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
939 bfa_ioc_hw_sem_get(iocpf->ioc);
942 /* Hardware initialization failed. */
944 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
946 struct bfa_ioc *ioc = iocpf->ioc;
949 case IOCPF_E_SEMLOCKED:
950 bfa_ioc_notify_fail(ioc);
951 bfa_ioc_sync_leave(ioc);
952 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
953 bfa_nw_ioc_hw_sem_release(ioc);
954 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
957 case IOCPF_E_SEM_ERROR:
958 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
959 bfa_ioc_pf_hwfailed(ioc);
962 case IOCPF_E_DISABLE:
963 bfa_ioc_hw_sem_get_cancel(ioc);
964 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
968 bfa_ioc_hw_sem_get_cancel(ioc);
969 bfa_ioc_firmware_unlock(ioc);
970 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
982 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
986 /* Hardware initialization failed. */
988 bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
990 struct bfa_ioc *ioc = iocpf->ioc;
993 case IOCPF_E_DISABLE:
994 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
998 bfa_ioc_firmware_unlock(ioc);
999 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1003 bfa_sm_fault(event);
1008 bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
1011 * Mark IOC as failed in hardware and stop firmware.
1013 bfa_ioc_lpu_stop(iocpf->ioc);
1016 * Flush any queued up mailbox requests.
1018 bfa_ioc_mbox_flush(iocpf->ioc);
1019 bfa_ioc_hw_sem_get(iocpf->ioc);
1022 /* IOC is in failed state. */
1024 bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1026 struct bfa_ioc *ioc = iocpf->ioc;
1029 case IOCPF_E_SEMLOCKED:
1030 bfa_ioc_sync_ack(ioc);
1031 bfa_ioc_notify_fail(ioc);
1032 if (!iocpf->auto_recover) {
1033 bfa_ioc_sync_leave(ioc);
1034 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1035 bfa_nw_ioc_hw_sem_release(ioc);
1036 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1038 if (bfa_ioc_sync_complete(ioc))
1039 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1041 bfa_nw_ioc_hw_sem_release(ioc);
1042 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1047 case IOCPF_E_SEM_ERROR:
1048 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1049 bfa_ioc_pf_hwfailed(ioc);
1052 case IOCPF_E_DISABLE:
1053 bfa_ioc_hw_sem_get_cancel(ioc);
1054 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1061 bfa_sm_fault(event);
1066 bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
1070 /* IOC is in failed state. */
1072 bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1075 case IOCPF_E_DISABLE:
1076 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1080 bfa_sm_fault(event);
1084 /* BFA IOC private functions */
1086 /* Notify common modules registered for notification. */
1088 bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
1090 struct bfa_ioc_notify *notify;
1091 struct list_head *qe;
1093 list_for_each(qe, &ioc->notify_q) {
1094 notify = (struct bfa_ioc_notify *)qe;
1095 notify->cbfn(notify->cbarg, event);
1100 bfa_ioc_disable_comp(struct bfa_ioc *ioc)
1102 ioc->cbfn->disable_cbfn(ioc->bfa);
1103 bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
1107 bfa_nw_ioc_sem_get(void __iomem *sem_reg)
1111 #define BFA_SEM_SPINCNT 3000
1113 r32 = readl(sem_reg);
1115 while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1118 r32 = readl(sem_reg);
1128 bfa_nw_ioc_sem_release(void __iomem *sem_reg)
1134 /* Clear fwver hdr */
1136 bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
1138 u32 pgnum, pgoff, loff = 0;
1141 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1142 pgoff = PSS_SMEM_PGOFF(loff);
1143 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1145 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
1146 writel(0, ioc->ioc_regs.smem_page_start + loff);
1147 loff += sizeof(u32);
1153 bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
1155 struct bfi_ioc_image_hdr fwhdr;
1158 /* Spin on init semaphore to serialize. */
1159 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1162 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1165 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1166 if (fwstate == BFI_IOC_UNINIT) {
1167 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1171 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1173 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
1174 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1178 bfa_ioc_fwver_clear(ioc);
1179 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
1180 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
1183 * Try to lock and then unlock the semaphore.
1185 readl(ioc->ioc_regs.ioc_sem_reg);
1186 writel(1, ioc->ioc_regs.ioc_sem_reg);
1188 /* Unlock init semaphore */
1189 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1193 bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
1198 * First read to the semaphore register will return 0, subsequent reads
1199 * will return 1. Semaphore is released by writing 1 to the register
1201 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1203 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1207 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1211 mod_timer(&ioc->sem_timer, jiffies +
1212 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
1216 bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
1218 writel(1, ioc->ioc_regs.ioc_sem_reg);
1222 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
1224 del_timer(&ioc->sem_timer);
1227 /* Initialize LPU local memory (aka secondary memory / SRAM) */
1229 bfa_ioc_lmem_init(struct bfa_ioc *ioc)
1233 #define PSS_LMEM_INIT_TIME 10000
1235 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1236 pss_ctl &= ~__PSS_LMEM_RESET;
1237 pss_ctl |= __PSS_LMEM_INIT_EN;
1240 * i2c workaround 12.5khz clock
1242 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1243 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1246 * wait for memory initialization to be complete
1250 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1252 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1255 * If memory initialization is not successful, IOC timeout will catch
1258 BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1260 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1261 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1265 bfa_ioc_lpu_start(struct bfa_ioc *ioc)
1270 * Take processor out of reset.
1272 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1273 pss_ctl &= ~__PSS_LPU0_RESET;
1275 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1279 bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
1284 * Put processors in reset.
1286 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1287 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1289 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1292 /* Get driver and firmware versions. */
1294 bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1299 u32 *fwsig = (u32 *) fwhdr;
1301 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1302 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1304 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
1307 swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1308 loff += sizeof(u32);
1312 /* Returns TRUE if same. */
1314 bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1316 struct bfi_ioc_image_hdr *drv_fwhdr;
1319 drv_fwhdr = (struct bfi_ioc_image_hdr *)
1320 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1322 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1323 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
1330 /* Return true if current running version is valid. Firmware signature and
1331 * execution context (driver/bios) must match.
1334 bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
1336 struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
1338 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1339 drv_fwhdr = (struct bfi_ioc_image_hdr *)
1340 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1342 if (fwhdr.signature != drv_fwhdr->signature)
1345 if (swab32(fwhdr.bootenv) != boot_env)
1348 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
1351 /* Conditionally flush any pending message from firmware at start. */
1353 bfa_ioc_msgflush(struct bfa_ioc *ioc)
1357 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1359 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1363 bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1365 enum bfi_ioc_state ioc_fwstate;
1369 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1372 ioc_fwstate = BFI_IOC_UNINIT;
1374 boot_env = BFI_FWBOOT_ENV_OS;
1377 * check if firmware is valid
1379 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1380 false : bfa_ioc_fwver_valid(ioc, boot_env);
1383 bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
1384 bfa_ioc_poll_fwinit(ioc);
1389 * If hardware initialization is in progress (initialized by other IOC),
1390 * just wait for an initialization completion interrupt.
1392 if (ioc_fwstate == BFI_IOC_INITING) {
1393 bfa_ioc_poll_fwinit(ioc);
1398 * If IOC function is disabled and firmware version is same,
1399 * just re-enable IOC.
1401 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1403 * When using MSI-X any pending firmware ready event should
1404 * be flushed. Otherwise MSI-X interrupts are not delivered.
1406 bfa_ioc_msgflush(ioc);
1407 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1412 * Initialize the h/w for any other states.
1414 bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
1415 bfa_ioc_poll_fwinit(ioc);
1419 bfa_nw_ioc_timeout(void *ioc_arg)
1421 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
1423 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1427 bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
1429 u32 *msgp = (u32 *) ioc_msg;
1432 BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));
1435 * first write msg to mailbox registers
1437 for (i = 0; i < len / sizeof(u32); i++)
1438 writel(cpu_to_le32(msgp[i]),
1439 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1441 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1442 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1445 * write 1 to mailbox CMD to trigger LPU event
1447 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1448 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1452 bfa_ioc_send_enable(struct bfa_ioc *ioc)
1454 struct bfi_ioc_ctrl_req enable_req;
1457 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1458 bfa_ioc_portid(ioc));
1459 enable_req.clscode = htons(ioc->clscode);
1460 do_gettimeofday(&tv);
1461 enable_req.tv_sec = ntohl(tv.tv_sec);
1462 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1466 bfa_ioc_send_disable(struct bfa_ioc *ioc)
1468 struct bfi_ioc_ctrl_req disable_req;
1470 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1471 bfa_ioc_portid(ioc));
1472 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1476 bfa_ioc_send_getattr(struct bfa_ioc *ioc)
1478 struct bfi_ioc_getattr_req attr_req;
1480 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1481 bfa_ioc_portid(ioc));
1482 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1483 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1487 bfa_nw_ioc_hb_check(void *cbarg)
1489 struct bfa_ioc *ioc = cbarg;
1492 hb_count = readl(ioc->ioc_regs.heartbeat);
1493 if (ioc->hb_count == hb_count) {
1494 bfa_ioc_recover(ioc);
1497 ioc->hb_count = hb_count;
1500 bfa_ioc_mbox_poll(ioc);
1501 mod_timer(&ioc->hb_timer, jiffies +
1502 msecs_to_jiffies(BFA_IOC_HB_TOV));
1506 bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
1508 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1509 mod_timer(&ioc->hb_timer, jiffies +
1510 msecs_to_jiffies(BFA_IOC_HB_TOV));
1514 bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1516 del_timer(&ioc->hb_timer);
1519 /* Initiate a full firmware download. */
1521 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1531 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1533 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1535 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1537 for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
1538 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1539 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1540 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1541 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1547 writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
1548 ((ioc->ioc_regs.smem_page_start) + (loff)));
1550 loff += sizeof(u32);
1553 * handle page offset wrap around
1555 loff = PSS_SMEM_PGOFF(loff);
1559 ioc->ioc_regs.host_page_num_fn);
1563 writel(bfa_ioc_smem_pgnum(ioc, 0),
1564 ioc->ioc_regs.host_page_num_fn);
1567 * Set boot type, env and device mode at the end.
1569 asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1570 ioc->port0_mode, ioc->port1_mode);
1571 writel(asicmode, ((ioc->ioc_regs.smem_page_start)
1572 + BFI_FWBOOT_DEVMODE_OFF));
1573 writel(boot_type, ((ioc->ioc_regs.smem_page_start)
1574 + (BFI_FWBOOT_TYPE_OFF)));
1575 writel(boot_env, ((ioc->ioc_regs.smem_page_start)
1576 + (BFI_FWBOOT_ENV_OFF)));
1580 bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
1582 bfa_ioc_hwinit(ioc, force);
1585 /* BFA ioc enable reply by firmware */
1587 bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
1590 struct bfa_iocpf *iocpf = &ioc->iocpf;
1592 ioc->port_mode = ioc->port_mode_cfg = port_mode;
1593 ioc->ad_cap_bm = cap_bm;
1594 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1597 /* Update BFA configuration from firmware configuration. */
1599 bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1601 struct bfi_ioc_attr *attr = ioc->attr;
1603 attr->adapter_prop = ntohl(attr->adapter_prop);
1604 attr->card_type = ntohl(attr->card_type);
1605 attr->maxfrsize = ntohs(attr->maxfrsize);
1607 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1610 /* Attach time initialization of mbox logic. */
1612 bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1614 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1617 INIT_LIST_HEAD(&mod->cmd_q);
1618 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1619 mod->mbhdlr[mc].cbfn = NULL;
1620 mod->mbhdlr[mc].cbarg = ioc->bfa;
1624 /* Mbox poll timer -- restarts any pending mailbox requests. */
1626 bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1628 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1629 struct bfa_mbox_cmd *cmd;
1630 bfa_mbox_cmd_cbfn_t cbfn;
1635 * If no command pending, do nothing
1637 if (list_empty(&mod->cmd_q))
1641 * If previous command is not yet fetched by firmware, do nothing
1643 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1648 * Enqueue command to firmware.
1650 bfa_q_deq(&mod->cmd_q, &cmd);
1651 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1654 * Give a callback to the client, indicating that the command is sent
1664 /* Cleanup any pending requests. */
1666 bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
1668 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1669 struct bfa_mbox_cmd *cmd;
1671 while (!list_empty(&mod->cmd_q))
1672 bfa_q_deq(&mod->cmd_q, &cmd);
1676 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
1678 * @ioc: memory for IOC
1679 * @tbuf: app memory to store data from smem
1680 * @soff: smem offset
1681 * @sz: size of smem in bytes
1684 bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
1686 u32 pgnum, loff, r32;
1690 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1691 loff = PSS_SMEM_PGOFF(soff);
1694 * Hold semaphore to serialize pll init and fwtrc.
1696 if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
1699 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1701 len = sz/sizeof(u32);
1702 for (i = 0; i < len; i++) {
1703 r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1704 buf[i] = be32_to_cpu(r32);
1705 loff += sizeof(u32);
1708 * handle page offset wrap around
1710 loff = PSS_SMEM_PGOFF(loff);
1713 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1717 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1718 ioc->ioc_regs.host_page_num_fn);
1723 readl(ioc->ioc_regs.ioc_init_sem_reg);
1724 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1728 /* Retrieve saved firmware trace from a prior IOC failure. */
1730 bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1732 u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
1733 int tlen, status = 0;
1736 if (tlen > BNA_DBG_FWTRC_LEN)
1737 tlen = BNA_DBG_FWTRC_LEN;
1739 status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
1744 /* Save firmware trace if configured. */
1746 bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
1750 if (ioc->dbg_fwsave_once) {
1751 ioc->dbg_fwsave_once = 0;
1752 if (ioc->dbg_fwsave_len) {
1753 tlen = ioc->dbg_fwsave_len;
1754 bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
1759 /* Retrieve saved firmware trace from a prior IOC failure. */
1761 bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1765 if (ioc->dbg_fwsave_len == 0)
1766 return BFA_STATUS_ENOFSAVE;
1769 if (tlen > ioc->dbg_fwsave_len)
1770 tlen = ioc->dbg_fwsave_len;
1772 memcpy(trcdata, ioc->dbg_fwsave, tlen);
1774 return BFA_STATUS_OK;
1778 bfa_ioc_fail_notify(struct bfa_ioc *ioc)
1781 * Notify driver and common modules registered for notification.
1783 ioc->cbfn->hbfail_cbfn(ioc->bfa);
1784 bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
1785 bfa_nw_ioc_debug_save_ftrc(ioc);
1788 /* IOCPF to IOC interface */
1790 bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
1792 bfa_fsm_send_event(ioc, IOC_E_ENABLED);
1796 bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
1798 bfa_fsm_send_event(ioc, IOC_E_DISABLED);
1802 bfa_ioc_pf_failed(struct bfa_ioc *ioc)
1804 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
1808 bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
1810 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1814 bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
1817 * Provide enable completion callback and AEN notification.
1819 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1823 static enum bfa_status
1824 bfa_ioc_pll_init(struct bfa_ioc *ioc)
1827 * Hold semaphore so that nobody can access the chip during init.
1829 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1831 bfa_ioc_pll_init_asic(ioc);
1833 ioc->pllinit = true;
1835 /* Initialize LMEM */
1836 bfa_ioc_lmem_init(ioc);
1839 * release semaphore.
1841 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1843 return BFA_STATUS_OK;
1846 /* Interface used by diag module to do firmware boot with memory test
1847 * as the entry vector.
1850 bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
1853 bfa_ioc_stats(ioc, ioc_boots);
1855 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1859 * Initialize IOC state of all functions on a chip reset.
1861 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
1862 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
1863 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
1865 writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
1866 writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
1869 bfa_ioc_msgflush(ioc);
1870 bfa_ioc_download_fw(ioc, boot_type, boot_env);
1871 bfa_ioc_lpu_start(ioc);
1874 /* Enable/disable IOC failure auto recovery. */
1876 bfa_nw_ioc_auto_recover(bool auto_recover)
1878 bfa_nw_auto_recover = auto_recover;
1882 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
1888 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1895 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
1897 r32 = readl(ioc->ioc_regs.lpu_mbox +
1899 msgp[i] = htonl(r32);
1903 * turn off mailbox interrupt by clearing mailbox status
1905 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1906 readl(ioc->ioc_regs.lpu_mbox_cmd);
1912 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
1914 union bfi_ioc_i2h_msg_u *msg;
1915 struct bfa_iocpf *iocpf = &ioc->iocpf;
1917 msg = (union bfi_ioc_i2h_msg_u *) m;
1919 bfa_ioc_stats(ioc, ioc_isrs);
1921 switch (msg->mh.msg_id) {
1922 case BFI_IOC_I2H_HBEAT:
1925 case BFI_IOC_I2H_ENABLE_REPLY:
1926 bfa_ioc_enable_reply(ioc,
1927 (enum bfa_mode)msg->fw_event.port_mode,
1928 msg->fw_event.cap_bm);
1931 case BFI_IOC_I2H_DISABLE_REPLY:
1932 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
1935 case BFI_IOC_I2H_GETATTR_REPLY:
1936 bfa_ioc_getattr_reply(ioc);
1945 * bfa_nw_ioc_attach - IOC attach time initialization and setup.
1947 * @ioc: memory for IOC
1948 * @bfa: driver instance structure
1951 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
1955 ioc->fcmode = false;
1956 ioc->pllinit = false;
1957 ioc->dbg_fwsave_once = true;
1958 ioc->iocpf.ioc = ioc;
1960 bfa_ioc_mbox_attach(ioc);
1961 INIT_LIST_HEAD(&ioc->notify_q);
1963 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
1964 bfa_fsm_send_event(ioc, IOC_E_RESET);
1967 /* Driver detach time IOC cleanup. */
1969 bfa_nw_ioc_detach(struct bfa_ioc *ioc)
1971 bfa_fsm_send_event(ioc, IOC_E_DETACH);
1973 /* Done with detach, empty the notify_q. */
1974 INIT_LIST_HEAD(&ioc->notify_q);
1978 * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
1980 * @pcidev: PCI device information for this IOC
1983 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
1984 enum bfi_pcifn_class clscode)
1986 ioc->clscode = clscode;
1987 ioc->pcidev = *pcidev;
1990 * Initialize IOC and device personality
1992 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
1993 ioc->asic_mode = BFI_ASIC_MODE_FC;
1995 switch (pcidev->device_id) {
1996 case PCI_DEVICE_ID_BROCADE_CT:
1997 ioc->asic_gen = BFI_ASIC_GEN_CT;
1998 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
1999 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2000 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2001 ioc->ad_cap_bm = BFA_CM_CNA;
2004 case BFA_PCI_DEVICE_ID_CT2:
2005 ioc->asic_gen = BFI_ASIC_GEN_CT2;
2006 if (clscode == BFI_PCIFN_CLASS_FC &&
2007 pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2008 ioc->asic_mode = BFI_ASIC_MODE_FC16;
2010 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2011 ioc->ad_cap_bm = BFA_CM_HBA;
2013 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2014 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2015 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2017 ioc->port_mode_cfg = BFA_MODE_CNA;
2018 ioc->ad_cap_bm = BFA_CM_CNA;
2021 ioc->port_mode_cfg = BFA_MODE_NIC;
2022 ioc->ad_cap_bm = BFA_CM_NIC;
2032 * Set asic specific interfaces.
2034 if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2035 bfa_nw_ioc_set_ct_hwif(ioc);
2037 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2038 bfa_nw_ioc_set_ct2_hwif(ioc);
2039 bfa_nw_ioc_ct2_poweron(ioc);
2042 bfa_ioc_map_port(ioc);
2043 bfa_ioc_reg_init(ioc);
2047 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
2049 * @dm_kva: kernel virtual address of IOC dma memory
2050 * @dm_pa: physical address of IOC dma memory
2053 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
2056 * dma memory for firmware attribute
2058 ioc->attr_dma.kva = dm_kva;
2059 ioc->attr_dma.pa = dm_pa;
2060 ioc->attr = (struct bfi_ioc_attr *) dm_kva;
2063 /* Return size of dma memory required. */
2065 bfa_nw_ioc_meminfo(void)
2067 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
2071 bfa_nw_ioc_enable(struct bfa_ioc *ioc)
2073 bfa_ioc_stats(ioc, ioc_enables);
2074 ioc->dbg_fwsave_once = true;
2076 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2080 bfa_nw_ioc_disable(struct bfa_ioc *ioc)
2082 bfa_ioc_stats(ioc, ioc_disables);
2083 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2086 /* Initialize memory for saving firmware trace. */
2088 bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
2090 ioc->dbg_fwsave = dbg_fwsave;
2091 ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
2095 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
2097 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
2100 /* Register mailbox message handler function, to be called by common modules */
2102 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
2103 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2105 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2107 mod->mbhdlr[mc].cbfn = cbfn;
2108 mod->mbhdlr[mc].cbarg = cbarg;
2112 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
2114 * @ioc: IOC instance
2115 * @cmd: Mailbox command
2117 * Waits if mailbox is busy. Responsibility of caller to serialize
2120 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
2121 bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
2123 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2130 * If a previous command is pending, queue new command
2132 if (!list_empty(&mod->cmd_q)) {
2133 list_add_tail(&cmd->qe, &mod->cmd_q);
2138 * If mailbox is busy, queue command for poll timer
2140 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2142 list_add_tail(&cmd->qe, &mod->cmd_q);
2147 * mailbox is free -- queue command to firmware
2149 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2154 /* Handle mailbox interrupts */
2156 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
2158 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2162 if (bfa_ioc_msgget(ioc, &m)) {
2164 * Treat IOC message class as special.
2166 mc = m.mh.msg_class;
2167 if (mc == BFI_MC_IOC) {
2168 bfa_ioc_isr(ioc, &m);
2172 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2175 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2178 bfa_ioc_lpu_read_stat(ioc);
2181 * Try to send pending mailbox commands
2183 bfa_ioc_mbox_poll(ioc);
2187 bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
2189 bfa_ioc_stats(ioc, ioc_hbfails);
2190 bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2191 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2194 /* return true if IOC is disabled */
2196 bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2198 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2199 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2202 /* return true if IOC is operational */
2204 bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
2206 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2209 /* Add to IOC heartbeat failure notification queue. To be used by common
2210 * modules such as cee, port, diag.
2213 bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
2214 struct bfa_ioc_notify *notify)
2216 list_add_tail(¬ify->qe, &ioc->notify_q);
2219 #define BFA_MFG_NAME "Brocade"
2221 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
2222 struct bfa_adapter_attr *ad_attr)
2224 struct bfi_ioc_attr *ioc_attr;
2226 ioc_attr = ioc->attr;
2228 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2229 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2230 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2231 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2232 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2233 sizeof(struct bfa_mfg_vpd));
2235 ad_attr->nports = bfa_ioc_get_nports(ioc);
2236 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2238 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2239 /* For now, model descr uses same model string */
2240 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2242 ad_attr->card_type = ioc_attr->card_type;
2243 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2245 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2246 ad_attr->prototype = 1;
2248 ad_attr->prototype = 0;
2250 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2251 ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
2253 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2254 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2255 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2256 ad_attr->asic_rev = ioc_attr->asic_rev;
2258 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2261 static enum bfa_ioc_type
2262 bfa_ioc_get_type(struct bfa_ioc *ioc)
2264 if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2265 return BFA_IOC_TYPE_LL;
2267 BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));
2269 return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2270 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2274 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2277 (void *)ioc->attr->brcd_serialnum,
2278 BFA_ADAPTER_SERIAL_NUM_LEN);
2282 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
2284 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2288 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2290 BUG_ON(!(chip_rev));
2292 memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2298 chip_rev[4] = ioc->attr->asic_rev;
2303 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2305 memcpy(optrom_ver, ioc->attr->optrom_version,
2310 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2312 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2316 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
2318 struct bfi_ioc_attr *ioc_attr;
2321 memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2323 ioc_attr = ioc->attr;
2325 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2326 BFA_MFG_NAME, ioc_attr->card_type);
2329 static enum bfa_ioc_state
2330 bfa_ioc_get_state(struct bfa_ioc *ioc)
2332 enum bfa_iocpf_state iocpf_st;
2333 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2335 if (ioc_st == BFA_IOC_ENABLING ||
2336 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2338 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2341 case BFA_IOCPF_SEMWAIT:
2342 ioc_st = BFA_IOC_SEMWAIT;
2345 case BFA_IOCPF_HWINIT:
2346 ioc_st = BFA_IOC_HWINIT;
2349 case BFA_IOCPF_FWMISMATCH:
2350 ioc_st = BFA_IOC_FWMISMATCH;
2353 case BFA_IOCPF_FAIL:
2354 ioc_st = BFA_IOC_FAIL;
2357 case BFA_IOCPF_INITFAIL:
2358 ioc_st = BFA_IOC_INITFAIL;
2369 bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2371 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
2373 ioc_attr->state = bfa_ioc_get_state(ioc);
2374 ioc_attr->port_id = ioc->port_id;
2375 ioc_attr->port_mode = ioc->port_mode;
2377 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2378 ioc_attr->cap_bm = ioc->ad_cap_bm;
2380 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2382 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2384 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2385 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2386 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2391 bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2393 return ioc->attr->pwwn;
2397 bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
2399 return ioc->attr->mac;
2402 /* Firmware failure detected. Start recovery actions. */
2404 bfa_ioc_recover(struct bfa_ioc *ioc)
2406 pr_crit("Heart Beat of IOC has failed\n");
2407 bfa_ioc_stats(ioc, ioc_hbfails);
2408 bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2409 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2412 /* BFA IOC PF private functions */
2415 bfa_iocpf_enable(struct bfa_ioc *ioc)
2417 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2421 bfa_iocpf_disable(struct bfa_ioc *ioc)
2423 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2427 bfa_iocpf_fail(struct bfa_ioc *ioc)
2429 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2433 bfa_iocpf_initfail(struct bfa_ioc *ioc)
2435 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2439 bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
2441 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2445 bfa_iocpf_stop(struct bfa_ioc *ioc)
2447 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2451 bfa_nw_iocpf_timeout(void *ioc_arg)
2453 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
2454 enum bfa_iocpf_state iocpf_st;
2456 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2458 if (iocpf_st == BFA_IOCPF_HWINIT)
2459 bfa_ioc_poll_fwinit(ioc);
2461 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2465 bfa_nw_iocpf_sem_timeout(void *ioc_arg)
2467 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
2469 bfa_ioc_hw_sem_get(ioc);
2473 bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
2475 u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2477 if (fwstate == BFI_IOC_DISABLED) {
2478 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2482 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
2483 bfa_nw_iocpf_timeout(ioc);
2485 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2486 mod_timer(&ioc->iocpf_timer, jiffies +
2487 msecs_to_jiffies(BFA_IOC_POLL_TOV));
2492 * Flash module specific
2496 * FLASH DMA buffer should be big enough to hold both MFG block and
2497 * asic block(64k) at the same time and also should be 2k aligned to
2498 * avoid write segement to cross sector boundary.
2500 #define BFA_FLASH_SEG_SZ 2048
2501 #define BFA_FLASH_DMA_BUF_SZ \
2502 roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
2505 bfa_flash_cb(struct bfa_flash *flash)
2509 flash->cbfn(flash->cbarg, flash->status);
2513 bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
2515 struct bfa_flash *flash = cbarg;
2518 case BFA_IOC_E_DISABLED:
2519 case BFA_IOC_E_FAILED:
2520 if (flash->op_busy) {
2521 flash->status = BFA_STATUS_IOC_FAILURE;
2522 flash->cbfn(flash->cbarg, flash->status);
2532 * Send flash write request.
2535 bfa_flash_write_send(struct bfa_flash *flash)
2537 struct bfi_flash_write_req *msg =
2538 (struct bfi_flash_write_req *) flash->mb.msg;
2541 msg->type = be32_to_cpu(flash->type);
2542 msg->instance = flash->instance;
2543 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
2544 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
2545 flash->residue : BFA_FLASH_DMA_BUF_SZ;
2546 msg->length = be32_to_cpu(len);
2548 /* indicate if it's the last msg of the whole write operation */
2549 msg->last = (len == flash->residue) ? 1 : 0;
2551 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
2552 bfa_ioc_portid(flash->ioc));
2553 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
2554 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
2555 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2557 flash->residue -= len;
2558 flash->offset += len;
2562 * bfa_flash_read_send - Send flash read request.
2564 * @cbarg: callback argument
2567 bfa_flash_read_send(void *cbarg)
2569 struct bfa_flash *flash = cbarg;
2570 struct bfi_flash_read_req *msg =
2571 (struct bfi_flash_read_req *) flash->mb.msg;
2574 msg->type = be32_to_cpu(flash->type);
2575 msg->instance = flash->instance;
2576 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
2577 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
2578 flash->residue : BFA_FLASH_DMA_BUF_SZ;
2579 msg->length = be32_to_cpu(len);
2580 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
2581 bfa_ioc_portid(flash->ioc));
2582 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
2583 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2587 * bfa_flash_intr - Process flash response messages upon receiving interrupts.
2589 * @flasharg: flash structure
2590 * @msg: message structure
2593 bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
2595 struct bfa_flash *flash = flasharg;
2599 struct bfi_flash_query_rsp *query;
2600 struct bfi_flash_write_rsp *write;
2601 struct bfi_flash_read_rsp *read;
2602 struct bfi_mbmsg *msg;
2607 /* receiving response after ioc failure */
2608 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
2611 switch (msg->mh.msg_id) {
2612 case BFI_FLASH_I2H_QUERY_RSP:
2613 status = be32_to_cpu(m.query->status);
2614 if (status == BFA_STATUS_OK) {
2616 struct bfa_flash_attr *attr, *f;
2618 attr = (struct bfa_flash_attr *) flash->ubuf;
2619 f = (struct bfa_flash_attr *) flash->dbuf_kva;
2620 attr->status = be32_to_cpu(f->status);
2621 attr->npart = be32_to_cpu(f->npart);
2622 for (i = 0; i < attr->npart; i++) {
2623 attr->part[i].part_type =
2624 be32_to_cpu(f->part[i].part_type);
2625 attr->part[i].part_instance =
2626 be32_to_cpu(f->part[i].part_instance);
2627 attr->part[i].part_off =
2628 be32_to_cpu(f->part[i].part_off);
2629 attr->part[i].part_size =
2630 be32_to_cpu(f->part[i].part_size);
2631 attr->part[i].part_len =
2632 be32_to_cpu(f->part[i].part_len);
2633 attr->part[i].part_status =
2634 be32_to_cpu(f->part[i].part_status);
2637 flash->status = status;
2638 bfa_flash_cb(flash);
2640 case BFI_FLASH_I2H_WRITE_RSP:
2641 status = be32_to_cpu(m.write->status);
2642 if (status != BFA_STATUS_OK || flash->residue == 0) {
2643 flash->status = status;
2644 bfa_flash_cb(flash);
2646 bfa_flash_write_send(flash);
2648 case BFI_FLASH_I2H_READ_RSP:
2649 status = be32_to_cpu(m.read->status);
2650 if (status != BFA_STATUS_OK) {
2651 flash->status = status;
2652 bfa_flash_cb(flash);
2654 u32 len = be32_to_cpu(m.read->length);
2655 memcpy(flash->ubuf + flash->offset,
2656 flash->dbuf_kva, len);
2657 flash->residue -= len;
2658 flash->offset += len;
2659 if (flash->residue == 0) {
2660 flash->status = status;
2661 bfa_flash_cb(flash);
2663 bfa_flash_read_send(flash);
2666 case BFI_FLASH_I2H_BOOT_VER_RSP:
2667 case BFI_FLASH_I2H_EVENT:
2675 * Flash memory info API.
2678 bfa_nw_flash_meminfo(void)
2680 return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2684 * bfa_nw_flash_attach - Flash attach API.
2686 * @flash: flash structure
2687 * @ioc: ioc structure
2688 * @dev: device structure
2691 bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
2695 flash->cbarg = NULL;
2698 bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
2699 bfa_q_qe_init(&flash->ioc_notify);
2700 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
2701 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
2705 * bfa_nw_flash_memclaim - Claim memory for flash
2707 * @flash: flash structure
2708 * @dm_kva: pointer to virtual memory address
2709 * @dm_pa: physical memory address
2712 bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
2714 flash->dbuf_kva = dm_kva;
2715 flash->dbuf_pa = dm_pa;
2716 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
2717 dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2718 dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2722 * bfa_nw_flash_get_attr - Get flash attribute.
2724 * @flash: flash structure
2725 * @attr: flash attribute structure
2726 * @cbfn: callback function
2727 * @cbarg: callback argument
2732 bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
2733 bfa_cb_flash cbfn, void *cbarg)
2735 struct bfi_flash_query_req *msg =
2736 (struct bfi_flash_query_req *) flash->mb.msg;
2738 if (!bfa_nw_ioc_is_operational(flash->ioc))
2739 return BFA_STATUS_IOC_NON_OP;
2742 return BFA_STATUS_DEVBUSY;
2746 flash->cbarg = cbarg;
2747 flash->ubuf = (u8 *) attr;
2749 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
2750 bfa_ioc_portid(flash->ioc));
2751 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
2752 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2754 return BFA_STATUS_OK;
2758 * bfa_nw_flash_update_part - Update flash partition.
2760 * @flash: flash structure
2761 * @type: flash partition type
2762 * @instance: flash partition instance
2763 * @buf: update data buffer
2764 * @len: data buffer length
2765 * @offset: offset relative to the partition starting address
2766 * @cbfn: callback function
2767 * @cbarg: callback argument
2772 bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
2773 void *buf, u32 len, u32 offset,
2774 bfa_cb_flash cbfn, void *cbarg)
2776 if (!bfa_nw_ioc_is_operational(flash->ioc))
2777 return BFA_STATUS_IOC_NON_OP;
2780 * 'len' must be in word (4-byte) boundary
2782 if (!len || (len & 0x03))
2783 return BFA_STATUS_FLASH_BAD_LEN;
2785 if (type == BFA_FLASH_PART_MFG)
2786 return BFA_STATUS_EINVAL;
2789 return BFA_STATUS_DEVBUSY;
2793 flash->cbarg = cbarg;
2795 flash->instance = instance;
2796 flash->residue = len;
2798 flash->addr_off = offset;
2801 bfa_flash_write_send(flash);
2803 return BFA_STATUS_OK;
2807 * bfa_nw_flash_read_part - Read flash partition.
2809 * @flash: flash structure
2810 * @type: flash partition type
2811 * @instance: flash partition instance
2812 * @buf: read data buffer
2813 * @len: data buffer length
2814 * @offset: offset relative to the partition starting address
2815 * @cbfn: callback function
2816 * @cbarg: callback argument
2821 bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
2822 void *buf, u32 len, u32 offset,
2823 bfa_cb_flash cbfn, void *cbarg)
2825 if (!bfa_nw_ioc_is_operational(flash->ioc))
2826 return BFA_STATUS_IOC_NON_OP;
2829 * 'len' must be in word (4-byte) boundary
2831 if (!len || (len & 0x03))
2832 return BFA_STATUS_FLASH_BAD_LEN;
2835 return BFA_STATUS_DEVBUSY;
2839 flash->cbarg = cbarg;
2841 flash->instance = instance;
2842 flash->residue = len;
2844 flash->addr_off = offset;
2847 bfa_flash_read_send(flash);
2849 return BFA_STATUS_OK;