/* bna: ENET and Tx Rx Redesign Enablement */
/* drivers/net/ethernet/brocade/bna/bfa_ioc.c */
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
18
19 #include "bfa_ioc.h"
20 #include "cna.h"
21 #include "bfi.h"
22 #include "bfi_reg.h"
23 #include "bfa_defs.h"
24
/**
 * IOC local definitions
 */

/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

/* Thin wrappers that dispatch through the ASIC-specific hardware
 * interface hooks (ioc->ioc_hwif) bound to this IOC.
 */
#define bfa_ioc_firmware_lock(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)                  \
                        ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)                      \
                        ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)               \
                        ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)                        \
                        ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)                       \
                        ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)                         \
                        ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

/* True while a mailbox command is queued by the driver or a previous
 * command is still posted in the h/w mailbox register.
 */
#define bfa_ioc_mbox_cmd_pending(__ioc)         \
                        (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
                        readl((__ioc)->ioc_regs.hfn_mbox_cmd))
55
/* Module-wide default for IOC auto-recovery; copied into
 * iocpf->auto_recover when the iocpf state machine is reset.
 */
static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
                         u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
                                                char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
                                                char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
                                                char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
                                                char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
                                                char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
100
/**
 * IOC state machine definitions/declarations
 */
enum ioc_event {
        IOC_E_RESET             = 1,    /*!< IOC reset request          */
        IOC_E_ENABLE            = 2,    /*!< IOC enable request         */
        IOC_E_DISABLE           = 3,    /*!< IOC disable request        */
        IOC_E_DETACH            = 4,    /*!< driver detach cleanup      */
        IOC_E_ENABLED           = 5,    /*!< f/w enabled                */
        IOC_E_FWRSP_GETATTR     = 6,    /*!< IOC get attribute response */
        IOC_E_DISABLED          = 7,    /*!< f/w disabled               */
        IOC_E_PFFAILED          = 8,    /*!< failure notice by iocpf sm */
        IOC_E_HBFAIL            = 9,    /*!< heartbeat failure          */
        IOC_E_HWERROR           = 10,   /*!< hardware error interrupt   */
        IOC_E_TIMEOUT           = 11,   /*!< timeout                    */
        IOC_E_HWFAILED          = 12,   /*!< PCI mapping failure notice */
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);

/* Maps each IOC state handler to its externally visible BFA_IOC_* state. */
static struct bfa_sm_table ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
        {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
        {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
        {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
        {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
        {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
        {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
        {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
        {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
142
/**
 * IOCPF state machine definitions/declarations
 */

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);

/**
 * IOCPF state machine events
 */
enum iocpf_event {
        IOCPF_E_ENABLE          = 1,    /*!< IOCPF enable request       */
        IOCPF_E_DISABLE         = 2,    /*!< IOCPF disable request      */
        IOCPF_E_STOP            = 3,    /*!< stop on driver detach      */
        IOCPF_E_FWREADY         = 4,    /*!< f/w initialization done    */
        IOCPF_E_FWRSP_ENABLE    = 5,    /*!< enable f/w response        */
        IOCPF_E_FWRSP_DISABLE   = 6,    /*!< disable f/w response       */
        IOCPF_E_FAIL            = 7,    /*!< failure notice by ioc sm   */
        IOCPF_E_INITFAIL        = 8,    /*!< init fail notice by ioc sm */
        IOCPF_E_GETATTRFAIL     = 9,    /*!< init fail notice by ioc sm */
        IOCPF_E_SEMLOCKED       = 10,   /*!< h/w semaphore is locked    */
        IOCPF_E_TIMEOUT         = 11,   /*!< f/w response timeout       */
        IOCPF_E_SEM_ERROR       = 12,   /*!< h/w sem mapping error      */
};

/**
 * IOCPF states
 */
enum bfa_iocpf_state {
        BFA_IOCPF_RESET         = 1,    /*!< IOC is in reset state */
        BFA_IOCPF_SEMWAIT       = 2,    /*!< Waiting for IOC h/w semaphore */
        BFA_IOCPF_HWINIT        = 3,    /*!< IOC h/w is being initialized */
        BFA_IOCPF_READY         = 4,    /*!< IOCPF is initialized */
        BFA_IOCPF_INITFAIL      = 5,    /*!< IOCPF failed */
        BFA_IOCPF_FAIL          = 6,    /*!< IOCPF failed */
        BFA_IOCPF_DISABLING     = 7,    /*!< IOCPF is being disabled */
        BFA_IOCPF_DISABLED      = 8,    /*!< IOCPF is disabled */
        BFA_IOCPF_FWMISMATCH    = 9,    /*!< IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

/* Maps each IOCPF state handler to its externally visible BFA_IOCPF_* state. */
static struct bfa_sm_table iocpf_sm_table[] = {
        {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
        {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
        {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
        {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
223
224 /**
225  * IOC State Machine
226  */
227
/**
 * Beginning state. IOC uninit state.
 *
 * No entry actions; the IOC stays passive until IOC_E_RESET arrives.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}
235
236 /**
237  * IOC is in uninit state.
238  */
239 static void
240 bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
241 {
242         switch (event) {
243         case IOC_E_RESET:
244                 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
245                 break;
246
247         default:
248                 bfa_sm_fault(event);
249         }
250 }
251
/**
 * Reset entry actions -- initialize state machine
 *
 * Puts the companion IOCPF state machine back into its reset state.
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
        bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
260
261 /**
262  * IOC is in reset state.
263  */
264 static void
265 bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
266 {
267         switch (event) {
268         case IOC_E_ENABLE:
269                 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
270                 break;
271
272         case IOC_E_DISABLE:
273                 bfa_ioc_disable_comp(ioc);
274                 break;
275
276         case IOC_E_DETACH:
277                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
278                 break;
279
280         default:
281                 bfa_sm_fault(event);
282         }
283 }
284
/* Kick off IOCPF enable; completion arrives later as IOC_E_ENABLED. */
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
        bfa_iocpf_enable(ioc);
}
290
/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFFAILED:
                /* !!! fall through !!! */
        case IOC_E_HWERROR:
                /* Report failure to the driver and fail the IOC. A
                 * PFFAILED notice came from the iocpf sm itself, so only
                 * a h/w error needs to be forwarded down to it.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_initfail(ioc);
                break;

        case IOC_E_HWFAILED:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        case IOC_E_ENABLE:
                /* Already enabling; ignore the duplicate request. */
                break;

        default:
                bfa_sm_fault(event);
        }
}
333
/**
 * Arm the getattr timeout and request IOC attributes from firmware.
 * (The original comment here described the f/w version check and was a
 * copy-paste error; this state only issues the getattr mailbox command.)
 */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
        mod_timer(&ioc->ioc_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        bfa_ioc_send_getattr(ioc);
}
344
/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_FWRSP_GETATTR:
                /* Attributes received: stop the timeout, validate WWNs,
                 * start heartbeat monitoring and go operational.
                 */
                del_timer(&ioc->ioc_timer);
                bfa_ioc_check_attr_wwns(ioc);
                bfa_ioc_hb_monitor(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                del_timer(&ioc->ioc_timer);
                /* fall through */
        case IOC_E_TIMEOUT:
                /* Failure while waiting for attributes; only a non-PF
                 * failure needs to be forwarded down to the iocpf sm.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_getattrfail(ioc);
                break;

        case IOC_E_DISABLE:
                del_timer(&ioc->ioc_timer);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_ENABLE:
                /* Already enabling; ignore the duplicate request. */
                break;

        default:
                bfa_sm_fault(event);
        }
}
382
/* IOC is operational: complete the enable callback and notify listeners. */
static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
}
389
/* IOC operational state: heartbeat is running; handle disable/failure. */
static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                /* Already enabled; nothing to do. */
                break;

        case IOC_E_DISABLE:
                bfa_ioc_hb_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                bfa_ioc_hb_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_HBFAIL:
                /* With auto-recovery enabled go through init-fail retry,
                 * otherwise park in the failed state.
                 */
                if (ioc->iocpf.auto_recover)
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                else
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

                bfa_ioc_fail_notify(ioc);

                /* A PFFAILED notice came from the iocpf sm itself. */
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_fail(ioc);
                break;

        default:
                bfa_sm_fault(event);
        }
}
422
/* Forward the disable request down to the IOCPF state machine. */
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
        bfa_iocpf_disable(ioc);
}
428
/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_DISABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        case IOC_E_HWERROR:
                /*
                 * No state change.  Will move to disabled state
                 * after iocpf sm completes failure processing and
                 * moves to disabled state.
                 */
                bfa_iocpf_fail(ioc);
                break;

        case IOC_E_HWFAILED:
                /* Unrecoverable h/w failure during disable: complete the
                 * disable callback and park in the hwfail state.
                 */
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                bfa_ioc_disable_comp(ioc);
                break;

        default:
                bfa_sm_fault(event);
        }
}
458
/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
        bfa_ioc_disable_comp(ioc);
}
467
468 static void
469 bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
470 {
471         switch (event) {
472         case IOC_E_ENABLE:
473                 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
474                 break;
475
476         case IOC_E_DISABLE:
477                 ioc->cbfn->disable_cbfn(ioc->bfa);
478                 break;
479
480         case IOC_E_DETACH:
481                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
482                 bfa_iocpf_stop(ioc);
483                 break;
484
485         default:
486                 bfa_sm_fault(event);
487         }
488 }
489
/* No entry actions; the iocpf sm drives the actual retry. */
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}
494
/**
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLED:
                /* Retry succeeded; resume normal bring-up. */
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                /**
                 * Initialization retry failed.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                /* A PFFAILED notice came from the iocpf sm itself. */
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_initfail(ioc);
                break;

        case IOC_E_HWFAILED:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;

        case IOC_E_ENABLE:
                /* Retry already in progress; ignore. */
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        default:
                bfa_sm_fault(event);
        }
}
538
/* No entry actions; failure was already reported on the way here. */
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}
543
544 /**
545  * IOC failure.
546  */
547 static void
548 bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
549 {
550         switch (event) {
551         case IOC_E_ENABLE:
552                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
553                 break;
554
555         case IOC_E_DISABLE:
556                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
557                 break;
558
559         case IOC_E_DETACH:
560                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
561                 bfa_iocpf_stop(ioc);
562                 break;
563
564         case IOC_E_HWERROR:
565                 /* HB failure notification, ignore. */
566                 break;
567
568         default:
569                 bfa_sm_fault(event);
570         }
571 }
572
/* No entry actions; the h/w failure was already reported. */
static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}
577
578 /**
579  * IOC failure.
580  */
581 static void
582 bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
583 {
584         switch (event) {
585
586         case IOC_E_ENABLE:
587                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
588                 break;
589
590         case IOC_E_DISABLE:
591                 ioc->cbfn->disable_cbfn(ioc->bfa);
592                 break;
593
594         case IOC_E_DETACH:
595                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
596                 break;
597
598         default:
599                 bfa_sm_fault(event);
600         }
601 }
602
603 /**
604  * IOCPF State Machine
605  */
606
/**
 * Reset entry actions -- initialize state machine
 *
 * Clears the one-shot f/w mismatch notification flag and latches the
 * module-wide auto-recovery default into this IOCPF instance.
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
        iocpf->fw_mismatch_notified = false;
        iocpf->auto_recover = bfa_nw_auto_recover;
}
616
617 /**
618  * Beginning state. IOC is in reset state.
619  */
620 static void
621 bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
622 {
623         switch (event) {
624         case IOCPF_E_ENABLE:
625                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
626                 break;
627
628         case IOCPF_E_STOP:
629                 break;
630
631         default:
632                 bfa_sm_fault(event);
633         }
634 }
635
/**
 * Semaphore should be acquired for version check.
 *
 * Initializes then requests the h/w semaphore; IOCPF_E_SEMLOCKED is
 * delivered once it is obtained.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_init(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
}
645
/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
                        if (bfa_ioc_sync_start(ioc)) {
                                /* F/w matches and sync can start: join the
                                 * init sync group and begin h/w init.
                                 */
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        } else {
                                /* Another function is mid-init: back off,
                                 * release lock + semaphore and retry after
                                 * the semaphore timeout.
                                 */
                                bfa_ioc_firmware_unlock(ioc);
                                bfa_nw_ioc_hw_sem_release(ioc);
                                mod_timer(&ioc->sem_timer, jiffies +
                                        msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
                        }
                } else {
                        /* F/w version mismatch with the running firmware. */
                        bfa_nw_ioc_hw_sem_release(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_ioc_pf_hwfailed(ioc);
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_ioc_pf_disabled(ioc);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(event);
        }
}
692
693 /**
694  * Notify enable completion callback
695  */
696 static void
697 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
698 {
699         /* Call only the first time sm enters fwmismatch state. */
700         if (iocpf->fw_mismatch_notified == false)
701                 bfa_ioc_pf_fwmismatch(iocpf->ioc);
702
703         iocpf->fw_mismatch_notified = true;
704         mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
705                 msecs_to_jiffies(BFA_IOC_TOV));
706 }
707
/**
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_TIMEOUT:
                /* Timer fired: re-run the firmware version check. */
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_ioc_pf_disabled(ioc);
                break;

        case IOCPF_E_STOP:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(event);
        }
}
736
/**
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}
745
/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_sync_complete(ioc)) {
                        /* All functions synced: join and start h/w init. */
                        bfa_ioc_sync_join(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                } else {
                        /* Sync not complete: release the semaphore and
                         * retry after the semaphore timeout.
                         */
                        bfa_nw_ioc_hw_sem_release(ioc);
                        mod_timer(&ioc->sem_timer, jiffies +
                                msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_ioc_pf_hwfailed(ioc);
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        default:
                bfa_sm_fault(event);
        }
}
780
781 static void
782 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
783 {
784         iocpf->poll_time = 0;
785         bfa_ioc_reset(iocpf->ioc, 0);
786 }
787
788 /**
789  * Hardware is being initialized. Interrupts are enabled.
790  * Holding hardware semaphore lock.
791  */
792 static void
793 bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
794 {
795         struct bfa_ioc *ioc = iocpf->ioc;
796
797         switch (event) {
798         case IOCPF_E_FWREADY:
799                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
800                 break;
801
802         case IOCPF_E_TIMEOUT:
803                 bfa_nw_ioc_hw_sem_release(ioc);
804                         bfa_ioc_pf_failed(ioc);
805                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
806                 break;
807
808         case IOCPF_E_DISABLE:
809                 del_timer(&ioc->iocpf_timer);
810                 bfa_ioc_sync_leave(ioc);
811                 bfa_nw_ioc_hw_sem_release(ioc);
812                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
813                 break;
814
815         default:
816                 bfa_sm_fault(event);
817         }
818 }
819
/* Arm the enable timeout and send the f/w IOC ENABLE command. */
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        /**
         * Enable Interrupts before sending fw IOC ENABLE cmd.
         */
        iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
        bfa_ioc_send_enable(iocpf->ioc);
}
831
/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_FWRSP_ENABLE:
                /* F/w acknowledged enable: stop the timeout, drop the
                 * h/w semaphore and go ready.
                 */
                del_timer(&ioc->iocpf_timer);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
                break;

        case IOCPF_E_INITFAIL:
                del_timer(&ioc->iocpf_timer);
                /*
                 * !!! fall through !!!
                 */
        case IOCPF_E_TIMEOUT:
                bfa_nw_ioc_hw_sem_release(ioc);
                /* Only a genuine timeout needs to be reported upward;
                 * INITFAIL already came from the ioc sm.
                 */
                if (event == IOCPF_E_TIMEOUT)
                        bfa_ioc_pf_failed(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        default:
                bfa_sm_fault(event);
        }
}
870
/* IOCPF is up: report enable completion to the IOC state machine. */
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_pf_enabled(iocpf->ioc);
}
876
877 static void
878 bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
879 {
880         switch (event) {
881         case IOCPF_E_DISABLE:
882                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
883                 break;
884
885         case IOCPF_E_GETATTRFAIL:
886                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
887                 break;
888
889         case IOCPF_E_FAIL:
890                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
891                 break;
892
893         default:
894                 bfa_sm_fault(event);
895         }
896 }
897
/* Arm the disable timeout and send the f/w IOC DISABLE command. */
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        bfa_ioc_send_disable(iocpf->ioc);
}
905
/**
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_FWRSP_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                del_timer(&ioc->iocpf_timer);
                /*
                 * !!! fall through !!!
                 */

        case IOCPF_E_TIMEOUT:
                /* No disable response from f/w: force the fwstate to
                 * FAIL and proceed with the disable sync anyway.
                 */
                writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FWRSP_ENABLE:
                /* Stale enable response racing with disable; ignore. */
                break;

        default:
                bfa_sm_fault(event);
        }
}
938
/**
 * Disabling-sync entry: start hardware semaphore acquisition; the
 * result arrives as IOCPF_E_SEMLOCKED / IOCPF_E_SEM_ERROR.
 */
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
944
945 /**
946  * IOC hb ack request is being removed.
947  */
948 static void
949 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
950 {
951         struct bfa_ioc *ioc = iocpf->ioc;
952
953         switch (event) {
954         case IOCPF_E_SEMLOCKED:
955                 bfa_ioc_sync_leave(ioc);
956                 bfa_nw_ioc_hw_sem_release(ioc);
957                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
958                 break;
959
960         case IOCPF_E_SEM_ERROR:
961                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
962                 bfa_ioc_pf_hwfailed(ioc);
963                 break;
964
965         case IOCPF_E_FAIL:
966                 break;
967
968         default:
969                 bfa_sm_fault(event);
970         }
971 }
972
973 /**
974  * IOC disable completion entry.
975  */
976 static void
977 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
978 {
979         bfa_ioc_mbox_flush(iocpf->ioc);
980         bfa_ioc_pf_disabled(iocpf->ioc);
981 }
982
/**
 * IOC PF is disabled; waiting for a re-enable or a final stop.
 */
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		/* Final teardown: release the firmware download lock. */
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1002
/**
 * Initfail-sync entry: request the hardware semaphore so failure
 * state can be recorded safely.
 */
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
1008
1009 /**
1010  * Hardware initialization failed.
1011  */
1012 static void
1013 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1014 {
1015         struct bfa_ioc *ioc = iocpf->ioc;
1016
1017         switch (event) {
1018         case IOCPF_E_SEMLOCKED:
1019                 bfa_ioc_notify_fail(ioc);
1020                 bfa_ioc_sync_leave(ioc);
1021                 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1022                 bfa_nw_ioc_hw_sem_release(ioc);
1023                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
1024                 break;
1025
1026         case IOCPF_E_SEM_ERROR:
1027                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1028                 bfa_ioc_pf_hwfailed(ioc);
1029                 break;
1030
1031         case IOCPF_E_DISABLE:
1032                 bfa_ioc_hw_sem_get_cancel(ioc);
1033                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1034                 break;
1035
1036         case IOCPF_E_STOP:
1037                 bfa_ioc_hw_sem_get_cancel(ioc);
1038                 bfa_ioc_firmware_unlock(ioc);
1039                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1040                 break;
1041
1042         case IOCPF_E_FAIL:
1043                 break;
1044
1045         default:
1046                 bfa_sm_fault(event);
1047         }
1048 }
1049
/**
 * Initfail state entry: intentionally empty; the failure was already
 * recorded in the initfail_sync state.
 */
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}
1054
1055 /**
1056  * Hardware initialization failed.
1057  */
1058 static void
1059 bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1060 {
1061         struct bfa_ioc *ioc = iocpf->ioc;
1062
1063         switch (event) {
1064         case IOCPF_E_DISABLE:
1065                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1066                 break;
1067
1068         case IOCPF_E_STOP:
1069                 bfa_ioc_firmware_unlock(ioc);
1070                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1071                 break;
1072
1073         default:
1074                 bfa_sm_fault(event);
1075         }
1076 }
1077
/**
 * Fail-sync entry: stop the LPU, flush pending mailbox commands, and
 * start acquiring the hardware semaphore to record the failure.
 */
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
1092
1093 /**
1094  * IOC is in failed state.
1095  */
1096 static void
1097 bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1098 {
1099         struct bfa_ioc *ioc = iocpf->ioc;
1100
1101         switch (event) {
1102         case IOCPF_E_SEMLOCKED:
1103                 bfa_ioc_sync_ack(ioc);
1104                 bfa_ioc_notify_fail(ioc);
1105                 if (!iocpf->auto_recover) {
1106                         bfa_ioc_sync_leave(ioc);
1107                         writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1108                         bfa_nw_ioc_hw_sem_release(ioc);
1109                         bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1110                 } else {
1111                         if (bfa_ioc_sync_complete(ioc))
1112                                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1113                         else {
1114                                 bfa_nw_ioc_hw_sem_release(ioc);
1115                                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1116                         }
1117                 }
1118                 break;
1119
1120         case IOCPF_E_SEM_ERROR:
1121                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1122                 bfa_ioc_pf_hwfailed(ioc);
1123                 break;
1124
1125         case IOCPF_E_DISABLE:
1126                 bfa_ioc_hw_sem_get_cancel(ioc);
1127                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1128                 break;
1129
1130         case IOCPF_E_FAIL:
1131                 break;
1132
1133         default:
1134                 bfa_sm_fault(event);
1135         }
1136 }
1137
/**
 * Fail state entry: intentionally empty; cleanup was done on the way
 * in (fail_sync or the SEM_ERROR paths).
 */
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}
1142
1143 /**
1144  * @brief
1145  * IOC is in failed state.
1146  */
1147 static void
1148 bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1149 {
1150         switch (event) {
1151         case IOCPF_E_DISABLE:
1152                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1153                 break;
1154
1155         default:
1156                 bfa_sm_fault(event);
1157         }
1158 }
1159
1160 /**
1161  * BFA IOC private functions
1162  */
1163
1164 /**
1165  * Notify common modules registered for notification.
1166  */
1167 static void
1168 bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
1169 {
1170         struct bfa_ioc_notify *notify;
1171         struct list_head                        *qe;
1172
1173         list_for_each(qe, &ioc->notify_q) {
1174                 notify = (struct bfa_ioc_notify *)qe;
1175                 notify->cbfn(notify->cbarg, event);
1176         }
1177 }
1178
/**
 * Complete an IOC disable: run the driver's disable callback and
 * notify all registered modules of BFA_IOC_E_DISABLED.
 */
static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
1185
1186 bool
1187 bfa_nw_ioc_sem_get(void __iomem *sem_reg)
1188 {
1189         u32 r32;
1190         int cnt = 0;
1191 #define BFA_SEM_SPINCNT 3000
1192
1193         r32 = readl(sem_reg);
1194
1195         while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1196                 cnt++;
1197                 udelay(2);
1198                 r32 = readl(sem_reg);
1199         }
1200
1201         if (!(r32 & 1))
1202                 return true;
1203
1204         BUG_ON(!(cnt < BFA_SEM_SPINCNT));
1205         return false;
1206 }
1207
/**
 * Release a hardware semaphore by writing 1 to its register.
 */
void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	writel(1, sem_reg);
}
1213
/**
 * Sanitize semaphore/fwstate left over from a previous driver
 * instance: if firmware state is not UNINIT and the running firmware
 * was not a normal boot, force the state back to UNINIT and cycle the
 * IOC semaphore so it cannot remain stuck held.
 */
static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr;
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (fwstate == BFI_IOC_UNINIT)
		return;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);

	/* A normal firmware boot is left untouched. */
	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
		return;

	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}
1236
/**
 * Attempt one non-blocking acquire of the IOC hardware semaphore and
 * feed the result into the IOCPF state machine; if the semaphore is
 * busy, arm a retry timer instead.
 */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32	r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		/* All-ones readback: register access failed. */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	/* Semaphore busy: retry when the timer fires. */
	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}
1259
/**
 * Release the IOC hardware semaphore.
 */
void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}
1265
/**
 * Cancel a pending semaphore acquisition retry.
 */
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}
1271
1272 /**
1273  * @brief
1274  * Initialize LPU local memory (aka secondary memory / SRAM)
1275  */
1276 static void
1277 bfa_ioc_lmem_init(struct bfa_ioc *ioc)
1278 {
1279         u32     pss_ctl;
1280         int             i;
1281 #define PSS_LMEM_INIT_TIME  10000
1282
1283         pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1284         pss_ctl &= ~__PSS_LMEM_RESET;
1285         pss_ctl |= __PSS_LMEM_INIT_EN;
1286
1287         /*
1288          * i2c workaround 12.5khz clock
1289          */
1290         pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1291         writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1292
1293         /**
1294          * wait for memory initialization to be complete
1295          */
1296         i = 0;
1297         do {
1298                 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1299                 i++;
1300         } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1301
1302         /**
1303          * If memory initialization is not successful, IOC timeout will catch
1304          * such failures.
1305          */
1306         BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1307
1308         pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1309         writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1310 }
1311
/**
 * Start the LPU by clearing its reset bit in the PSS control register.
 */
static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32	pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
1325
/**
 * Stop both LPUs by setting their reset bits in the PSS control
 * register.
 */
static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32	pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
1339
1340 /**
1341  * Get driver and firmware versions.
1342  */
1343 void
1344 bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1345 {
1346         u32     pgnum;
1347         u32     loff = 0;
1348         int             i;
1349         u32     *fwsig = (u32 *) fwhdr;
1350
1351         pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1352         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1353
1354         for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
1355              i++) {
1356                 fwsig[i] =
1357                         swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1358                 loff += sizeof(u32);
1359         }
1360 }
1361
1362 /**
1363  * Returns TRUE if same.
1364  */
1365 bool
1366 bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1367 {
1368         struct bfi_ioc_image_hdr *drv_fwhdr;
1369         int i;
1370
1371         drv_fwhdr = (struct bfi_ioc_image_hdr *)
1372                 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1373
1374         for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1375                 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
1376                         return false;
1377         }
1378
1379         return true;
1380 }
1381
1382 /**
1383  * Return true if current running version is valid. Firmware signature and
1384  * execution context (driver/bios) must match.
1385  */
1386 static bool
1387 bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
1388 {
1389         struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
1390
1391         bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1392         drv_fwhdr = (struct bfi_ioc_image_hdr *)
1393                 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1394
1395         if (fwhdr.signature != drv_fwhdr->signature)
1396                 return false;
1397
1398         if (swab32(fwhdr.bootenv) != boot_env)
1399                 return false;
1400
1401         return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
1402 }
1403
1404 /**
1405  * Conditionally flush any pending message from firmware at start.
1406  */
1407 static void
1408 bfa_ioc_msgflush(struct bfa_ioc *ioc)
1409 {
1410         u32     r32;
1411
1412         r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1413         if (r32)
1414                 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1415 }
1416
1417 /**
1418  * @img ioc_init_logic.jpg
1419  */
1420 static void
1421 bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1422 {
1423         enum bfi_ioc_state ioc_fwstate;
1424         bool fwvalid;
1425         u32 boot_env;
1426
1427         ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1428
1429         if (force)
1430                 ioc_fwstate = BFI_IOC_UNINIT;
1431
1432         boot_env = BFI_FWBOOT_ENV_OS;
1433
1434         /**
1435          * check if firmware is valid
1436          */
1437         fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1438                 false : bfa_ioc_fwver_valid(ioc, boot_env);
1439
1440         if (!fwvalid) {
1441                 bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
1442                 bfa_ioc_poll_fwinit(ioc);
1443                 return;
1444         }
1445
1446         /**
1447          * If hardware initialization is in progress (initialized by other IOC),
1448          * just wait for an initialization completion interrupt.
1449          */
1450         if (ioc_fwstate == BFI_IOC_INITING) {
1451                 bfa_ioc_poll_fwinit(ioc);
1452                 return;
1453         }
1454
1455         /**
1456          * If IOC function is disabled and firmware version is same,
1457          * just re-enable IOC.
1458          */
1459         if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1460                 /**
1461                  * When using MSI-X any pending firmware ready event should
1462                  * be flushed. Otherwise MSI-X interrupts are not delivered.
1463                  */
1464                 bfa_ioc_msgflush(ioc);
1465                 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1466                 return;
1467         }
1468
1469         /**
1470          * Initialize the h/w for any other states.
1471          */
1472         bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
1473         bfa_ioc_poll_fwinit(ioc);
1474 }
1475
/**
 * IOC timer callback: deliver a timeout event to the IOC state
 * machine.
 */
void
bfa_nw_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
1483
/**
 * Copy a message into the host-to-firmware mailbox registers, zero
 * the remainder of the mailbox, and ring the doorbell.
 */
static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 *
	 * NOTE(review): writel itself normally converts to little-endian,
	 * so the explicit cpu_to_le32 looks like a double swap on
	 * big-endian hosts -- confirm against the hardware spec.
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			      ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
1508
/**
 * Build and send the IOC enable request to firmware, carrying the PCI
 * class code and the current time in seconds.
 */
static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = htons(ioc->clscode);
	do_gettimeofday(&tv);
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}
1522
/**
 * Build and send the IOC disable request to firmware.
 */
static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}
1532
/**
 * Request IOC attributes from firmware; the reply is DMAed to the
 * attribute buffer claimed in bfa_nw_ioc_mem_claim().
 */
static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
1543
1544 void
1545 bfa_nw_ioc_hb_check(void *cbarg)
1546 {
1547         struct bfa_ioc *ioc = cbarg;
1548         u32     hb_count;
1549
1550         hb_count = readl(ioc->ioc_regs.heartbeat);
1551         if (ioc->hb_count == hb_count) {
1552                 bfa_ioc_recover(ioc);
1553                 return;
1554         } else {
1555                 ioc->hb_count = hb_count;
1556         }
1557
1558         bfa_ioc_mbox_poll(ioc);
1559         mod_timer(&ioc->hb_timer, jiffies +
1560                 msecs_to_jiffies(BFA_IOC_HB_TOV));
1561 }
1562
/**
 * Start heartbeat monitoring: snapshot the current heartbeat counter
 * and arm the heartbeat timer.
 */
static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}
1570
/**
 * Stop heartbeat monitoring.
 */
static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}
1576
1577 /**
1578  * @brief
1579  *      Initiate a full firmware download.
1580  */
1581 static void
1582 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1583                     u32 boot_env)
1584 {
1585         u32 *fwimg;
1586         u32 pgnum;
1587         u32 loff = 0;
1588         u32 chunkno = 0;
1589         u32 i;
1590         u32 asicmode;
1591
1592         /**
1593          * Initialize LMEM first before code download
1594          */
1595         bfa_ioc_lmem_init(ioc);
1596
1597         fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1598
1599         pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1600
1601         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1602
1603         for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
1604                 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1605                         chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1606                         fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1607                                         BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1608                 }
1609
1610                 /**
1611                  * write smem
1612                  */
1613                 writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
1614                               ((ioc->ioc_regs.smem_page_start) + (loff)));
1615
1616                 loff += sizeof(u32);
1617
1618                 /**
1619                  * handle page offset wrap around
1620                  */
1621                 loff = PSS_SMEM_PGOFF(loff);
1622                 if (loff == 0) {
1623                         pgnum++;
1624                         writel(pgnum,
1625                                       ioc->ioc_regs.host_page_num_fn);
1626                 }
1627         }
1628
1629         writel(bfa_ioc_smem_pgnum(ioc, 0),
1630                       ioc->ioc_regs.host_page_num_fn);
1631
1632         /*
1633          * Set boot type, env and device mode at the end.
1634         */
1635         asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1636                                         ioc->port0_mode, ioc->port1_mode);
1637         writel(asicmode, ((ioc->ioc_regs.smem_page_start)
1638                         + BFI_FWBOOT_DEVMODE_OFF));
1639         writel(boot_type, ((ioc->ioc_regs.smem_page_start)
1640                         + (BFI_FWBOOT_TYPE_OFF)));
1641         writel(boot_env, ((ioc->ioc_regs.smem_page_start)
1642                         + (BFI_FWBOOT_ENV_OFF)));
1643 }
1644
/**
 * Reset the IOC by (re)running hardware initialization.
 */
static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}
1650
1651 /**
1652  * BFA ioc enable reply by firmware
1653  */
1654 static void
1655 bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
1656                         u8 cap_bm)
1657 {
1658         struct bfa_iocpf *iocpf = &ioc->iocpf;
1659
1660         ioc->port_mode = ioc->port_mode_cfg = port_mode;
1661         ioc->ad_cap_bm = cap_bm;
1662         bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1663 }
1664
1665 /**
1666  * @brief
1667  * Update BFA configuration from firmware configuration.
1668  */
1669 static void
1670 bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1671 {
1672         struct bfi_ioc_attr *attr = ioc->attr;
1673
1674         attr->adapter_prop  = ntohl(attr->adapter_prop);
1675         attr->card_type     = ntohl(attr->card_type);
1676         attr->maxfrsize     = ntohs(attr->maxfrsize);
1677
1678         bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1679 }
1680
1681 /**
1682  * Attach time initialization of mbox logic.
1683  */
1684 static void
1685 bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1686 {
1687         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1688         int     mc;
1689
1690         INIT_LIST_HEAD(&mod->cmd_q);
1691         for (mc = 0; mc < BFI_MC_MAX; mc++) {
1692                 mod->mbhdlr[mc].cbfn = NULL;
1693                 mod->mbhdlr[mc].cbarg = ioc->bfa;
1694         }
1695 }
1696
1697 /**
1698  * Mbox poll timer -- restarts any pending mailbox requests.
1699  */
1700 static void
1701 bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1702 {
1703         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1704         struct bfa_mbox_cmd *cmd;
1705         bfa_mbox_cmd_cbfn_t cbfn;
1706         void *cbarg;
1707         u32 stat;
1708
1709         /**
1710          * If no command pending, do nothing
1711          */
1712         if (list_empty(&mod->cmd_q))
1713                 return;
1714
1715         /**
1716          * If previous command is not yet fetched by firmware, do nothing
1717          */
1718         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1719         if (stat)
1720                 return;
1721
1722         /**
1723          * Enqueue command to firmware.
1724          */
1725         bfa_q_deq(&mod->cmd_q, &cmd);
1726         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1727
1728         /**
1729          * Give a callback to the client, indicating that the command is sent
1730          */
1731         if (cmd->cbfn) {
1732                 cbfn = cmd->cbfn;
1733                 cbarg = cmd->cbarg;
1734                 cmd->cbfn = NULL;
1735                 cbfn(cbarg);
1736         }
1737 }
1738
1739 /**
1740  * Cleanup any pending requests.
1741  */
1742 static void
1743 bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
1744 {
1745         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1746         struct bfa_mbox_cmd *cmd;
1747
1748         while (!list_empty(&mod->cmd_q))
1749                 bfa_q_deq(&mod->cmd_q, &cmd);
1750 }
1751
/**
 * Report an IOC failure to the driver (hbfail callback) and to all
 * registered notification clients.
 */
static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
}
1761
/**
 * IOCPF -> IOC: PF enable completed.
 */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}
1767
/**
 * IOCPF -> IOC: PF disable completed.
 */
static void
bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}
1773
/**
 * IOCPF -> IOC: PF failed.
 */
static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}
1779
/**
 * IOCPF -> IOC: hardware failure (e.g. semaphore access error).
 */
static void
bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
}
1785
/**
 * IOCPF -> driver: firmware version mismatch; complete the enable
 * request with a failure status.
 */
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
1794
1795 /**
1796  * IOC public
1797  */
1798 static enum bfa_status
1799 bfa_ioc_pll_init(struct bfa_ioc *ioc)
1800 {
1801         /*
1802          *  Hold semaphore so that nobody can access the chip during init.
1803          */
1804         bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1805
1806         bfa_ioc_pll_init_asic(ioc);
1807
1808         ioc->pllinit = true;
1809         /*
1810          *  release semaphore.
1811          */
1812         bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1813
1814         return BFA_STATUS_OK;
1815 }
1816
1817 /**
1818  * Interface used by diag module to do firmware boot with memory test
1819  * as the entry vector.
1820  */
1821 static void
1822 bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
1823                 u32 boot_env)
1824 {
1825         bfa_ioc_stats(ioc, ioc_boots);
1826
1827         if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1828                 return;
1829
1830         /**
1831          * Initialize IOC state of all functions on a chip reset.
1832          */
1833         if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
1834                 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
1835                 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
1836         } else {
1837                 writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
1838                 writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
1839         }
1840
1841         bfa_ioc_msgflush(ioc);
1842         bfa_ioc_download_fw(ioc, boot_type, boot_env);
1843         bfa_ioc_lpu_start(ioc);
1844 }
1845
1846 /**
1847  * Enable/disable IOC failure auto recovery.
1848  */
1849 void
1850 bfa_nw_ioc_auto_recover(bool auto_recover)
1851 {
1852         bfa_nw_auto_recover = auto_recover;
1853 }
1854
/**
 * Read a pending firmware-to-host mailbox message into @mbmsg and ack
 * the mailbox interrupt. Returns false if no message was pending.
 */
static bool
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
	u32	*msgp = mbmsg;
	u32	r32;
	int		i;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return false;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
				   i * sizeof(u32));
		msgp[i] = htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);

	return true;
}
1884
/**
 * Dispatch a firmware-to-host IOC message to the appropriate handler;
 * unknown message IDs are treated as fatal.
 */
static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
	union bfi_ioc_i2h_msg_u *msg;
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		/* Heartbeat counter already read elsewhere; no action. */
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_ioc_enable_reply(ioc,
			(enum bfa_mode)msg->fw_event.port_mode,
			msg->fw_event.cap_bm);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		BUG_ON(1);
	}
}
1917
1918 /**
1919  * IOC attach time initialization and setup.
1920  *
1921  * @param[in]   ioc     memory for IOC
1922  * @param[in]   bfa     driver instance structure
1923  */
1924 void
1925 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
1926 {
1927         ioc->bfa        = bfa;
1928         ioc->cbfn       = cbfn;
1929         ioc->fcmode     = false;
1930         ioc->pllinit    = false;
1931         ioc->dbg_fwsave_once = true;
1932         ioc->iocpf.ioc  = ioc;
1933
1934         bfa_ioc_mbox_attach(ioc);
1935         INIT_LIST_HEAD(&ioc->notify_q);
1936
1937         bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
1938         bfa_fsm_send_event(ioc, IOC_E_RESET);
1939 }
1940
1941 /**
1942  * Driver detach time IOC cleanup.
1943  */
1944 void
1945 bfa_nw_ioc_detach(struct bfa_ioc *ioc)
1946 {
1947         bfa_fsm_send_event(ioc, IOC_E_DETACH);
1948
1949         /* Done with detach, empty the notify_q. */
1950         INIT_LIST_HEAD(&ioc->notify_q);
1951 }
1952
1953 /**
1954  * Setup IOC PCI properties.
1955  *
1956  * @param[in]   pcidev  PCI device information for this IOC
1957  */
1958 void
1959 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
1960                  enum bfi_pcifn_class clscode)
1961 {
1962         ioc->clscode    = clscode;
1963         ioc->pcidev     = *pcidev;
1964
1965         /**
1966          * Initialize IOC and device personality
1967          */
1968         ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
1969         ioc->asic_mode  = BFI_ASIC_MODE_FC;
1970
1971         switch (pcidev->device_id) {
1972         case PCI_DEVICE_ID_BROCADE_CT:
1973                 ioc->asic_gen = BFI_ASIC_GEN_CT;
1974                 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
1975                 ioc->asic_mode  = BFI_ASIC_MODE_ETH;
1976                 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
1977                 ioc->ad_cap_bm = BFA_CM_CNA;
1978                 break;
1979
1980         default:
1981                 BUG_ON(1);
1982         }
1983
1984         bfa_nw_ioc_set_ct_hwif(ioc);
1985
1986         bfa_ioc_map_port(ioc);
1987         bfa_ioc_reg_init(ioc);
1988 }
1989
1990 /**
1991  * Initialize IOC dma memory
1992  *
1993  * @param[in]   dm_kva  kernel virtual address of IOC dma memory
1994  * @param[in]   dm_pa   physical address of IOC dma memory
1995  */
1996 void
1997 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa)
1998 {
1999         /**
2000          * dma memory for firmware attribute
2001          */
2002         ioc->attr_dma.kva = dm_kva;
2003         ioc->attr_dma.pa = dm_pa;
2004         ioc->attr = (struct bfi_ioc_attr *) dm_kva;
2005 }
2006
2007 /**
2008  * Return size of dma memory required.
2009  */
2010 u32
2011 bfa_nw_ioc_meminfo(void)
2012 {
2013         return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
2014 }
2015
2016 void
2017 bfa_nw_ioc_enable(struct bfa_ioc *ioc)
2018 {
2019         bfa_ioc_stats(ioc, ioc_enables);
2020         ioc->dbg_fwsave_once = true;
2021
2022         bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2023 }
2024
2025 void
2026 bfa_nw_ioc_disable(struct bfa_ioc *ioc)
2027 {
2028         bfa_ioc_stats(ioc, ioc_disables);
2029         bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2030 }
2031
/* Translate a full SMEM firmware address into a page number relative to
 * this IOC's page-0 register mapping.
 */
static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}
2037
2038 /**
2039  * Register mailbox message handler function, to be called by common modules
2040  */
2041 void
2042 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
2043                     bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2044 {
2045         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2046
2047         mod->mbhdlr[mc].cbfn    = cbfn;
2048         mod->mbhdlr[mc].cbarg = cbarg;
2049 }
2050
2051 /**
2052  * Queue a mailbox command request to firmware. Waits if mailbox is busy.
2053  * Responsibility of caller to serialize
2054  *
2055  * @param[in]   ioc     IOC instance
2056  * @param[i]    cmd     Mailbox command
2057  */
2058 bool
2059 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
2060                         bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
2061 {
2062         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2063         u32                     stat;
2064
2065         cmd->cbfn = cbfn;
2066         cmd->cbarg = cbarg;
2067
2068         /**
2069          * If a previous command is pending, queue new command
2070          */
2071         if (!list_empty(&mod->cmd_q)) {
2072                 list_add_tail(&cmd->qe, &mod->cmd_q);
2073                 return true;
2074         }
2075
2076         /**
2077          * If mailbox is busy, queue command for poll timer
2078          */
2079         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2080         if (stat) {
2081                 list_add_tail(&cmd->qe, &mod->cmd_q);
2082                 return true;
2083         }
2084
2085         /**
2086          * mailbox is free -- queue command to firmware
2087          */
2088         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2089
2090         return false;
2091 }
2092
2093 /**
2094  * Handle mailbox interrupts
2095  */
2096 void
2097 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
2098 {
2099         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2100         struct bfi_mbmsg m;
2101         int                             mc;
2102
2103         if (bfa_ioc_msgget(ioc, &m)) {
2104                 /**
2105                  * Treat IOC message class as special.
2106                  */
2107                 mc = m.mh.msg_class;
2108                 if (mc == BFI_MC_IOC) {
2109                         bfa_ioc_isr(ioc, &m);
2110                         return;
2111                 }
2112
2113                 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2114                         return;
2115
2116                 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2117         }
2118
2119         bfa_ioc_lpu_read_stat(ioc);
2120
2121         /**
2122          * Try to send pending mailbox commands
2123          */
2124         bfa_ioc_mbox_poll(ioc);
2125 }
2126
2127 void
2128 bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
2129 {
2130         bfa_ioc_stats(ioc, ioc_hbfails);
2131         bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2132         bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2133 }
2134
2135 /**
2136  * return true if IOC is disabled
2137  */
2138 bool
2139 bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2140 {
2141         return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2142                 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2143 }
2144
/**
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	notify	caller-owned entry; must stay valid while queued
 */
void
bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
			struct bfa_ioc_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->notify_q);
}
2155
2156 #define BFA_MFG_NAME "Brocade"
2157 static void
2158 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
2159                          struct bfa_adapter_attr *ad_attr)
2160 {
2161         struct bfi_ioc_attr *ioc_attr;
2162
2163         ioc_attr = ioc->attr;
2164
2165         bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2166         bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2167         bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2168         bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2169         memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2170                       sizeof(struct bfa_mfg_vpd));
2171
2172         ad_attr->nports = bfa_ioc_get_nports(ioc);
2173         ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2174
2175         bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2176         /* For now, model descr uses same model string */
2177         bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2178
2179         ad_attr->card_type = ioc_attr->card_type;
2180         ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2181
2182         if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2183                 ad_attr->prototype = 1;
2184         else
2185                 ad_attr->prototype = 0;
2186
2187         ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2188         ad_attr->mac  = bfa_nw_ioc_get_mac(ioc);
2189
2190         ad_attr->pcie_gen = ioc_attr->pcie_gen;
2191         ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2192         ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2193         ad_attr->asic_rev = ioc_attr->asic_rev;
2194
2195         bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2196 }
2197
2198 static enum bfa_ioc_type
2199 bfa_ioc_get_type(struct bfa_ioc *ioc)
2200 {
2201         if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2202                 return BFA_IOC_TYPE_LL;
2203
2204         BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));
2205
2206         return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2207                 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2208 }
2209
2210 static void
2211 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2212 {
2213         memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2214         memcpy(serial_num,
2215                         (void *)ioc->attr->brcd_serialnum,
2216                         BFA_ADAPTER_SERIAL_NUM_LEN);
2217 }
2218
2219 static void
2220 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
2221 {
2222         memset(fw_ver, 0, BFA_VERSION_LEN);
2223         memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2224 }
2225
2226 static void
2227 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2228 {
2229         BUG_ON(!(chip_rev));
2230
2231         memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2232
2233         chip_rev[0] = 'R';
2234         chip_rev[1] = 'e';
2235         chip_rev[2] = 'v';
2236         chip_rev[3] = '-';
2237         chip_rev[4] = ioc->attr->asic_rev;
2238         chip_rev[5] = '\0';
2239 }
2240
2241 static void
2242 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2243 {
2244         memset(optrom_ver, 0, BFA_VERSION_LEN);
2245         memcpy(optrom_ver, ioc->attr->optrom_version,
2246                       BFA_VERSION_LEN);
2247 }
2248
2249 static void
2250 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2251 {
2252         memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2253         memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2254 }
2255
2256 static void
2257 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
2258 {
2259         struct bfi_ioc_attr *ioc_attr;
2260
2261         BUG_ON(!(model));
2262         memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2263
2264         ioc_attr = ioc->attr;
2265
2266         /**
2267          * model name
2268          */
2269         snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2270                 BFA_MFG_NAME, ioc_attr->card_type);
2271 }
2272
2273 static enum bfa_ioc_state
2274 bfa_ioc_get_state(struct bfa_ioc *ioc)
2275 {
2276         enum bfa_iocpf_state iocpf_st;
2277         enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2278
2279         if (ioc_st == BFA_IOC_ENABLING ||
2280                 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2281
2282                 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2283
2284                 switch (iocpf_st) {
2285                 case BFA_IOCPF_SEMWAIT:
2286                         ioc_st = BFA_IOC_SEMWAIT;
2287                         break;
2288
2289                 case BFA_IOCPF_HWINIT:
2290                         ioc_st = BFA_IOC_HWINIT;
2291                         break;
2292
2293                 case BFA_IOCPF_FWMISMATCH:
2294                         ioc_st = BFA_IOC_FWMISMATCH;
2295                         break;
2296
2297                 case BFA_IOCPF_FAIL:
2298                         ioc_st = BFA_IOC_FAIL;
2299                         break;
2300
2301                 case BFA_IOCPF_INITFAIL:
2302                         ioc_st = BFA_IOC_INITFAIL;
2303                         break;
2304
2305                 default:
2306                         break;
2307                 }
2308         }
2309         return ioc_st;
2310 }
2311
2312 void
2313 bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2314 {
2315         memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
2316
2317         ioc_attr->state = bfa_ioc_get_state(ioc);
2318         ioc_attr->port_id = ioc->port_id;
2319         ioc_attr->port_mode = ioc->port_mode;
2320
2321         ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2322         ioc_attr->cap_bm = ioc->ad_cap_bm;
2323
2324         ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2325
2326         bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2327
2328         ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2329         ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2330         bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2331 }
2332
/**
 * WWN public
 */
/* Return the port world-wide name from the firmware attribute block. */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}
2341
/* Return the MAC address from the firmware attribute block. */
mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
	return ioc->attr->mac;
}
2347
2348 /**
2349  * Firmware failure detected. Start recovery actions.
2350  */
2351 static void
2352 bfa_ioc_recover(struct bfa_ioc *ioc)
2353 {
2354         pr_crit("Heart Beat of IOC has failed\n");
2355         bfa_ioc_stats(ioc, ioc_hbfails);
2356         bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2357         bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2358 }
2359
/*
 * NOTE(review): effectively a no-op -- it returns early for link-layer
 * IOCs and falls off the end for every other type without doing
 * anything. Presumably a placeholder for WWN validation on FC types;
 * confirm intent before removing.
 */
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}
2366
/**
 * @dg hal_iocpf_pvt BFA IOC PF private functions
 * @{
 */

/* Forward an enable request to the IOCPF state machine. */
static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}
2377
/* Forward a disable request to the IOCPF state machine. */
static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}
2383
/* Report a failure to the IOCPF state machine. */
static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}
2389
/* Report an initialization failure to the IOCPF state machine. */
static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}
2395
/* Report a get-attributes failure to the IOCPF state machine. */
static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}
2401
/* Forward a stop request to the IOCPF state machine. */
static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}
2407
2408 void
2409 bfa_nw_iocpf_timeout(void *ioc_arg)
2410 {
2411         struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
2412         enum bfa_iocpf_state iocpf_st;
2413
2414         iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2415
2416         if (iocpf_st == BFA_IOCPF_HWINIT)
2417                 bfa_ioc_poll_fwinit(ioc);
2418         else
2419                 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2420 }
2421
/* Semaphore timer expiry: retry acquiring the h/w semaphore. */
void
bfa_nw_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc	*ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
2429
2430 static void
2431 bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
2432 {
2433         u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2434
2435         if (fwstate == BFI_IOC_DISABLED) {
2436                 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2437                 return;
2438         }
2439
2440         if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
2441                 bfa_nw_iocpf_timeout(ioc);
2442         } else {
2443                 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2444                 mod_timer(&ioc->iocpf_timer, jiffies +
2445                         msecs_to_jiffies(BFA_IOC_POLL_TOV));
2446         }
2447 }