Merge branch 'for-rmk' of git://git.kernel.org/pub/scm/linux/kernel/git/kgene/linux...
[firefly-linux-kernel-4.4.55.git] / drivers / scsi / bfa / bfa_hw_cb.c
1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  */
17
18 #include "bfad_drv.h"
19 #include "bfa_modules.h"
20 #include "bfi_cbreg.h"
21
22 void
23 bfa_hwcb_reginit(struct bfa_s *bfa)
24 {
25         struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
26         void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
27         int                     i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
28
29         if (fn == 0) {
30                 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
31                 bfa_regs->intr_mask   = (kva + HOSTFN0_INT_MSK);
32         } else {
33                 bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
34                 bfa_regs->intr_mask   = (kva + HOSTFN1_INT_MSK);
35         }
36
37         for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
38                 /*
39                  * CPE registers
40                  */
41                 q = CPE_Q_NUM(fn, i);
42                 bfa_regs->cpe_q_pi[i] = (kva + CPE_Q_PI(q));
43                 bfa_regs->cpe_q_ci[i] = (kva + CPE_Q_CI(q));
44                 bfa_regs->cpe_q_depth[i] = (kva + CPE_Q_DEPTH(q));
45
46                 /*
47                  * RME registers
48                  */
49                 q = CPE_Q_NUM(fn, i);
50                 bfa_regs->rme_q_pi[i] = (kva + RME_Q_PI(q));
51                 bfa_regs->rme_q_ci[i] = (kva + RME_Q_CI(q));
52                 bfa_regs->rme_q_depth[i] = (kva + RME_Q_DEPTH(q));
53         }
54 }
55
/*
 * Request-queue interrupt ack for line-based (INTX) mode.
 * Intentionally empty on crossbow -- no explicit ack register write is
 * performed here (contrast with bfa_hwcb_reqq_ack_msix below).
 */
void
bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
{
}
60
61 static void
62 bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
63 {
64         writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq),
65                         bfa->iocfc.bfa_regs.intr_status);
66 }
67
/*
 * Response-queue interrupt ack for line-based (INTX) mode.
 * Intentionally empty on crossbow -- no explicit ack register write is
 * performed here (contrast with bfa_hwcb_rspq_ack_msix below).
 */
void
bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
{
}
72
73 static void
74 bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
75 {
76         writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
77                         bfa->iocfc.bfa_regs.intr_status);
78 }
79
80 void
81 bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
82                  u32 *num_vecs, u32 *max_vec_bit)
83 {
84 #define __HFN_NUMINTS   13
85         if (bfa_ioc_pcifn(&bfa->ioc) == 0) {
86                 *msix_vecs_bmap = (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
87                                    __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
88                                    __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
89                                    __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
90                                    __HFN_INT_MBOX_LPU0);
91                 *max_vec_bit = __HFN_INT_MBOX_LPU0;
92         } else {
93                 *msix_vecs_bmap = (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
94                                    __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
95                                    __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
96                                    __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
97                                    __HFN_INT_MBOX_LPU1);
98                 *max_vec_bit = __HFN_INT_MBOX_LPU1;
99         }
100
101         *msix_vecs_bmap |= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
102                             __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
103         *num_vecs = __HFN_NUMINTS;
104 }
105
106 /*
107  * No special setup required for crossbow -- vector assignments are implicit.
108  */
109 void
110 bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
111 {
112         int i;
113
114         WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
115
116         bfa->msix.nvecs = nvecs;
117         if (nvecs == 1) {
118                 for (i = 0; i < BFA_MSIX_CB_MAX; i++)
119                         bfa->msix.handler[i] = bfa_msix_all;
120                 return;
121         }
122
123         for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q7; i++)
124                 bfa->msix.handler[i] = bfa_msix_reqq;
125
126         for (i = BFA_MSIX_RME_Q0; i <= BFA_MSIX_RME_Q7; i++)
127                 bfa->msix.handler[i] = bfa_msix_rspq;
128
129         for (; i < BFA_MSIX_CB_MAX; i++)
130                 bfa->msix.handler[i] = bfa_msix_lpu_err;
131 }
132
133 /*
134  * Crossbow -- dummy, interrupts are masked
135  */
/*
 * MSI-X vector install hook.  No-op on crossbow: per the comment above,
 * interrupts are masked and vector assignments are implicit, so there is
 * no per-vector hardware setup to do.
 */
void
bfa_hwcb_msix_install(struct bfa_s *bfa)
{
}
140
/*
 * MSI-X vector uninstall hook.  No-op on crossbow for the same reason as
 * bfa_hwcb_msix_install(): nothing was programmed per vector.
 */
void
bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
{
}
145
146 /*
147  * No special enable/disable -- vector assignments are implicit.
148  */
/*
 * Select the queue-ack callbacks for the current interrupt mode.
 *
 * NOTE(review): the @msix flag is ignored and the *_msix ack routines are
 * installed unconditionally -- presumably the same status-register ack is
 * correct for both INTX and MSI-X on crossbow, but confirm against the
 * hardware spec; the empty bfa_hwcb_reqq_ack/bfa_hwcb_rspq_ack stubs above
 * are never installed here.
 */
void
bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
{
	bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
	bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
}
155
156 void
157 bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
158 {
159         *start = BFA_MSIX_RME_Q0;
160         *end = BFA_MSIX_RME_Q7;
161 }