Merge branch 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip...
[firefly-linux-kernel-4.4.55.git] / drivers / staging / gdm72xx / gdm_qos.c
1 /*
2  * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
3  *
4  * This software is licensed under the terms of the GNU General Public
5  * License version 2, as published by the Free Software Foundation, and
6  * may be copied, distributed, and modified under those terms.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11  * GNU General Public License for more details.
12  */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/etherdevice.h>
17 #include <asm/byteorder.h>
18
19 #include <linux/ip.h>
20 #include <linux/tcp.h>
21 #include <linux/if_ether.h>
22
23 #include "gdm_wimax.h"
24 #include "hci.h"
25 #include "gdm_qos.h"
26
/* Convert a 16-bit big-endian wire value (e.g. ethhdr.h_proto) to host order. */
#define B2H(x)  __be16_to_cpu(x)

/* Upper bound on recycled qos_entry_s nodes kept in the free pool below. */
#define MAX_FREE_LIST_CNT               32
/*
 * Driver-global pool of recycled qos_entry_s nodes, used by
 * alloc_qos_entry()/free_qos_entry() to avoid a kmalloc on every TX packet.
 * NOTE(review): shared across all NICs — presumably intentional; confirm.
 */
static struct {
	struct list_head head;	/* free entries; pushed/popped under lock */
	int cnt;		/* number of entries currently on head */
	spinlock_t lock;	/* protects head and cnt */
} qos_free_list;
35
36 static void init_qos_entry_list(void)
37 {
38         qos_free_list.cnt = 0;
39         INIT_LIST_HEAD(&qos_free_list.head);
40         spin_lock_init(&qos_free_list.lock);
41 }
42
43 static void *alloc_qos_entry(void)
44 {
45         struct qos_entry_s *entry;
46         unsigned long flags;
47
48         spin_lock_irqsave(&qos_free_list.lock, flags);
49         if (qos_free_list.cnt) {
50                 entry = list_entry(qos_free_list.head.prev, struct qos_entry_s,
51                                    list);
52                 list_del(&entry->list);
53                 qos_free_list.cnt--;
54                 spin_unlock_irqrestore(&qos_free_list.lock, flags);
55                 return entry;
56         }
57         spin_unlock_irqrestore(&qos_free_list.lock, flags);
58
59         entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
60         return entry;
61 }
62
63 static void free_qos_entry(void *entry)
64 {
65         struct qos_entry_s *qentry = (struct qos_entry_s *)entry;
66         unsigned long flags;
67
68         spin_lock_irqsave(&qos_free_list.lock, flags);
69         if (qos_free_list.cnt < MAX_FREE_LIST_CNT) {
70                 list_add(&qentry->list, &qos_free_list.head);
71                 qos_free_list.cnt++;
72                 spin_unlock_irqrestore(&qos_free_list.lock, flags);
73                 return;
74         }
75         spin_unlock_irqrestore(&qos_free_list.lock, flags);
76
77         kfree(entry);
78 }
79
80 static void free_qos_entry_list(struct list_head *free_list)
81 {
82         struct qos_entry_s *entry, *n;
83         int total_free = 0;
84
85         list_for_each_entry_safe(entry, n, free_list, list) {
86                 list_del(&entry->list);
87                 kfree(entry);
88                 total_free++;
89         }
90
91         pr_debug("%s: total_free_cnt=%d\n", __func__, total_free);
92 }
93
94 void gdm_qos_init(void *nic_ptr)
95 {
96         struct nic *nic = nic_ptr;
97         struct qos_cb_s *qcb = &nic->qos;
98         int i;
99
100         for (i = 0; i < QOS_MAX; i++) {
101                 INIT_LIST_HEAD(&qcb->qos_list[i]);
102                 qcb->csr[i].qos_buf_count = 0;
103                 qcb->csr[i].enabled = 0;
104         }
105
106         qcb->qos_list_cnt = 0;
107         qcb->qos_null_idx = QOS_MAX-1;
108         qcb->qos_limit_size = 255;
109
110         spin_lock_init(&qcb->qos_lock);
111
112         init_qos_entry_list();
113 }
114
115 void gdm_qos_release_list(void *nic_ptr)
116 {
117         struct nic *nic = nic_ptr;
118         struct qos_cb_s *qcb = &nic->qos;
119         unsigned long flags;
120         struct qos_entry_s *entry, *n;
121         struct list_head free_list;
122         int i;
123
124         INIT_LIST_HEAD(&free_list);
125
126         spin_lock_irqsave(&qcb->qos_lock, flags);
127
128         for (i = 0; i < QOS_MAX; i++) {
129                 qcb->csr[i].qos_buf_count = 0;
130                 qcb->csr[i].enabled = 0;
131         }
132
133         qcb->qos_list_cnt = 0;
134         qcb->qos_null_idx = QOS_MAX-1;
135
136         for (i = 0; i < QOS_MAX; i++) {
137                 list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list) {
138                         list_move_tail(&entry->list, &free_list);
139                 }
140         }
141         spin_unlock_irqrestore(&qcb->qos_lock, flags);
142         free_qos_entry_list(&free_list);
143 }
144
145 static u32 chk_ipv4_rule(struct gdm_wimax_csr_s *csr, u8 *stream, u8 *port)
146 {
147         int i;
148
149         if (csr->classifier_rule_en&IPTYPEOFSERVICE) {
150                 if (((stream[1] & csr->ip2s_mask) < csr->ip2s_lo) ||
151                     ((stream[1] & csr->ip2s_mask) > csr->ip2s_hi))
152                         return 1;
153         }
154
155         if (csr->classifier_rule_en&PROTOCOL) {
156                 if (stream[9] != csr->protocol)
157                         return 1;
158         }
159
160         if (csr->classifier_rule_en&IPMASKEDSRCADDRESS) {
161                 for (i = 0; i < 4; i++) {
162                         if ((stream[12 + i] & csr->ipsrc_addrmask[i]) !=
163                         (csr->ipsrc_addr[i] & csr->ipsrc_addrmask[i]))
164                                 return 1;
165                 }
166         }
167
168         if (csr->classifier_rule_en&IPMASKEDDSTADDRESS) {
169                 for (i = 0; i < 4; i++) {
170                         if ((stream[16 + i] & csr->ipdst_addrmask[i]) !=
171                         (csr->ipdst_addr[i] & csr->ipdst_addrmask[i]))
172                                 return 1;
173                 }
174         }
175
176         if (csr->classifier_rule_en&PROTOCOLSRCPORTRANGE) {
177                 i = ((port[0]<<8)&0xff00)+port[1];
178                 if ((i < csr->srcport_lo) || (i > csr->srcport_hi))
179                         return 1;
180         }
181
182         if (csr->classifier_rule_en&PROTOCOLDSTPORTRANGE) {
183                 i = ((port[2]<<8)&0xff00)+port[3];
184                 if ((i < csr->dstport_lo) || (i > csr->dstport_hi))
185                         return 1;
186         }
187
188         return 0;
189 }
190
191 static u32 get_qos_index(struct nic *nic, u8 *iph, u8 *tcpudph)
192 {
193         u32 IP_ver, i;
194         struct qos_cb_s *qcb = &nic->qos;
195
196         if (iph == NULL || tcpudph == NULL)
197                 return -1;
198
199         IP_ver = (iph[0]>>4)&0xf;
200
201         if (IP_ver != 4)
202                 return -1;
203
204         for (i = 0; i < QOS_MAX; i++) {
205                 if (!qcb->csr[i].enabled)
206                         continue;
207                 if (!qcb->csr[i].classifier_rule_en)
208                         continue;
209                 if (chk_ipv4_rule(&qcb->csr[i], iph, tcpudph) == 0)
210                         return i;
211         }
212
213         return -1;
214 }
215
216 static u32 extract_qos_list(struct nic *nic, struct list_head *head)
217 {
218         struct qos_cb_s *qcb = &nic->qos;
219         struct qos_entry_s *entry;
220         int i;
221
222         INIT_LIST_HEAD(head);
223
224         for (i = 0; i < QOS_MAX; i++) {
225                 if (!qcb->csr[i].enabled)
226                         continue;
227                 if (qcb->csr[i].qos_buf_count >= qcb->qos_limit_size)
228                         continue;
229                 if (list_empty(&qcb->qos_list[i]))
230                         continue;
231
232                 entry = list_entry(qcb->qos_list[i].prev, struct qos_entry_s,
233                                    list);
234
235                 list_move_tail(&entry->list, head);
236                 qcb->csr[i].qos_buf_count++;
237
238                 if (!list_empty(&qcb->qos_list[i]))
239                         netdev_warn(nic->netdev, "Index(%d) is piled!!\n", i);
240         }
241
242         return 0;
243 }
244
245 static void send_qos_list(struct nic *nic, struct list_head *head)
246 {
247         struct qos_entry_s *entry, *n;
248
249         list_for_each_entry_safe(entry, n, head, list) {
250                 list_del(&entry->list);
251                 gdm_wimax_send_tx(entry->skb, entry->dev);
252                 free_qos_entry(entry);
253         }
254 }
255
256 int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev)
257 {
258         struct nic *nic = netdev_priv(dev);
259         int index;
260         struct qos_cb_s *qcb = &nic->qos;
261         unsigned long flags;
262         struct ethhdr *ethh = (struct ethhdr *)(skb->data + HCI_HEADER_SIZE);
263         struct iphdr *iph = (struct iphdr *)((char *)ethh + ETH_HLEN);
264         struct tcphdr *tcph;
265         struct qos_entry_s *entry = NULL;
266         struct list_head send_list;
267         int ret = 0;
268
269         tcph = (struct tcphdr *)iph + iph->ihl*4;
270
271         if (B2H(ethh->h_proto) == ETH_P_IP) {
272                 if (qcb->qos_list_cnt && !qos_free_list.cnt) {
273                         entry = alloc_qos_entry();
274                         entry->skb = skb;
275                         entry->dev = dev;
276                         netdev_dbg(dev, "qcb->qos_list_cnt=%d\n",
277                                    qcb->qos_list_cnt);
278                 }
279
280                 spin_lock_irqsave(&qcb->qos_lock, flags);
281                 if (qcb->qos_list_cnt) {
282                         index = get_qos_index(nic, (u8 *)iph, (u8 *)tcph);
283                         if (index == -1)
284                                 index = qcb->qos_null_idx;
285
286                         if (!entry) {
287                                 entry = alloc_qos_entry();
288                                 entry->skb = skb;
289                                 entry->dev = dev;
290                         }
291
292                         list_add_tail(&entry->list, &qcb->qos_list[index]);
293                         extract_qos_list(nic, &send_list);
294                         spin_unlock_irqrestore(&qcb->qos_lock, flags);
295                         send_qos_list(nic, &send_list);
296                         goto out;
297                 }
298                 spin_unlock_irqrestore(&qcb->qos_lock, flags);
299                 if (entry)
300                         free_qos_entry(entry);
301         }
302
303         ret = gdm_wimax_send_tx(skb, dev);
304 out:
305         return ret;
306 }
307
308 static u32 get_csr(struct qos_cb_s *qcb, u32 SFID, int mode)
309 {
310         int i;
311
312         for (i = 0; i < qcb->qos_list_cnt; i++) {
313                 if (qcb->csr[i].SFID == SFID)
314                         return i;
315         }
316
317         if (mode) {
318                 for (i = 0; i < QOS_MAX; i++) {
319                         if (qcb->csr[i].enabled == 0) {
320                                 qcb->csr[i].enabled = 1;
321                                 qcb->qos_list_cnt++;
322                                 return i;
323                         }
324                 }
325         }
326         return -1;
327 }
328
329 #define QOS_CHANGE_DEL  0xFC
330 #define QOS_ADD         0xFD
331 #define QOS_REPORT      0xFE
332
333 void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size)
334 {
335         struct nic *nic = nic_ptr;
336         u32 i, SFID, index, pos;
337         u8 sub_cmd_evt;
338         struct qos_cb_s *qcb = &nic->qos;
339         struct qos_entry_s *entry, *n;
340         struct list_head send_list;
341         struct list_head free_list;
342         unsigned long flags;
343
344         sub_cmd_evt = (u8)buf[4];
345
346         if (sub_cmd_evt == QOS_REPORT) {
347                 spin_lock_irqsave(&qcb->qos_lock, flags);
348                 for (i = 0; i < qcb->qos_list_cnt; i++) {
349                         SFID = ((buf[(i*5)+6]<<24)&0xff000000);
350                         SFID += ((buf[(i*5)+7]<<16)&0xff0000);
351                         SFID += ((buf[(i*5)+8]<<8)&0xff00);
352                         SFID += (buf[(i*5)+9]);
353                         index = get_csr(qcb, SFID, 0);
354                         if (index == -1) {
355                                 spin_unlock_irqrestore(&qcb->qos_lock, flags);
356                                 netdev_err(nic->netdev, "QoS ERROR: No SF\n");
357                                 return;
358                         }
359                         qcb->csr[index].qos_buf_count = buf[(i*5)+10];
360                 }
361
362                 extract_qos_list(nic, &send_list);
363                 spin_unlock_irqrestore(&qcb->qos_lock, flags);
364                 send_qos_list(nic, &send_list);
365                 return;
366         }
367
368         /* sub_cmd_evt == QOS_ADD || sub_cmd_evt == QOS_CHANG_DEL */
369         pos = 6;
370         SFID = ((buf[pos++]<<24)&0xff000000);
371         SFID += ((buf[pos++]<<16)&0xff0000);
372         SFID += ((buf[pos++]<<8)&0xff00);
373         SFID += (buf[pos++]);
374
375         index = get_csr(qcb, SFID, 1);
376         if (index == -1) {
377                 netdev_err(nic->netdev,
378                            "QoS ERROR: csr Update Error / Wrong index (%d)\n",
379                            index);
380                 return;
381         }
382
383         if (sub_cmd_evt == QOS_ADD) {
384                 netdev_dbg(nic->netdev, "QOS_ADD SFID = 0x%x, index=%d\n",
385                            SFID, index);
386
387                 spin_lock_irqsave(&qcb->qos_lock, flags);
388                 qcb->csr[index].SFID = SFID;
389                 qcb->csr[index].classifier_rule_en = ((buf[pos++]<<8)&0xff00);
390                 qcb->csr[index].classifier_rule_en += buf[pos++];
391                 if (qcb->csr[index].classifier_rule_en == 0)
392                         qcb->qos_null_idx = index;
393                 qcb->csr[index].ip2s_mask = buf[pos++];
394                 qcb->csr[index].ip2s_lo = buf[pos++];
395                 qcb->csr[index].ip2s_hi = buf[pos++];
396                 qcb->csr[index].protocol = buf[pos++];
397                 qcb->csr[index].ipsrc_addrmask[0] = buf[pos++];
398                 qcb->csr[index].ipsrc_addrmask[1] = buf[pos++];
399                 qcb->csr[index].ipsrc_addrmask[2] = buf[pos++];
400                 qcb->csr[index].ipsrc_addrmask[3] = buf[pos++];
401                 qcb->csr[index].ipsrc_addr[0] = buf[pos++];
402                 qcb->csr[index].ipsrc_addr[1] = buf[pos++];
403                 qcb->csr[index].ipsrc_addr[2] = buf[pos++];
404                 qcb->csr[index].ipsrc_addr[3] = buf[pos++];
405                 qcb->csr[index].ipdst_addrmask[0] = buf[pos++];
406                 qcb->csr[index].ipdst_addrmask[1] = buf[pos++];
407                 qcb->csr[index].ipdst_addrmask[2] = buf[pos++];
408                 qcb->csr[index].ipdst_addrmask[3] = buf[pos++];
409                 qcb->csr[index].ipdst_addr[0] = buf[pos++];
410                 qcb->csr[index].ipdst_addr[1] = buf[pos++];
411                 qcb->csr[index].ipdst_addr[2] = buf[pos++];
412                 qcb->csr[index].ipdst_addr[3] = buf[pos++];
413                 qcb->csr[index].srcport_lo = ((buf[pos++]<<8)&0xff00);
414                 qcb->csr[index].srcport_lo += buf[pos++];
415                 qcb->csr[index].srcport_hi = ((buf[pos++]<<8)&0xff00);
416                 qcb->csr[index].srcport_hi += buf[pos++];
417                 qcb->csr[index].dstport_lo = ((buf[pos++]<<8)&0xff00);
418                 qcb->csr[index].dstport_lo += buf[pos++];
419                 qcb->csr[index].dstport_hi = ((buf[pos++]<<8)&0xff00);
420                 qcb->csr[index].dstport_hi += buf[pos++];
421
422                 qcb->qos_limit_size = 254/qcb->qos_list_cnt;
423                 spin_unlock_irqrestore(&qcb->qos_lock, flags);
424         } else if (sub_cmd_evt == QOS_CHANGE_DEL) {
425                 netdev_dbg(nic->netdev, "QOS_CHANGE_DEL SFID = 0x%x, index=%d\n",
426                            SFID, index);
427
428                 INIT_LIST_HEAD(&free_list);
429
430                 spin_lock_irqsave(&qcb->qos_lock, flags);
431                 qcb->csr[index].enabled = 0;
432                 qcb->qos_list_cnt--;
433                 qcb->qos_limit_size = 254/qcb->qos_list_cnt;
434
435                 list_for_each_entry_safe(entry, n, &qcb->qos_list[index],
436                                          list) {
437                         list_move_tail(&entry->list, &free_list);
438                 }
439                 spin_unlock_irqrestore(&qcb->qos_lock, flags);
440                 free_qos_entry_list(&free_list);
441         }
442 }