rocker: implement L2 bridge offloading
[firefly-linux-kernel-4.4.55.git] / drivers / net / ethernet / rocker / rocker.c
1 /*
2  * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
3  * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
4  * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/interrupt.h>
16 #include <linux/sched.h>
17 #include <linux/wait.h>
18 #include <linux/spinlock.h>
19 #include <linux/hashtable.h>
20 #include <linux/crc32.h>
21 #include <linux/sort.h>
22 #include <linux/random.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/skbuff.h>
26 #include <linux/socket.h>
27 #include <linux/etherdevice.h>
28 #include <linux/ethtool.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
31 #include <linux/if_bridge.h>
32 #include <linux/bitops.h>
33 #include <net/switchdev.h>
34 #include <net/rtnetlink.h>
35 #include <asm-generic/io-64-nonatomic-lo-hi.h>
36 #include <generated/utsrelease.h>
37
38 #include "rocker.h"
39
/* Driver identity string; used for IRQ names and PCI driver registration. */
static const char rocker_driver_name[] = "rocker";

/* Rocker is a qemu-emulated switch device with a Red Hat PCI vendor ID. */
static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};
46
/* Key identifying one OF-DPA flow table entry.  The union arm in use is
 * selected by tbl_id; the whole struct is CRC-hashed for lookup (see
 * key_crc32 in struct rocker_flow_tbl_entry), so the entire key bytes
 * participate in matching.
 */
struct rocker_flow_tbl_key {
	u32 priority;				/* higher priority wins */
	enum rocker_of_dpa_table_id tbl_id;	/* selects union arm below */
	union {
		/* ingress port table match */
		struct {
			u32 in_lport;
			u32 in_lport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		/* VLAN table match / assignment */
		struct {
			u32 in_lport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;		/* frame arrived untagged */
			__be16 new_vlan_id;	/* VLAN to push when untagged */
		} vlan;
		/* termination MAC table (router/local MAC) match */
		struct {
			u32 in_lport;
			u32 in_lport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;	/* also punt a copy to host */
		} term_mac;
		/* unicast IPv4 routing table match */
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;		/* next-hop group action */
		} ucast_routing;
		/* L2 bridging table match */
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;	/* eth_dst field is valid */
			int has_eth_dst_mask;	/* eth_dst_mask field is valid */
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		/* ACL policy table match (most specific, 5-tuple-ish) */
		struct {
			u32 in_lport;
			u32 in_lport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
111
/* One software-shadowed flow table entry, hashed by key_crc32. */
struct rocker_flow_tbl_entry {
	struct hlist_node entry;	/* linkage in rocker->flow_tbl */
	u32 ref_count;			/* entry shared by multiple users */
	u64 cookie;			/* id the device knows this flow by */
	struct rocker_flow_tbl_key key;
	u32 key_crc32; /* key */
};
119
/* One software-shadowed group table entry, hashed by group_id.  The union
 * arm in use depends on the group type encoded in group_id.
 */
struct rocker_group_tbl_entry {
	struct hlist_node entry;	/* linkage in rocker->group_tbl */
	u32 cmd;			/* pending add/mod/del command */
	u32 group_id; /* key */
	u16 group_count;		/* number of member groups */
	u32 *group_ids;			/* member group ids (group_count of them) */
	union {
		struct {
			u8 pop_vlan;	/* strip VLAN tag on egress */
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;	/* chained next group */
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;	/* decrement/check TTL when routing */
			u32 group_id;	/* chained next group */
		} l3_unicast;
	};
};
145
/* One FDB (forwarding database) entry, hashed by CRC of the nested key. */
struct rocker_fdb_tbl_entry {
	struct hlist_node entry;	/* linkage in rocker->fdb_tbl */
	u32 key_crc32; /* key */
	bool learned;			/* learned from traffic vs. static */
	struct rocker_fdb_tbl_key {
		u32 lport;		/* logical port MAC was seen on */
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
156
/* Maps a netdev ifindex to the internal VLAN assigned to untagged traffic. */
struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;	/* linkage in rocker->internal_vlan_tbl */
	int ifindex; /* key */
	u32 ref_count;			/* ports sharing this internal VLAN */
	__be16 vlan_id;			/* allocated internal VLAN id */
};
163
/* Host-side bookkeeping for one DMA descriptor and its data buffer. */
struct rocker_desc_info {
	char *data; /* mapped */
	size_t data_size;		/* bytes available in data */
	size_t tlv_size;		/* bytes of TLVs currently in data */
	struct rocker_desc *desc;	/* the hw descriptor this shadows */
	DEFINE_DMA_UNMAP_ADDR(mapaddr);	/* dma handle for data, for unmap */
};

/* One DMA descriptor ring (cmd, event, tx or rx) shared with the device. */
struct rocker_dma_ring_info {
	size_t size;			/* number of descriptors */
	u32 head;			/* next slot producer fills */
	u32 tail;			/* next slot consumer reaps */
	struct rocker_desc *desc; /* mapped */
	dma_addr_t mapaddr;		/* dma address of desc array */
	struct rocker_desc_info *desc_info;	/* per-desc host state */
	unsigned int type;		/* ROCKER_DMA_* ring selector */
};
181
struct rocker;

/* Indices into rocker_port->ctrls[]: which control-traffic ACL entries
 * are currently installed for the port.
 */
enum {
	ROCKER_CTRL_LINK_LOCAL_MCAST,	/* 01:80:c2:00:00:0x link-local */
	ROCKER_CTRL_LOCAL_ARP,		/* ARP to port's own address */
	ROCKER_CTRL_IPV4_MCAST,		/* 01:00:5e:... IPv4 multicast */
	ROCKER_CTRL_IPV6_MCAST,		/* 33:33:... IPv6 multicast */
	ROCKER_CTRL_DFLT_BRIDGING,	/* default bridging (flood) rule */
	ROCKER_CTRL_MAX,
};

/* Internal VLANs are carved out of the top of the 12-bit VLAN space and
 * assigned to untagged traffic on unbridged ports.
 */
#define ROCKER_INTERNAL_VLAN_ID_BASE    0x0f00
#define ROCKER_N_INTERNAL_VLANS         255
#define ROCKER_VLAN_BITMAP_LEN          BITS_TO_LONGS(VLAN_N_VID)
#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
197
/* Per-port state for one front-panel port of the switch. */
struct rocker_port {
	struct net_device *dev;		/* netdev representing this port */
	struct net_device *bridge_dev;	/* bridge we're enslaved to, or NULL */
	struct rocker *rocker;		/* owning device */
	unsigned int port_number;	/* 0-based index into rocker->ports */
	u32 lport;			/* logical port id used by hw */
	__be16 internal_vlan_id;	/* VLAN assigned to untagged traffic */
	int stp_state;			/* current BR_STATE_* */
	bool ctrls[ROCKER_CTRL_MAX];	/* which ctrl ACLs are installed */
	unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];	/* enabled VIDs */
	struct napi_struct napi_tx;
	struct napi_struct napi_rx;
	struct rocker_dma_ring_info tx_ring;
	struct rocker_dma_ring_info rx_ring;
};
213
/* Per-PCI-device state: rings shared with the device plus the software
 * shadows of the hardware flow/group/FDB tables, each with its own lock.
 */
struct rocker {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;		/* BAR0 register mapping */
	struct msix_entry *msix_entries;
	unsigned int port_count;
	struct rocker_port **ports;
	struct {
		u64 id;			/* switch id reported by hw */
	} hw;
	spinlock_t cmd_ring_lock;	/* serializes cmd ring producers */
	struct rocker_dma_ring_info cmd_ring;
	struct rocker_dma_ring_info event_ring;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;
	u64 flow_tbl_next_cookie;	/* next flow cookie to hand out */
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;
	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;
};
237
/* Well-known MAC addresses and masks used when installing ctrl ACLs. */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
247
/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 * Priorities are only compared within a single table, so values
 * may repeat across different table types.
 */

enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_UNICAST_ROUTING = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};
269
270 static bool rocker_vlan_id_is_internal(__be16 vlan_id)
271 {
272         u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
273         u16 end = 0xffe;
274         u16 _vlan_id = ntohs(vlan_id);
275
276         return (_vlan_id >= start && _vlan_id <= end);
277 }
278
279 static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
280                                       u16 vid, bool *pop_vlan)
281 {
282         __be16 vlan_id;
283
284         if (pop_vlan)
285                 *pop_vlan = false;
286         vlan_id = htons(vid);
287         if (!vlan_id) {
288                 vlan_id = rocker_port->internal_vlan_id;
289                 if (pop_vlan)
290                         *pop_vlan = true;
291         }
292
293         return vlan_id;
294 }
295
296 static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
297                                    __be16 vlan_id)
298 {
299         if (rocker_vlan_id_is_internal(vlan_id))
300                 return 0;
301
302         return ntohs(vlan_id);
303 }
304
305 static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
306 {
307         return !!rocker_port->bridge_dev;
308 }
309
/* Completion-style helper used to wait for a cmd/test interrupt. */
struct rocker_wait {
	wait_queue_head_t wait;
	bool done;	/* set by the waker before wake_up() */
	bool nowait;	/* caller does not wait; waker frees the object */
};
315
316 static void rocker_wait_reset(struct rocker_wait *wait)
317 {
318         wait->done = false;
319         wait->nowait = false;
320 }
321
/* One-time initialization of a wait object (waitqueue head + flags). */
static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}
327
328 static struct rocker_wait *rocker_wait_create(gfp_t gfp)
329 {
330         struct rocker_wait *wait;
331
332         wait = kmalloc(sizeof(*wait), gfp);
333         if (!wait)
334                 return NULL;
335         rocker_wait_init(wait);
336         return wait;
337 }
338
/* Free a wait object created by rocker_wait_create(). */
static void rocker_wait_destroy(struct rocker_wait *wait)
{
	kfree(wait);
}
343
344 static bool rocker_wait_event_timeout(struct rocker_wait *wait,
345                                       unsigned long timeout)
346 {
347         wait_event_timeout(wait->wait, wait->done, HZ / 10);
348         if (!wait->done)
349                 return false;
350         return true;
351 }
352
/* Complete a wait object.  done must be set before wake_up() so the
 * woken waiter observes it.
 */
static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}
358
359 static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
360 {
361         return rocker->msix_entries[vector].vector;
362 }
363
364 static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
365 {
366         return rocker_msix_vector(rocker_port->rocker,
367                                   ROCKER_MSIX_VEC_TX(rocker_port->port_number));
368 }
369
370 static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
371 {
372         return rocker_msix_vector(rocker_port->rocker,
373                                   ROCKER_MSIX_VEC_RX(rocker_port->port_number));
374 }
375
/* MMIO accessors; 'reg' is the register name without the ROCKER_ prefix. */
#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
384
385 /*****************************
386  * HW basic testing functions
387  *****************************/
388
/* Sanity-check register access: the emulated device echoes back twice
 * the value written to its TEST registers.  Random values are masked so
 * that doubling cannot overflow the register width.
 */
static int rocker_reg_test(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	/* 32-bit test: keep rnd below 2^31 so rnd * 2 fits in 32 bits. */
	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	/* 64-bit test: build a 63-bit random value the same way. */
	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}
418
419 static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
420                                u32 test_type, dma_addr_t dma_handle,
421                                unsigned char *buf, unsigned char *expect,
422                                size_t size)
423 {
424         struct pci_dev *pdev = rocker->pdev;
425         int i;
426
427         rocker_wait_reset(wait);
428         rocker_write32(rocker, TEST_DMA_CTRL, test_type);
429
430         if (!rocker_wait_event_timeout(wait, HZ / 10)) {
431                 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
432                 return -EIO;
433         }
434
435         for (i = 0; i < size; i++) {
436                 if (buf[i] != expect[i]) {
437                         dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
438                                 buf[i], i, expect[i]);
439                         return -EIO;
440                 }
441         }
442         return 0;
443 }
444
#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

/* Run the fill/clear/invert DMA tests against a buffer starting at the
 * given byte offset within an allocation, to exercise unaligned DMA.
 * The allocation holds the DMA buffer followed by an equally-sized
 * 'expect' shadow used for comparison.
 */
static int rocker_dma_test_offset(struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	/* Point the device at the test buffer before any test op. */
	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	/* FILL: device writes the fill pattern over the whole buffer. */
	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	/* CLEAR: device zeroes the buffer. */
	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	/* INVERT: device flips every bit of the random contents. */
	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}
507
/* Run the DMA tests at every byte offset within one 8-byte alignment
 * window; stops at the first failure.
 */
static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
{
	int offset;

	for (offset = 0; offset < 8; offset++) {
		int err = rocker_dma_test_offset(rocker, wait, offset);

		if (err)
			return err;
	}
	return 0;
}
520
521 static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
522 {
523         struct rocker_wait *wait = dev_id;
524
525         rocker_wait_wake_up(wait);
526
527         return IRQ_HANDLED;
528 }
529
530 static int rocker_basic_hw_test(struct rocker *rocker)
531 {
532         struct pci_dev *pdev = rocker->pdev;
533         struct rocker_wait wait;
534         int err;
535
536         err = rocker_reg_test(rocker);
537         if (err) {
538                 dev_err(&pdev->dev, "reg test failed\n");
539                 return err;
540         }
541
542         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
543                           rocker_test_irq_handler, 0,
544                           rocker_driver_name, &wait);
545         if (err) {
546                 dev_err(&pdev->dev, "cannot assign test irq\n");
547                 return err;
548         }
549
550         rocker_wait_init(&wait);
551         rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
552
553         if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
554                 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
555                 err = -EIO;
556                 goto free_irq;
557         }
558
559         err = rocker_dma_test(rocker, &wait);
560         if (err)
561                 dev_err(&pdev->dev, "dma test failed\n");
562
563 free_irq:
564         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
565         return err;
566 }
567
568 /******
569  * TLV
570  ******/
571
/* TLVs are packed with 8-byte alignment for both header and payload. */
#define ROCKER_TLV_ALIGNTO 8U
#define ROCKER_TLV_ALIGN(len) \
	(((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))

/*  <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 * |             Header          | Pad |           Payload           | Pad |
 * |      (struct rocker_tlv)    | ing |                             | ing |
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 *  <--------------------------- tlv->len -------------------------->
 */
584
585 static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
586                                           int *remaining)
587 {
588         int totlen = ROCKER_TLV_ALIGN(tlv->len);
589
590         *remaining -= totlen;
591         return (struct rocker_tlv *) ((char *) tlv + totlen);
592 }
593
594 static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
595 {
596         return remaining >= (int) ROCKER_TLV_HDRLEN &&
597                tlv->len >= ROCKER_TLV_HDRLEN &&
598                tlv->len <= remaining;
599 }
600
/* Iterate over a flat TLV stream of 'len' bytes starting at 'head'. */
#define rocker_tlv_for_each(pos, head, len, rem)	\
	for (pos = head, rem = len;			\
	     rocker_tlv_ok(pos, rem);			\
	     pos = rocker_tlv_next(pos, &(rem)))

/* Iterate over the TLVs nested inside the payload of 'tlv'. */
#define rocker_tlv_for_each_nested(pos, tlv, rem)	\
	rocker_tlv_for_each(pos, rocker_tlv_data(tlv),	\
			    rocker_tlv_len(tlv), rem)
609
/* Header plus unpadded payload length. */
static int rocker_tlv_attr_size(int payload)
{
	return ROCKER_TLV_HDRLEN + payload;
}

/* Attribute size rounded up to TLV alignment (bytes consumed in stream). */
static int rocker_tlv_total_size(int payload)
{
	return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
}

/* Number of trailing pad bytes after the payload. */
static int rocker_tlv_padlen(int payload)
{
	return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
}
624
625 static int rocker_tlv_type(const struct rocker_tlv *tlv)
626 {
627         return tlv->type;
628 }
629
630 static void *rocker_tlv_data(const struct rocker_tlv *tlv)
631 {
632         return (char *) tlv + ROCKER_TLV_HDRLEN;
633 }
634
635 static int rocker_tlv_len(const struct rocker_tlv *tlv)
636 {
637         return tlv->len - ROCKER_TLV_HDRLEN;
638 }
639
640 static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
641 {
642         return *(u8 *) rocker_tlv_data(tlv);
643 }
644
645 static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
646 {
647         return *(u16 *) rocker_tlv_data(tlv);
648 }
649
650 static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
651 {
652         return *(u32 *) rocker_tlv_data(tlv);
653 }
654
655 static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
656 {
657         return *(u64 *) rocker_tlv_data(tlv);
658 }
659
660 static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
661                              const char *buf, int buf_len)
662 {
663         const struct rocker_tlv *tlv;
664         const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
665         int rem;
666
667         memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));
668
669         rocker_tlv_for_each(tlv, head, buf_len, rem) {
670                 u32 type = rocker_tlv_type(tlv);
671
672                 if (type > 0 && type <= maxtype)
673                         tb[type] = (struct rocker_tlv *) tlv;
674         }
675 }
676
677 static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
678                                     const struct rocker_tlv *tlv)
679 {
680         rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
681                          rocker_tlv_len(tlv));
682 }
683
684 static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
685                                   struct rocker_desc_info *desc_info)
686 {
687         rocker_tlv_parse(tb, maxtype, desc_info->data,
688                          desc_info->desc->tlv_size);
689 }
690
691 static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
692 {
693         return (struct rocker_tlv *) ((char *) desc_info->data +
694                                                desc_info->tlv_size);
695 }
696
697 static int rocker_tlv_put(struct rocker_desc_info *desc_info,
698                           int attrtype, int attrlen, const void *data)
699 {
700         int tail_room = desc_info->data_size - desc_info->tlv_size;
701         int total_size = rocker_tlv_total_size(attrlen);
702         struct rocker_tlv *tlv;
703
704         if (unlikely(tail_room < total_size))
705                 return -EMSGSIZE;
706
707         tlv = rocker_tlv_start(desc_info);
708         desc_info->tlv_size += total_size;
709         tlv->type = attrtype;
710         tlv->len = rocker_tlv_attr_size(attrlen);
711         memcpy(rocker_tlv_data(tlv), data, attrlen);
712         memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
713         return 0;
714 }
715
716 static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
717                              int attrtype, u8 value)
718 {
719         return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
720 }
721
722 static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
723                               int attrtype, u16 value)
724 {
725         return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
726 }
727
728 static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
729                               int attrtype, u32 value)
730 {
731         return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
732 }
733
734 static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
735                               int attrtype, u64 value)
736 {
737         return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
738 }
739
740 static struct rocker_tlv *
741 rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
742 {
743         struct rocker_tlv *start = rocker_tlv_start(desc_info);
744
745         if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
746                 return NULL;
747
748         return start;
749 }
750
751 static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
752                                 struct rocker_tlv *start)
753 {
754         start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
755 }
756
757 static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
758                                    struct rocker_tlv *start)
759 {
760         desc_info->tlv_size = (char *) start - desc_info->data;
761 }
762
763 /******************************************
764  * DMA rings and descriptors manipulations
765  ******************************************/
766
767 static u32 __pos_inc(u32 pos, size_t limit)
768 {
769         return ++pos == limit ? 0 : pos;
770 }
771
772 static int rocker_desc_err(struct rocker_desc_info *desc_info)
773 {
774         return -(desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN);
775 }
776
777 static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
778 {
779         desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
780 }
781
782 static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
783 {
784         u32 comp_err = desc_info->desc->comp_err;
785
786         return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
787 }
788
/* The descriptor cookie carries an opaque host pointer (e.g. a wait
 * object or skb) across the round-trip through the device.
 */
static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
{
	return (void *) desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (long) ptr;
}
799
800 static struct rocker_desc_info *
801 rocker_desc_head_get(struct rocker_dma_ring_info *info)
802 {
803         static struct rocker_desc_info *desc_info;
804         u32 head = __pos_inc(info->head, info->size);
805
806         desc_info = &info->desc_info[info->head];
807         if (head == info->tail)
808                 return NULL; /* ring full */
809         desc_info->tlv_size = 0;
810         return desc_info;
811 }
812
813 static void rocker_desc_commit(struct rocker_desc_info *desc_info)
814 {
815         desc_info->desc->buf_size = desc_info->data_size;
816         desc_info->desc->tlv_size = desc_info->tlv_size;
817 }
818
/* Hand a filled descriptor to the device: commit sizes first, then
 * advance head and ring the doorbell register.  Order matters - the
 * device may consume the descriptor as soon as head is written.
 */
static void rocker_desc_head_set(struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}
830
831 static struct rocker_desc_info *
832 rocker_desc_tail_get(struct rocker_dma_ring_info *info)
833 {
834         static struct rocker_desc_info *desc_info;
835
836         if (info->tail == info->head)
837                 return NULL; /* nothing to be done between head and tail */
838         desc_info = &info->desc_info[info->tail];
839         if (!rocker_desc_gen(desc_info))
840                 return NULL; /* gen bit not set, desc is not ready yet */
841         info->tail = __pos_inc(info->tail, info->size);
842         desc_info->tlv_size = desc_info->desc->tlv_size;
843         return desc_info;
844 }
845
846 static void rocker_dma_ring_credits_set(struct rocker *rocker,
847                                         struct rocker_dma_ring_info *info,
848                                         u32 credits)
849 {
850         if (credits)
851                 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
852 }
853
854 static unsigned long rocker_dma_ring_size_fix(size_t size)
855 {
856         return max(ROCKER_DMA_SIZE_MIN,
857                    min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
858 }
859
/* Allocate a descriptor ring (coherent DMA memory plus per-desc host
 * state) and register it with the device.  'size' must already be
 * fixed up by rocker_dma_ring_size_fix().
 */
static int rocker_dma_ring_create(struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	/* Link each host-side desc_info to its hw descriptor slot. */
	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	/* Reset the ring in hw, then tell it where the ring lives. */
	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}
895
/* Detach the ring from the device (zero its address) before freeing
 * the coherent descriptor memory and the host-side shadow array.
 */
static void rocker_dma_ring_destroy(struct rocker *rocker,
				    struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}
906
/* Hand ownership of an entire (freshly created) ring to the hardware.
 *
 * Used for rings where the device is the producer (e.g. the event
 * ring).  Note the last descriptor is only committed, not
 * head-advanced: head must not catch up with tail, which would make
 * the ring look empty.
 */
static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
                                             struct rocker_dma_ring_info *info)
{
        int i;

        /* Only valid on a pristine ring. */
        BUG_ON(info->head || info->tail);

        /* When ring is consumer, we need to advance head for each desc.
         * That tells hw that the desc is ready to be used by it.
         */
        for (i = 0; i < info->size - 1; i++)
                rocker_desc_head_set(rocker, info, &info->desc_info[i]);
        rocker_desc_commit(&info->desc_info[i]);
}
921
922 static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
923                                       struct rocker_dma_ring_info *info,
924                                       int direction, size_t buf_size)
925 {
926         struct pci_dev *pdev = rocker->pdev;
927         int i;
928         int err;
929
930         for (i = 0; i < info->size; i++) {
931                 struct rocker_desc_info *desc_info = &info->desc_info[i];
932                 struct rocker_desc *desc = &info->desc[i];
933                 dma_addr_t dma_handle;
934                 char *buf;
935
936                 buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
937                 if (!buf) {
938                         err = -ENOMEM;
939                         goto rollback;
940                 }
941
942                 dma_handle = pci_map_single(pdev, buf, buf_size, direction);
943                 if (pci_dma_mapping_error(pdev, dma_handle)) {
944                         kfree(buf);
945                         err = -EIO;
946                         goto rollback;
947                 }
948
949                 desc_info->data = buf;
950                 desc_info->data_size = buf_size;
951                 dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
952
953                 desc->buf_addr = dma_handle;
954                 desc->buf_size = buf_size;
955         }
956         return 0;
957
958 rollback:
959         for (i--; i >= 0; i--) {
960                 struct rocker_desc_info *desc_info = &info->desc_info[i];
961
962                 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
963                                  desc_info->data_size, direction);
964                 kfree(desc_info->data);
965         }
966         return err;
967 }
968
969 static void rocker_dma_ring_bufs_free(struct rocker *rocker,
970                                       struct rocker_dma_ring_info *info,
971                                       int direction)
972 {
973         struct pci_dev *pdev = rocker->pdev;
974         int i;
975
976         for (i = 0; i < info->size; i++) {
977                 struct rocker_desc_info *desc_info = &info->desc_info[i];
978                 struct rocker_desc *desc = &info->desc[i];
979
980                 desc->buf_addr = 0;
981                 desc->buf_size = 0;
982                 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
983                                  desc_info->data_size, direction);
984                 kfree(desc_info->data);
985         }
986 }
987
/* Bring up the device-global DMA rings: the command ring (host ->
 * device requests) and the event ring (device -> host notifications).
 * On failure everything allocated so far is unwound in reverse order.
 * Returns 0 or a negative errno.
 */
static int rocker_dma_rings_init(struct rocker *rocker)
{
        struct pci_dev *pdev = rocker->pdev;
        int err;

        err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
                                     ROCKER_DMA_CMD_DEFAULT_SIZE,
                                     &rocker->cmd_ring);
        if (err) {
                dev_err(&pdev->dev, "failed to create command dma ring\n");
                return err;
        }

        spin_lock_init(&rocker->cmd_ring_lock);

        /* Command buffers are written by the host and completed by the
         * device, hence the bidirectional mapping.
         */
        err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
                                         PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
        if (err) {
                dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
                goto err_dma_cmd_ring_bufs_alloc;
        }

        err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
                                     ROCKER_DMA_EVENT_DEFAULT_SIZE,
                                     &rocker->event_ring);
        if (err) {
                dev_err(&pdev->dev, "failed to create event dma ring\n");
                goto err_dma_event_ring_create;
        }

        /* Event buffers are only written by the device. */
        err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
                                         PCI_DMA_FROMDEVICE, PAGE_SIZE);
        if (err) {
                dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
                goto err_dma_event_ring_bufs_alloc;
        }
        /* The device produces events, so give it the whole ring. */
        rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
        return 0;

err_dma_event_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
        rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
                                  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
        return err;
}
1036
1037 static void rocker_dma_rings_fini(struct rocker *rocker)
1038 {
1039         rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
1040                                   PCI_DMA_BIDIRECTIONAL);
1041         rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1042         rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1043                                   PCI_DMA_BIDIRECTIONAL);
1044         rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1045 }
1046
1047 static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
1048                                       struct rocker_port *rocker_port,
1049                                       struct rocker_desc_info *desc_info,
1050                                       struct sk_buff *skb, size_t buf_len)
1051 {
1052         struct pci_dev *pdev = rocker->pdev;
1053         dma_addr_t dma_handle;
1054
1055         dma_handle = pci_map_single(pdev, skb->data, buf_len,
1056                                     PCI_DMA_FROMDEVICE);
1057         if (pci_dma_mapping_error(pdev, dma_handle))
1058                 return -EIO;
1059         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
1060                 goto tlv_put_failure;
1061         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
1062                 goto tlv_put_failure;
1063         return 0;
1064
1065 tlv_put_failure:
1066         pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
1067         desc_info->tlv_size = 0;
1068         return -EMSGSIZE;
1069 }
1070
1071 static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
1072 {
1073         return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1074 }
1075
1076 static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
1077                                         struct rocker_port *rocker_port,
1078                                         struct rocker_desc_info *desc_info)
1079 {
1080         struct net_device *dev = rocker_port->dev;
1081         struct sk_buff *skb;
1082         size_t buf_len = rocker_port_rx_buf_len(rocker_port);
1083         int err;
1084
1085         /* Ensure that hw will see tlv_size zero in case of an error.
1086          * That tells hw to use another descriptor.
1087          */
1088         rocker_desc_cookie_ptr_set(desc_info, NULL);
1089         desc_info->tlv_size = 0;
1090
1091         skb = netdev_alloc_skb_ip_align(dev, buf_len);
1092         if (!skb)
1093                 return -ENOMEM;
1094         err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
1095                                          skb, buf_len);
1096         if (err) {
1097                 dev_kfree_skb_any(skb);
1098                 return err;
1099         }
1100         rocker_desc_cookie_ptr_set(desc_info, skb);
1101         return 0;
1102 }
1103
1104 static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
1105                                          struct rocker_tlv **attrs)
1106 {
1107         struct pci_dev *pdev = rocker->pdev;
1108         dma_addr_t dma_handle;
1109         size_t len;
1110
1111         if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1112             !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1113                 return;
1114         dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1115         len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1116         pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
1117 }
1118
1119 static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
1120                                         struct rocker_desc_info *desc_info)
1121 {
1122         struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
1123         struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
1124
1125         if (!skb)
1126                 return;
1127         rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
1128         rocker_dma_rx_ring_skb_unmap(rocker, attrs);
1129         dev_kfree_skb_any(skb);
1130 }
1131
1132 static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
1133                                          struct rocker_port *rocker_port)
1134 {
1135         struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1136         int i;
1137         int err;
1138
1139         for (i = 0; i < rx_ring->size; i++) {
1140                 err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
1141                                                    &rx_ring->desc_info[i]);
1142                 if (err)
1143                         goto rollback;
1144         }
1145         return 0;
1146
1147 rollback:
1148         for (i--; i >= 0; i--)
1149                 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1150         return err;
1151 }
1152
1153 static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
1154                                          struct rocker_port *rocker_port)
1155 {
1156         struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1157         int i;
1158
1159         for (i = 0; i < rx_ring->size; i++)
1160                 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1161 }
1162
/* Bring up one port's TX and RX DMA rings, including the rx skbs.
 * On failure everything allocated so far is unwound in reverse order.
 * Returns 0 or a negative errno.
 */
static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
        struct rocker *rocker = rocker_port->rocker;
        int err;

        err = rocker_dma_ring_create(rocker,
                                     ROCKER_DMA_TX(rocker_port->port_number),
                                     ROCKER_DMA_TX_DEFAULT_SIZE,
                                     &rocker_port->tx_ring);
        if (err) {
                netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
                return err;
        }

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
                                         PCI_DMA_TODEVICE,
                                         ROCKER_DMA_TX_DESC_SIZE);
        if (err) {
                netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
                goto err_dma_tx_ring_bufs_alloc;
        }

        err = rocker_dma_ring_create(rocker,
                                     ROCKER_DMA_RX(rocker_port->port_number),
                                     ROCKER_DMA_RX_DEFAULT_SIZE,
                                     &rocker_port->rx_ring);
        if (err) {
                netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
                goto err_dma_rx_ring_create;
        }

        /* NOTE(review): rx descriptor buffers are mapped bidirectional,
         * presumably because the host writes the frag TLVs and the
         * device writes completion info back -- confirm against the
         * device's DMA descriptor spec.
         */
        err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
                                         PCI_DMA_BIDIRECTIONAL,
                                         ROCKER_DMA_RX_DESC_SIZE);
        if (err) {
                netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
                goto err_dma_rx_ring_bufs_alloc;
        }

        err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
        if (err) {
                netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
                goto err_dma_rx_ring_skbs_alloc;
        }
        /* The device produces rx completions, so give it the ring. */
        rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

        return 0;

err_dma_rx_ring_skbs_alloc:
        rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
                                  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
        rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
                                  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
        return err;
}
1223
/* Tear down one port's TX/RX rings in strict reverse order of
 * rocker_port_dma_rings_init(): rx skbs first, then rx buffers and
 * ring, then the tx side.
 */
static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
        struct rocker *rocker = rocker_port->rocker;

        rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
        rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
                                  PCI_DMA_BIDIRECTIONAL);
        rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
        rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
                                  PCI_DMA_TODEVICE);
        rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}
1236
1237 static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
1238 {
1239         u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1240
1241         if (enable)
1242                 val |= 1 << rocker_port->lport;
1243         else
1244                 val &= ~(1 << rocker_port->lport);
1245         rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1246 }
1247
1248 /********************************
1249  * Interrupt handler and helpers
1250  ********************************/
1251
1252 static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1253 {
1254         struct rocker *rocker = dev_id;
1255         struct rocker_desc_info *desc_info;
1256         struct rocker_wait *wait;
1257         u32 credits = 0;
1258
1259         spin_lock(&rocker->cmd_ring_lock);
1260         while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1261                 wait = rocker_desc_cookie_ptr_get(desc_info);
1262                 if (wait->nowait) {
1263                         rocker_desc_gen_clear(desc_info);
1264                         rocker_wait_destroy(wait);
1265                 } else {
1266                         rocker_wait_wake_up(wait);
1267                 }
1268                 credits++;
1269         }
1270         spin_unlock(&rocker->cmd_ring_lock);
1271         rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1272
1273         return IRQ_HANDLED;
1274 }
1275
/* Report link-up to the stack and log the transition. */
static void rocker_port_link_up(struct rocker_port *rocker_port)
{
        netif_carrier_on(rocker_port->dev);
        netdev_info(rocker_port->dev, "Link is up\n");
}
1281
/* Report link-down to the stack and log the transition. */
static void rocker_port_link_down(struct rocker_port *rocker_port)
{
        netif_carrier_off(rocker_port->dev);
        netdev_info(rocker_port->dev, "Link is down\n");
}
1287
1288 static int rocker_event_link_change(struct rocker *rocker,
1289                                     const struct rocker_tlv *info)
1290 {
1291         struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
1292         unsigned int port_number;
1293         bool link_up;
1294         struct rocker_port *rocker_port;
1295
1296         rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
1297         if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT] ||
1298             !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1299                 return -EIO;
1300         port_number =
1301                 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT]) - 1;
1302         link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1303
1304         if (port_number >= rocker->port_count)
1305                 return -EINVAL;
1306
1307         rocker_port = rocker->ports[port_number];
1308         if (netif_carrier_ok(rocker_port->dev) != link_up) {
1309                 if (link_up)
1310                         rocker_port_link_up(rocker_port);
1311                 else
1312                         rocker_port_link_down(rocker_port);
1313         }
1314
1315         return 0;
1316 }
1317
1318 #define ROCKER_OP_FLAG_REMOVE           BIT(0)
1319 #define ROCKER_OP_FLAG_NOWAIT           BIT(1)
1320 #define ROCKER_OP_FLAG_LEARNED          BIT(2)
1321 #define ROCKER_OP_FLAG_REFRESH          BIT(3)
1322
1323 static int rocker_port_fdb(struct rocker_port *rocker_port,
1324                            const unsigned char *addr,
1325                            __be16 vlan_id, int flags);
1326
1327 static int rocker_event_mac_vlan_seen(struct rocker *rocker,
1328                                       const struct rocker_tlv *info)
1329 {
1330         struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
1331         unsigned int port_number;
1332         struct rocker_port *rocker_port;
1333         unsigned char *addr;
1334         int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
1335         __be16 vlan_id;
1336
1337         rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
1338         if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT] ||
1339             !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1340             !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1341                 return -EIO;
1342         port_number =
1343                 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT]) - 1;
1344         addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
1345         vlan_id = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
1346
1347         if (port_number >= rocker->port_count)
1348                 return -EINVAL;
1349
1350         rocker_port = rocker->ports[port_number];
1351
1352         if (rocker_port->stp_state != BR_STATE_LEARNING &&
1353             rocker_port->stp_state != BR_STATE_FORWARDING)
1354                 return 0;
1355
1356         return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
1357 }
1358
1359 static int rocker_event_process(struct rocker *rocker,
1360                                 struct rocker_desc_info *desc_info)
1361 {
1362         struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1363         struct rocker_tlv *info;
1364         u16 type;
1365
1366         rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1367         if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1368             !attrs[ROCKER_TLV_EVENT_INFO])
1369                 return -EIO;
1370
1371         type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1372         info = attrs[ROCKER_TLV_EVENT_INFO];
1373
1374         switch (type) {
1375         case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1376                 return rocker_event_link_change(rocker, info);
1377         case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1378                 return rocker_event_mac_vlan_seen(rocker, info);
1379         }
1380
1381         return -EOPNOTSUPP;
1382 }
1383
1384 static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1385 {
1386         struct rocker *rocker = dev_id;
1387         struct pci_dev *pdev = rocker->pdev;
1388         struct rocker_desc_info *desc_info;
1389         u32 credits = 0;
1390         int err;
1391
1392         while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1393                 err = rocker_desc_err(desc_info);
1394                 if (err) {
1395                         dev_err(&pdev->dev, "event desc received with err %d\n",
1396                                 err);
1397                 } else {
1398                         err = rocker_event_process(rocker, desc_info);
1399                         if (err)
1400                                 dev_err(&pdev->dev, "event processing failed with err %d\n",
1401                                         err);
1402                 }
1403                 rocker_desc_gen_clear(desc_info);
1404                 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1405                 credits++;
1406         }
1407         rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1408
1409         return IRQ_HANDLED;
1410 }
1411
/* TX completion IRQ: defer the actual work to NAPI context. */
static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
        struct rocker_port *rocker_port = dev_id;

        napi_schedule(&rocker_port->napi_tx);
        return IRQ_HANDLED;
}
1419
/* RX IRQ: defer the actual receive processing to NAPI context. */
static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
        struct rocker_port *rocker_port = dev_id;

        napi_schedule(&rocker_port->napi_rx);
        return IRQ_HANDLED;
}
1427
1428 /********************
1429  * Command interface
1430  ********************/
1431
/* Callback type used by rocker_cmd_exec(): as "prepare" it fills the
 * command descriptor's TLVs, as "process" it parses the completed
 * response.  Returns 0 or a negative errno.
 */
typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
                               struct rocker_port *rocker_port,
                               struct rocker_desc_info *desc_info,
                               void *priv);
1436
1437 static int rocker_cmd_exec(struct rocker *rocker,
1438                            struct rocker_port *rocker_port,
1439                            rocker_cmd_cb_t prepare, void *prepare_priv,
1440                            rocker_cmd_cb_t process, void *process_priv,
1441                            bool nowait)
1442 {
1443         struct rocker_desc_info *desc_info;
1444         struct rocker_wait *wait;
1445         unsigned long flags;
1446         int err;
1447
1448         wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
1449         if (!wait)
1450                 return -ENOMEM;
1451         wait->nowait = nowait;
1452
1453         spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
1454         desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1455         if (!desc_info) {
1456                 spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1457                 err = -EAGAIN;
1458                 goto out;
1459         }
1460         err = prepare(rocker, rocker_port, desc_info, prepare_priv);
1461         if (err) {
1462                 spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1463                 goto out;
1464         }
1465         rocker_desc_cookie_ptr_set(desc_info, wait);
1466         rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1467         spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1468
1469         if (nowait)
1470                 return 0;
1471
1472         if (!rocker_wait_event_timeout(wait, HZ / 10))
1473                 return -EIO;
1474
1475         err = rocker_desc_err(desc_info);
1476         if (err)
1477                 return err;
1478
1479         if (process)
1480                 err = process(rocker, rocker_port, desc_info, process_priv);
1481
1482         rocker_desc_gen_clear(desc_info);
1483 out:
1484         rocker_wait_destroy(wait);
1485         return err;
1486 }
1487
/* Build a GET_PORT_SETTINGS command descriptor; the only argument is
 * the logical port to query.  Returns 0 or -EMSGSIZE when the TLVs do
 * not fit in the descriptor buffer.
 */
static int
rocker_cmd_get_port_settings_prep(struct rocker *rocker,
                                  struct rocker_port *rocker_port,
                                  struct rocker_desc_info *desc_info,
                                  void *priv)
{
        struct rocker_tlv *cmd_info;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
                               ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
                               rocker_port->lport))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);
        return 0;
}
1508
1509 static int
1510 rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
1511                                           struct rocker_port *rocker_port,
1512                                           struct rocker_desc_info *desc_info,
1513                                           void *priv)
1514 {
1515         struct ethtool_cmd *ecmd = priv;
1516         struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1517         struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1518         u32 speed;
1519         u8 duplex;
1520         u8 autoneg;
1521
1522         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1523         if (!attrs[ROCKER_TLV_CMD_INFO])
1524                 return -EIO;
1525
1526         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1527                                 attrs[ROCKER_TLV_CMD_INFO]);
1528         if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1529             !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1530             !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1531                 return -EIO;
1532
1533         speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1534         duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1535         autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1536
1537         ecmd->transceiver = XCVR_INTERNAL;
1538         ecmd->supported = SUPPORTED_TP;
1539         ecmd->phy_address = 0xff;
1540         ecmd->port = PORT_TP;
1541         ethtool_cmd_speed_set(ecmd, speed);
1542         ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1543         ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1544
1545         return 0;
1546 }
1547
1548 static int
1549 rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
1550                                           struct rocker_port *rocker_port,
1551                                           struct rocker_desc_info *desc_info,
1552                                           void *priv)
1553 {
1554         unsigned char *macaddr = priv;
1555         struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1556         struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1557         struct rocker_tlv *attr;
1558
1559         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1560         if (!attrs[ROCKER_TLV_CMD_INFO])
1561                 return -EIO;
1562
1563         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1564                                 attrs[ROCKER_TLV_CMD_INFO]);
1565         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1566         if (!attr)
1567                 return -EIO;
1568
1569         if (rocker_tlv_len(attr) != ETH_ALEN)
1570                 return -EINVAL;
1571
1572         ether_addr_copy(macaddr, rocker_tlv_data(attr));
1573         return 0;
1574 }
1575
/* Build a SET_PORT_SETTINGS command carrying the ethtool speed,
 * duplex and autoneg values from priv.  Returns 0 or -EMSGSIZE.
 */
static int
rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
                                          struct rocker_port *rocker_port,
                                          struct rocker_desc_info *desc_info,
                                          void *priv)
{
        struct ethtool_cmd *ecmd = priv;
        struct rocker_tlv *cmd_info;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
                               ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        /* The lport selects which port the settings apply to. */
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
                               rocker_port->lport))
                return -EMSGSIZE;
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
                               ethtool_cmd_speed(ecmd)))
                return -EMSGSIZE;
        if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
                              ecmd->duplex))
                return -EMSGSIZE;
        if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
                              ecmd->autoneg))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);
        return 0;
}
1606
/* Build a SET_PORT_SETTINGS command carrying a new MAC address
 * (priv, ETH_ALEN bytes).  Returns 0 or -EMSGSIZE.
 */
static int
rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
                                          struct rocker_port *rocker_port,
                                          struct rocker_desc_info *desc_info,
                                          void *priv)
{
        unsigned char *macaddr = priv;
        struct rocker_tlv *cmd_info;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
                               ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
                return -EMSGSIZE;
        cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_info)
                return -EMSGSIZE;
        /* The lport selects which port the settings apply to. */
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
                               rocker_port->lport))
                return -EMSGSIZE;
        if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
                           ETH_ALEN, macaddr))
                return -EMSGSIZE;
        rocker_tlv_nest_end(desc_info, cmd_info);
        return 0;
}
1631
1632 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1633                                                 struct ethtool_cmd *ecmd)
1634 {
1635         return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1636                                rocker_cmd_get_port_settings_prep, NULL,
1637                                rocker_cmd_get_port_settings_ethtool_proc,
1638                                ecmd, false);
1639 }
1640
1641 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1642                                                 unsigned char *macaddr)
1643 {
1644         return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1645                                rocker_cmd_get_port_settings_prep, NULL,
1646                                rocker_cmd_get_port_settings_macaddr_proc,
1647                                macaddr, false);
1648 }
1649
1650 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1651                                                 struct ethtool_cmd *ecmd)
1652 {
1653         return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1654                                rocker_cmd_set_port_settings_ethtool_prep,
1655                                ecmd, NULL, NULL, false);
1656 }
1657
1658 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1659                                                 unsigned char *macaddr)
1660 {
1661         return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1662                                rocker_cmd_set_port_settings_macaddr_prep,
1663                                macaddr, NULL, NULL, false);
1664 }
1665
1666 static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1667                                            struct rocker_flow_tbl_entry *entry)
1668 {
1669         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
1670                                entry->key.ig_port.in_lport))
1671                 return -EMSGSIZE;
1672         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
1673                                entry->key.ig_port.in_lport_mask))
1674                 return -EMSGSIZE;
1675         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1676                                entry->key.ig_port.goto_tbl))
1677                 return -EMSGSIZE;
1678
1679         return 0;
1680 }
1681
1682 static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1683                                         struct rocker_flow_tbl_entry *entry)
1684 {
1685         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
1686                                entry->key.vlan.in_lport))
1687                 return -EMSGSIZE;
1688         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1689                                entry->key.vlan.vlan_id))
1690                 return -EMSGSIZE;
1691         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1692                                entry->key.vlan.vlan_id_mask))
1693                 return -EMSGSIZE;
1694         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1695                                entry->key.vlan.goto_tbl))
1696                 return -EMSGSIZE;
1697         if (entry->key.vlan.untagged &&
1698             rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1699                                entry->key.vlan.new_vlan_id))
1700                 return -EMSGSIZE;
1701
1702         return 0;
1703 }
1704
1705 static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1706                                             struct rocker_flow_tbl_entry *entry)
1707 {
1708         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
1709                                entry->key.term_mac.in_lport))
1710                 return -EMSGSIZE;
1711         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
1712                                entry->key.term_mac.in_lport_mask))
1713                 return -EMSGSIZE;
1714         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1715                                entry->key.term_mac.eth_type))
1716                 return -EMSGSIZE;
1717         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1718                            ETH_ALEN, entry->key.term_mac.eth_dst))
1719                 return -EMSGSIZE;
1720         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1721                            ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1722                 return -EMSGSIZE;
1723         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1724                                entry->key.term_mac.vlan_id))
1725                 return -EMSGSIZE;
1726         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1727                                entry->key.term_mac.vlan_id_mask))
1728                 return -EMSGSIZE;
1729         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1730                                entry->key.term_mac.goto_tbl))
1731                 return -EMSGSIZE;
1732         if (entry->key.term_mac.copy_to_cpu &&
1733             rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1734                               entry->key.term_mac.copy_to_cpu))
1735                 return -EMSGSIZE;
1736
1737         return 0;
1738 }
1739
1740 static int
1741 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
1742                                       struct rocker_flow_tbl_entry *entry)
1743 {
1744         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1745                                entry->key.ucast_routing.eth_type))
1746                 return -EMSGSIZE;
1747         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
1748                                entry->key.ucast_routing.dst4))
1749                 return -EMSGSIZE;
1750         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
1751                                entry->key.ucast_routing.dst4_mask))
1752                 return -EMSGSIZE;
1753         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1754                                entry->key.ucast_routing.goto_tbl))
1755                 return -EMSGSIZE;
1756         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1757                                entry->key.ucast_routing.group_id))
1758                 return -EMSGSIZE;
1759
1760         return 0;
1761 }
1762
1763 static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
1764                                           struct rocker_flow_tbl_entry *entry)
1765 {
1766         if (entry->key.bridge.has_eth_dst &&
1767             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1768                            ETH_ALEN, entry->key.bridge.eth_dst))
1769                 return -EMSGSIZE;
1770         if (entry->key.bridge.has_eth_dst_mask &&
1771             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1772                            ETH_ALEN, entry->key.bridge.eth_dst_mask))
1773                 return -EMSGSIZE;
1774         if (entry->key.bridge.vlan_id &&
1775             rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1776                                entry->key.bridge.vlan_id))
1777                 return -EMSGSIZE;
1778         if (entry->key.bridge.tunnel_id &&
1779             rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
1780                                entry->key.bridge.tunnel_id))
1781                 return -EMSGSIZE;
1782         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1783                                entry->key.bridge.goto_tbl))
1784                 return -EMSGSIZE;
1785         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1786                                entry->key.bridge.group_id))
1787                 return -EMSGSIZE;
1788         if (entry->key.bridge.copy_to_cpu &&
1789             rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1790                               entry->key.bridge.copy_to_cpu))
1791                 return -EMSGSIZE;
1792
1793         return 0;
1794 }
1795
1796 static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
1797                                        struct rocker_flow_tbl_entry *entry)
1798 {
1799         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
1800                                entry->key.acl.in_lport))
1801                 return -EMSGSIZE;
1802         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
1803                                entry->key.acl.in_lport_mask))
1804                 return -EMSGSIZE;
1805         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
1806                            ETH_ALEN, entry->key.acl.eth_src))
1807                 return -EMSGSIZE;
1808         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
1809                            ETH_ALEN, entry->key.acl.eth_src_mask))
1810                 return -EMSGSIZE;
1811         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1812                            ETH_ALEN, entry->key.acl.eth_dst))
1813                 return -EMSGSIZE;
1814         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1815                            ETH_ALEN, entry->key.acl.eth_dst_mask))
1816                 return -EMSGSIZE;
1817         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1818                                entry->key.acl.eth_type))
1819                 return -EMSGSIZE;
1820         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1821                                entry->key.acl.vlan_id))
1822                 return -EMSGSIZE;
1823         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1824                                entry->key.acl.vlan_id_mask))
1825                 return -EMSGSIZE;
1826
1827         switch (ntohs(entry->key.acl.eth_type)) {
1828         case ETH_P_IP:
1829         case ETH_P_IPV6:
1830                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
1831                                       entry->key.acl.ip_proto))
1832                         return -EMSGSIZE;
1833                 if (rocker_tlv_put_u8(desc_info,
1834                                       ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
1835                                       entry->key.acl.ip_proto_mask))
1836                         return -EMSGSIZE;
1837                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
1838                                       entry->key.acl.ip_tos & 0x3f))
1839                         return -EMSGSIZE;
1840                 if (rocker_tlv_put_u8(desc_info,
1841                                       ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
1842                                       entry->key.acl.ip_tos_mask & 0x3f))
1843                         return -EMSGSIZE;
1844                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
1845                                       (entry->key.acl.ip_tos & 0xc0) >> 6))
1846                         return -EMSGSIZE;
1847                 if (rocker_tlv_put_u8(desc_info,
1848                                       ROCKER_TLV_OF_DPA_IP_ECN_MASK,
1849                                       (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
1850                         return -EMSGSIZE;
1851                 break;
1852         }
1853
1854         if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
1855             rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1856                                entry->key.acl.group_id))
1857                 return -EMSGSIZE;
1858
1859         return 0;
1860 }
1861
1862 static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
1863                                    struct rocker_port *rocker_port,
1864                                    struct rocker_desc_info *desc_info,
1865                                    void *priv)
1866 {
1867         struct rocker_flow_tbl_entry *entry = priv;
1868         struct rocker_tlv *cmd_info;
1869         int err = 0;
1870
1871         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1872                                ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD))
1873                 return -EMSGSIZE;
1874         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1875         if (!cmd_info)
1876                 return -EMSGSIZE;
1877         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
1878                                entry->key.tbl_id))
1879                 return -EMSGSIZE;
1880         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
1881                                entry->key.priority))
1882                 return -EMSGSIZE;
1883         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
1884                 return -EMSGSIZE;
1885         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
1886                                entry->cookie))
1887                 return -EMSGSIZE;
1888
1889         switch (entry->key.tbl_id) {
1890         case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
1891                 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
1892                 break;
1893         case ROCKER_OF_DPA_TABLE_ID_VLAN:
1894                 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
1895                 break;
1896         case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
1897                 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
1898                 break;
1899         case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
1900                 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
1901                 break;
1902         case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
1903                 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
1904                 break;
1905         case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
1906                 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
1907                 break;
1908         default:
1909                 err = -ENOTSUPP;
1910                 break;
1911         }
1912
1913         if (err)
1914                 return err;
1915
1916         rocker_tlv_nest_end(desc_info, cmd_info);
1917
1918         return 0;
1919 }
1920
/* Build a FLOW_DEL command descriptor for @priv (a flow table entry).
 * Only the cookie is sent; it alone identifies the flow to remove.
 * Returns 0 on success, -EMSGSIZE if the descriptor overflows.
 */
static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
				   struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct rocker_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
1942
1943 static int
1944 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
1945                                       struct rocker_group_tbl_entry *entry)
1946 {
1947         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_LPORT,
1948                                ROCKER_GROUP_PORT_GET(entry->group_id)))
1949                 return -EMSGSIZE;
1950         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
1951                               entry->l2_interface.pop_vlan))
1952                 return -EMSGSIZE;
1953
1954         return 0;
1955 }
1956
1957 static int
1958 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
1959                                     struct rocker_group_tbl_entry *entry)
1960 {
1961         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
1962                                entry->l2_rewrite.group_id))
1963                 return -EMSGSIZE;
1964         if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
1965             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
1966                            ETH_ALEN, entry->l2_rewrite.eth_src))
1967                 return -EMSGSIZE;
1968         if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
1969             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1970                            ETH_ALEN, entry->l2_rewrite.eth_dst))
1971                 return -EMSGSIZE;
1972         if (entry->l2_rewrite.vlan_id &&
1973             rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1974                                entry->l2_rewrite.vlan_id))
1975                 return -EMSGSIZE;
1976
1977         return 0;
1978 }
1979
1980 static int
1981 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
1982                                    struct rocker_group_tbl_entry *entry)
1983 {
1984         int i;
1985         struct rocker_tlv *group_ids;
1986
1987         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
1988                                entry->group_count))
1989                 return -EMSGSIZE;
1990
1991         group_ids = rocker_tlv_nest_start(desc_info,
1992                                           ROCKER_TLV_OF_DPA_GROUP_IDS);
1993         if (!group_ids)
1994                 return -EMSGSIZE;
1995
1996         for (i = 0; i < entry->group_count; i++)
1997                 /* Note TLV array is 1-based */
1998                 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
1999                         return -EMSGSIZE;
2000
2001         rocker_tlv_nest_end(desc_info, group_ids);
2002
2003         return 0;
2004 }
2005
2006 static int
2007 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2008                                     struct rocker_group_tbl_entry *entry)
2009 {
2010         if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2011             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2012                            ETH_ALEN, entry->l3_unicast.eth_src))
2013                 return -EMSGSIZE;
2014         if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2015             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2016                            ETH_ALEN, entry->l3_unicast.eth_dst))
2017                 return -EMSGSIZE;
2018         if (entry->l3_unicast.vlan_id &&
2019             rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2020                                entry->l3_unicast.vlan_id))
2021                 return -EMSGSIZE;
2022         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2023                               entry->l3_unicast.ttl_check))
2024                 return -EMSGSIZE;
2025         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2026                                entry->l3_unicast.group_id))
2027                 return -EMSGSIZE;
2028
2029         return 0;
2030 }
2031
2032 static int rocker_cmd_group_tbl_add(struct rocker *rocker,
2033                                     struct rocker_port *rocker_port,
2034                                     struct rocker_desc_info *desc_info,
2035                                     void *priv)
2036 {
2037         struct rocker_group_tbl_entry *entry = priv;
2038         struct rocker_tlv *cmd_info;
2039         int err = 0;
2040
2041         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2042                 return -EMSGSIZE;
2043         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2044         if (!cmd_info)
2045                 return -EMSGSIZE;
2046
2047         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2048                                entry->group_id))
2049                 return -EMSGSIZE;
2050
2051         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2052         case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2053                 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2054                 break;
2055         case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2056                 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2057                 break;
2058         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2059         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2060                 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2061                 break;
2062         case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2063                 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2064                 break;
2065         default:
2066                 err = -ENOTSUPP;
2067                 break;
2068         }
2069
2070         if (err)
2071                 return err;
2072
2073         rocker_tlv_nest_end(desc_info, cmd_info);
2074
2075         return 0;
2076 }
2077
/* Build a group-table delete command descriptor for @priv.  The
 * command type comes from entry->cmd; groups are identified by
 * group id alone.  Returns 0 on success, -EMSGSIZE on overflow.
 */
static int rocker_cmd_group_tbl_del(struct rocker *rocker,
				    struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv)
{
	const struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
2098
2099 /*****************************************
2100  * Flow, group, FDB, internal VLAN tables
2101  *****************************************/
2102
2103 static int rocker_init_tbls(struct rocker *rocker)
2104 {
2105         hash_init(rocker->flow_tbl);
2106         spin_lock_init(&rocker->flow_tbl_lock);
2107
2108         hash_init(rocker->group_tbl);
2109         spin_lock_init(&rocker->group_tbl_lock);
2110
2111         hash_init(rocker->fdb_tbl);
2112         spin_lock_init(&rocker->fdb_tbl_lock);
2113
2114         hash_init(rocker->internal_vlan_tbl);
2115         spin_lock_init(&rocker->internal_vlan_tbl_lock);
2116
2117         return 0;
2118 }
2119
2120 static void rocker_free_tbls(struct rocker *rocker)
2121 {
2122         unsigned long flags;
2123         struct rocker_flow_tbl_entry *flow_entry;
2124         struct rocker_group_tbl_entry *group_entry;
2125         struct rocker_fdb_tbl_entry *fdb_entry;
2126         struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2127         struct hlist_node *tmp;
2128         int bkt;
2129
2130         spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2131         hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2132                 hash_del(&flow_entry->entry);
2133         spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2134
2135         spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2136         hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2137                 hash_del(&group_entry->entry);
2138         spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2139
2140         spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2141         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2142                 hash_del(&fdb_entry->entry);
2143         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2144
2145         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2146         hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2147                            tmp, internal_vlan_entry, entry)
2148                 hash_del(&internal_vlan_entry->entry);
2149         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2150 }
2151
2152 static struct rocker_flow_tbl_entry *
2153 rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
2154 {
2155         struct rocker_flow_tbl_entry *found;
2156
2157         hash_for_each_possible(rocker->flow_tbl, found,
2158                                entry, match->key_crc32) {
2159                 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
2160                         return found;
2161         }
2162
2163         return NULL;
2164 }
2165
/* Install @match in the software flow table and, for a new key, push
 * it to the hardware via a FLOW_ADD command.
 *
 * Ownership: @match is always consumed -- either linked into
 * rocker->flow_tbl or freed here (duplicate key, or HW add failure).
 * Duplicate adds are reference-counted instead of re-sent to HW.
 *
 * Returns 0 on success or a negative errno from rocker_cmd_exec().
 */
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
			       struct rocker_flow_tbl_entry *match,
			       bool nowait)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	unsigned long flags;
	bool add_to_hw = false;
	int err = 0;

	/* CRC32 of the whole key doubles as the hash-table bucket index. */
	match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));

	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);

	found = rocker_flow_tbl_find(rocker, match);

	if (found) {
		/* Same key already installed: drop the duplicate entry
		 * and just take another reference below.
		 */
		kfree(match);
	} else {
		found = match;
		found->cookie = rocker->flow_tbl_next_cookie++;
		hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
		add_to_hw = true;
	}

	found->ref_count++;

	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);

	if (add_to_hw) {
		err = rocker_cmd_exec(rocker, rocker_port,
				      rocker_cmd_flow_tbl_add,
				      found, NULL, NULL, nowait);
		if (err) {
			/* Roll back the software-table insert on HW failure.
			 * NOTE(review): a concurrent add could have found
			 * this entry while the lock was dropped; confirm
			 * callers serialize operations on the same key.
			 */
			spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
			hash_del(&found->entry);
			spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
			kfree(found);
		}
	}

	return err;
}
2209
/* Drop one reference on the installed flow entry matching @match's
 * key; when the last reference goes away, unlink it and issue a
 * FLOW_DEL command to the hardware.
 *
 * Ownership: @match is only a key carrier and is always freed here;
 * the installed entry is freed after its HW delete is issued.
 * Deleting a key that is not installed is a silent no-op.
 */
static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
			       struct rocker_flow_tbl_entry *match,
			       bool nowait)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	unsigned long flags;
	bool del_from_hw = false;
	int err = 0;

	/* Compute the key hash so the lookup hits the right bucket. */
	match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));

	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);

	found = rocker_flow_tbl_find(rocker, match);

	if (found) {
		found->ref_count--;
		if (found->ref_count == 0) {
			hash_del(&found->entry);
			del_from_hw = true;
		}
	}

	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);

	kfree(match);

	if (del_from_hw) {
		/* The entry is already unlinked, so it can be used and
		 * freed outside the lock; only its cookie is sent.
		 */
		err = rocker_cmd_exec(rocker, rocker_port,
				      rocker_cmd_flow_tbl_del,
				      found, NULL, NULL, nowait);
		kfree(found);
	}

	return err;
}
2247
2248 static gfp_t rocker_op_flags_gfp(int flags)
2249 {
2250         return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
2251 }
2252
2253 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2254                               int flags, struct rocker_flow_tbl_entry *entry)
2255 {
2256         bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
2257
2258         if (flags & ROCKER_OP_FLAG_REMOVE)
2259                 return rocker_flow_tbl_del(rocker_port, entry, nowait);
2260         else
2261                 return rocker_flow_tbl_add(rocker_port, entry, nowait);
2262 }
2263
2264 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2265                                    int flags, u32 in_lport, u32 in_lport_mask,
2266                                    enum rocker_of_dpa_table_id goto_tbl)
2267 {
2268         struct rocker_flow_tbl_entry *entry;
2269
2270         entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2271         if (!entry)
2272                 return -ENOMEM;
2273
2274         entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2275         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2276         entry->key.ig_port.in_lport = in_lport;
2277         entry->key.ig_port.in_lport_mask = in_lport_mask;
2278         entry->key.ig_port.goto_tbl = goto_tbl;
2279
2280         return rocker_flow_tbl_do(rocker_port, flags, entry);
2281 }
2282
2283 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2284                                 int flags, u32 in_lport,
2285                                 __be16 vlan_id, __be16 vlan_id_mask,
2286                                 enum rocker_of_dpa_table_id goto_tbl,
2287                                 bool untagged, __be16 new_vlan_id)
2288 {
2289         struct rocker_flow_tbl_entry *entry;
2290
2291         entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2292         if (!entry)
2293                 return -ENOMEM;
2294
2295         entry->key.priority = ROCKER_PRIORITY_VLAN;
2296         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2297         entry->key.vlan.in_lport = in_lport;
2298         entry->key.vlan.vlan_id = vlan_id;
2299         entry->key.vlan.vlan_id_mask = vlan_id_mask;
2300         entry->key.vlan.goto_tbl = goto_tbl;
2301
2302         entry->key.vlan.untagged = untagged;
2303         entry->key.vlan.new_vlan_id = new_vlan_id;
2304
2305         return rocker_flow_tbl_do(rocker_port, flags, entry);
2306 }
2307
2308 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2309                                     u32 in_lport, u32 in_lport_mask,
2310                                     __be16 eth_type, const u8 *eth_dst,
2311                                     const u8 *eth_dst_mask, __be16 vlan_id,
2312                                     __be16 vlan_id_mask, bool copy_to_cpu,
2313                                     int flags)
2314 {
2315         struct rocker_flow_tbl_entry *entry;
2316
2317         entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2318         if (!entry)
2319                 return -ENOMEM;
2320
2321         if (is_multicast_ether_addr(eth_dst)) {
2322                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2323                 entry->key.term_mac.goto_tbl =
2324                          ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2325         } else {
2326                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2327                 entry->key.term_mac.goto_tbl =
2328                          ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2329         }
2330
2331         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2332         entry->key.term_mac.in_lport = in_lport;
2333         entry->key.term_mac.in_lport_mask = in_lport_mask;
2334         entry->key.term_mac.eth_type = eth_type;
2335         ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2336         ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2337         entry->key.term_mac.vlan_id = vlan_id;
2338         entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2339         entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2340
2341         return rocker_flow_tbl_do(rocker_port, flags, entry);
2342 }
2343
2344 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2345                                   int flags,
2346                                   const u8 *eth_dst, const u8 *eth_dst_mask,
2347                                   __be16 vlan_id, u32 tunnel_id,
2348                                   enum rocker_of_dpa_table_id goto_tbl,
2349                                   u32 group_id, bool copy_to_cpu)
2350 {
2351         struct rocker_flow_tbl_entry *entry;
2352         u32 priority;
2353         bool vlan_bridging = !!vlan_id;
2354         bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
2355         bool wild = false;
2356
2357         entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2358         if (!entry)
2359                 return -ENOMEM;
2360
2361         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2362
2363         if (eth_dst) {
2364                 entry->key.bridge.has_eth_dst = 1;
2365                 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2366         }
2367         if (eth_dst_mask) {
2368                 entry->key.bridge.has_eth_dst_mask = 1;
2369                 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2370                 if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
2371                         wild = true;
2372         }
2373
2374         priority = ROCKER_PRIORITY_UNKNOWN;
2375         if (vlan_bridging & dflt & wild)
2376                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2377         else if (vlan_bridging & dflt & !wild)
2378                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2379         else if (vlan_bridging & !dflt)
2380                 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2381         else if (!vlan_bridging & dflt & wild)
2382                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2383         else if (!vlan_bridging & dflt & !wild)
2384                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2385         else if (!vlan_bridging & !dflt)
2386                 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2387
2388         entry->key.priority = priority;
2389         entry->key.bridge.vlan_id = vlan_id;
2390         entry->key.bridge.tunnel_id = tunnel_id;
2391         entry->key.bridge.goto_tbl = goto_tbl;
2392         entry->key.bridge.group_id = group_id;
2393         entry->key.bridge.copy_to_cpu = copy_to_cpu;
2394
2395         return rocker_flow_tbl_do(rocker_port, flags, entry);
2396 }
2397
2398 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2399                                int flags, u32 in_lport,
2400                                u32 in_lport_mask,
2401                                const u8 *eth_src, const u8 *eth_src_mask,
2402                                const u8 *eth_dst, const u8 *eth_dst_mask,
2403                                __be16 eth_type,
2404                                __be16 vlan_id, __be16 vlan_id_mask,
2405                                u8 ip_proto, u8 ip_proto_mask,
2406                                u8 ip_tos, u8 ip_tos_mask,
2407                                u32 group_id)
2408 {
2409         u32 priority;
2410         struct rocker_flow_tbl_entry *entry;
2411
2412         entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2413         if (!entry)
2414                 return -ENOMEM;
2415
2416         priority = ROCKER_PRIORITY_ACL_NORMAL;
2417         if (eth_dst && eth_dst_mask) {
2418                 if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
2419                         priority = ROCKER_PRIORITY_ACL_DFLT;
2420                 else if (is_link_local_ether_addr(eth_dst))
2421                         priority = ROCKER_PRIORITY_ACL_CTRL;
2422         }
2423
2424         entry->key.priority = priority;
2425         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2426         entry->key.acl.in_lport = in_lport;
2427         entry->key.acl.in_lport_mask = in_lport_mask;
2428
2429         if (eth_src)
2430                 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2431         if (eth_src_mask)
2432                 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2433         if (eth_dst)
2434                 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2435         if (eth_dst_mask)
2436                 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2437
2438         entry->key.acl.eth_type = eth_type;
2439         entry->key.acl.vlan_id = vlan_id;
2440         entry->key.acl.vlan_id_mask = vlan_id_mask;
2441         entry->key.acl.ip_proto = ip_proto;
2442         entry->key.acl.ip_proto_mask = ip_proto_mask;
2443         entry->key.acl.ip_tos = ip_tos;
2444         entry->key.acl.ip_tos_mask = ip_tos_mask;
2445         entry->key.acl.group_id = group_id;
2446
2447         return rocker_flow_tbl_do(rocker_port, flags, entry);
2448 }
2449
2450 static struct rocker_group_tbl_entry *
2451 rocker_group_tbl_find(struct rocker *rocker,
2452                       struct rocker_group_tbl_entry *match)
2453 {
2454         struct rocker_group_tbl_entry *found;
2455
2456         hash_for_each_possible(rocker->group_tbl, found,
2457                                entry, match->group_id) {
2458                 if (found->group_id == match->group_id)
2459                         return found;
2460         }
2461
2462         return NULL;
2463 }
2464
2465 static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
2466 {
2467         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2468         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2469         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2470                 kfree(entry->group_ids);
2471                 break;
2472         default:
2473                 break;
2474         }
2475         kfree(entry);
2476 }
2477
/* Insert (or replace) a group table entry and push it to the device.
 *
 * Ownership of @match transfers to the table.  If an entry with the
 * same group_id already exists, the stale entry is freed, @match takes
 * its place, and the device is sent a MOD command instead of an ADD.
 */
static int rocker_group_tbl_add(struct rocker_port *rocker_port,
				struct rocker_group_tbl_entry *match,
				bool nowait)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&rocker->group_tbl_lock, flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		/* replace: drop the stale entry, reuse @match as the new one */
		hash_del(&found->entry);
		rocker_group_tbl_entry_free(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	hash_add(rocker->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);

	/* issue the ADD/MOD to the device outside the lock */
	if (found->cmd)
		err = rocker_cmd_exec(rocker, rocker_port,
				      rocker_cmd_group_tbl_add,
				      found, NULL, NULL, nowait);

	return err;
}
2512
2513 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2514                                 struct rocker_group_tbl_entry *match,
2515                                 bool nowait)
2516 {
2517         struct rocker *rocker = rocker_port->rocker;
2518         struct rocker_group_tbl_entry *found;
2519         unsigned long flags;
2520         int err = 0;
2521
2522         spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2523
2524         found = rocker_group_tbl_find(rocker, match);
2525
2526         if (found) {
2527                 hash_del(&found->entry);
2528                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2529         }
2530
2531         spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2532
2533         rocker_group_tbl_entry_free(match);
2534
2535         if (found) {
2536                 err = rocker_cmd_exec(rocker, rocker_port,
2537                                       rocker_cmd_group_tbl_del,
2538                                       found, NULL, NULL, nowait);
2539                 rocker_group_tbl_entry_free(found);
2540         }
2541
2542         return err;
2543 }
2544
2545 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2546                                int flags, struct rocker_group_tbl_entry *entry)
2547 {
2548         bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
2549
2550         if (flags & ROCKER_OP_FLAG_REMOVE)
2551                 return rocker_group_tbl_del(rocker_port, entry, nowait);
2552         else
2553                 return rocker_group_tbl_add(rocker_port, entry, nowait);
2554 }
2555
2556 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2557                                      int flags, __be16 vlan_id,
2558                                      u32 out_lport, int pop_vlan)
2559 {
2560         struct rocker_group_tbl_entry *entry;
2561
2562         entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2563         if (!entry)
2564                 return -ENOMEM;
2565
2566         entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
2567         entry->l2_interface.pop_vlan = pop_vlan;
2568
2569         return rocker_group_tbl_do(rocker_port, flags, entry);
2570 }
2571
2572 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2573                                    int flags, u8 group_count,
2574                                    u32 *group_ids, u32 group_id)
2575 {
2576         struct rocker_group_tbl_entry *entry;
2577
2578         entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2579         if (!entry)
2580                 return -ENOMEM;
2581
2582         entry->group_id = group_id;
2583         entry->group_count = group_count;
2584
2585         entry->group_ids = kcalloc(group_count, sizeof(u32),
2586                                    rocker_op_flags_gfp(flags));
2587         if (!entry->group_ids) {
2588                 kfree(entry);
2589                 return -ENOMEM;
2590         }
2591         memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2592
2593         return rocker_group_tbl_do(rocker_port, flags, entry);
2594 }
2595
2596 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2597                                  int flags, __be16 vlan_id,
2598                                  u8 group_count, u32 *group_ids,
2599                                  u32 group_id)
2600 {
2601         return rocker_group_l2_fan_out(rocker_port, flags,
2602                                        group_count, group_ids,
2603                                        group_id);
2604 }
2605
2606 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
2607                                         int flags, __be16 vlan_id)
2608 {
2609         struct rocker_port *p;
2610         struct rocker *rocker = rocker_port->rocker;
2611         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
2612         u32 group_ids[rocker->port_count];
2613         u8 group_count = 0;
2614         int err;
2615         int i;
2616
2617         /* Adjust the flood group for this VLAN.  The flood group
2618          * references an L2 interface group for each port in this
2619          * VLAN.
2620          */
2621
2622         for (i = 0; i < rocker->port_count; i++) {
2623                 p = rocker->ports[i];
2624                 if (!rocker_port_is_bridged(p))
2625                         continue;
2626                 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
2627                         group_ids[group_count++] =
2628                                 ROCKER_GROUP_L2_INTERFACE(vlan_id,
2629                                                           p->lport);
2630                 }
2631         }
2632
2633         /* If there are no bridged ports in this VLAN, we're done */
2634         if (group_count == 0)
2635                 return 0;
2636
2637         err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
2638                                     group_count, group_ids,
2639                                     group_id);
2640         if (err)
2641                 netdev_err(rocker_port->dev,
2642                            "Error (%d) port VLAN l2 flood group\n", err);
2643
2644         return err;
2645 }
2646
/* Maintain the port's L2 interface groups for a VLAN:
 *  - a group for this port (only while STP allows forwarding), and
 *  - a shared group for the CPU port (lport 0), created when the first
 *    port joins the VLAN and destroyed when the last one leaves.
 */
static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
				      int flags, __be16 vlan_id,
				      bool pop_vlan)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_port *p;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	u32 out_lport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING) {
		out_lport = rocker_port->lport;
		err = rocker_group_l2_interface(rocker_port, flags,
						vlan_id, out_lport,
						pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for lport %d\n",
				   err, out_lport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	/* Count ports currently in this VLAN.  NOTE(review): this assumes
	 * the caller updated this port's bit in vlan_bitmap before calling
	 * (rocker_port_vlan() does) — confirm for any new callers.
	 */
	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	/* Skip unless this is a first-join (adding, ref == 1) or a
	 * last-leave (removing, ref == 0) transition.
	 */
	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_lport = 0;
	err = rocker_group_l2_interface(rocker_port, flags,
					vlan_id, out_lport,
					pop_vlan);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
2703
2704 static struct rocker_ctrl {
2705         const u8 *eth_dst;
2706         const u8 *eth_dst_mask;
2707         u16 eth_type;
2708         bool acl;
2709         bool bridge;
2710         bool term;
2711         bool copy_to_cpu;
2712 } rocker_ctrls[] = {
2713         [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
2714                 /* pass link local multicast pkts up to CPU for filtering */
2715                 .eth_dst = ll_mac,
2716                 .eth_dst_mask = ll_mask,
2717                 .acl = true,
2718         },
2719         [ROCKER_CTRL_LOCAL_ARP] = {
2720                 /* pass local ARP pkts up to CPU */
2721                 .eth_dst = zero_mac,
2722                 .eth_dst_mask = zero_mac,
2723                 .eth_type = htons(ETH_P_ARP),
2724                 .acl = true,
2725         },
2726         [ROCKER_CTRL_IPV4_MCAST] = {
2727                 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
2728                 .eth_dst = ipv4_mcast,
2729                 .eth_dst_mask = ipv4_mask,
2730                 .eth_type = htons(ETH_P_IP),
2731                 .term  = true,
2732                 .copy_to_cpu = true,
2733         },
2734         [ROCKER_CTRL_IPV6_MCAST] = {
2735                 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
2736                 .eth_dst = ipv6_mcast,
2737                 .eth_dst_mask = ipv6_mask,
2738                 .eth_type = htons(ETH_P_IPV6),
2739                 .term  = true,
2740                 .copy_to_cpu = true,
2741         },
2742         [ROCKER_CTRL_DFLT_BRIDGING] = {
2743                 /* flood any pkts on vlan */
2744                 .bridge = true,
2745                 .copy_to_cpu = true,
2746         },
2747 };
2748
2749 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
2750                                      int flags, struct rocker_ctrl *ctrl,
2751                                      __be16 vlan_id)
2752 {
2753         u32 in_lport = rocker_port->lport;
2754         u32 in_lport_mask = 0xffffffff;
2755         u32 out_lport = 0;
2756         u8 *eth_src = NULL;
2757         u8 *eth_src_mask = NULL;
2758         __be16 vlan_id_mask = htons(0xffff);
2759         u8 ip_proto = 0;
2760         u8 ip_proto_mask = 0;
2761         u8 ip_tos = 0;
2762         u8 ip_tos_mask = 0;
2763         u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
2764         int err;
2765
2766         err = rocker_flow_tbl_acl(rocker_port, flags,
2767                                   in_lport, in_lport_mask,
2768                                   eth_src, eth_src_mask,
2769                                   ctrl->eth_dst, ctrl->eth_dst_mask,
2770                                   ctrl->eth_type,
2771                                   vlan_id, vlan_id_mask,
2772                                   ip_proto, ip_proto_mask,
2773                                   ip_tos, ip_tos_mask,
2774                                   group_id);
2775
2776         if (err)
2777                 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
2778
2779         return err;
2780 }
2781
2782 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
2783                                         int flags, struct rocker_ctrl *ctrl,
2784                                         __be16 vlan_id)
2785 {
2786         enum rocker_of_dpa_table_id goto_tbl =
2787                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2788         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
2789         u32 tunnel_id = 0;
2790         int err;
2791
2792         if (!rocker_port_is_bridged(rocker_port))
2793                 return 0;
2794
2795         err = rocker_flow_tbl_bridge(rocker_port, flags,
2796                                      ctrl->eth_dst, ctrl->eth_dst_mask,
2797                                      vlan_id, tunnel_id,
2798                                      goto_tbl, group_id, ctrl->copy_to_cpu);
2799
2800         if (err)
2801                 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
2802
2803         return err;
2804 }
2805
2806 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
2807                                       int flags, struct rocker_ctrl *ctrl,
2808                                       __be16 vlan_id)
2809 {
2810         u32 in_lport_mask = 0xffffffff;
2811         __be16 vlan_id_mask = htons(0xffff);
2812         int err;
2813
2814         if (ntohs(vlan_id) == 0)
2815                 vlan_id = rocker_port->internal_vlan_id;
2816
2817         err = rocker_flow_tbl_term_mac(rocker_port,
2818                                        rocker_port->lport, in_lport_mask,
2819                                        ctrl->eth_type, ctrl->eth_dst,
2820                                        ctrl->eth_dst_mask, vlan_id,
2821                                        vlan_id_mask, ctrl->copy_to_cpu,
2822                                        flags);
2823
2824         if (err)
2825                 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
2826
2827         return err;
2828 }
2829
2830 static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
2831                                  struct rocker_ctrl *ctrl, __be16 vlan_id)
2832 {
2833         if (ctrl->acl)
2834                 return rocker_port_ctrl_vlan_acl(rocker_port, flags,
2835                                                  ctrl, vlan_id);
2836         if (ctrl->bridge)
2837                 return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
2838                                                     ctrl, vlan_id);
2839
2840         if (ctrl->term)
2841                 return rocker_port_ctrl_vlan_term(rocker_port, flags,
2842                                                   ctrl, vlan_id);
2843
2844         return -EOPNOTSUPP;
2845 }
2846
2847 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
2848                                      int flags, __be16 vlan_id)
2849 {
2850         int err = 0;
2851         int i;
2852
2853         for (i = 0; i < ROCKER_CTRL_MAX; i++) {
2854                 if (rocker_port->ctrls[i]) {
2855                         err = rocker_port_ctrl_vlan(rocker_port, flags,
2856                                                     &rocker_ctrls[i], vlan_id);
2857                         if (err)
2858                                 return err;
2859                 }
2860         }
2861
2862         return err;
2863 }
2864
2865 static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
2866                             struct rocker_ctrl *ctrl)
2867 {
2868         u16 vid;
2869         int err = 0;
2870
2871         for (vid = 1; vid < VLAN_N_VID; vid++) {
2872                 if (!test_bit(vid, rocker_port->vlan_bitmap))
2873                         continue;
2874                 err = rocker_port_ctrl_vlan(rocker_port, flags,
2875                                             ctrl, htons(vid));
2876                 if (err)
2877                         break;
2878         }
2879
2880         return err;
2881 }
2882
/* Add or remove VLAN @vid on the port: ctrl policies (on add), the L2
 * interface groups, the VLAN flood group, and finally the VLAN table
 * entry mapping vid to the internal VLAN id.
 */
static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
			    u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_lport = rocker_port->lport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	int err;

	/* vid 0 maps to the port's internal VLAN (untagged traffic) */
	internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);

	/* Update vlan_bitmap before the helpers run: they count member
	 * ports via the bitmap to detect first-join/last-leave.
	 */
	if (adding && test_and_set_bit(ntohs(internal_vlan_id),
				       rocker_port->vlan_bitmap))
			return 0; /* already added */
	else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
						rocker_port->vlan_bitmap))
			return 0; /* already removed */

	if (adding) {
		err = rocker_port_ctrl_vlan_add(rocker_port, flags,
						internal_vlan_id);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port ctrl vlan add\n", err);
			return err;
		}
	}

	err = rocker_port_vlan_l2_groups(rocker_port, flags,
					 internal_vlan_id, untagged);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 groups\n", err);
		return err;
	}

	err = rocker_port_vlan_flood_group(rocker_port, flags,
					   internal_vlan_id);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);
		return err;
	}

	err = rocker_flow_tbl_vlan(rocker_port, flags,
				   in_lport, vlan_id, vlan_id_mask,
				   goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN table\n", err);

	return err;
}
2940
2941 static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
2942 {
2943         enum rocker_of_dpa_table_id goto_tbl;
2944         u32 in_lport;
2945         u32 in_lport_mask;
2946         int err;
2947
2948         /* Normal Ethernet Frames.  Matches pkts from any local physical
2949          * ports.  Goto VLAN tbl.
2950          */
2951
2952         in_lport = 0;
2953         in_lport_mask = 0xffff0000;
2954         goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
2955
2956         err = rocker_flow_tbl_ig_port(rocker_port, flags,
2957                                       in_lport, in_lport_mask,
2958                                       goto_tbl);
2959         if (err)
2960                 netdev_err(rocker_port->dev,
2961                            "Error (%d) ingress port table entry\n", err);
2962
2963         return err;
2964 }
2965
/* Deferred work item used to notify the bridge driver of FDB
 * learn/forget events from process context.
 */
struct rocker_fdb_learn_work {
	struct work_struct work;
	struct net_device *dev;	/* rocker port netdev the MAC was seen on */
	int flags;		/* ROCKER_OP_FLAG_* from the originating op */
	u8 addr[ETH_ALEN];	/* learned/forgotten MAC address */
	u16 vid;		/* VLAN id passed to the br_fdb_* calls */
};
2973
2974 static void rocker_port_fdb_learn_work(struct work_struct *work)
2975 {
2976         struct rocker_fdb_learn_work *lw =
2977                 container_of(work, struct rocker_fdb_learn_work, work);
2978         bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
2979         bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
2980
2981         if (learned & removing)
2982                 br_fdb_external_learn_del(lw->dev, lw->addr, lw->vid);
2983         else if (learned & !removing)
2984                 br_fdb_external_learn_add(lw->dev, lw->addr, lw->vid);
2985
2986         kfree(work);
2987 }
2988
/* Program the bridging table for a learned/forgotten MAC and, for
 * bridged ports, schedule a work item to notify the bridge driver.
 * NOTE(review): the notification is deferred presumably because this
 * can be called in atomic context (see rocker_op_flags_gfp) — confirm.
 */
static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
				 int flags, const u8 *addr, __be16 vlan_id)
{
	struct rocker_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_lport = rocker_port->lport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool copy_to_cpu = false;
	int err;

	if (rocker_port_is_bridged(rocker_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);

	/* REFRESH only restarts aging; no need to rewrite the flow entry */
	if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
		err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
					     vlan_id, tunnel_id, goto_tbl,
					     group_id, copy_to_cpu);
		if (err)
			return err;
	}

	/* only bridged ports notify the bridge driver */
	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, rocker_port_fdb_learn_work);

	lw->dev = rocker_port->dev;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);

	schedule_work(&lw->work);

	return 0;
}
3030
3031 static struct rocker_fdb_tbl_entry *
3032 rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
3033 {
3034         struct rocker_fdb_tbl_entry *found;
3035
3036         hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3037                 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3038                         return found;
3039
3040         return NULL;
3041 }
3042
3043 static int rocker_port_fdb(struct rocker_port *rocker_port,
3044                            const unsigned char *addr,
3045                            __be16 vlan_id, int flags)
3046 {
3047         struct rocker *rocker = rocker_port->rocker;
3048         struct rocker_fdb_tbl_entry *fdb;
3049         struct rocker_fdb_tbl_entry *found;
3050         bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3051         unsigned long lock_flags;
3052
3053         fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
3054         if (!fdb)
3055                 return -ENOMEM;
3056
3057         fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3058         fdb->key.lport = rocker_port->lport;
3059         ether_addr_copy(fdb->key.addr, addr);
3060         fdb->key.vlan_id = vlan_id;
3061         fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3062
3063         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3064
3065         found = rocker_fdb_tbl_find(rocker, fdb);
3066
3067         if (removing && found) {
3068                 kfree(fdb);
3069                 hash_del(&found->entry);
3070         } else if (!removing && !found) {
3071                 hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
3072         }
3073
3074         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3075
3076         /* Check if adding and already exists, or removing and can't find */
3077         if (!found != !removing) {
3078                 kfree(fdb);
3079                 if (!found && removing)
3080                         return 0;
3081                 /* Refreshing existing to update aging timers */
3082                 flags |= ROCKER_OP_FLAG_REFRESH;
3083         }
3084
3085         return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
3086 }
3087
3088 static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
3089 {
3090         struct rocker *rocker = rocker_port->rocker;
3091         struct rocker_fdb_tbl_entry *found;
3092         unsigned long lock_flags;
3093         int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
3094         struct hlist_node *tmp;
3095         int bkt;
3096         int err = 0;
3097
3098         if (rocker_port->stp_state == BR_STATE_LEARNING ||
3099             rocker_port->stp_state == BR_STATE_FORWARDING)
3100                 return 0;
3101
3102         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3103
3104         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3105                 if (found->key.lport != rocker_port->lport)
3106                         continue;
3107                 if (!found->learned)
3108                         continue;
3109                 err = rocker_port_fdb_learn(rocker_port, flags,
3110                                             found->key.addr,
3111                                             found->key.vlan_id);
3112                 if (err)
3113                         goto err_out;
3114                 hash_del(&found->entry);
3115         }
3116
3117 err_out:
3118         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3119
3120         return err;
3121 }
3122
3123 static int rocker_port_router_mac(struct rocker_port *rocker_port,
3124                                   int flags, __be16 vlan_id)
3125 {
3126         u32 in_lport_mask = 0xffffffff;
3127         __be16 eth_type;
3128         const u8 *dst_mac_mask = ff_mac;
3129         __be16 vlan_id_mask = htons(0xffff);
3130         bool copy_to_cpu = false;
3131         int err;
3132
3133         if (ntohs(vlan_id) == 0)
3134                 vlan_id = rocker_port->internal_vlan_id;
3135
3136         eth_type = htons(ETH_P_IP);
3137         err = rocker_flow_tbl_term_mac(rocker_port,
3138                                        rocker_port->lport, in_lport_mask,
3139                                        eth_type, rocker_port->dev->dev_addr,
3140                                        dst_mac_mask, vlan_id, vlan_id_mask,
3141                                        copy_to_cpu, flags);
3142         if (err)
3143                 return err;
3144
3145         eth_type = htons(ETH_P_IPV6);
3146         err = rocker_flow_tbl_term_mac(rocker_port,
3147                                        rocker_port->lport, in_lport_mask,
3148                                        eth_type, rocker_port->dev->dev_addr,
3149                                        dst_mac_mask, vlan_id, vlan_id_mask,
3150                                        copy_to_cpu, flags);
3151
3152         return err;
3153 }
3154
3155 static int rocker_port_fwding(struct rocker_port *rocker_port)
3156 {
3157         bool pop_vlan;
3158         u32 out_lport;
3159         __be16 vlan_id;
3160         u16 vid;
3161         int flags = ROCKER_OP_FLAG_NOWAIT;
3162         int err;
3163
3164         /* Port will be forwarding-enabled if its STP state is LEARNING
3165          * or FORWARDING.  Traffic from CPU can still egress, regardless of
3166          * port STP state.  Use L2 interface group on port VLANs as a way
3167          * to toggle port forwarding: if forwarding is disabled, L2
3168          * interface group will not exist.
3169          */
3170
3171         if (rocker_port->stp_state != BR_STATE_LEARNING &&
3172             rocker_port->stp_state != BR_STATE_FORWARDING)
3173                 flags |= ROCKER_OP_FLAG_REMOVE;
3174
3175         out_lport = rocker_port->lport;
3176         for (vid = 1; vid < VLAN_N_VID; vid++) {
3177                 if (!test_bit(vid, rocker_port->vlan_bitmap))
3178                         continue;
3179                 vlan_id = htons(vid);
3180                 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3181                 err = rocker_group_l2_interface(rocker_port, flags,
3182                                                 vlan_id, out_lport,
3183                                                 pop_vlan);
3184                 if (err) {
3185                         netdev_err(rocker_port->dev,
3186                                    "Error (%d) port VLAN l2 group for lport %d\n",
3187                                    err, out_lport);
3188                         return err;
3189                 }
3190         }
3191
3192         return 0;
3193 }
3194
/* Apply a new bridge STP @state to the port.
 *
 * Builds the wanted set of control (ctrl) entries for the new state,
 * then adds/removes only the entries whose wanted state differs from
 * the cached state in rocker_port->ctrls[].  Afterwards the FDB is
 * flushed and forwarding (L2 interface groups) is re-evaluated.
 *
 * Returns 0 on success or a negative error code from the hardware ops.
 */
static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
{
        bool want[ROCKER_CTRL_MAX] = { 0, };
        int flags;
        int err;
        int i;

        /* Nothing to do if the state is unchanged */
        if (rocker_port->stp_state == state)
                return 0;

        /* Record the new state first: rocker_port_fwding() (called at
         * the bottom) keys off rocker_port->stp_state.
         */
        rocker_port->stp_state = state;

        switch (state) {
        case BR_STATE_DISABLED:
                /* port is completely disabled */
                break;
        case BR_STATE_LISTENING:
        case BR_STATE_BLOCKING:
                /* only link-local multicast (e.g. STP BPDUs) reaches CPU */
                want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
                break;
        case BR_STATE_LEARNING:
        case BR_STATE_FORWARDING:
                want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
                want[ROCKER_CTRL_IPV4_MCAST] = true;
                want[ROCKER_CTRL_IPV6_MCAST] = true;
                /* bridged ports get default bridging; standalone ports
                 * trap ARP for the local stack instead
                 */
                if (rocker_port_is_bridged(rocker_port))
                        want[ROCKER_CTRL_DFLT_BRIDGING] = true;
                else
                        want[ROCKER_CTRL_LOCAL_ARP] = true;
                break;
        }

        /* Sync hardware with the wanted set; the cache entry is only
         * updated after the hardware op succeeded, so a failed op can
         * be retried on a later state change.
         */
        for (i = 0; i < ROCKER_CTRL_MAX; i++) {
                if (want[i] != rocker_port->ctrls[i]) {
                        flags = ROCKER_OP_FLAG_NOWAIT |
                                (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
                        err = rocker_port_ctrl(rocker_port, flags,
                                               &rocker_ctrls[i]);
                        if (err)
                                return err;
                        rocker_port->ctrls[i] = want[i];
                }
        }

        /* Learned entries must not survive an STP state change */
        err = rocker_port_fdb_flush(rocker_port);
        if (err)
                return err;

        return rocker_port_fwding(rocker_port);
}
3245
3246 static struct rocker_internal_vlan_tbl_entry *
3247 rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
3248 {
3249         struct rocker_internal_vlan_tbl_entry *found;
3250
3251         hash_for_each_possible(rocker->internal_vlan_tbl, found,
3252                                entry, ifindex) {
3253                 if (found->ifindex == ifindex)
3254                         return found;
3255         }
3256
3257         return NULL;
3258 }
3259
/* Get (creating if necessary) the internal VLAN ID for the net device
 * identified by @ifindex, taking a reference on the table entry.
 *
 * The entry is preallocated with GFP_KERNEL *before* taking the
 * spinlock, since sleeping allocations are not allowed under it; the
 * preallocation is freed if another caller raced us into the table.
 *
 * Returns the (network-order) internal VLAN ID, or 0 if the entry
 * could not be allocated.
 * NOTE(review): when all ROCKER_N_INTERNAL_VLANS are exhausted, the
 * entry remains in the table with vlan_id 0 and 0 is returned; callers
 * do not appear to check for this — confirm intended.
 */
static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
                                               int ifindex)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_internal_vlan_tbl_entry *entry;
        struct rocker_internal_vlan_tbl_entry *found;
        unsigned long lock_flags;
        int i;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return 0;

        entry->ifindex = ifindex;

        spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

        found = rocker_internal_vlan_tbl_find(rocker, ifindex);
        if (found) {
                /* lost the race; use the existing entry */
                kfree(entry);
                goto found;
        }

        found = entry;
        hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);

        /* claim the first free internal VLAN ID */
        for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
                if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
                        continue;
                found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
                goto found;
        }

        netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");

found:
        found->ref_count++;
        spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);

        return found->vlan_id;
}
3301
/* Drop a reference on the internal VLAN entry for @ifindex; on the
 * last reference the VLAN ID is returned to the bitmap and the entry
 * is removed from the table and freed.
 *
 * NOTE(review): assumes the entry's vlan_id was successfully allocated;
 * for an exhausted-pool entry (vlan_id 0) the bit computation below
 * would underflow — confirm that case cannot reach here.
 */
static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
                                             int ifindex)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_internal_vlan_tbl_entry *found;
        unsigned long lock_flags;
        unsigned long bit;

        spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

        found = rocker_internal_vlan_tbl_find(rocker, ifindex);
        if (!found) {
                netdev_err(rocker_port->dev,
                           "ifindex (%d) not found in internal VLAN tbl\n",
                           ifindex);
                goto not_found;
        }

        /* last reference: release the VLAN ID and the entry */
        if (--found->ref_count <= 0) {
                bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
                clear_bit(bit, rocker->internal_vlan_bitmap);
                hash_del(&found->entry);
                kfree(found);
        }

not_found:
        spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}
3330
3331 /*****************
3332  * Net device ops
3333  *****************/
3334
/* ndo_open: bring the port up.
 *
 * Order matters: DMA rings first, then the per-port tx/rx MSI-X irqs,
 * then the initial STP state, and only then NAPI/queue enable.  The
 * error labels unwind in exactly the reverse order.
 */
static int rocker_port_open(struct net_device *dev)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        /* bridged ports start BLOCKING and wait for the bridge to move
         * them; standalone ports go straight to FORWARDING
         */
        u8 stp_state = rocker_port_is_bridged(rocker_port) ?
                BR_STATE_BLOCKING : BR_STATE_FORWARDING;
        int err;

        err = rocker_port_dma_rings_init(rocker_port);
        if (err)
                return err;

        err = request_irq(rocker_msix_tx_vector(rocker_port),
                          rocker_tx_irq_handler, 0,
                          rocker_driver_name, rocker_port);
        if (err) {
                netdev_err(rocker_port->dev, "cannot assign tx irq\n");
                goto err_request_tx_irq;
        }

        err = request_irq(rocker_msix_rx_vector(rocker_port),
                          rocker_rx_irq_handler, 0,
                          rocker_driver_name, rocker_port);
        if (err) {
                netdev_err(rocker_port->dev, "cannot assign rx irq\n");
                goto err_request_rx_irq;
        }

        err = rocker_port_stp_update(rocker_port, stp_state);
        if (err)
                goto err_stp_update;

        napi_enable(&rocker_port->napi_tx);
        napi_enable(&rocker_port->napi_rx);
        rocker_port_set_enable(rocker_port, true);
        netif_start_queue(dev);
        return 0;

err_stp_update:
        free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
        free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
        rocker_port_dma_rings_fini(rocker_port);
        return err;
}
3380
/* ndo_stop: tear the port down in the reverse order of
 * rocker_port_open(): stop queue, disable hardware, quiesce NAPI,
 * disable the port in the switch (STP DISABLED), then release irqs
 * and DMA rings.  Always returns 0.
 */
static int rocker_port_stop(struct net_device *dev)
{
        struct rocker_port *rocker_port = netdev_priv(dev);

        netif_stop_queue(dev);
        rocker_port_set_enable(rocker_port, false);
        napi_disable(&rocker_port->napi_rx);
        napi_disable(&rocker_port->napi_tx);
        rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
        free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
        free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
        rocker_port_dma_rings_fini(rocker_port);

        return 0;
}
3396
3397 static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
3398                                        struct rocker_desc_info *desc_info)
3399 {
3400         struct rocker *rocker = rocker_port->rocker;
3401         struct pci_dev *pdev = rocker->pdev;
3402         struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
3403         struct rocker_tlv *attr;
3404         int rem;
3405
3406         rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
3407         if (!attrs[ROCKER_TLV_TX_FRAGS])
3408                 return;
3409         rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
3410                 struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
3411                 dma_addr_t dma_handle;
3412                 size_t len;
3413
3414                 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
3415                         continue;
3416                 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
3417                                         attr);
3418                 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
3419                     !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
3420                         continue;
3421                 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
3422                 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
3423                 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
3424         }
3425 }
3426
3427 static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
3428                                        struct rocker_desc_info *desc_info,
3429                                        char *buf, size_t buf_len)
3430 {
3431         struct rocker *rocker = rocker_port->rocker;
3432         struct pci_dev *pdev = rocker->pdev;
3433         dma_addr_t dma_handle;
3434         struct rocker_tlv *frag;
3435
3436         dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
3437         if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
3438                 if (net_ratelimit())
3439                         netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
3440                 return -EIO;
3441         }
3442         frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
3443         if (!frag)
3444                 goto unmap_frag;
3445         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
3446                                dma_handle))
3447                 goto nest_cancel;
3448         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
3449                                buf_len))
3450                 goto nest_cancel;
3451         rocker_tlv_nest_end(desc_info, frag);
3452         return 0;
3453
3454 nest_cancel:
3455         rocker_tlv_nest_cancel(desc_info, frag);
3456 unmap_frag:
3457         pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
3458         return -EMSGSIZE;
3459 }
3460
3461 static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
3462 {
3463         struct rocker_port *rocker_port = netdev_priv(dev);
3464         struct rocker *rocker = rocker_port->rocker;
3465         struct rocker_desc_info *desc_info;
3466         struct rocker_tlv *frags;
3467         int i;
3468         int err;
3469
3470         desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3471         if (unlikely(!desc_info)) {
3472                 if (net_ratelimit())
3473                         netdev_err(dev, "tx ring full when queue awake\n");
3474                 return NETDEV_TX_BUSY;
3475         }
3476
3477         rocker_desc_cookie_ptr_set(desc_info, skb);
3478
3479         frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
3480         if (!frags)
3481                 goto out;
3482         err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3483                                           skb->data, skb_headlen(skb));
3484         if (err)
3485                 goto nest_cancel;
3486         if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
3487                 goto nest_cancel;
3488
3489         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3490                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3491
3492                 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3493                                                   skb_frag_address(frag),
3494                                                   skb_frag_size(frag));
3495                 if (err)
3496                         goto unmap_frags;
3497         }
3498         rocker_tlv_nest_end(desc_info, frags);
3499
3500         rocker_desc_gen_clear(desc_info);
3501         rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
3502
3503         desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3504         if (!desc_info)
3505                 netif_stop_queue(dev);
3506
3507         return NETDEV_TX_OK;
3508
3509 unmap_frags:
3510         rocker_tx_desc_frags_unmap(rocker_port, desc_info);
3511 nest_cancel:
3512         rocker_tlv_nest_cancel(desc_info, frags);
3513 out:
3514         dev_kfree_skb(skb);
3515         return NETDEV_TX_OK;
3516 }
3517
3518 static int rocker_port_set_mac_address(struct net_device *dev, void *p)
3519 {
3520         struct sockaddr *addr = p;
3521         struct rocker_port *rocker_port = netdev_priv(dev);
3522         int err;
3523
3524         if (!is_valid_ether_addr(addr->sa_data))
3525                 return -EADDRNOTAVAIL;
3526
3527         err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
3528         if (err)
3529                 return err;
3530         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3531         return 0;
3532 }
3533
3534 static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
3535                                        __be16 proto, u16 vid)
3536 {
3537         struct rocker_port *rocker_port = netdev_priv(dev);
3538         int err;
3539
3540         err = rocker_port_vlan(rocker_port, 0, vid);
3541         if (err)
3542                 return err;
3543
3544         return rocker_port_router_mac(rocker_port, 0, htons(vid));
3545 }
3546
3547 static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
3548                                         __be16 proto, u16 vid)
3549 {
3550         struct rocker_port *rocker_port = netdev_priv(dev);
3551         int err;
3552
3553         err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
3554                                      htons(vid));
3555         if (err)
3556                 return err;
3557
3558         return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
3559 }
3560
3561 static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
3562                                struct net_device *dev,
3563                                const unsigned char *addr, u16 vid,
3564                                u16 nlm_flags)
3565 {
3566         struct rocker_port *rocker_port = netdev_priv(dev);
3567         __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
3568         int flags = 0;
3569
3570         if (!rocker_port_is_bridged(rocker_port))
3571                 return -EINVAL;
3572
3573         return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
3574 }
3575
3576 static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
3577                                struct net_device *dev,
3578                                const unsigned char *addr, u16 vid)
3579 {
3580         struct rocker_port *rocker_port = netdev_priv(dev);
3581         __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
3582         int flags = ROCKER_OP_FLAG_REMOVE;
3583
3584         if (!rocker_port_is_bridged(rocker_port))
3585                 return -EINVAL;
3586
3587         return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
3588 }
3589
3590 static int rocker_port_switch_parent_id_get(struct net_device *dev,
3591                                             struct netdev_phys_item_id *psid)
3592 {
3593         struct rocker_port *rocker_port = netdev_priv(dev);
3594         struct rocker *rocker = rocker_port->rocker;
3595
3596         psid->id_len = sizeof(rocker->hw.id);
3597         memcpy(&psid->id, &rocker->hw.id, psid->id_len);
3598         return 0;
3599 }
3600
3601 static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state)
3602 {
3603         struct rocker_port *rocker_port = netdev_priv(dev);
3604
3605         return rocker_port_stp_update(rocker_port, state);
3606 }
3607
/* Net device operations for a rocker port, including the switchdev
 * (ndo_switch_*) offload hooks.
 */
static const struct net_device_ops rocker_port_netdev_ops = {
        .ndo_open                       = rocker_port_open,
        .ndo_stop                       = rocker_port_stop,
        .ndo_start_xmit                 = rocker_port_xmit,
        .ndo_set_mac_address            = rocker_port_set_mac_address,
        .ndo_vlan_rx_add_vid            = rocker_port_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid           = rocker_port_vlan_rx_kill_vid,
        .ndo_fdb_add                    = rocker_port_fdb_add,
        .ndo_fdb_del                    = rocker_port_fdb_del,
        .ndo_switch_parent_id_get       = rocker_port_switch_parent_id_get,
        .ndo_switch_port_stp_update     = rocker_port_switch_port_stp_update,
};
3620
3621 /********************
3622  * ethtool interface
3623  ********************/
3624
/* ethtool get_settings: read link settings straight from the device. */
static int rocker_port_get_settings(struct net_device *dev,
                                    struct ethtool_cmd *ecmd)
{
        return rocker_cmd_get_port_settings_ethtool(netdev_priv(dev), ecmd);
}
3632
/* ethtool set_settings: write link settings straight to the device. */
static int rocker_port_set_settings(struct net_device *dev,
                                    struct ethtool_cmd *ecmd)
{
        return rocker_cmd_set_port_settings_ethtool(netdev_priv(dev), ecmd);
}
3640
/* ethtool get_drvinfo: report driver name and the kernel release as
 * the driver version.
 */
static void rocker_port_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
3647
/* ethtool operations for a rocker port */
static const struct ethtool_ops rocker_port_ethtool_ops = {
        .get_settings           = rocker_port_get_settings,
        .set_settings           = rocker_port_set_settings,
        .get_drvinfo            = rocker_port_get_drvinfo,
        .get_link               = ethtool_op_get_link,
};
3654
3655 /*****************
3656  * NAPI interface
3657  *****************/
3658
/* Recover the owning rocker_port from its embedded tx NAPI context. */
static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
        return container_of(napi, struct rocker_port, napi_tx);
}
3663
/* NAPI poll for the TX ring: reap completed descriptors, unmap and
 * free their skbs, wake the queue if it was stopped, and return the
 * consumed credits to the device.  Always reports 0 work done since
 * TX completion is not budgeted.
 */
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
        struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_desc_info *desc_info;
        u32 credits = 0;
        int err;

        /* Cleanup tx descriptors */
        while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
                err = rocker_desc_err(desc_info);
                if (err && net_ratelimit())
                        netdev_err(rocker_port->dev, "tx desc received with err %d\n",
                                   err);
                /* undo the DMA mappings made at xmit time, then free
                 * the skb stashed as the descriptor cookie
                 */
                rocker_tx_desc_frags_unmap(rocker_port, desc_info);
                dev_kfree_skb_any(rocker_desc_cookie_ptr_get(desc_info));
                credits++;
        }

        /* freed descriptors may unblock a stopped queue */
        if (credits && netif_queue_stopped(rocker_port->dev))
                netif_wake_queue(rocker_port->dev);

        napi_complete(napi);
        rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

        return 0;
}
3691
/* Process one received descriptor: unmap the skb (stored as the
 * descriptor cookie), set its length from the frag-len TLV, push it up
 * the stack, and attach a fresh skb to the descriptor for reuse.
 *
 * Returns 0 on success or a negative error (-ENOENT if no skb was
 * attached, -EINVAL on a malformed descriptor, or the error from
 * replenishing the ring).
 */
static int rocker_port_rx_proc(struct rocker *rocker,
                               struct rocker_port *rocker_port,
                               struct rocker_desc_info *desc_info)
{
        struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
        struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
        size_t rx_len;

        if (!skb)
                return -ENOENT;

        rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
        if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
                return -EINVAL;

        /* give the buffer back to the CPU before touching its bytes */
        rocker_dma_rx_ring_skb_unmap(rocker, attrs);

        rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
        skb_put(skb, rx_len);
        skb->protocol = eth_type_trans(skb, rocker_port->dev);
        netif_receive_skb(skb);

        /* re-arm the descriptor with a new mapped skb */
        return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
}
3716
/* Recover the owning rocker_port from its embedded rx NAPI context. */
static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
        return container_of(napi, struct rocker_port, napi_rx);
}
3721
/* NAPI poll for the RX ring: process up to @budget completed
 * descriptors, recycle each back to the device, and return the number
 * processed.  Completing under budget ends the NAPI round per the
 * standard NAPI contract.
 */
static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
        struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_desc_info *desc_info;
        u32 credits = 0;
        int err;

        /* Process rx descriptors */
        while (credits < budget &&
               (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
                err = rocker_desc_err(desc_info);
                if (err) {
                        if (net_ratelimit())
                                netdev_err(rocker_port->dev, "rx desc received with err %d\n",
                                           err);
                } else {
                        err = rocker_port_rx_proc(rocker, rocker_port,
                                                  desc_info);
                        if (err && net_ratelimit())
                                netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
                                           err);
                }
                /* errors or not, the descriptor goes back to the device */
                rocker_desc_gen_clear(desc_info);
                rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
                credits++;
        }

        if (credits < budget)
                napi_complete(napi);

        rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

        return credits;
}
3757
3758 /*****************
3759  * PCI driver ops
3760  *****************/
3761
3762 static void rocker_carrier_init(struct rocker_port *rocker_port)
3763 {
3764         struct rocker *rocker = rocker_port->rocker;
3765         u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
3766         bool link_up;
3767
3768         link_up = link_status & (1 << rocker_port->lport);
3769         if (link_up)
3770                 netif_carrier_on(rocker_port->dev);
3771         else
3772                 netif_carrier_off(rocker_port->dev);
3773 }
3774
3775 static void rocker_remove_ports(struct rocker *rocker)
3776 {
3777         struct rocker_port *rocker_port;
3778         int i;
3779
3780         for (i = 0; i < rocker->port_count; i++) {
3781                 rocker_port = rocker->ports[i];
3782                 rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
3783                 unregister_netdev(rocker_port->dev);
3784         }
3785         kfree(rocker->ports);
3786 }
3787
3788 static void rocker_port_dev_addr_init(struct rocker *rocker,
3789                                       struct rocker_port *rocker_port)
3790 {
3791         struct pci_dev *pdev = rocker->pdev;
3792         int err;
3793
3794         err = rocker_cmd_get_port_settings_macaddr(rocker_port,
3795                                                    rocker_port->dev->dev_addr);
3796         if (err) {
3797                 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
3798                 eth_hw_addr_random(rocker_port->dev);
3799         }
3800 }
3801
/* Allocate, initialize, and register the netdev for one switch port
 * (lport = port_number + 1), then install its ingress port table
 * entry.  Error labels unwind in reverse order.
 *
 * Returns 0 on success or a negative error code.
 */
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
        struct pci_dev *pdev = rocker->pdev;
        struct rocker_port *rocker_port;
        struct net_device *dev;
        int err;

        dev = alloc_etherdev(sizeof(struct rocker_port));
        if (!dev)
                return -ENOMEM;
        rocker_port = netdev_priv(dev);
        rocker_port->dev = dev;
        rocker_port->rocker = rocker;
        rocker_port->port_number = port_number;
        /* hardware lports are 1-based */
        rocker_port->lport = port_number + 1;

        rocker_port_dev_addr_init(rocker, rocker_port);
        dev->netdev_ops = &rocker_port_netdev_ops;
        dev->ethtool_ops = &rocker_port_ethtool_ops;
        netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
                       NAPI_POLL_WEIGHT);
        netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
                       NAPI_POLL_WEIGHT);
        rocker_carrier_init(rocker_port);

        dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "register_netdev failed\n");
                goto err_register_netdev;
        }
        rocker->ports[port_number] = rocker_port;

        /* NOTE(review): internal_vlan_id_get() returns 0 on allocation
         * failure and that is not checked here — confirm acceptable.
         */
        rocker_port->internal_vlan_id =
                rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
        err = rocker_port_ig_tbl(rocker_port, 0);
        if (err) {
                dev_err(&pdev->dev, "install ig port table failed\n");
                goto err_port_ig_tbl;
        }

        return 0;

err_port_ig_tbl:
        unregister_netdev(dev);
err_register_netdev:
        free_netdev(dev);
        return err;
}
3852
3853 static int rocker_probe_ports(struct rocker *rocker)
3854 {
3855         int i;
3856         size_t alloc_size;
3857         int err;
3858
3859         alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
3860         rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
3861         for (i = 0; i < rocker->port_count; i++) {
3862                 err = rocker_probe_port(rocker, i);
3863                 if (err)
3864                         goto remove_ports;
3865         }
3866         return 0;
3867
3868 remove_ports:
3869         rocker_remove_ports(rocker);
3870         return err;
3871 }
3872
3873 static int rocker_msix_init(struct rocker *rocker)
3874 {
3875         struct pci_dev *pdev = rocker->pdev;
3876         int msix_entries;
3877         int i;
3878         int err;
3879
3880         msix_entries = pci_msix_vec_count(pdev);
3881         if (msix_entries < 0)
3882                 return msix_entries;
3883
3884         if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
3885                 return -EINVAL;
3886
3887         rocker->msix_entries = kmalloc_array(msix_entries,
3888                                              sizeof(struct msix_entry),
3889                                              GFP_KERNEL);
3890         if (!rocker->msix_entries)
3891                 return -ENOMEM;
3892
3893         for (i = 0; i < msix_entries; i++)
3894                 rocker->msix_entries[i].entry = i;
3895
3896         err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
3897         if (err < 0)
3898                 goto err_enable_msix;
3899
3900         return 0;
3901
3902 err_enable_msix:
3903         kfree(rocker->msix_entries);
3904         return err;
3905 }
3906
/* Disable MSI-X and free the vector table set up by rocker_msix_init(). */
static void rocker_msix_fini(struct rocker *rocker)
{
        pci_disable_msix(rocker->pdev);
        kfree(rocker->msix_entries);
}
3912
3913 static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3914 {
3915         struct rocker *rocker;
3916         int err;
3917
3918         rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
3919         if (!rocker)
3920                 return -ENOMEM;
3921
3922         err = pci_enable_device(pdev);
3923         if (err) {
3924                 dev_err(&pdev->dev, "pci_enable_device failed\n");
3925                 goto err_pci_enable_device;
3926         }
3927
3928         err = pci_request_regions(pdev, rocker_driver_name);
3929         if (err) {
3930                 dev_err(&pdev->dev, "pci_request_regions failed\n");
3931                 goto err_pci_request_regions;
3932         }
3933
3934         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3935         if (!err) {
3936                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3937                 if (err) {
3938                         dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
3939                         goto err_pci_set_dma_mask;
3940                 }
3941         } else {
3942                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3943                 if (err) {
3944                         dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
3945                         goto err_pci_set_dma_mask;
3946                 }
3947         }
3948
3949         if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
3950                 dev_err(&pdev->dev, "invalid PCI region size\n");
3951                 goto err_pci_resource_len_check;
3952         }
3953
3954         rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
3955                                   pci_resource_len(pdev, 0));
3956         if (!rocker->hw_addr) {
3957                 dev_err(&pdev->dev, "ioremap failed\n");
3958                 err = -EIO;
3959                 goto err_ioremap;
3960         }
3961         pci_set_master(pdev);
3962
3963         rocker->pdev = pdev;
3964         pci_set_drvdata(pdev, rocker);
3965
3966         rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
3967
3968         err = rocker_msix_init(rocker);
3969         if (err) {
3970                 dev_err(&pdev->dev, "MSI-X init failed\n");
3971                 goto err_msix_init;
3972         }
3973
3974         err = rocker_basic_hw_test(rocker);
3975         if (err) {
3976                 dev_err(&pdev->dev, "basic hw test failed\n");
3977                 goto err_basic_hw_test;
3978         }
3979
3980         rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
3981
3982         err = rocker_dma_rings_init(rocker);
3983         if (err)
3984                 goto err_dma_rings_init;
3985
3986         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
3987                           rocker_cmd_irq_handler, 0,
3988                           rocker_driver_name, rocker);
3989         if (err) {
3990                 dev_err(&pdev->dev, "cannot assign cmd irq\n");
3991                 goto err_request_cmd_irq;
3992         }
3993
3994         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
3995                           rocker_event_irq_handler, 0,
3996                           rocker_driver_name, rocker);
3997         if (err) {
3998                 dev_err(&pdev->dev, "cannot assign event irq\n");
3999                 goto err_request_event_irq;
4000         }
4001
4002         rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
4003
4004         err = rocker_init_tbls(rocker);
4005         if (err) {
4006                 dev_err(&pdev->dev, "cannot init rocker tables\n");
4007                 goto err_init_tbls;
4008         }
4009
4010         err = rocker_probe_ports(rocker);
4011         if (err) {
4012                 dev_err(&pdev->dev, "failed to probe ports\n");
4013                 goto err_probe_ports;
4014         }
4015
4016         dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
4017
4018         return 0;
4019
4020 err_probe_ports:
4021         rocker_free_tbls(rocker);
4022 err_init_tbls:
4023         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
4024 err_request_event_irq:
4025         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
4026 err_request_cmd_irq:
4027         rocker_dma_rings_fini(rocker);
4028 err_dma_rings_init:
4029 err_basic_hw_test:
4030         rocker_msix_fini(rocker);
4031 err_msix_init:
4032         iounmap(rocker->hw_addr);
4033 err_ioremap:
4034 err_pci_resource_len_check:
4035 err_pci_set_dma_mask:
4036         pci_release_regions(pdev);
4037 err_pci_request_regions:
4038         pci_disable_device(pdev);
4039 err_pci_enable_device:
4040         kfree(rocker);
4041         return err;
4042 }
4043
4044 static void rocker_remove(struct pci_dev *pdev)
4045 {
4046         struct rocker *rocker = pci_get_drvdata(pdev);
4047
4048         rocker_free_tbls(rocker);
4049         rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
4050         rocker_remove_ports(rocker);
4051         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
4052         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
4053         rocker_dma_rings_fini(rocker);
4054         rocker_msix_fini(rocker);
4055         iounmap(rocker->hw_addr);
4056         pci_release_regions(rocker->pdev);
4057         pci_disable_device(rocker->pdev);
4058         kfree(rocker);
4059 }
4060
/* PCI driver glue: binds rocker_probe/rocker_remove to the device IDs
 * in rocker_pci_id_table.
 */
static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};
4067
4068 /************************************
4069  * Net device notifier event handler
4070  ************************************/
4071
4072 static bool rocker_port_dev_check(struct net_device *dev)
4073 {
4074         return dev->netdev_ops == &rocker_port_netdev_ops;
4075 }
4076
/* Attach the port to a bridge: untagged traffic moves from the port's
 * own internal VLAN ID to the bridge's, so all bridged ports share one
 * L2 domain.  Ordering is deliberate: the untagged-VLAN entry keyed by
 * the old internal_vlan_id must be removed before internal_vlan_id is
 * repointed at the bridge's ID and the entry re-added.
 */
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
				   struct net_device *bridge)
{
	int err;

	/* Release the internal VLAN ID obtained for the standalone port
	 * (keyed by the port's own ifindex; see the matching _get in
	 * rocker_port_bridge_leave).
	 */
	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->dev->ifindex);

	rocker_port->bridge_dev = bridge;

	/* Use bridge internal VLAN ID for untagged pkts */
	err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
	if (err)
		return err;
	/* Internal VLAN ID for bridged ports is keyed by the bridge's
	 * ifindex, so every member port resolves to the same ID.
	 */
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 bridge->ifindex);
	err = rocker_port_vlan(rocker_port, 0, 0);

	return err;
}
4098
/* Detach the port from its bridge: untagged traffic switches back from
 * the bridge's internal VLAN ID to the port's own.  Mirror image of
 * rocker_port_bridge_join, with the same remove-before-switch ordering.
 * NOTE(review): rocker_port->bridge_dev is dereferenced unconditionally;
 * callers must only invoke this for a port that actually joined a bridge.
 */
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	int err;

	/* Release the bridge-keyed internal VLAN ID taken at join time. */
	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);

	rocker_port->bridge_dev = NULL;

	/* Use port internal VLAN ID for untagged pkts */
	err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
	if (err)
		return err;
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);
	err = rocker_port_vlan(rocker_port, 0, 0);

	return err;
}
4119
4120 static int rocker_port_master_changed(struct net_device *dev)
4121 {
4122         struct rocker_port *rocker_port = netdev_priv(dev);
4123         struct net_device *master = netdev_master_upper_dev_get(dev);
4124         int err = 0;
4125
4126         if (master && master->rtnl_link_ops &&
4127             !strcmp(master->rtnl_link_ops->kind, "bridge"))
4128                 err = rocker_port_bridge_join(rocker_port, master);
4129         else
4130                 err = rocker_port_bridge_leave(rocker_port);
4131
4132         return err;
4133 }
4134
4135 static int rocker_netdevice_event(struct notifier_block *unused,
4136                                   unsigned long event, void *ptr)
4137 {
4138         struct net_device *dev;
4139         int err;
4140
4141         switch (event) {
4142         case NETDEV_CHANGEUPPER:
4143                 dev = netdev_notifier_info_to_dev(ptr);
4144                 if (!rocker_port_dev_check(dev))
4145                         return NOTIFY_DONE;
4146                 err = rocker_port_master_changed(dev);
4147                 if (err)
4148                         netdev_warn(dev,
4149                                     "failed to reflect master change (err %d)\n",
4150                                     err);
4151                 break;
4152         }
4153
4154         return NOTIFY_DONE;
4155 }
4156
/* Notifier block registered in rocker_module_init for netdevice events. */
static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};
4160
4161 /***********************
4162  * Module init and exit
4163  ***********************/
4164
4165 static int __init rocker_module_init(void)
4166 {
4167         int err;
4168
4169         register_netdevice_notifier(&rocker_netdevice_nb);
4170         err = pci_register_driver(&rocker_pci_driver);
4171         if (err)
4172                 goto err_pci_register_driver;
4173         return 0;
4174
4175 err_pci_register_driver:
4176         unregister_netdevice_notifier(&rocker_netdevice_nb);
4177         return err;
4178 }
4179
/* Module exit: unhook the netdevice notifier before unregistering the
 * PCI driver (same order as the error path in rocker_module_init).
 */
static void __exit rocker_module_exit(void)
{
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}
4185
/* Module entry points and metadata. */
module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);