Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[firefly-linux-kernel-4.4.55.git] / drivers / net / wireless / ath / wil6210 / txrx.c
1 /*
2  * Copyright (c) 2012 Qualcomm Atheros, Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16
17 #include <linux/kernel.h>
18 #include <linux/netdevice.h>
19 #include <linux/etherdevice.h>
20 #include <linux/hardirq.h>
21 #include <net/ieee80211_radiotap.h>
22 #include <linux/if_arp.h>
23 #include <linux/moduleparam.h>
24
25 #include "wil6210.h"
26 #include "wmi.h"
27 #include "txrx.h"
28
29 static bool rtap_include_phy_info;
30 module_param(rtap_include_phy_info, bool, S_IRUGO);
31 MODULE_PARM_DESC(rtap_include_phy_info,
32                  " Include PHY info in the radiotap header, default - no");
33
34 static inline int wil_vring_is_empty(struct vring *vring)
35 {
36         return vring->swhead == vring->swtail;
37 }
38
39 static inline u32 wil_vring_next_tail(struct vring *vring)
40 {
41         return (vring->swtail + 1) % vring->size;
42 }
43
44 static inline void wil_vring_advance_head(struct vring *vring, int n)
45 {
46         vring->swhead = (vring->swhead + n) % vring->size;
47 }
48
49 static inline int wil_vring_is_full(struct vring *vring)
50 {
51         return wil_vring_next_tail(vring) == vring->swhead;
52 }
53 /*
54  * Available space in Tx Vring
55  */
56 static inline int wil_vring_avail_tx(struct vring *vring)
57 {
58         u32 swhead = vring->swhead;
59         u32 swtail = vring->swtail;
60         int used = (vring->size + swhead - swtail) % vring->size;
61
62         return vring->size - used - 1;
63 }
64
/**
 * wil_vring_alloc - allocate a Tx/Rx descriptor ring
 *
 * Allocates the per-descriptor context array (one pointer per slot) and
 * the DMA-coherent descriptor memory, then marks every descriptor as
 * SW-owned. On failure both vring->va and vring->ctx are left NULL.
 *
 * Returns 0 or -ENOMEM.
 */
static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	/* HW descriptor layout is fixed at 32 bytes */
	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}
	/*
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent
	 */
	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}
	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *d = &(vring->va[i].tx);
		d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
		     vring->va, (unsigned long long)vring->pa, vring->ctx);

	return 0;
}
104
105 static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
106                            int tx)
107 {
108         struct device *dev = wil_to_dev(wil);
109         size_t sz = vring->size * sizeof(vring->va[0]);
110
111         while (!wil_vring_is_empty(vring)) {
112                 if (tx) {
113                         volatile struct vring_tx_desc *d =
114                                         &vring->va[vring->swtail].tx;
115                         dma_addr_t pa = d->dma.addr_low |
116                                         ((u64)d->dma.addr_high << 32);
117                         struct sk_buff *skb = vring->ctx[vring->swtail];
118                         if (skb) {
119                                 dma_unmap_single(dev, pa, d->dma.length,
120                                                  DMA_TO_DEVICE);
121                                 dev_kfree_skb_any(skb);
122                                 vring->ctx[vring->swtail] = NULL;
123                         } else {
124                                 dma_unmap_page(dev, pa, d->dma.length,
125                                                DMA_TO_DEVICE);
126                         }
127                         vring->swtail = wil_vring_next_tail(vring);
128                 } else { /* rx */
129                         volatile struct vring_rx_desc *d =
130                                         &vring->va[vring->swtail].rx;
131                         dma_addr_t pa = d->dma.addr_low |
132                                         ((u64)d->dma.addr_high << 32);
133                         struct sk_buff *skb = vring->ctx[vring->swhead];
134                         dma_unmap_single(dev, pa, d->dma.length,
135                                          DMA_FROM_DEVICE);
136                         kfree_skb(skb);
137                         wil_vring_advance_head(vring, 1);
138                 }
139         }
140         dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
141         kfree(vring->ctx);
142         vring->pa = 0;
143         vring->va = NULL;
144         vring->ctx = NULL;
145 }
146
/**
 * Allocate one skb for Rx VRING
 *
 * Allocates an skb of RX_BUF_LEN bytes (plus @headroom for a possible
 * radiotap header), DMA-maps it for device write and programs
 * descriptor @i so that HW owns it. The skb is remembered in
 * vring->ctx[i] for later reaping.
 *
 * Returns 0 or -ENOMEM.
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = RX_BUF_LEN;
	volatile struct vring_rx_desc *d = &(vring->va[i].rx);
	dma_addr_t pa;

	/* TODO align */
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	/* BIT(9) is undocumented; the Tx path sets the same bit */
	d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
	d->dma.addr_low = lower_32_bits(pa);
	d->dma.addr_high = (u16)upper_32_bits(pa);
	/* ip_length don't care */
	/* b11 don't care */
	/* error don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = sz;
	vring->ctx[i] = skb;

	return 0;
}
186
/**
 * Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - Phy info
 *
 * Expands the skb headroom if needed and pushes a radiotap header with
 * flags, channel and MCS fields. When the rtap_include_phy_info module
 * parameter is set, a vendor namespace carrying the raw Rx descriptor
 * plus PHY data is appended as well.
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb,
				       volatile struct vring_rx_desc *d)
{
	struct wireless_dev *wdev = wil->wdev;
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
	struct wil6210_rtap_vendor {
		struct wil6210_rtap rtap;
		/* vendor */
		u8 vendor_oui[3] __aligned(2);
		u8 vendor_ns;
		__le16 vendor_skip;
		u8 vendor_data[0];
	} __packed;
	struct wil6210_rtap_vendor *rtap_vendor;
	int rtap_len = sizeof(struct wil6210_rtap);
	int phy_length = 0; /* phy info header size, bytes */
	/* NOTE(review): static buffer shared across calls — assumes Rx
	 * reaping is serialized (single caller context); verify
	 */
	static char phy_data[128];
	struct ieee80211_channel *ch = wdev->preset_chandef.chan;

	if (rtap_include_phy_info) {
		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
		/* calculate additional length */
		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
			/**
			 * PHY info starts from 8-byte boundary
			 * there are 8-byte lines, last line may be partially
			 * written (HW bug), thus FW configures for last line
			 * to be excessive. Driver skips this last line.
			 */
			int len = min_t(int, 8 + sizeof(phy_data),
					wil_rxdesc_phy_length(d));
			if (len > 8) {
				void *p = skb_tail_pointer(skb);
				void *pa = PTR_ALIGN(p, 8);
				if (skb_tailroom(skb) >= len + (pa - p)) {
					phy_length = len - 8;
					memcpy(phy_data, pa, phy_length);
				}
			}
		}
		rtap_len += phy_length;
	}

	/* make room in front of the packet for the radiotap header */
	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
		wil_err(wil, "Unable to expand headrom to %d\n", rtap_len);
		return;
	}

	rtap_vendor = (void *)skb_push(skb, rtap_len);
	memset(rtap_vendor, 0, rtap_len);

	rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
	rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
			(1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_MCS));
	/* any Rx error is reported to userspace as "Bad FCS" */
	if (d->dma.status & RX_DMA_STATUS_ERROR)
		rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;

	/* 58320 MHz = 60 GHz channel 1, used when no channel is set */
	rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
	rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);

	rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
	rtap_vendor->rtap.mcs_flags = 0;
	rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);

	if (rtap_include_phy_info) {
		rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
				IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
		/* OUI for Wilocity 04:ce:14 */
		rtap_vendor->vendor_oui[0] = 0x04;
		rtap_vendor->vendor_oui[1] = 0xce;
		rtap_vendor->vendor_oui[2] = 0x14;
		rtap_vendor->vendor_ns = 1;
		/* Rx descriptor + PHY data  */
		rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
						       phy_length);
		memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
		memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
		       phy_length);
	}
}
293
294 /*
295  * Fast swap in place between 2 registers
296  */
297 static void wil_swap_u16(u16 *a, u16 *b)
298 {
299         *a ^= *b;
300         *b ^= *a;
301         *a ^= *b;
302 }
303
304 static void wil_swap_ethaddr(void *data)
305 {
306         struct ethhdr *eth = data;
307         u16 *s = (u16 *)eth->h_source;
308         u16 *d = (u16 *)eth->h_dest;
309
310         wil_swap_u16(s++, d++);
311         wil_swap_u16(s++, d++);
312         wil_swap_u16(s, d);
313 }
314
/**
 * reap 1 frame from @swhead
 *
 * Unmaps the buffer, trims the skb to the received length and, depending
 * on interface type, prepends a radiotap header (monitor mode) or
 * validates the frame (data-type check, minimal length, ToDS address
 * swap workaround).
 *
 * Returns the skb ready for the stack, or NULL when the ring is empty,
 * HW still owns the descriptor, or the frame was dropped here.
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct net_device *ndev = wil_to_ndev(wil);
	volatile struct vring_rx_desc *d;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int sz = RX_BUF_LEN;
	u8 ftype;
	u8 ds_bits;

	if (wil_vring_is_empty(vring))
		return NULL;

	d = &(vring->va[vring->swhead].rx);
	if (!(d->dma.status & RX_DMA_STATUS_DU)) {
		/* it is not error, we just reached end of Rx done area */
		return NULL;
	}

	pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
	skb = vring->ctx[vring->swhead];
	/* buffer was mapped for full RX_BUF_LEN in wil_vring_alloc_skb */
	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	skb_trim(skb, d->dma.length);

	wil->stats.last_mcs_rx = wil_rxdesc_mcs(d);

	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb, d);

	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* ctx[swhead] is not cleared here; the slot is expected to be
	 * re-populated by wil_rx_refill before HW reuses it
	 */
	wil_vring_advance_head(vring, 1);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;
	/*
	 * Non-data frames may be delivered through Rx DMA channel (ex: BAR)
	 * Driver should recognize it by frame type, that is found
	 * in Rx descriptor. If type is not data, it is 802.11 frame as is
	 */
	ftype = wil_rxdesc_ftype(d) << 2;
	if (ftype != IEEE80211_FTYPE_DATA) {
		wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
		/* TODO: process it */
		kfree_skb(skb);
		return NULL;
	}

	if (skb->len < ETH_HLEN) {
		wil_err(wil, "Short frame, len = %d\n", skb->len);
		/* TODO: process it (i.e. BAR) */
		kfree_skb(skb);
		return NULL;
	}

	ds_bits = wil_rxdesc_ds_bits(d);
	if (ds_bits == 1) {
		/*
		 * HW bug - in ToDS mode, i.e. Rx on AP side,
		 * addresses get swapped
		 */
		wil_swap_ethaddr(skb->data);
	}

	return skb;
}
392
/**
 * allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 *
 * The for-condition uses the comma operator: @next_tail is recomputed
 * every iteration, and the loop stops either when the ring is full
 * (next tail would reach @swhead) or @count buffers have been posted.
 * On allocation error the loop breaks and the error code is returned,
 * but buffers posted so far are still handed to HW via the hwtail
 * write below.
 *
 * Returns 0 or the last allocation error.
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	u32 next_tail;
	int rc = 0;
	/* monitor mode needs headroom for the radiotap header */
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;

	for (; next_tail = wil_vring_next_tail(v),
			(next_tail != v->swhead) && (count-- > 0);
			v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (rc) {
			wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
				rc, v->swtail);
			break;
		}
	}
	/* publish the new tail position to HW */
	iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));

	return rc;
}
420
421 /*
422  * Pass Rx packet to the netif. Update statistics.
423  */
424 static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
425 {
426         int rc;
427         unsigned int len = skb->len;
428
429         skb_orphan(skb);
430
431         if (in_interrupt())
432                 rc = netif_rx(skb);
433         else
434                 rc = netif_rx_ni(skb);
435
436         if (likely(rc == NET_RX_SUCCESS)) {
437                 ndev->stats.rx_packets++;
438                 ndev->stats.rx_bytes += len;
439
440         } else {
441                 ndev->stats.rx_dropped++;
442         }
443 }
444
/**
 * Proceed all completed skb's from Rx VRING
 *
 * Reaps every frame that HW has finished, hands it to the stack
 * (raw 802.11 with radiotap in monitor mode, Ethernet otherwise),
 * then refills the whole ring.
 *
 * Safe to call from IRQ
 */
void wil_rx_handle(struct wil6210_priv *wil)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	struct sk_buff *skb;

	if (!v->va) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "%s()\n", __func__);
	while (NULL != (skb = wil_vring_reap_rx(wil, v))) {
		wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
				  skb->data, skb_headlen(skb), false);

		if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
			/* monitor: deliver as-is; radiotap header was
			 * already prepended in wil_vring_reap_rx
			 */
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);

		} else {
			skb->protocol = eth_type_trans(skb, ndev);
		}

		wil_netif_rx_any(skb, ndev);
	}
	/* replenish all reaped buffers */
	wil_rx_refill(wil, v->size);
}
480
481 int wil_rx_init(struct wil6210_priv *wil)
482 {
483         struct vring *vring = &wil->vring_rx;
484         int rc;
485
486         vring->size = WIL6210_RX_RING_SIZE;
487         rc = wil_vring_alloc(wil, vring);
488         if (rc)
489                 return rc;
490
491         rc = wmi_rx_chain_add(wil, vring);
492         if (rc)
493                 goto err_free;
494
495         rc = wil_rx_refill(wil, vring->size);
496         if (rc)
497                 goto err_free;
498
499         return 0;
500  err_free:
501         wil_vring_free(wil, vring, 0);
502
503         return rc;
504 }
505
506 void wil_rx_fini(struct wil6210_priv *wil)
507 {
508         struct vring *vring = &wil->vring_rx;
509
510         if (vring->va)
511                 wil_vring_free(wil, vring, 0);
512 }
513
/**
 * wil_vring_init_tx - allocate a Tx vring and configure it in FW
 * @id:   ring index into wil->vring_tx[]
 * @size: number of descriptors
 * @cid:  connection id, packed with @tid into cidxtid
 * @tid:  traffic id
 *
 * Allocates the ring, sends WMI_VRING_CFG_CMDID and stores the HW tail
 * pointer reported by FW. Returns 0 or a negative error; on failure
 * the ring is freed again.
 */
int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
		      int cid, int tid)
{
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
			},
			.ringid = id,
			/* cid in low nibble, tid in high nibble */
			.cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.mac_ctrl = 0,
			.to_resolution = 0,
			.agg_max_wsize = 16,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	/* base/size can only be filled in after the allocation above */
	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
	cmd.vring_cfg.tx_sw_ring.ring_size = cpu_to_le16(vring->size);

	/* 100 ms timeout waiting for the FW reply */
	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_VRING_CFG_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	return 0;
 out_free:
	wil_vring_free(wil, vring, 1);
 out:

	return rc;
}
576
577 void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
578 {
579         struct vring *vring = &wil->vring_tx[id];
580
581         if (!vring->va)
582                 return;
583
584         wil_vring_free(wil, vring, 1);
585 }
586
587 static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
588                                        struct sk_buff *skb)
589 {
590         struct vring *v = &wil->vring_tx[0];
591
592         if (v->va)
593                 return v;
594
595         return NULL;
596 }
597
/**
 * wil_tx_desc_map - fill one Tx descriptor for a single DMA segment
 * @d:   descriptor to program (HW-shared memory, hence volatile)
 * @pa:  DMA address of the segment
 * @len: segment length in bytes
 *
 * Clears the MAC/DMA fields and programs address and length; status
 * BIT(0) is left 0 so HW owns the descriptor. The caller sets the
 * per-frame bits (EOP, interrupt, descriptor count) afterwards.
 * Always returns 0.
 */
static int wil_tx_desc_map(volatile struct vring_tx_desc *d,
			   dma_addr_t pa, u32 len)
{
	d->dma.addr_low = lower_32_bits(pa);
	d->dma.addr_high = (u16)upper_32_bits(pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
	d->dma.b11 = 0/*14 | BIT(7)*/;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = len;
	d->dma.d0 = 0;
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0; /* overwritten below; kept as explicit clear */
	d->mac.ucode_cmd = 0;
	/* use dst index 0 */
	d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS) |
		       (0 << MAC_CFG_DESC_TX_1_DST_INDEX_POS);
	/* translation type:  0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}
623
/**
 * wil_tx_vring - map an skb (linear head + page fragments) onto Tx ring
 *
 * Uses 1 + nr_frags descriptors starting at @swhead: descriptor 0 for
 * the linear part (dma_map_single), one per page fragment
 * (skb_frag_dma_map). The skb reference (with an extra hold) is stored
 * in ctx[] at the LAST descriptor so that wil_tx_complete can tell the
 * frame's final segment from middle fragments.
 *
 * Returns 0, -ENOMEM when the ring lacks space, or -EINVAL on DMA
 * mapping failure.
 */
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	volatile struct vring_tx_desc *d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f;
	int vring_index = vring - wil->vring_tx;
	uint i = swhead;
	dma_addr_t pa;

	wil_dbg_txrx(wil, "%s()\n", __func__);

	/* stop queues below 1/8 of the ring; woken in wil_tx_complete */
	if (avail < vring->size/8)
		netif_tx_stop_all_queues(wil_to_ndev(wil));
	if (avail < 1 + nr_frags) {
		wil_err(wil, "Tx ring full. No space for %d fragments\n",
			1 + nr_frags);
		return -ENOMEM;
	}
	d = &(vring->va[i].tx);

	/* FIXME FW can accept only unicast frames for the peer */
	memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);

	pa = dma_map_single(dev, skb->data,
			skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
		     skb->data, (unsigned long long)pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	/* 1-st segment */
	wil_tx_desc_map(d, pa, skb_headlen(skb));
	d->mac.d[2] |= ((nr_frags + 1) <<
		       MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
	/* middle segments */
	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);
		i = (swhead + f + 1) % vring->size;
		d = &(vring->va[i].tx);
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa)))
			goto dma_error;
		wil_tx_desc_map(d, pa, len);
		/* NULL ctx marks "unmap as page" in completion/cleanup */
		vring->ctx[i] = NULL;
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(9); /* BUG: undocumented bit */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS);

	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
	iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i] = skb_get(skb);

	return 0;
 dma_error:
	/* unmap what we have mapped */
	/* Note: increment @f to operate with positive index */
	/* NOTE(review): this loop walks indices swhead+f+1 down to
	 * swhead+1 — it touches the FAILING fragment's descriptor (whose
	 * contents are stale, never programmed this round) and appears to
	 * never unmap the head segment at @swhead. TODO confirm and fix.
	 */
	for (f++; f > 0; f--) {
		i = (swhead + f) % vring->size;
		d = &(vring->va[i].tx);
		d->dma.status = TX_DMA_STATUS_DU;
		pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
		if (vring->ctx[i])
			dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
	}

	return -EINVAL;
}
715
716
717 netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
718 {
719         struct wil6210_priv *wil = ndev_to_wil(ndev);
720         struct vring *vring;
721         int rc;
722
723         wil_dbg_txrx(wil, "%s()\n", __func__);
724         if (!test_bit(wil_status_fwready, &wil->status)) {
725                 wil_err(wil, "FW not ready\n");
726                 goto drop;
727         }
728         if (!test_bit(wil_status_fwconnected, &wil->status)) {
729                 wil_err(wil, "FW not connected\n");
730                 goto drop;
731         }
732         if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
733                 wil_err(wil, "Xmit in monitor mode not supported\n");
734                 goto drop;
735         }
736         if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
737                 rc = wmi_tx_eapol(wil, skb);
738         } else {
739                 /* find vring */
740                 vring = wil_find_tx_vring(wil, skb);
741                 if (!vring) {
742                         wil_err(wil, "No Tx VRING available\n");
743                         goto drop;
744                 }
745                 /* set up vring entry */
746                 rc = wil_tx_vring(wil, vring, skb);
747         }
748         switch (rc) {
749         case 0:
750                 /* statistics will be updated on the tx_complete */
751                 dev_kfree_skb_any(skb);
752                 return NETDEV_TX_OK;
753         case -ENOMEM:
754                 return NETDEV_TX_BUSY;
755         default:
756                 break; /* goto drop; */
757         }
758  drop:
759         netif_tx_stop_all_queues(ndev);
760         ndev->stats.tx_dropped++;
761         dev_kfree_skb_any(skb);
762
763         return NET_XMIT_DROP;
764 }
765
/**
 * Clean up transmitted skb's from the Tx VRING
 *
 * Walks from @swtail while descriptors carry the DU (done) bit:
 * updates stats, unmaps buffers, scrubs each descriptor back to
 * SW ownership, and wakes the Tx queues once enough room is free.
 *
 * Safe to call from IRQ
 */
void wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct device *dev = wil_to_dev(wil);
	struct vring *vring = &wil->vring_tx[ringid];

	if (!vring->va) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return;
	}

	wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);

	while (!wil_vring_is_empty(vring)) {
		volatile struct vring_tx_desc *d = &vring->va[vring->swtail].tx;
		dma_addr_t pa;
		struct sk_buff *skb;
		if (!(d->dma.status & TX_DMA_STATUS_DU))
			break;

		wil_dbg_txrx(wil,
			     "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
			     vring->swtail, d->dma.length, d->dma.status,
			     d->dma.error);
		wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);

		pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
		skb = vring->ctx[vring->swtail];
		if (skb) {
			/* non-NULL ctx marks the frame's accounting
			 * descriptor, where wil_tx_vring stored the skb.
			 * NOTE(review): for fragmented skbs that slot is
			 * the last (page-mapped) descriptor, yet it is
			 * unmapped with dma_unmap_single here — verify
			 * against wil_tx_vring
			 */
			if (d->dma.error == 0) {
				ndev->stats.tx_packets++;
				ndev->stats.tx_bytes += skb->len;
			} else {
				ndev->stats.tx_errors++;
			}

			dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			vring->ctx[vring->swtail] = NULL;
		} else {
			/* middle fragment mapped via skb_frag_dma_map */
			dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
		}
		/* scrub the descriptor and return it to SW ownership */
		d->dma.addr_low = 0;
		d->dma.addr_high = 0;
		d->dma.length = 0;
		d->dma.status = TX_DMA_STATUS_DU;
		vring->swtail = wil_vring_next_tail(vring);
	}
	/* wake queues when at least 1/4 of the ring is free again */
	if (wil_vring_avail_tx(vring) > vring->size/4)
		netif_tx_wake_all_queues(wil_to_ndev(wil));
}