2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_vlan.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/jiffies.h>
42 #include <linux/prefetch.h>
43 #include <linux/export.h>
52 * Rx buffer size. We use largish buffers if possible but settle for single
53 * pages under memory shortage.
56 # define FL_PG_ORDER 0
58 # define FL_PG_ORDER (16 - PAGE_SHIFT)
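/* For instance, with 4 KB pages (PAGE_SHIFT == 12) this evaluates to
 * order 16 - 12 = 4, i.e. 64 KB large buffers; on systems whose pages are
 * already that big, single pages (order 0) are used instead.
 */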
61 /* RX_PULL_LEN should be <= RX_COPY_THRES */
62 #define RX_COPY_THRES 256
63 #define RX_PULL_LEN 128
66 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
67 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
69 #define RX_PKT_SKB_LEN 512
72 * Max number of Tx descriptors we clean up at a time. Should be modest as
73 * freeing skbs isn't cheap and it happens while holding locks. As long as we
74 * free packets faster than they arrive, we'll eventually catch up and keep
75 * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
77 #define MAX_TX_RECLAIM 16
80 * Max number of Rx buffers we replenish at a time. Again, keep this modest;
81 * allocating buffers isn't cheap either.
83 #define MAX_RX_REFILL 16U
86 * Period of the Rx queue check timer. This timer is infrequent as it has
87 * something to do only when the system experiences severe memory shortage.
89 #define RX_QCHECK_PERIOD (HZ / 2)
92 * Period of the Tx queue check timer.
94 #define TX_QCHECK_PERIOD (HZ / 2)
97 * Max number of Tx descriptors to be reclaimed by the Tx timer.
99 #define MAX_TIMER_TX_RECLAIM 100
102 * Timer index used when backing off due to memory shortage.
104 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
107 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
108 * attempt to refill it.
110 #define FL_STARVE_THRES 4
113 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
114 * This is the same as calc_tx_descs() for a TSO packet with
115 * nr_frags == MAX_SKB_FRAGS.
117 #define ETHTXQ_STOP_THRES \
118 (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
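/* A worked example, assuming MAX_SKB_FRAGS == 17 (the usual value with
 * 4 KB pages): the SGL part comes to (3 * 17) / 2 + (17 & 1) = 26 flits,
 * DIV_ROUND_UP(26, 8) = 4 descriptors, plus 1 for the WR and CPL headers,
 * so the queue is suspended once fewer than 5 descriptors remain.
 */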
121 * Suspension threshold for non-Ethernet Tx queues. We require enough room
122 * for a full sized WR.
124 #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
127 * Max Tx descriptor space we allow for an Ethernet packet to be inlined into a WR.
130 #define MAX_IMM_TX_PKT_LEN 128
133 * Max size of a WR sent through a control Tx queue.
135 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
137 struct tx_sw_desc { /* SW state per Tx descriptor */
139 struct ulptx_sgl *sgl;
142 struct rx_sw_desc { /* SW state per Rx descriptor */
148 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
149 * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
150 * We could easily support more but there doesn't seem to be much need for that.
153 #define FL_MTU_SMALL 1500
154 #define FL_MTU_LARGE 9000
156 static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
159 struct sge *s = &adapter->sge;
161 return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
164 #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
165 #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
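/* For illustration only, assuming a 2-byte pktshift and 32-byte fl_align
 * (both are per-adapter SGE parameters): FL_MTU_SMALL_BUFSIZE works out
 * to ALIGN(2 + 14 + 4 + 1500, 32) = 1536 bytes and FL_MTU_LARGE_BUFSIZE
 * to ALIGN(2 + 14 + 4 + 9000, 32) = 9024 bytes.
 */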
168 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
169 * these to specify the buffer size as an index into the SGE Free List Buffer
170 * Size register array. We also use bit 4 to record when the buffer has been unmapped
171 * for DMA, but this is of course never sent to the hardware and is only used
172 * to prevent double unmappings. All of the above requires that the Free List
173 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
174 * 32-byte aligned or aligned to a greater power of 2. Since the SGE's minimal
175 * Free List Buffer alignment is 32 bytes, this works out for us ...
178 RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
179 RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
180 RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
183 * XXX We shouldn't depend on being able to use these indices.
184 * XXX Especially when some other Master PF has initialized the
185 * XXX adapter or we use the Firmware Configuration File. We
186 * XXX should really search through the Host Buffer Size register
187 * XXX array for the appropriately sized buffer indices.
189 RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
190 RX_LARGE_PG_BUF = 0x1, /* large (FL_PG_ORDER) page buffer */
192 RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
193 RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
196 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
198 return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
201 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
203 return !(d->dma_addr & RX_UNMAPPED_BUF);
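/* A sketch of the encoding with a hypothetical bus address: a 32-byte
 * aligned mapping at 0x12340 stored with index RX_LARGE_PG_BUF is kept
 * as 0x12341; get_buf_addr() masks off the flag bits to recover 0x12340,
 * and is_buf_mapped() reports true since RX_UNMAPPED_BUF (bit 4) is clear.
 */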
207 * txq_avail - return the number of available slots in a Tx queue
210 * Returns the number of descriptors in a Tx queue available to write new packets.
213 static inline unsigned int txq_avail(const struct sge_txq *q)
215 return q->size - 1 - q->in_use;
219 * fl_cap - return the capacity of a free-buffer list
222 * Returns the capacity of a free-buffer list. The capacity is less than
223 * the size because one descriptor needs to be left unpopulated, otherwise
224 * HW will think the FL is empty.
226 static inline unsigned int fl_cap(const struct sge_fl *fl)
228 return fl->size - 8; /* 1 descriptor = 8 buffers */
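/* E.g., an FL created with size 1024 offers a capacity of 1016 buffers:
 * one full 8-buffer descriptor always stays unpopulated so that the
 * hardware can tell a full ring apart from an empty one.
 */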
231 static inline bool fl_starving(const struct sge_fl *fl)
233 return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
236 static int map_skb(struct device *dev, const struct sk_buff *skb,
239 const skb_frag_t *fp, *end;
240 const struct skb_shared_info *si;
242 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
243 if (dma_mapping_error(dev, *addr))
246 si = skb_shinfo(skb);
247 end = &si->frags[si->nr_frags];
249 for (fp = si->frags; fp < end; fp++) {
250 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
252 if (dma_mapping_error(dev, *addr))
258 while (fp-- > si->frags)
259 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
261 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
266 #ifdef CONFIG_NEED_DMA_MAP_STATE
267 static void unmap_skb(struct device *dev, const struct sk_buff *skb,
268 const dma_addr_t *addr)
270 const skb_frag_t *fp, *end;
271 const struct skb_shared_info *si;
273 dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
275 si = skb_shinfo(skb);
276 end = &si->frags[si->nr_frags];
277 for (fp = si->frags; fp < end; fp++)
278 dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
282 * deferred_unmap_destructor - unmap a packet when it is freed
285 * This is the packet destructor used for Tx packets that need to remain
286 * mapped until they are freed rather than until their Tx descriptors are freed.
289 static void deferred_unmap_destructor(struct sk_buff *skb)
291 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
295 static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
296 const struct ulptx_sgl *sgl, const struct sge_txq *q)
298 const struct ulptx_sge_pair *p;
299 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
301 if (likely(skb_headlen(skb)))
302 dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
305 dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
311 * the complexity below is because of the possibility of a wrap-around
312 * in the middle of an SGL
314 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
315 if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
316 unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
317 ntohl(p->len[0]), DMA_TO_DEVICE);
318 dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
319 ntohl(p->len[1]), DMA_TO_DEVICE);
321 } else if ((u8 *)p == (u8 *)q->stat) {
322 p = (const struct ulptx_sge_pair *)q->desc;
324 } else if ((u8 *)p + 8 == (u8 *)q->stat) {
325 const __be64 *addr = (const __be64 *)q->desc;
327 dma_unmap_page(dev, be64_to_cpu(addr[0]),
328 ntohl(p->len[0]), DMA_TO_DEVICE);
329 dma_unmap_page(dev, be64_to_cpu(addr[1]),
330 ntohl(p->len[1]), DMA_TO_DEVICE);
331 p = (const struct ulptx_sge_pair *)&addr[2];
333 const __be64 *addr = (const __be64 *)q->desc;
335 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
336 ntohl(p->len[0]), DMA_TO_DEVICE);
337 dma_unmap_page(dev, be64_to_cpu(addr[0]),
338 ntohl(p->len[1]), DMA_TO_DEVICE);
339 p = (const struct ulptx_sge_pair *)&addr[1];
345 if ((u8 *)p == (u8 *)q->stat)
346 p = (const struct ulptx_sge_pair *)q->desc;
347 addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
348 *(const __be64 *)q->desc;
349 dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
355 * free_tx_desc - reclaims Tx descriptors and their buffers
356 * @adapter: the adapter
357 * @q: the Tx queue to reclaim descriptors from
358 * @n: the number of descriptors to reclaim
359 * @unmap: whether the buffers should be unmapped for DMA
361 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
362 * Tx buffers. Called with the Tx queue lock held.
364 static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
365 unsigned int n, bool unmap)
367 struct tx_sw_desc *d;
368 unsigned int cidx = q->cidx;
369 struct device *dev = adap->pdev_dev;
373 if (d->skb) { /* an SGL is present */
375 unmap_sgl(dev, d->skb, d->sgl, q);
380 if (++cidx == q->size) {
389 * Return the number of reclaimable descriptors in a Tx queue.
391 static inline int reclaimable(const struct sge_txq *q)
393 int hw_cidx = ntohs(q->stat->cidx);
394 hw_cidx -= q->cidx;
395 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
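/* Example: with q->cidx == 1000 on a 1024-entry queue and a HW cidx of 8,
 * the ring has wrapped and 8 - 1000 + 1024 = 32 descriptors are
 * reclaimable.
 */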
399 * reclaim_completed_tx - reclaims completed Tx descriptors
401 * @q: the Tx queue to reclaim completed descriptors from
402 * @unmap: whether the buffers should be unmapped for DMA
404 * Reclaims Tx descriptors that the SGE has indicated it has processed,
405 * and frees the associated buffers if possible. Called with the Tx queue locked.
408 static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
411 int avail = reclaimable(q);
415 * Limit the amount of clean up work we do at a time to keep
416 * the Tx lock hold time O(1).
418 if (avail > MAX_TX_RECLAIM)
419 avail = MAX_TX_RECLAIM;
421 free_tx_desc(adap, q, avail, unmap);
426 static inline int get_buf_size(struct adapter *adapter,
427 const struct rx_sw_desc *d)
429 struct sge *s = &adapter->sge;
430 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
433 switch (rx_buf_size_idx) {
434 case RX_SMALL_PG_BUF:
435 buf_size = PAGE_SIZE;
438 case RX_LARGE_PG_BUF:
439 buf_size = PAGE_SIZE << s->fl_pg_order;
442 case RX_SMALL_MTU_BUF:
443 buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
446 case RX_LARGE_MTU_BUF:
447 buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
458 * free_rx_bufs - free the Rx buffers on an SGE free list
460 * @q: the SGE free list to free buffers from
461 * @n: how many buffers to free
463 * Release the next @n buffers on an SGE free-buffer Rx queue. The
464 * buffers must be made inaccessible to HW before calling this function.
466 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
469 struct rx_sw_desc *d = &q->sdesc[q->cidx];
471 if (is_buf_mapped(d))
472 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
473 get_buf_size(adap, d),
477 if (++q->cidx == q->size)
484 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
486 * @q: the SGE free list
488 * Unmap the current buffer on an SGE free-buffer Rx queue. The
489 * buffer must be made inaccessible to HW before calling this function.
491 * This is similar to @free_rx_bufs above but does not free the buffer.
492 * Do note that the FL still loses any further access to the buffer.
494 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
496 struct rx_sw_desc *d = &q->sdesc[q->cidx];
498 if (is_buf_mapped(d))
499 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
500 get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
502 if (++q->cidx == q->size)
507 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
510 if (q->pend_cred >= 8) {
511 val = PIDX(q->pend_cred / 8);
512 if (!is_t4(adap->params.chip))
515 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
516 QID(q->cntxt_id) | val);
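/* Note that credits are returned to the hardware in units of 8 buffers
 * (PIDX counts 8-buffer chunks), so up to 7 buffers may remain pending
 * here until a later refill pushes pend_cred back over the threshold.
 */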
521 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
525 sd->dma_addr = mapping; /* includes size low bits */
529 * refill_fl - refill an SGE Rx buffer ring
531 * @q: the ring to refill
532 * @n: the number of new buffers to allocate
533 * @gfp: the gfp flags for the allocations
535 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
536 * allocated with the supplied gfp flags. The caller must assure that
537 * @n does not exceed the queue's capacity. If afterwards the queue is
538 * found critically low mark it as starving in the bitmap of starving FLs.
540 * Returns the number of buffers allocated.
542 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
545 struct sge *s = &adap->sge;
548 unsigned int cred = q->avail;
549 __be64 *d = &q->desc[q->pidx];
550 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
552 gfp |= __GFP_NOWARN | __GFP_COLD;
554 if (s->fl_pg_order == 0)
555 goto alloc_small_pages;
558 * Prefer large buffers
561 pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
563 q->large_alloc_failed++;
564 break; /* fall back to single pages */
567 mapping = dma_map_page(adap->pdev_dev, pg, 0,
568 PAGE_SIZE << s->fl_pg_order,
570 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
571 __free_pages(pg, s->fl_pg_order);
572 goto out; /* do not try small pages for this error */
574 mapping |= RX_LARGE_PG_BUF;
575 *d++ = cpu_to_be64(mapping);
577 set_rx_sw_desc(sd, pg, mapping);
581 if (++q->pidx == q->size) {
591 pg = __skb_alloc_page(gfp, NULL);
597 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
599 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
603 *d++ = cpu_to_be64(mapping);
605 set_rx_sw_desc(sd, pg, mapping);
609 if (++q->pidx == q->size) {
616 out: cred = q->avail - cred;
617 q->pend_cred += cred;
620 if (unlikely(fl_starving(q))) {
622 set_bit(q->cntxt_id - adap->sge.egr_start,
623 adap->sge.starving_fl);
629 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
631 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
636 * alloc_ring - allocate resources for an SGE descriptor ring
637 * @dev: the PCI device's core device
638 * @nelem: the number of descriptors
639 * @elem_size: the size of each descriptor
640 * @sw_size: the size of the SW state associated with each ring element
641 * @phys: the physical address of the allocated ring
642 * @metadata: address of the array holding the SW state for the ring
643 * @stat_size: extra space in HW ring for status information
644 * @node: preferred node for memory allocations
646 * Allocates resources for an SGE descriptor ring, such as Tx queues,
647 * free buffer lists, or response queues. Each SGE ring requires
648 * space for its HW descriptors plus, optionally, space for the SW state
649 * associated with each HW entry (the metadata). The function returns
650 * three values: the virtual address for the HW ring (the return value
651 * of the function), the bus address of the HW ring, and the address of the SW ring.
654 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
655 size_t sw_size, dma_addr_t *phys, void *metadata,
656 size_t stat_size, int node)
658 size_t len = nelem * elem_size + stat_size;
660 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
665 s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
668 dma_free_coherent(dev, len, p, *phys);
673 *(void **)metadata = s;
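/* A minimal usage sketch with hypothetical sizes, mirroring the free-list
 * allocation later in this file (stat_len stands in for the adapter's
 * status-page size, s->stat_len):
 *
 *	dma_addr_t phys;
 *	struct rx_sw_desc *sdesc;
 *	__be64 *ring = alloc_ring(adap->pdev_dev, 1024, sizeof(__be64),
 *				  sizeof(struct rx_sw_desc), &phys, &sdesc,
 *				  stat_len, NUMA_NO_NODE);
 *	if (!ring)
 *		return -ENOMEM;
 */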
679 * sgl_len - calculates the size of an SGL of the given capacity
680 * @n: the number of SGL entries
682 * Calculates the number of flits needed for a scatter/gather list that
683 * can hold the given number of entries.
685 static inline unsigned int sgl_len(unsigned int n)
687 n--; /* the first SGE's length and address live in the ulptx_sgl header */
688 return (3 * n) / 2 + (n & 1) + 2;
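/* The arithmetic follows the ulptx_sgl layout: cmd_nsge/len0 and addr0
 * take 2 flits, every further pair of SGEs packs into a 3-flit
 * ulptx_sge_pair, and a leftover odd SGE costs 2 more flits. E.g.,
 * sgl_len(3) = 2 + 3 = 5 flits and sgl_len(2) = 2 + 2 = 4 flits.
 */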
692 * flits_to_desc - returns the num of Tx descriptors for the given flits
693 * @n: the number of flits
695 * Returns the number of Tx descriptors needed for the supplied number
698 static inline unsigned int flits_to_desc(unsigned int n)
700 BUG_ON(n > SGE_MAX_WR_LEN / 8);
701 return DIV_ROUND_UP(n, 8);
705 * is_eth_imm - can an Ethernet packet be sent as immediate data?
708 * Returns whether an Ethernet packet is small enough to fit as
709 * immediate data. Return value corresponds to headroom required.
711 static inline int is_eth_imm(const struct sk_buff *skb)
713 int hdrlen = skb_shinfo(skb)->gso_size ?
714 sizeof(struct cpl_tx_pkt_lso_core) : 0;
716 hdrlen += sizeof(struct cpl_tx_pkt);
717 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
723 * calc_tx_flits - calculate the number of flits for a packet Tx WR
726 * Returns the number of flits needed for a Tx WR for the given Ethernet
727 * packet, including the needed WR and CPL headers.
729 static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
732 int hdrlen = is_eth_imm(skb);
735 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
737 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
738 if (skb_shinfo(skb)->gso_size)
744 * calc_tx_descs - calculate the number of Tx descriptors for a packet
747 * Returns the number of Tx descriptors needed for the given Ethernet
748 * packet, including the needed WR and CPL headers.
750 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
752 return flits_to_desc(calc_tx_flits(skb));
756 * write_sgl - populate a scatter/gather list for a packet
758 * @q: the Tx queue we are writing into
759 * @sgl: starting location for writing the SGL
760 * @end: points right after the end of the SGL
761 * @start: start offset into skb main-body data to include in the SGL
762 * @addr: the list of bus addresses for the SGL elements
764 * Generates a gather list for the buffers that make up a packet.
765 * The caller must provide adequate space for the SGL that will be written.
766 * The SGL includes all of the packet's page fragments and the data in its
767 * main body except for the first @start bytes. @sgl must be 16-byte
768 * aligned and within a Tx descriptor with available space. @end points
769 * right after the end of the SGL but does not account for any potential
770 * wrap around, i.e., @end > @sgl.
772 static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
773 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
774 const dma_addr_t *addr)
777 struct ulptx_sge_pair *to;
778 const struct skb_shared_info *si = skb_shinfo(skb);
779 unsigned int nfrags = si->nr_frags;
780 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
782 len = skb_headlen(skb) - start;
784 sgl->len0 = htonl(len);
785 sgl->addr0 = cpu_to_be64(addr[0] + start);
788 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
789 sgl->addr0 = cpu_to_be64(addr[1]);
792 sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
793 if (likely(--nfrags == 0))
796 * Most of the complexity below deals with the possibility we hit the
797 * end of the queue in the middle of writing the SGL. For this case
798 * only we create the SGL in a temporary buffer and then copy it.
800 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
802 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
803 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
804 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
805 to->addr[0] = cpu_to_be64(addr[i]);
806 to->addr[1] = cpu_to_be64(addr[++i]);
809 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
810 to->len[1] = cpu_to_be32(0);
811 to->addr[0] = cpu_to_be64(addr[i + 1]);
813 if (unlikely((u8 *)end > (u8 *)q->stat)) {
814 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
817 memcpy(sgl->sge, buf, part0);
818 part1 = (u8 *)end - (u8 *)q->stat;
819 memcpy(q->desc, (u8 *)buf + part0, part1);
820 end = (void *)q->desc + part1;
822 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
826 /* This function copies a 64-byte coalesced work request to
827 * memory-mapped BAR2 space (user-space writes). For coalesced WRs,
828 * the SGE fetches data from the FIFO instead of from host memory.
830 static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
843 * ring_tx_db - check and potentially ring a Tx queue's doorbell
846 * @n: number of new descriptors to give to HW
848 * Ring the doorbell for a Tx queue.
850 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
852 unsigned int *wr, index;
854 wmb(); /* write descriptors before telling HW */
855 spin_lock(&q->db_lock);
856 if (!q->db_disabled) {
857 if (is_t4(adap->params.chip)) {
858 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
859 QID(q->cntxt_id) | PIDX(n));
862 index = q->pidx ? (q->pidx - 1) : (q->size - 1);
863 wr = (unsigned int *)&q->desc[index];
864 cxgb_pio_copy((u64 __iomem *)
865 (adap->bar2 + q->udb + 64),
868 writel(n, adap->bar2 + q->udb + 8);
872 q->db_pidx = q->pidx;
873 spin_unlock(&q->db_lock);
877 * inline_tx_skb - inline a packet's data into Tx descriptors
879 * @q: the Tx queue where the packet will be inlined
880 * @pos: starting position in the Tx queue where to inline the packet
882 * Inline a packet's contents directly into Tx descriptors, starting at
883 * the given position within the Tx DMA ring.
884 * Most of the complexity of this operation is dealing with wrap arounds
885 * in the middle of the packet we want to inline.
887 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
891 int left = (void *)q->stat - pos;
893 if (likely(skb->len <= left)) {
894 if (likely(!skb->data_len))
895 skb_copy_from_linear_data(skb, pos, skb->len);
897 skb_copy_bits(skb, 0, pos, skb->len);
900 skb_copy_bits(skb, 0, pos, left);
901 skb_copy_bits(skb, left, q->desc, skb->len - left);
902 pos = (void *)q->desc + (skb->len - left);
905 /* 0-pad to multiple of 16 */
906 p = PTR_ALIGN(pos, 8);
907 if ((uintptr_t)p & 8)
912 * Figure out what HW csum a packet wants and return the appropriate control bits.
915 static u64 hwcsum(const struct sk_buff *skb)
918 const struct iphdr *iph = ip_hdr(skb);
920 if (iph->version == 4) {
921 if (iph->protocol == IPPROTO_TCP)
922 csum_type = TX_CSUM_TCPIP;
923 else if (iph->protocol == IPPROTO_UDP)
924 csum_type = TX_CSUM_UDPIP;
927 * unknown protocol, disable HW csum
928 * and hope a bad packet is detected
930 return TXPKT_L4CSUM_DIS;
934 * this doesn't work with extension headers
936 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
938 if (ip6h->nexthdr == IPPROTO_TCP)
939 csum_type = TX_CSUM_TCPIP6;
940 else if (ip6h->nexthdr == IPPROTO_UDP)
941 csum_type = TX_CSUM_UDPIP6;
946 if (likely(csum_type >= TX_CSUM_TCPIP))
947 return TXPKT_CSUM_TYPE(csum_type) |
948 TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
949 TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
951 int start = skb_transport_offset(skb);
953 return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
954 TXPKT_CSUM_LOC(start + skb->csum_offset);
958 static void eth_txq_stop(struct sge_eth_txq *q)
960 netif_tx_stop_queue(q->txq);
964 static inline void txq_advance(struct sge_txq *q, unsigned int n)
968 if (q->pidx >= q->size)
973 * t4_eth_xmit - add a packet to an Ethernet Tx queue
975 * @dev: the egress net device
977 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
979 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
985 unsigned int flits, ndesc;
986 struct adapter *adap;
987 struct sge_eth_txq *q;
988 const struct port_info *pi;
989 struct fw_eth_tx_pkt_wr *wr;
990 struct cpl_tx_pkt_core *cpl;
991 const struct skb_shared_info *ssi;
992 dma_addr_t addr[MAX_SKB_FRAGS + 1];
993 bool immediate = false;
996 * The chip's minimum packet length is 10 octets, but we play it safe and reject
997 * anything shorter than an Ethernet header.
999 if (unlikely(skb->len < ETH_HLEN)) {
1000 out_free: dev_kfree_skb(skb);
1001 return NETDEV_TX_OK;
1004 pi = netdev_priv(dev);
1006 qidx = skb_get_queue_mapping(skb);
1007 q = &adap->sge.ethtxq[qidx + pi->first_qset];
1009 reclaim_completed_tx(adap, &q->q, true);
1011 flits = calc_tx_flits(skb);
1012 ndesc = flits_to_desc(flits);
1013 credits = txq_avail(&q->q) - ndesc;
1015 if (unlikely(credits < 0)) {
1017 dev_err(adap->pdev_dev,
1018 "%s: Tx ring %u full while queue awake!\n",
1020 return NETDEV_TX_BUSY;
1023 if (is_eth_imm(skb))
1027 unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
1032 wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
1033 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1035 wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
1038 wr = (void *)&q->q.desc[q->q.pidx];
1039 wr->equiq_to_len16 = htonl(wr_mid);
1040 wr->r3 = cpu_to_be64(0);
1041 end = (u64 *)wr + flits;
1043 len = immediate ? skb->len : 0;
1044 len += sizeof(*cpl);
1045 ssi = skb_shinfo(skb);
1046 if (ssi->gso_size) {
1047 struct cpl_tx_pkt_lso *lso = (void *)wr;
1048 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1049 int l3hdr_len = skb_network_header_len(skb);
1050 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1052 len += sizeof(*lso);
1053 wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
1054 FW_WR_IMMDLEN(len));
1055 lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
1056 LSO_FIRST_SLICE | LSO_LAST_SLICE |
1058 LSO_ETHHDR_LEN(eth_xtra_len / 4) |
1059 LSO_IPHDR_LEN(l3hdr_len / 4) |
1060 LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
1061 lso->c.ipid_ofst = htons(0);
1062 lso->c.mss = htons(ssi->gso_size);
1063 lso->c.seqno_offset = htonl(0);
1064 lso->c.len = htonl(skb->len);
1065 cpl = (void *)(lso + 1);
1066 cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1067 TXPKT_IPHDR_LEN(l3hdr_len) |
1068 TXPKT_ETHHDR_LEN(eth_xtra_len);
1070 q->tx_cso += ssi->gso_segs;
1072 wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
1073 FW_WR_IMMDLEN(len));
1074 cpl = (void *)(wr + 1);
1075 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1076 cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
1079 cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
1082 if (vlan_tx_tag_present(skb)) {
1084 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
1087 cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
1088 TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
1089 cpl->pack = htons(0);
1090 cpl->len = htons(skb->len);
1091 cpl->ctrl1 = cpu_to_be64(cntrl);
1094 inline_tx_skb(skb, &q->q, cpl + 1);
1099 write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
1103 last_desc = q->q.pidx + ndesc - 1;
1104 if (last_desc >= q->q.size)
1105 last_desc -= q->q.size;
1106 q->q.sdesc[last_desc].skb = skb;
1107 q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1110 txq_advance(&q->q, ndesc);
1112 ring_tx_db(adap, &q->q, ndesc);
1113 return NETDEV_TX_OK;
1117 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1118 * @q: the SGE control Tx queue
1120 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1121 * that send only immediate data (presently just the control queues) and
1122 * thus do not have any sk_buffs to release.
1124 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1126 int hw_cidx = ntohs(q->stat->cidx);
1127 int reclaim = hw_cidx - q->cidx;
1129 if (reclaim < 0)
1130 reclaim += q->size;
1132 q->in_use -= reclaim;
1137 * is_imm - check whether a packet can be sent as immediate data
1140 * Returns true if a packet can be sent as a WR with immediate data.
1142 static inline int is_imm(const struct sk_buff *skb)
1144 return skb->len <= MAX_CTRL_WR_LEN;
1148 * ctrlq_check_stop - check if a control queue is full and should stop
1150 * @wr: most recent WR written to the queue
1152 * Check if a control queue has become full and should be stopped.
1153 * We clean up control queue descriptors very lazily, only when we run out.
1154 * If the queue is still full after reclaiming any completed descriptors
1155 * we suspend it and have the last WR wake it up.
1157 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1159 reclaim_completed_tx_imm(&q->q);
1160 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1161 wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1168 * ctrl_xmit - send a packet through an SGE control Tx queue
1169 * @q: the control queue
1172 * Send a packet through an SGE control Tx queue. Packets sent through
1173 * a control queue must fit entirely as immediate data.
1175 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1178 struct fw_wr_hdr *wr;
1180 if (unlikely(!is_imm(skb))) {
1183 return NET_XMIT_DROP;
1186 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1187 spin_lock(&q->sendq.lock);
1189 if (unlikely(q->full)) {
1190 skb->priority = ndesc; /* save for restart */
1191 __skb_queue_tail(&q->sendq, skb);
1192 spin_unlock(&q->sendq.lock);
1196 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1197 inline_tx_skb(skb, &q->q, wr);
1199 txq_advance(&q->q, ndesc);
1200 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1201 ctrlq_check_stop(q, wr);
1203 ring_tx_db(q->adap, &q->q, ndesc);
1204 spin_unlock(&q->sendq.lock);
1207 return NET_XMIT_SUCCESS;
1211 * restart_ctrlq - restart a suspended control queue
1212 * @data: the control queue to restart
1214 * Resumes transmission on a suspended Tx control queue.
1216 static void restart_ctrlq(unsigned long data)
1218 struct sk_buff *skb;
1219 unsigned int written = 0;
1220 struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1222 spin_lock(&q->sendq.lock);
1223 reclaim_completed_tx_imm(&q->q);
1224 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
1226 while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1227 struct fw_wr_hdr *wr;
1228 unsigned int ndesc = skb->priority; /* previously saved */
1231 * Write descriptors and free skbs outside the lock to limit
1232 * wait times. q->full is still set so new skbs will be queued.
1234 spin_unlock(&q->sendq.lock);
1236 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1237 inline_tx_skb(skb, &q->q, wr);
1241 txq_advance(&q->q, ndesc);
1242 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1243 unsigned long old = q->q.stops;
1245 ctrlq_check_stop(q, wr);
1246 if (q->q.stops != old) { /* suspended anew */
1247 spin_lock(&q->sendq.lock);
1252 ring_tx_db(q->adap, &q->q, written);
1255 spin_lock(&q->sendq.lock);
1258 ringdb: if (written)
1259 ring_tx_db(q->adap, &q->q, written);
1260 spin_unlock(&q->sendq.lock);
1264 * t4_mgmt_tx - send a management message
1265 * @adap: the adapter
1266 * @skb: the packet containing the management message
1268 * Send a management message through control queue 0.
1270 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1275 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1281 * is_ofld_imm - check whether a packet can be sent as immediate data
1284 * Returns true if a packet can be sent as an offload WR with immediate
1285 * data. We currently use the same limit as for Ethernet packets.
1287 static inline int is_ofld_imm(const struct sk_buff *skb)
1289 return skb->len <= MAX_IMM_TX_PKT_LEN;
1293 * calc_tx_flits_ofld - calculate # of flits for an offload packet
1296 * Returns the number of flits needed for the given offload packet.
1297 * These packets are already fully constructed and no additional headers
1300 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1302 unsigned int flits, cnt;
1304 if (is_ofld_imm(skb))
1305 return DIV_ROUND_UP(skb->len, 8);
1307 flits = skb_transport_offset(skb) / 8U; /* headers */
1308 cnt = skb_shinfo(skb)->nr_frags;
1309 if (skb_tail_pointer(skb) != skb_transport_header(skb))
1311 return flits + sgl_len(cnt);
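/* E.g. (a sketch): a non-immediate WR with 64 bytes of headers, two page
 * fragments, and no linear data beyond the headers needs
 * 64 / 8 + sgl_len(2) = 8 + 4 = 12 flits.
 */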
1315 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
1316 * @adap: the adapter
1317 * @q: the queue to stop
1319 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
1320 * inability to map packets. A periodic timer attempts to restart queues so stopped.
1323 static void txq_stop_maperr(struct sge_ofld_txq *q)
1327 set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1328 q->adap->sge.txq_maperr);
1332 * ofldtxq_stop - stop an offload Tx queue that has become full
1333 * @q: the queue to stop
1334 * @skb: the packet causing the queue to become full
1336 * Stops an offload Tx queue that has become full and modifies the packet
1337 * being written to request a wakeup.
1339 static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
1341 struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1343 wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1349 * service_ofldq - restart a suspended offload queue
1350 * @q: the offload queue
1352 * Services an offload Tx queue by moving packets from its packet queue
1353 * to the HW Tx ring. The function starts and ends with the queue locked.
1355 static void service_ofldq(struct sge_ofld_txq *q)
1359 struct sk_buff *skb;
1360 unsigned int written = 0;
1361 unsigned int flits, ndesc;
1363 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1365 * We drop the lock but leave skb on sendq, thus retaining
1366 * exclusive access to the state of the queue.
1368 spin_unlock(&q->sendq.lock);
1370 reclaim_completed_tx(q->adap, &q->q, false);
1372 flits = skb->priority; /* previously saved */
1373 ndesc = flits_to_desc(flits);
1374 credits = txq_avail(&q->q) - ndesc;
1375 BUG_ON(credits < 0);
1376 if (unlikely(credits < TXQ_STOP_THRES))
1377 ofldtxq_stop(q, skb);
1379 pos = (u64 *)&q->q.desc[q->q.pidx];
1380 if (is_ofld_imm(skb))
1381 inline_tx_skb(skb, &q->q, pos);
1382 else if (map_skb(q->adap->pdev_dev, skb,
1383 (dma_addr_t *)skb->head)) {
1385 spin_lock(&q->sendq.lock);
1388 int last_desc, hdr_len = skb_transport_offset(skb);
1390 memcpy(pos, skb->data, hdr_len);
1391 write_sgl(skb, &q->q, (void *)pos + hdr_len,
1392 pos + flits, hdr_len,
1393 (dma_addr_t *)skb->head);
1394 #ifdef CONFIG_NEED_DMA_MAP_STATE
1395 skb->dev = q->adap->port[0];
1396 skb->destructor = deferred_unmap_destructor;
1398 last_desc = q->q.pidx + ndesc - 1;
1399 if (last_desc >= q->q.size)
1400 last_desc -= q->q.size;
1401 q->q.sdesc[last_desc].skb = skb;
1404 txq_advance(&q->q, ndesc);
1406 if (unlikely(written > 32)) {
1407 ring_tx_db(q->adap, &q->q, written);
1411 spin_lock(&q->sendq.lock);
1412 __skb_unlink(skb, &q->sendq);
1413 if (is_ofld_imm(skb))
1416 if (likely(written))
1417 ring_tx_db(q->adap, &q->q, written);
1421 * ofld_xmit - send a packet through an offload queue
1422 * @q: the Tx offload queue
1425 * Send an offload packet through an SGE offload queue.
1427 static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
1429 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
1430 spin_lock(&q->sendq.lock);
1431 __skb_queue_tail(&q->sendq, skb);
1432 if (q->sendq.qlen == 1)
1434 spin_unlock(&q->sendq.lock);
1435 return NET_XMIT_SUCCESS;
1439 * restart_ofldq - restart a suspended offload queue
1440 * @data: the offload queue to restart
1442 * Resumes transmission on a suspended Tx offload queue.
1444 static void restart_ofldq(unsigned long data)
1446 struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
1448 spin_lock(&q->sendq.lock);
1449 q->full = 0; /* the queue actually is completely empty now */
1451 spin_unlock(&q->sendq.lock);
1455 * skb_txq - return the Tx queue an offload packet should use
1458 * Returns the Tx queue an offload packet should use as indicated by bits
1459 * 1-15 in the packet's queue_mapping.
1461 static inline unsigned int skb_txq(const struct sk_buff *skb)
1463 return skb->queue_mapping >> 1;
1467 * is_ctrl_pkt - return whether an offload packet is a control packet
1470 * Returns whether an offload packet should use an OFLD or a CTRL
1471 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
1473 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1475 return skb->queue_mapping & 1;
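/* For example, a sender that encodes queue_mapping as (3 << 1) | 0 == 6
 * is routed to OFLD Tx queue 3, whereas queue_mapping == 7 selects CTRL
 * queue 3 instead.
 */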
1478 static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1480 unsigned int idx = skb_txq(skb);
1482 if (unlikely(is_ctrl_pkt(skb))) {
1483 /* Single ctrl queue is a requirement for LE workaround path */
1484 if (adap->tids.nsftids)
1486 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1488 return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1492 * t4_ofld_send - send an offload packet
1493 * @adap: the adapter
1496 * Sends an offload packet. We use the packet queue_mapping to select the
1497 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1498 * should be sent as regular or control, bits 1-15 select the queue.
1500 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1505 ret = ofld_send(adap, skb);
1511 * cxgb4_ofld_send - send an offload packet
1512 * @dev: the net device
1515 * Sends an offload packet. This is an exported version of @t4_ofld_send,
1516 * intended for ULDs.
1518 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1520 return t4_ofld_send(netdev2adap(dev), skb);
1522 EXPORT_SYMBOL(cxgb4_ofld_send);
1524 static inline void copy_frags(struct sk_buff *skb,
1525 const struct pkt_gl *gl, unsigned int offset)
1529 /* usually there's just one frag */
1530 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1531 gl->frags[0].offset + offset,
1532 gl->frags[0].size - offset);
1533 skb_shinfo(skb)->nr_frags = gl->nfrags;
1534 for (i = 1; i < gl->nfrags; i++)
1535 __skb_fill_page_desc(skb, i, gl->frags[i].page,
1536 gl->frags[i].offset,
1539 /* get a reference to the last page; we don't own it */
1540 get_page(gl->frags[gl->nfrags - 1].page);
1544 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1545 * @gl: the gather list
1546 * @skb_len: size of sk_buff main body if it carries fragments
1547 * @pull_len: amount of data to move to the sk_buff's main body
1549 * Builds an sk_buff from the given packet gather list. Returns the
1550 * sk_buff or %NULL if sk_buff allocation failed.
1552 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1553 unsigned int skb_len, unsigned int pull_len)
1555 struct sk_buff *skb;
1558 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
1559 * size, which is expected since buffers are at least PAGE_SIZEd.
1560 * In this case packets up to RX_COPY_THRES have only one fragment.
1562 if (gl->tot_len <= RX_COPY_THRES) {
1563 skb = dev_alloc_skb(gl->tot_len);
1566 __skb_put(skb, gl->tot_len);
1567 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1569 skb = dev_alloc_skb(skb_len);
1572 __skb_put(skb, pull_len);
1573 skb_copy_to_linear_data(skb, gl->va, pull_len);
1575 copy_frags(skb, gl, pull_len);
1576 skb->len = gl->tot_len;
1577 skb->data_len = skb->len - pull_len;
1578 skb->truesize += skb->data_len;
1582 EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1585 * t4_pktgl_free - free a packet gather list
1586 * @gl: the gather list
1588 * Releases the pages of a packet gather list. We do not own the last
1589 * page on the list and do not free it.
1591 static void t4_pktgl_free(const struct pkt_gl *gl)
1594 const struct page_frag *p;
1596 for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1601 * Process an MPS trace packet. Give it an unused protocol number so it won't
1602 * be delivered to anyone and send it to the stack for capture.
1604 static noinline int handle_trace_pkt(struct adapter *adap,
1605 const struct pkt_gl *gl)
1607 struct sk_buff *skb;
1609 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1610 if (unlikely(!skb)) {
1615 if (is_t4(adap->params.chip))
1616 __skb_pull(skb, sizeof(struct cpl_trace_pkt));
1618 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
1620 skb_reset_mac_header(skb);
1621 skb->protocol = htons(0xffff);
1622 skb->dev = adap->port[0];
1623 netif_receive_skb(skb);
1627 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1628 const struct cpl_rx_pkt *pkt)
1630 struct adapter *adapter = rxq->rspq.adap;
1631 struct sge *s = &adapter->sge;
1633 struct sk_buff *skb;
1635 skb = napi_get_frags(&rxq->rspq.napi);
1636 if (unlikely(!skb)) {
1638 rxq->stats.rx_drops++;
1642 copy_frags(skb, gl, s->pktshift);
1643 skb->len = gl->tot_len - s->pktshift;
1644 skb->data_len = skb->len;
1645 skb->truesize += skb->data_len;
1646 skb->ip_summed = CHECKSUM_UNNECESSARY;
1647 skb_record_rx_queue(skb, rxq->rspq.idx);
1648 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1649 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
1652 if (unlikely(pkt->vlan_ex)) {
1653 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1654 rxq->stats.vlan_ex++;
1656 ret = napi_gro_frags(&rxq->rspq.napi);
1657 if (ret == GRO_HELD)
1658 rxq->stats.lro_pkts++;
1659 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1660 rxq->stats.lro_merged++;
1662 rxq->stats.rx_cso++;
1666 * t4_ethrx_handler - process an ingress ethernet packet
1667 * @q: the response queue that received the packet
1668 * @rsp: the response queue descriptor holding the RX_PKT message
1669 * @si: the gather list of packet fragments
1671 * Process an ingress ethernet packet and deliver it to the stack.
1673 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1674 const struct pkt_gl *si)
1677 struct sk_buff *skb;
1678 const struct cpl_rx_pkt *pkt;
1679 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1680 struct sge *s = &q->adap->sge;
1681 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
1682 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
1684 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
1685 return handle_trace_pkt(q->adap, si);
1687 pkt = (const struct cpl_rx_pkt *)rsp;
1688 csum_ok = pkt->csum_calc && !pkt->err_vec;
1689 if ((pkt->l2info & htonl(RXF_TCP)) &&
1690 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1691 do_gro(rxq, si, pkt);
1695 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
1696 if (unlikely(!skb)) {
1698 rxq->stats.rx_drops++;
1702 __skb_pull(skb, s->pktshift); /* remove ethernet header padding */
1703 skb->protocol = eth_type_trans(skb, q->netdev);
1704 skb_record_rx_queue(skb, q->idx);
1705 if (skb->dev->features & NETIF_F_RXHASH)
1706 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
1711 if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
1712 (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
1713 if (!pkt->ip_frag) {
1714 skb->ip_summed = CHECKSUM_UNNECESSARY;
1715 rxq->stats.rx_cso++;
1716 } else if (pkt->l2info & htonl(RXF_IP)) {
1717 __sum16 c = (__force __sum16)pkt->csum;
1718 skb->csum = csum_unfold(c);
1719 skb->ip_summed = CHECKSUM_COMPLETE;
1720 rxq->stats.rx_cso++;
1723 skb_checksum_none_assert(skb);
1725 if (unlikely(pkt->vlan_ex)) {
1726 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1727 rxq->stats.vlan_ex++;
1729 netif_receive_skb(skb);
1734 * restore_rx_bufs - put back a packet's Rx buffers
1735 * @si: the packet gather list
1736 * @q: the SGE free list
1737 * @frags: number of FL buffers to restore
1739 * Puts back on an FL the Rx buffers associated with @si. The buffers
1740 * have already been unmapped and are left unmapped; we mark them as such to
1741 * prevent further unmapping attempts.
1743 * This function undoes a series of @unmap_rx_buf calls when we find out
1744 * that the current packet can't be processed right away after all and we
1745 * need to come back to it later. This is a very rare event and there's
1746 * no effort to make this particularly efficient.
1748 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
1751 struct rx_sw_desc *d;
1755 q->cidx = q->size - 1;
1758 d = &q->sdesc[q->cidx];
1759 d->page = si->frags[frags].page;
1760 d->dma_addr |= RX_UNMAPPED_BUF;
1766 * is_new_response - check if a response is newly written
1767 * @r: the response descriptor
1768 * @q: the response queue
1770 * Returns true if a response descriptor contains a yet unprocessed response.
1773 static inline bool is_new_response(const struct rsp_ctrl *r,
1774 const struct sge_rspq *q)
1776 return RSPD_GEN(r->type_gen) == q->gen;
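/* The generation bit written by the hardware flips each time the queue
 * wraps (q->gen is toggled in rspq_next() below), so a descriptor left
 * over from the previous pass around the ring fails this comparison.
 */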
1780 * rspq_next - advance to the next entry in a response queue
1783 * Updates the state of a response queue to advance it to the next entry.
1785 static inline void rspq_next(struct sge_rspq *q)
1787 q->cur_desc = (void *)q->cur_desc + q->iqe_len;
1788 if (unlikely(++q->cidx == q->size)) {
1791 q->cur_desc = q->desc;
1796 * process_responses - process responses from an SGE response queue
1797 * @q: the ingress queue to process
1798 * @budget: how many responses can be processed in this round
1800 * Process responses from an SGE response queue up to the supplied budget.
1801 * Responses include received packets as well as control messages from FW
1804 * Additionally choose the interrupt holdoff time for the next interrupt
1805 * on this queue. If the system is under memory shortage use a fairly
1806 * long delay to help recovery.
1808 static int process_responses(struct sge_rspq *q, int budget)
1811 int budget_left = budget;
1812 const struct rsp_ctrl *rc;
1813 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1814 struct adapter *adapter = q->adap;
1815 struct sge *s = &adapter->sge;
1817 while (likely(budget_left)) {
1818 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1819 if (!is_new_response(rc, q))
1823 rsp_type = RSPD_TYPE(rc->type_gen);
1824 if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1825 struct page_frag *fp;
1827 const struct rx_sw_desc *rsd;
1828 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
1830 if (len & RSPD_NEWBUF) {
1831 if (likely(q->offset > 0)) {
1832 free_rx_bufs(q->adap, &rxq->fl, 1);
1835 len = RSPD_LEN(len);
1839 /* gather packet fragments */
1840 for (frags = 0, fp = si.frags; ; frags++, fp++) {
1841 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1842 bufsz = get_buf_size(adapter, rsd);
1843 fp->page = rsd->page;
1844 fp->offset = q->offset;
1845 fp->size = min(bufsz, len);
1849 unmap_rx_buf(q->adap, &rxq->fl);
1853 * Last buffer remains mapped so explicitly make it
1854 * coherent for CPU access.
1856 dma_sync_single_for_cpu(q->adap->pdev_dev,
1858 fp->size, DMA_FROM_DEVICE);
1860 si.va = page_address(si.frags[0].page) +
1864 si.nfrags = frags + 1;
1865 ret = q->handler(q, q->cur_desc, &si);
1866 if (likely(ret == 0))
1867 q->offset += ALIGN(fp->size, s->fl_align);
1869 restore_rx_bufs(&si, &rxq->fl, frags);
1870 } else if (likely(rsp_type == RSP_TYPE_CPL)) {
1871 ret = q->handler(q, q->cur_desc, NULL);
1873 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
1876 if (unlikely(ret)) {
1877 /* couldn't process descriptor, back off for recovery */
1878 q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
1886 if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
1887 __refill_fl(q->adap, &rxq->fl);
1888 return budget - budget_left;
1892 * napi_rx_handler - the NAPI handler for Rx processing
1893 * @napi: the napi instance
1894 * @budget: how many packets we can process in this round
1896 * Handler for new data events when using NAPI. This does not need any
1897 * locking or protection from interrupts as data interrupts are off at
1898 * this point and other adapter interrupts do not interfere (the latter
1899 * in not a concern at all with MSI-X as non-data interrupts then have
1900 * a separate handler).
1902 static int napi_rx_handler(struct napi_struct *napi, int budget)
1904 unsigned int params;
1905 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1906 int work_done = process_responses(q, budget);
1908 if (likely(work_done < budget)) {
1909 napi_complete(napi);
1910 params = q->next_intr_params;
1911 q->next_intr_params = q->intr_params;
1913 params = QINTR_TIMER_IDX(7);
1915 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
1916 INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
1921 * The MSI-X interrupt handler for an SGE response queue.
1923 irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
1925 struct sge_rspq *q = cookie;
1927 napi_schedule(&q->napi);
1932 * Process the indirect interrupt entries in the interrupt queue and kick off
1933 * NAPI for each queue that has generated an entry.
1935 static unsigned int process_intrq(struct adapter *adap)
1937 unsigned int credits;
1938 const struct rsp_ctrl *rc;
1939 struct sge_rspq *q = &adap->sge.intrq;
1941 spin_lock(&adap->sge.intrq_lock);
1942 for (credits = 0; ; credits++) {
1943 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1944 if (!is_new_response(rc, q))
1948 if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
1949 unsigned int qid = ntohl(rc->pldbuflen_qid);
1951 qid -= adap->sge.ingr_start;
1952 napi_schedule(&adap->sge.ingr_map[qid]->napi);
1958 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
1959 INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
1960 spin_unlock(&adap->sge.intrq_lock);
1965 * The MSI interrupt handler handles data events from SGE response queues
1966 * as well as error and other async events, since they all share the same MSI vector.
1968 static irqreturn_t t4_intr_msi(int irq, void *cookie)
1970 struct adapter *adap = cookie;
1972 t4_slow_intr_handler(adap);
1973 process_intrq(adap);
1978 * Interrupt handler for legacy INTx interrupts.
1979 * Handles data events from SGE response queues as well as error and other
1980 * async events as they all use the same interrupt line.
1982 static irqreturn_t t4_intr_intx(int irq, void *cookie)
1984 struct adapter *adap = cookie;
1986 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
1987 if (t4_slow_intr_handler(adap) | process_intrq(adap)) /* bitwise |: both must run */
1989 return IRQ_NONE; /* probably shared interrupt */
1993 * t4_intr_handler - select the top-level interrupt handler
1994 * @adap: the adapter
1996 * Selects the top-level interrupt handler based on the type of interrupts
1997 * (MSI-X, MSI, or INTx).
1999 irq_handler_t t4_intr_handler(struct adapter *adap)
2001 if (adap->flags & USING_MSIX)
2002 return t4_sge_intr_msix;
2003 if (adap->flags & USING_MSI)
2005 return t4_intr_intx;
2008 static void sge_rx_timer_cb(unsigned long data)
2011 unsigned int i, cnt[2];
2012 struct adapter *adap = (struct adapter *)data;
2013 struct sge *s = &adap->sge;
2015 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
2016 for (m = s->starving_fl[i]; m; m &= m - 1) {
2017 struct sge_eth_rxq *rxq;
2018 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2019 struct sge_fl *fl = s->egr_map[id];
2021 clear_bit(id, s->starving_fl);
2022 smp_mb__after_clear_bit();
2024 if (fl_starving(fl)) {
2025 rxq = container_of(fl, struct sge_eth_rxq, fl);
2026 if (napi_reschedule(&rxq->rspq.napi))
2029 set_bit(id, s->starving_fl);
2033 t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
2034 cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
2035 cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
2037 for (i = 0; i < 2; i++)
2038 if (cnt[i] >= s->starve_thres) {
2039 if (s->idma_state[i] || cnt[i] == 0xffffffff)
2041 s->idma_state[i] = 1;
2042 t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
2043 m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
2044 dev_warn(adap->pdev_dev,
2045 "SGE idma%u starvation detected for queue %lu\n",
2046 i, m & 0xffff);
2047 } else if (s->idma_state[i])
2048 s->idma_state[i] = 0;
2050 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2053 static void sge_tx_timer_cb(unsigned long data)
2056 unsigned int i, budget;
2057 struct adapter *adap = (struct adapter *)data;
2058 struct sge *s = &adap->sge;
2060 for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
2061 for (m = s->txq_maperr[i]; m; m &= m - 1) {
2062 unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2063 struct sge_ofld_txq *txq = s->egr_map[id];
2065 clear_bit(id, s->txq_maperr);
2066 tasklet_schedule(&txq->qresume_tsk);
2069 budget = MAX_TIMER_TX_RECLAIM;
2070 i = s->ethtxq_rover;
2072 struct sge_eth_txq *q = &s->ethtxq[i];
2075 time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
2076 __netif_tx_trylock(q->txq)) {
2077 int avail = reclaimable(&q->q);
2083 free_tx_desc(adap, &q->q, avail, true);
2084 q->q.in_use -= avail;
2087 __netif_tx_unlock(q->txq);
2090 if (++i >= s->ethqsets)
2092 } while (budget && i != s->ethtxq_rover);
2093 s->ethtxq_rover = i;
2094 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2097 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2098 struct net_device *dev, int intr_idx,
2099 struct sge_fl *fl, rspq_handler_t hnd)
2103 struct sge *s = &adap->sge;
2104 struct port_info *pi = netdev_priv(dev);
2106 /* Size needs to be multiple of 16, including status entry. */
2107 iq->size = roundup(iq->size, 16);
2109 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
2110 &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
2114 memset(&c, 0, sizeof(c));
2115 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2116 FW_CMD_WRITE | FW_CMD_EXEC |
2117 FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
2118 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
2120 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
2121 FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
2122 FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
2123 FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
2125 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
2126 FW_IQ_CMD_IQGTSMODE |
2127 FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
2128 FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
2129 c.iqsize = htons(iq->size);
2130 c.iqaddr = cpu_to_be64(iq->phys_addr);
2133 fl->size = roundup(fl->size, 8);
2134 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2135 sizeof(struct rx_sw_desc), &fl->addr,
2136 &fl->sdesc, s->stat_len, NUMA_NO_NODE);
2140 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
2141 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN(1) |
2142 FW_IQ_CMD_FL0FETCHRO(1) |
2143 FW_IQ_CMD_FL0DATARO(1) |
2144 FW_IQ_CMD_FL0PADEN(1));
2145 c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
2146 FW_IQ_CMD_FL0FBMAX(3));
2147 c.fl0size = htons(flsz);
2148 c.fl0addr = cpu_to_be64(fl->addr);
2151 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2155 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2156 iq->cur_desc = iq->desc;
2159 iq->next_intr_params = iq->intr_params;
2160 iq->cntxt_id = ntohs(c.iqid);
2161 iq->abs_id = ntohs(c.physiqid);
2162 iq->size--; /* subtract status entry */
2167 /* set offset to -1 to distinguish ingress queues without FL */
2168 iq->offset = fl ? 0 : -1;
2170 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2173 fl->cntxt_id = ntohs(c.fl0id);
2174 fl->avail = fl->pend_cred = 0;
2175 fl->pidx = fl->cidx = 0;
2176 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2177 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2178 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2186 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2187 iq->desc, iq->phys_addr);
2190 if (fl && fl->desc) {
2193 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2194 fl->desc, fl->addr);
2200 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2203 if (!is_t4(adap->params.chip)) {
2205 unsigned short udb_density;
2206 unsigned long qpshift;
2209 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
2210 udb_density = 1 << QUEUESPERPAGEPF0_GET((t4_read_reg(adap,
2211 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp));
2212 qpshift = PAGE_SHIFT - ilog2(udb_density);
2213 q->udb = q->cntxt_id << qpshift;
2214 q->udb &= PAGE_MASK;
2215 page = q->udb / PAGE_SIZE;
2216 q->udb += (q->cntxt_id - (page * udb_density)) * 128;
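/* Worked example, assuming 4 KB pages and a udb_density of 16 (qpshift ==
 * 8): for cntxt_id 18, udb = 18 << 8 = 0x1200, rounded down to page
 * 0x1000; queue 18 is 18 - 1 * 16 = 2 queues into that page, giving a
 * final doorbell offset of 0x1000 + 2 * 128 = 0x1100.
 */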
2220 q->cidx = q->pidx = 0;
2221 q->stops = q->restarts = 0;
2222 q->stat = (void *)&q->desc[q->size];
2223 spin_lock_init(&q->db_lock);
2224 adap->sge.egr_map[id - adap->sge.egr_start] = q;
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid)
{
	int ret, nentries;
	struct fw_eq_eth_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
			netdev_queue_numa_node_read(netdevq));
	if (!txq->q.desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
				 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
				   FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_ETH_CMD_FETCHRO(1) |
				   FW_EQ_ETH_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
				  FW_EQ_ETH_CMD_FBMAX(3) |
				  FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
				  FW_EQ_ETH_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
	txq->txq = netdevq;
	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
	txq->mapping_err = 0;
	return 0;
}
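/*
 * Typical call site (illustrative sketch; the actual caller lives in the
 * driver proper, not in this file): one Ethernet TX queue is allocated
 * per netdev TX queue, with completions steered to the firmware event
 * queue, e.g.
 *
 *	err = t4_sge_alloc_eth_txq(adap, &adap->sge.ethtxq[i], dev,
 *				   netdev_get_tx_queue(dev, i),
 *				   adap->sge.fw_evtq.cntxt_id);
 */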
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct net_device *dev, unsigned int iqid,
			  unsigned int cmplqid)
{
	int ret, nentries;
	struct fw_eq_ctrl_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
				 NULL, 0, NUMA_NO_NODE);
	if (!txq->q.desc)
		return -ENOMEM;

	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_CTRL_CMD_PFN(adap->fn) |
			    FW_EQ_CTRL_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
				 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
	c.physeqid_pkd = htonl(0);
	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
				   FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_CTRL_CMD_FETCHRO |
				   FW_EQ_CTRL_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
				  FW_EQ_CTRL_CMD_FBMAX(3) |
				  FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
				  FW_EQ_CTRL_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
	txq->full = 0;
	return 0;
}
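/*
 * Control queues carry firmware work requests rather than packets.  If
 * the ring backs up, ctrl_xmit() (earlier in this file) parks pending
 * skbs on ->sendq, and the ->qresume_tsk tasklet (restart_ctrlq) drains
 * them once descriptors are reclaimed; this is also why teardown must
 * tasklet_kill() the queue before freeing it.
 */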
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
			  struct net_device *dev, unsigned int iqid)
{
	int ret, nentries;
	struct fw_eq_ofld_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
			NUMA_NO_NODE);
	if (!txq->q.desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_EQ_OFLD_CMD_PFN(adap->fn) |
			    FW_EQ_OFLD_CMD_VFN(0));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
				 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
				   FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
				   FW_EQ_OFLD_CMD_FETCHRO(1) |
				   FW_EQ_OFLD_CMD_IQID(iqid));
	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
				  FW_EQ_OFLD_CMD_FBMAX(3) |
				  FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
				  FW_EQ_OFLD_CMD_EQSIZE(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
	txq->full = 0;
	txq->mapping_err = 0;
	return 0;
}
static void free_txq(struct adapter *adap, struct sge_txq *q)
{
	struct sge *s = &adap->sge;

	dma_free_coherent(adap->pdev_dev,
			  q->size * sizeof(struct tx_desc) + s->stat_len,
			  q->desc, q->phys_addr);
	q->cntxt_id = 0;
	q->sdesc = NULL;
	q->desc = NULL;
}
static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
			 struct sge_fl *fl)
{
	struct sge *s = &adap->sge;
	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
		   rq->cntxt_id, fl_id, 0xffff);
	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
			  rq->desc, rq->phys_addr);
	netif_napi_del(&rq->napi);
	rq->netdev = NULL;
	rq->cntxt_id = rq->abs_id = 0;
	rq->desc = NULL;

	if (fl) {
		free_rx_bufs(adap, fl, fl->avail);
		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
				  fl->desc, fl->addr);
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		fl->cntxt_id = 0;
		fl->desc = NULL;
	}
}
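/*
 * Note the teardown order above: the ingress map entry is cleared and
 * the firmware context released via t4_iq_free() before the DMA ring
 * itself is freed, so the hardware can no longer post to the memory by
 * the time it is returned.  The (rq->size + 1) undoes the status-entry
 * subtraction made at allocation time, and fl->size * 8 matches the
 * __be64 pointer entries the free list ring was allocated with.
 */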
/**
 *	t4_free_sge_resources - free SGE resources
 *	@adap: the adapter
 *
 *	Frees resources used by the SGE queue sets.
 */
void t4_free_sge_resources(struct adapter *adap)
{
	int i;
	struct sge_eth_rxq *eq = adap->sge.ethrxq;
	struct sge_eth_txq *etq = adap->sge.ethtxq;
	struct sge_ofld_rxq *oq = adap->sge.ofldrxq;

	/* clean up Ethernet Tx/Rx queues */
	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
		if (eq->rspq.desc)
			free_rspq_fl(adap, &eq->rspq, &eq->fl);
		if (etq->q.desc) {
			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
				       etq->q.cntxt_id);
			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
			kfree(etq->q.sdesc);
			free_txq(adap, &etq->q);
		}
	}

	/* clean up RDMA and iSCSI Rx queues */
	for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
		if (oq->rspq.desc)
			free_rspq_fl(adap, &oq->rspq, &oq->fl);
	}
	for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
		if (oq->rspq.desc)
			free_rspq_fl(adap, &oq->rspq, &oq->fl);
	}

	/* clean up offload Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];

		if (q->q.desc) {
			tasklet_kill(&q->qresume_tsk);
			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
					q->q.cntxt_id);
			free_tx_desc(adap, &q->q, q->q.in_use, false);
			kfree(q->q.sdesc);
			__skb_queue_purge(&q->sendq);
			free_txq(adap, &q->q);
		}
	}

	/* clean up control Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];

		if (cq->q.desc) {
			tasklet_kill(&cq->qresume_tsk);
			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
					cq->q.cntxt_id);
			__skb_queue_purge(&cq->sendq);
			free_txq(adap, &cq->q);
		}
	}

	if (adap->sge.fw_evtq.desc)
		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);

	if (adap->sge.intrq.desc)
		free_rspq_fl(adap, &adap->sge.intrq, NULL);

	/* clear the reverse egress queue map */
	memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
}
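/*
 * The qresume tasklets are killed before their queues are freed so a
 * late-scheduled restart cannot touch a ring that is going away, and
 * the reverse egress map is wiped only at the very end, once no entry
 * can refer to live queue state any more.
 */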
void t4_sge_start(struct adapter *adap)
{
	adap->sge.ethtxq_rover = 0;
	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
}
/**
 *	t4_sge_stop - disable SGE operation
 *	@adap: the adapter
 *
 *	Stop tasklets and timers associated with the DMA engine.  Note that
 *	this is effective only if measures have been taken to disable any HW
 *	events that may restart them.
 */
void t4_sge_stop(struct adapter *adap)
{
	int i;
	struct sge *s = &adap->sge;

	if (in_interrupt())	/* actions below require waiting */
		return;

	if (s->rx_timer.function)
		del_timer_sync(&s->rx_timer);
	if (s->tx_timer.function)
		del_timer_sync(&s->tx_timer);

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
		struct sge_ofld_txq *q = &s->ofldtxq[i];

		if (q->q.desc)
			tasklet_kill(&q->qresume_tsk);
	}
	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
		struct sge_ctrl_txq *cq = &s->ctrlq[i];

		if (cq->q.desc)
			tasklet_kill(&cq->qresume_tsk);
	}
}
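/*
 * del_timer_sync() and tasklet_kill() both wait for any currently
 * running handler to complete, which is not permitted from interrupt
 * context; hence the in_interrupt() bail-out at the top of
 * t4_sge_stop() above.
 */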
/**
 *	t4_sge_init - initialize SGE
 *	@adap: the adapter
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queues here, instead the driver
 *	top-level must request them individually.
 *
 *	Called in two different modes:
 *
 *	 1. Perform actual hardware initialization and record hard-coded
 *	    parameters which were used.  This gets used when we're the
 *	    Master PF and the Firmware Configuration File support didn't
 *	    work for some reason.
 *
 *	 2. We're not the Master PF or initialization was performed with
 *	    a Firmware Configuration File.  In this case we need to grab
 *	    any of the SGE operating parameters that we need to have in
 *	    order to do our job and make sure we can live with them.
 */
static int t4_sge_init_soft(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;

	/*
	 * Verify that CPL messages are going to the Ingress Queue for
	 * process_responses() and that only packet data is going to the
	 * Free Lists.
	 */
	if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
	    RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
		return -EINVAL;
	}

	/*
	 * Validate the Host Buffer Register Array indices that we want to
	 * use ...
	 *
	 * XXX Note that we should really read through the Host Buffer Size
	 * XXX register array and find the indices of the Buffer Sizes which
	 * XXX meet our needs!
	 */
	#define READ_FL_BUF(x) \
		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))

	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);

	#undef READ_FL_BUF

	/*
	 * The Page Size Buffer must be exactly equal to our Page Size and
	 * the Large Page Size Buffer should be 0 (disabled) or a power of 2
	 * no smaller than the Page Size Buffer.
	 */
	if (fl_small_pg != PAGE_SIZE ||
	    (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
				  (fl_large_pg & (fl_large_pg-1)) != 0))) {
		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
			fl_small_pg, fl_large_pg);
		return -EINVAL;
	}
	if (fl_large_pg)
		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;

	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
			fl_small_mtu, fl_large_mtu);
		return -EINVAL;
	}
	/*
	 * Retrieve our RX interrupt holdoff timer values and counter
	 * threshold values from the SGE parameters.
	 */
	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
	s->timer_val[0] = core_ticks_to_us(adap,
					   TIMERVALUE0_GET(timer_value_0_and_1));
	s->timer_val[1] = core_ticks_to_us(adap,
					   TIMERVALUE1_GET(timer_value_0_and_1));
	s->timer_val[2] = core_ticks_to_us(adap,
					   TIMERVALUE2_GET(timer_value_2_and_3));
	s->timer_val[3] = core_ticks_to_us(adap,
					   TIMERVALUE3_GET(timer_value_2_and_3));
	s->timer_val[4] = core_ticks_to_us(adap,
					   TIMERVALUE4_GET(timer_value_4_and_5));
	s->timer_val[5] = core_ticks_to_us(adap,
					   TIMERVALUE5_GET(timer_value_4_and_5));

	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
	s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
	s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
	s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
	s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);

	return 0;
}
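/*
 * Illustrative conversion (numbers assumed): the SGE holdoff timers are
 * kept in core-clock ticks, so with a 200 MHz core clock a TIMERVALUE
 * field of 1000 ticks converts to core_ticks_to_us() = 1000 / 200 = 5 us
 * of interrupt holdoff.  t4_sge_init_hard() below performs the inverse
 * us_to_core_ticks() conversion when it programs these registers.
 */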
static int t4_sge_init_hard(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	/*
	 * Set up our basic SGE mode to deliver CPL messages to our Ingress
	 * Queue and Packet Data to the Free List.
	 */
	t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
			 RXPKTCPLMODE_MASK);

	/*
	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
	 * and generate an interrupt when this occurs so we can recover.
	 */
	if (is_t4(adap->params.chip)) {
		t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
				 V_HP_INT_THRESH(M_HP_INT_THRESH) |
				 V_LP_INT_THRESH(M_LP_INT_THRESH),
				 V_HP_INT_THRESH(dbfifo_int_thresh) |
				 V_LP_INT_THRESH(dbfifo_int_thresh));
	} else {
		t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
				 V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
				 V_LP_INT_THRESH_T5(dbfifo_int_thresh));
		t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
				 V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
				 V_HP_INT_THRESH_T5(dbfifo_int_thresh));
	}
	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
			 F_ENABLE_DROP);

	/*
	 * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
	 * t4_fixup_host_params().
	 */
	s->fl_pg_order = FL_PG_ORDER;
	if (s->fl_pg_order)
		t4_write_reg(adap,
			     SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
			     PAGE_SIZE << FL_PG_ORDER);
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
		     FL_MTU_SMALL_BUFSIZE(adap));
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
		     FL_MTU_LARGE_BUFSIZE(adap));

	/*
	 * Note that the SGE Ingress Packet Count Interrupt Threshold and
	 * Timer Holdoff values must be supplied by our caller.
	 */
	t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
		     THRESHOLD_0(s->counter_val[0]) |
		     THRESHOLD_1(s->counter_val[1]) |
		     THRESHOLD_2(s->counter_val[2]) |
		     THRESHOLD_3(s->counter_val[3]));
	t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
	t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
		     TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
		     TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
	t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
		     TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
		     TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));

	return 0;
}
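/*
 * The T4/T5 split in t4_sge_init_hard() above reflects a register layout
 * change: on T4 both the high- and low-priority doorbell FIFO interrupt
 * thresholds live in SGE_DBFIFO_STATUS, while on T5 the high-priority
 * threshold moved into SGE_DBFIFO_STATUS2, so each generation is
 * programmed through its own field macros.
 */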
int t4_sge_init(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 sge_control;
	int ret;

	/*
	 * Ingress Padding Boundary and Egress Status Page Size are set up by
	 * t4_fixup_host_params().
	 */
	sge_control = t4_read_reg(adap, SGE_CONTROL);
	s->pktshift = PKTSHIFT_GET(sge_control);
	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
	s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
			    X_INGPADBOUNDARY_SHIFT);

	if (adap->flags & USING_SOFT_PARAMS)
		ret = t4_sge_init_soft(adap);
	else
		ret = t4_sge_init_hard(adap);
	if (ret < 0)
		return ret;

	/*
	 * A FL with <= fl_starve_thres buffers is starving and a periodic
	 * timer will attempt to refill it.  This needs to be larger than the
	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
	 * stuck waiting for new packets while the SGE is waiting for us to
	 * give it more Free List entries.  (Note that the SGE's Egress
	 * Congestion Threshold is in units of 2 Free List pointers.)
	 */
	s->fl_starve_thres
		= EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
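	/*
	 * Worked example (illustrative value): if EGRTHRESHOLD reads back
	 * 64, the congestion threshold is 64 * 2 = 128 Free List pointers,
	 * so fl_starve_thres becomes 129 -- just past the point where the
	 * SGE would throttle egress waiting on FL credits.
	 */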
	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
	s->starve_thres = core_ticks_per_usec(adap) * 1000000;	/* 1 s */
	s->idma_state[0] = s->idma_state[1] = 0;
	spin_lock_init(&s->intrq_lock);

	return 0;
}