2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/slab.h>
40 #include <linux/prefetch.h>
46 #include "firmware_exports.h"
47 #include "cxgb3_offload.h"
51 #define SGE_RX_SM_BUF_SIZE 1536
53 #define SGE_RX_COPY_THRES 256
54 #define SGE_RX_PULL_LEN 128
56 #define SGE_PG_RSVD SMP_CACHE_BYTES
58 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
59 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
62 #define FL0_PG_CHUNK_SIZE 2048
63 #define FL0_PG_ORDER 0
64 #define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
65 #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
66 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
67 #define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
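/*
 * Example (assuming 4KB pages): FL0 carves each order-0 page into two 2KB
 * chunks, while FL1 makes order-1 (8KB) allocations that each hold a single
 * 8KB chunk, since PAGE_SIZE > 8192 is false in that configuration.
 */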
69 #define SGE_RX_DROP_THRES 16
70 #define RX_RECLAIM_PERIOD (HZ/4)
73 * Max number of Rx buffers we replenish at a time.
75 #define MAX_RX_REFILL 16U
77 * Period of the Tx buffer reclaim timer. This timer does not need to run
78 * frequently as Tx buffers are usually reclaimed by new Tx packets.
80 #define TX_RECLAIM_PERIOD (HZ / 4)
81 #define TX_RECLAIM_TIMER_CHUNK 64U
82 #define TX_RECLAIM_CHUNK 16U
84 /* WR size in bytes */
85 #define WR_LEN (WR_FLITS * 8)
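/*
 * A "flit" is a single 64-bit (8-byte) datum (cf. the flit[] array of a Tx
 * descriptor), so WR_LEN is simply WR_FLITS expressed in bytes.
 */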
 * Types of Tx queues in each queue set.  Order here matters; do not change it.
90 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
92 /* Values for sge_txq.flags */
94 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
95 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
99 __be64 flit[TX_DESC_FLITS];
109 struct tx_sw_desc { /* SW state per Tx descriptor */
111 u8 eop; /* set if last descriptor for packet */
112 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
113 u8 fragidx; /* first page fragment associated with descriptor */
114 s8 sflit; /* start flit of first SGL entry in descriptor */
117 struct rx_sw_desc { /* SW state per Rx descriptor */
120 struct fl_pg_chunk pg_chunk;
122 DEFINE_DMA_UNMAP_ADDR(dma_addr);
125 struct rsp_desc { /* response queue descriptor */
126 struct rss_header rss_hdr;
134 * Holds unmapping information for Tx packets that need deferred unmapping.
135 * This structure lives at skb->head and must be allocated by callers.
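 * As a rough sketch of its use: addr[0] holds the DMA address of the packet
 * headers (when present) and the remaining slots hold one address per page
 * fragment, in the order deferred_unmap_destructor() below consumes them.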
137 struct deferred_unmap_info {
138 struct pci_dev *pdev;
139 dma_addr_t addr[MAX_SKB_FRAGS + 1];
143 * Maps a number of flits to the number of Tx descriptors that can hold them.
146 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
148 * HW allows up to 4 descriptors to be combined into a WR.
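 *
 * Worked example: with SGE_NUM_GENBITS == 2 one flit of each descriptor
 * carries the second generation bit (see wr_gen2()), so WR_FLITS is 15 and a
 * 20-flit WR needs 1 + (20 - 2) / 14 = 2 descriptors, matching
 * flit_desc_map[20] == 2 in the table below.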
150 static u8 flit_desc_map[] = {
152 #if SGE_NUM_GENBITS == 1
153 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
154 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
155 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
156 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
157 #elif SGE_NUM_GENBITS == 2
158 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
159 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
160 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
161 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
163 # error "SGE_NUM_GENBITS must be 1 or 2"
167 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
169 return container_of(q, struct sge_qset, fl[qidx]);
172 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
174 return container_of(q, struct sge_qset, rspq);
177 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
179 return container_of(q, struct sge_qset, txq[qidx]);
183 * refill_rspq - replenish an SGE response queue
184 * @adapter: the adapter
185 * @q: the response queue to replenish
186 * @credits: how many new responses to make available
188 * Replenishes a response queue by making the supplied number of responses
191 static inline void refill_rspq(struct adapter *adapter,
192 const struct sge_rspq *q, unsigned int credits)
195 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
196 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
200 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 * Returns true if the platform needs sk_buff unmapping.  The compiler
 * optimizes away the unmapping code when this returns false.
205 static inline int need_skb_unmap(void)
207 #ifdef CONFIG_NEED_DMA_MAP_STATE
215 * unmap_skb - unmap a packet main body and its page fragments
217 * @q: the Tx queue containing Tx descriptors for the packet
218 * @cidx: index of Tx descriptor
219 * @pdev: the PCI device
221 * Unmap the main body of an sk_buff and its page fragments, if any.
222 * Because of the fairly complicated structure of our SGLs and the desire
223 * to conserve space for metadata, the information necessary to unmap an
224 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
225 * descriptors (the physical addresses of the various data buffers), and
226 * the SW descriptor state (assorted indices). The send functions
227 * initialize the indices for the first packet descriptor so we can unmap
228 * the buffers held in the first Tx descriptor here, and we have enough
229 * information at this point to set the state for the next Tx descriptor.
231 * Note that it is possible to clean up the first descriptor of a packet
232 * before the send routines have written the next descriptors, but this
233 * race does not cause any problem. We just end up writing the unmapping
234 * info for the descriptor first.
236 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
237 unsigned int cidx, struct pci_dev *pdev)
239 const struct sg_ent *sgp;
240 struct tx_sw_desc *d = &q->sdesc[cidx];
241 int nfrags, frag_idx, curflit, j = d->addr_idx;
243 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
244 frag_idx = d->fragidx;
246 if (frag_idx == 0 && skb_headlen(skb)) {
247 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
248 skb_headlen(skb), PCI_DMA_TODEVICE);
252 curflit = d->sflit + 1 + j;
253 nfrags = skb_shinfo(skb)->nr_frags;
255 while (frag_idx < nfrags && curflit < WR_FLITS) {
256 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
257 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
268 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
269 d = cidx + 1 == q->size ? q->sdesc : d + 1;
270 d->fragidx = frag_idx;
272 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
277 * free_tx_desc - reclaims Tx descriptors and their buffers
278 * @adapter: the adapter
279 * @q: the Tx queue to reclaim descriptors from
280 * @n: the number of descriptors to reclaim
282 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
283 * Tx buffers. Called with the Tx queue lock held.
285 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
288 struct tx_sw_desc *d;
289 struct pci_dev *pdev = adapter->pdev;
290 unsigned int cidx = q->cidx;
292 const int need_unmap = need_skb_unmap() &&
293 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
297 if (d->skb) { /* an SGL is present */
299 unmap_skb(d->skb, q, cidx, pdev);
306 if (++cidx == q->size) {
315 * reclaim_completed_tx - reclaims completed Tx descriptors
316 * @adapter: the adapter
317 * @q: the Tx queue to reclaim completed descriptors from
318 * @chunk: maximum number of descriptors to reclaim
320 * Reclaims Tx descriptors that the SGE has indicated it has processed,
321 * and frees the associated buffers if possible. Called with the Tx
324 static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
328 unsigned int reclaim = q->processed - q->cleaned;
330 reclaim = min(chunk, reclaim);
332 free_tx_desc(adapter, q, reclaim);
333 q->cleaned += reclaim;
334 q->in_use -= reclaim;
336 return q->processed - q->cleaned;
340 * should_restart_tx - are there enough resources to restart a Tx queue?
343 * Checks if there are enough descriptors to restart a suspended Tx queue.
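 * The test treats descriptors the SGE has already processed but that we have
 * not yet cleaned as free, so the queue is restarted once the genuinely
 * outstanding descriptors occupy less than half of the ring.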
345 static inline int should_restart_tx(const struct sge_txq *q)
347 unsigned int r = q->processed - q->cleaned;
349 return q->in_use - r < (q->size >> 1);
352 static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
353 struct rx_sw_desc *d)
355 if (q->use_pages && d->pg_chunk.page) {
356 (*d->pg_chunk.p_cnt)--;
357 if (!*d->pg_chunk.p_cnt)
360 q->alloc_size, PCI_DMA_FROMDEVICE);
362 put_page(d->pg_chunk.page);
363 d->pg_chunk.page = NULL;
365 pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
366 q->buf_size, PCI_DMA_FROMDEVICE);
373 * free_rx_bufs - free the Rx buffers on an SGE free list
374 * @pdev: the PCI device associated with the adapter
375 * @rxq: the SGE free list to clean up
377 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
378 * this queue should be stopped before calling this function.
380 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
382 unsigned int cidx = q->cidx;
384 while (q->credits--) {
385 struct rx_sw_desc *d = &q->sdesc[cidx];
388 clear_rx_desc(pdev, q, d);
389 if (++cidx == q->size)
393 if (q->pg_chunk.page) {
394 __free_pages(q->pg_chunk.page, q->order);
395 q->pg_chunk.page = NULL;
400 * add_one_rx_buf - add a packet buffer to a free-buffer list
401 * @va: buffer start VA
402 * @len: the buffer length
403 * @d: the HW Rx descriptor to write
404 * @sd: the SW Rx descriptor to write
405 * @gen: the generation bit value
406 * @pdev: the PCI device associated with the adapter
408 * Add a buffer of the given length to the supplied HW and SW Rx
411 static inline int add_one_rx_buf(void *va, unsigned int len,
412 struct rx_desc *d, struct rx_sw_desc *sd,
413 unsigned int gen, struct pci_dev *pdev)
417 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
418 if (unlikely(pci_dma_mapping_error(pdev, mapping)))
421 dma_unmap_addr_set(sd, dma_addr, mapping);
423 d->addr_lo = cpu_to_be32(mapping);
424 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
426 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
427 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
431 static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
434 d->addr_lo = cpu_to_be32(mapping);
435 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
437 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
438 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
442 static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
443 struct rx_sw_desc *sd, gfp_t gfp,
446 if (!q->pg_chunk.page) {
449 q->pg_chunk.page = alloc_pages(gfp, order);
450 if (unlikely(!q->pg_chunk.page))
452 q->pg_chunk.va = page_address(q->pg_chunk.page);
453 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
455 q->pg_chunk.offset = 0;
456 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
457 0, q->alloc_size, PCI_DMA_FROMDEVICE);
458 q->pg_chunk.mapping = mapping;
460 sd->pg_chunk = q->pg_chunk;
462 prefetch(sd->pg_chunk.p_cnt);
464 q->pg_chunk.offset += q->buf_size;
465 if (q->pg_chunk.offset == (PAGE_SIZE << order))
466 q->pg_chunk.page = NULL;
468 q->pg_chunk.va += q->buf_size;
469 get_page(q->pg_chunk.page);
472 if (sd->pg_chunk.offset == 0)
473 *sd->pg_chunk.p_cnt = 1;
475 *sd->pg_chunk.p_cnt += 1;
480 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
482 if (q->pend_cred >= q->credits / 4) {
485 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
490 * refill_fl - refill an SGE free-buffer list
491 * @adapter: the adapter
492 * @q: the free-list to refill
493 * @n: the number of new buffers to allocate
494 * @gfp: the gfp flags for allocating new buffers
496 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must ensure that
498 * @n does not exceed the queue's capacity.
500 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
502 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
503 struct rx_desc *d = &q->desc[q->pidx];
504 unsigned int count = 0;
511 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
513 nomem: q->alloc_failed++;
516 mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
517 dma_unmap_addr_set(sd, dma_addr, mapping);
519 add_one_rx_chunk(mapping, d, q->gen);
520 pci_dma_sync_single_for_device(adap->pdev, mapping,
521 q->buf_size - SGE_PG_RSVD,
526 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
531 buf_start = skb->data;
532 err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
535 clear_rx_desc(adap->pdev, q, sd);
542 if (++q->pidx == q->size) {
552 q->pend_cred += count;
558 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
560 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
561 GFP_ATOMIC | __GFP_COMP);
565 * recycle_rx_buf - recycle a receive buffer
566 * @adapter: the adapter
567 * @q: the SGE free list
568 * @idx: index of buffer to recycle
570 * Recycles the specified buffer on the given free list by adding it at
571 * the next available slot on the list.
573 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
576 struct rx_desc *from = &q->desc[idx];
577 struct rx_desc *to = &q->desc[q->pidx];
579 q->sdesc[q->pidx] = q->sdesc[idx];
580 to->addr_lo = from->addr_lo; /* already big endian */
581 to->addr_hi = from->addr_hi; /* likewise */
583 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
584 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
586 if (++q->pidx == q->size) {
597 * alloc_ring - allocate resources for an SGE descriptor ring
598 * @pdev: the PCI device
599 * @nelem: the number of descriptors
600 * @elem_size: the size of each descriptor
601 * @sw_size: the size of the SW state associated with each ring element
602 * @phys: the physical address of the allocated ring
603 * @metadata: address of the array holding the SW state for the ring
605 * Allocates resources for an SGE descriptor ring, such as Tx queues,
606 * free buffer lists, or response queues. Each SGE ring requires
607 * space for its HW descriptors plus, optionally, space for the SW state
608 * associated with each HW entry (the metadata). The function returns
609 * three values: the virtual address for the HW ring (the return value
610 * of the function), the physical address of the HW ring, and the address
613 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
614 size_t sw_size, dma_addr_t * phys, void *metadata)
616 size_t len = nelem * elem_size;
618 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
622 if (sw_size && metadata) {
623 s = kcalloc(nelem, sw_size, GFP_KERNEL);
626 dma_free_coherent(&pdev->dev, len, p, *phys);
629 *(void **)metadata = s;
 * t3_reset_qset - reset an SGE qset
 *
 * Reset the qset structure.  The NAPI structure is preserved in case the
 * qset is reincarnated, for example during EEH recovery.
643 static void t3_reset_qset(struct sge_qset *q)
646 !(q->adap->flags & NAPI_INIT)) {
647 memset(q, 0, sizeof(*q));
652 memset(&q->rspq, 0, sizeof(q->rspq));
653 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
654 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
656 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
657 q->rx_reclaim_timer.function = NULL;
659 napi_free_frags(&q->napi);
664 * free_qset - free the resources of an SGE queue set
665 * @adapter: the adapter owning the queue set
668 * Release the HW and SW resources associated with an SGE queue set, such
669 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
670 * queue set must be quiesced prior to calling this.
672 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
675 struct pci_dev *pdev = adapter->pdev;
677 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
679 spin_lock_irq(&adapter->sge.reg_lock);
680 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
681 spin_unlock_irq(&adapter->sge.reg_lock);
682 free_rx_bufs(pdev, &q->fl[i]);
683 kfree(q->fl[i].sdesc);
684 dma_free_coherent(&pdev->dev,
686 sizeof(struct rx_desc), q->fl[i].desc,
690 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
691 if (q->txq[i].desc) {
692 spin_lock_irq(&adapter->sge.reg_lock);
693 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
694 spin_unlock_irq(&adapter->sge.reg_lock);
695 if (q->txq[i].sdesc) {
696 free_tx_desc(adapter, &q->txq[i],
698 kfree(q->txq[i].sdesc);
700 dma_free_coherent(&pdev->dev,
702 sizeof(struct tx_desc),
703 q->txq[i].desc, q->txq[i].phys_addr);
704 __skb_queue_purge(&q->txq[i].sendq);
708 spin_lock_irq(&adapter->sge.reg_lock);
709 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
710 spin_unlock_irq(&adapter->sge.reg_lock);
711 dma_free_coherent(&pdev->dev,
712 q->rspq.size * sizeof(struct rsp_desc),
713 q->rspq.desc, q->rspq.phys_addr);
720 * init_qset_cntxt - initialize an SGE queue set context info
722 * @id: the queue set id
724 * Initializes the TIDs and context ids for the queues of a queue set.
726 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
728 qs->rspq.cntxt_id = id;
729 qs->fl[0].cntxt_id = 2 * id;
730 qs->fl[1].cntxt_id = 2 * id + 1;
731 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
732 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
733 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
734 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
735 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
739 * sgl_len - calculates the size of an SGL of the given capacity
740 * @n: the number of SGL entries
742 * Calculates the number of flits needed for a scatter/gather list that
743 * can hold the given number of entries.
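 *
 * Each pair of SGL entries packs into 3 flits (two 8-byte addresses plus two
 * 4-byte lengths) and a trailing odd entry takes 2 flits, so e.g.
 * sgl_len(3) = 4 + 1 = 5 flits.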
745 static inline unsigned int sgl_len(unsigned int n)
747 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
748 return (3 * n) / 2 + (n & 1);
752 * flits_to_desc - returns the num of Tx descriptors for the given flits
753 * @n: the number of flits
755 * Calculates the number of Tx descriptors needed for the supplied number
758 static inline unsigned int flits_to_desc(unsigned int n)
760 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
761 return flit_desc_map[n];
765 * get_packet - return the next ingress packet buffer from a free list
766 * @adap: the adapter that received the packet
767 * @fl: the SGE free list holding the packet
768 * @len: the packet length including any SGE padding
769 * @drop_thres: # of remaining buffers before we start dropping packets
771 * Get the next packet from a free list and complete setup of the
772 * sk_buff. If the packet is small we make a copy and recycle the
773 * original buffer, otherwise we use the original buffer itself. If a
774 * positive drop threshold is supplied packets are dropped and their
775 * buffers recycled if (a) the number of remaining buffers is under the
776 * threshold and the packet is too big to copy, or (b) the packet should
777 * be copied but there is no memory for the copy.
779 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
780 unsigned int len, unsigned int drop_thres)
782 struct sk_buff *skb = NULL;
783 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
785 prefetch(sd->skb->data);
788 if (len <= SGE_RX_COPY_THRES) {
789 skb = alloc_skb(len, GFP_ATOMIC);
790 if (likely(skb != NULL)) {
792 pci_dma_sync_single_for_cpu(adap->pdev,
793 dma_unmap_addr(sd, dma_addr), len,
795 memcpy(skb->data, sd->skb->data, len);
796 pci_dma_sync_single_for_device(adap->pdev,
797 dma_unmap_addr(sd, dma_addr), len,
799 } else if (!drop_thres)
802 recycle_rx_buf(adap, fl, fl->cidx);
806 if (unlikely(fl->credits < drop_thres) &&
807 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
808 GFP_ATOMIC | __GFP_COMP) == 0)
812 pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
813 fl->buf_size, PCI_DMA_FROMDEVICE);
816 __refill_fl(adap, fl);
821 * get_packet_pg - return the next ingress packet buffer from a free list
822 * @adap: the adapter that received the packet
823 * @fl: the SGE free list holding the packet
824 * @len: the packet length including any SGE padding
825 * @drop_thres: # of remaining buffers before we start dropping packets
827 * Get the next packet from a free list populated with page chunks.
828 * If the packet is small we make a copy and recycle the original buffer,
829 * otherwise we attach the original buffer as a page fragment to a fresh
830 * sk_buff. If a positive drop threshold is supplied packets are dropped
831 * and their buffers recycled if (a) the number of remaining buffers is
832 * under the threshold and the packet is too big to copy, or (b) there's
835 * Note: this function is similar to @get_packet but deals with Rx buffers
836 * that are page chunks rather than sk_buffs.
838 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
839 struct sge_rspq *q, unsigned int len,
840 unsigned int drop_thres)
842 struct sk_buff *newskb, *skb;
843 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
845 dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
847 newskb = skb = q->pg_skb;
848 if (!skb && (len <= SGE_RX_COPY_THRES)) {
849 newskb = alloc_skb(len, GFP_ATOMIC);
850 if (likely(newskb != NULL)) {
851 __skb_put(newskb, len);
852 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
854 memcpy(newskb->data, sd->pg_chunk.va, len);
855 pci_dma_sync_single_for_device(adap->pdev, dma_addr,
858 } else if (!drop_thres)
862 recycle_rx_buf(adap, fl, fl->cidx);
867 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
870 prefetch(sd->pg_chunk.p_cnt);
873 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
875 if (unlikely(!newskb)) {
881 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
883 (*sd->pg_chunk.p_cnt)--;
884 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
885 pci_unmap_page(adap->pdev,
886 sd->pg_chunk.mapping,
890 __skb_put(newskb, SGE_RX_PULL_LEN);
891 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
892 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
893 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
894 len - SGE_RX_PULL_LEN);
896 newskb->data_len = len - SGE_RX_PULL_LEN;
897 newskb->truesize += newskb->data_len;
899 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
901 sd->pg_chunk.offset, len);
903 newskb->data_len += len;
904 newskb->truesize += len;
909 * We do not refill FLs here, we let the caller do it to overlap a
916 * get_imm_packet - return the next ingress packet buffer from a response
917 * @resp: the response descriptor containing the packet data
919 * Return a packet containing the immediate data of the given response.
921 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
923 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
926 __skb_put(skb, IMMED_PKT_SIZE);
927 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
933 * calc_tx_descs - calculate the number of Tx descriptors for a packet
936 * Returns the number of Tx descriptors needed for the given Ethernet
937 * packet. Ethernet packets require addition of WR and CPL headers.
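 *
 * For example, a non-TSO packet with a linear head and two page fragments
 * needs sgl_len(2 + 1) + 2 = 7 flits (the two extra flits hold the WR and
 * CPL_TX_PKT headers), which flits_to_desc() maps to a single descriptor.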
939 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
943 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
946 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
947 if (skb_shinfo(skb)->gso_size)
949 return flits_to_desc(flits);
953 * make_sgl - populate a scatter/gather list for a packet
955 * @sgp: the SGL to populate
956 * @start: start address of skb main body data to include in the SGL
957 * @len: length of skb main body data to include in the SGL
958 * @pdev: the PCI device
960 * Generates a scatter/gather list for the buffers that make up a packet
961 * and returns the SGL size in 8-byte words. The caller must size the SGL
964 static inline unsigned int make_sgl(const struct sk_buff *skb,
965 struct sg_ent *sgp, unsigned char *start,
966 unsigned int len, struct pci_dev *pdev)
969 unsigned int i, j = 0, nfrags;
972 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
973 sgp->len[0] = cpu_to_be32(len);
974 sgp->addr[0] = cpu_to_be64(mapping);
978 nfrags = skb_shinfo(skb)->nr_frags;
979 for (i = 0; i < nfrags; i++) {
980 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
982 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
984 sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
985 sgp->addr[j] = cpu_to_be64(mapping);
992 return ((nfrags + (len != 0)) * 3) / 2 + j;
996 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 * Ring the doorbell if a Tx queue is asleep.  There is a natural race
 * where the HW goes to sleep just after we check; in that case the
 * interrupt handler will detect the outstanding Tx packet and ring the
 * doorbell for us.
1005 * When GTS is disabled we unconditionally ring the doorbell.
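 *
 * With GTS enabled, the TXQ_RUNNING and TXQ_LAST_PKT_DB flags track whether
 * the fetch engine is already active, so the doorbell is written only when
 * the queue transitions from asleep to running.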
1007 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
1010 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1011 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1012 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1013 t3_write_reg(adap, A_SG_KDOORBELL,
1014 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1017 wmb(); /* write descriptors before telling HW */
1018 t3_write_reg(adap, A_SG_KDOORBELL,
1019 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1023 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
1025 #if SGE_NUM_GENBITS == 2
1026 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
1031 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
1032 * @ndesc: number of Tx descriptors spanned by the SGL
1033 * @skb: the packet corresponding to the WR
1034 * @d: first Tx descriptor to be written
1035 * @pidx: index of above descriptors
1036 * @q: the SGE Tx queue
1038 * @flits: number of flits to the start of the SGL in the first descriptor
1039 * @sgl_flits: the SGL size in flits
1040 * @gen: the Tx descriptor generation
1041 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
1042 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
1044 * Write a work request header and an associated SGL. If the SGL is
1045 * small enough to fit into one Tx descriptor it has already been written
1046 * and we just need to write the WR header. Otherwise we distribute the
1047 * SGL across the number of descriptors it spans.
1049 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
1050 struct tx_desc *d, unsigned int pidx,
1051 const struct sge_txq *q,
1052 const struct sg_ent *sgl,
1053 unsigned int flits, unsigned int sgl_flits,
1054 unsigned int gen, __be32 wr_hi,
1057 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1058 struct tx_sw_desc *sd = &q->sdesc[pidx];
1061 if (need_skb_unmap()) {
1067 if (likely(ndesc == 1)) {
1069 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1070 V_WR_SGLSFLT(flits)) | wr_hi;
1072 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1073 V_WR_GEN(gen)) | wr_lo;
1076 unsigned int ogen = gen;
1077 const u64 *fp = (const u64 *)sgl;
1078 struct work_request_hdr *wp = wrp;
1080 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1081 V_WR_SGLSFLT(flits)) | wr_hi;
1084 unsigned int avail = WR_FLITS - flits;
1086 if (avail > sgl_flits)
1088 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1098 if (++pidx == q->size) {
1106 wrp = (struct work_request_hdr *)d;
1107 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1108 V_WR_SGLSFLT(1)) | wr_hi;
1109 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1111 V_WR_GEN(gen)) | wr_lo;
1116 wrp->wr_hi |= htonl(F_WR_EOP);
1118 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1119 wr_gen2((struct tx_desc *)wp, ogen);
1120 WARN_ON(ndesc != 0);
1125 * write_tx_pkt_wr - write a TX_PKT work request
1126 * @adap: the adapter
1127 * @skb: the packet to send
1128 * @pi: the egress interface
1129 * @pidx: index of the first Tx descriptor to write
1130 * @gen: the generation value to use
1132 * @ndesc: number of descriptors the packet will occupy
1133 * @compl: the value of the COMPL bit to use
1135 * Generate a TX_PKT work request to send the supplied packet.
1137 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1138 const struct port_info *pi,
1139 unsigned int pidx, unsigned int gen,
1140 struct sge_txq *q, unsigned int ndesc,
1143 unsigned int flits, sgl_flits, cntrl, tso_info;
1144 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1145 struct tx_desc *d = &q->desc[pidx];
1146 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1148 cpl->len = htonl(skb->len);
1149 cntrl = V_TXPKT_INTF(pi->port_id);
1151 if (vlan_tx_tag_present(skb))
1152 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1154 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1157 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1160 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1161 hdr->cntrl = htonl(cntrl);
1162 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1163 CPL_ETH_II : CPL_ETH_II_VLAN;
1164 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1165 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1166 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1167 hdr->lso_info = htonl(tso_info);
1170 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1171 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1172 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1173 cpl->cntrl = htonl(cntrl);
1175 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1176 q->sdesc[pidx].skb = NULL;
1178 skb_copy_from_linear_data(skb, &d->flit[2],
1181 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1183 flits = (skb->len + 7) / 8 + 2;
1184 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1185 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1186 | F_WR_SOP | F_WR_EOP | compl);
1188 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1189 V_WR_TID(q->token));
1198 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1199 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
1201 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1202 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1203 htonl(V_WR_TID(q->token)));
1206 static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1207 struct sge_qset *qs, struct sge_txq *q)
1209 netif_tx_stop_queue(txq);
1210 set_bit(TXQ_ETH, &qs->txq_stopped);
 * t3_eth_xmit - add a packet to the Ethernet Tx queue
1217 * @dev: the egress net device
1219 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1221 netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1224 unsigned int ndesc, pidx, credits, gen, compl;
1225 const struct port_info *pi = netdev_priv(dev);
1226 struct adapter *adap = pi->adapter;
1227 struct netdev_queue *txq;
1228 struct sge_qset *qs;
 * The chip's minimum packet length is 9 octets, but we play it safe and
 * reject anything shorter than an Ethernet header.
1235 if (unlikely(skb->len < ETH_HLEN)) {
1237 return NETDEV_TX_OK;
1240 qidx = skb_get_queue_mapping(skb);
1242 q = &qs->txq[TXQ_ETH];
1243 txq = netdev_get_tx_queue(dev, qidx);
1245 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1247 credits = q->size - q->in_use;
1248 ndesc = calc_tx_descs(skb);
1250 if (unlikely(credits < ndesc)) {
1251 t3_stop_tx_queue(txq, qs, q);
1252 dev_err(&adap->pdev->dev,
1253 "%s: Tx ring %u full while queue awake!\n",
1254 dev->name, q->cntxt_id & 7);
1255 return NETDEV_TX_BUSY;
1259 if (unlikely(credits - ndesc < q->stop_thres)) {
1260 t3_stop_tx_queue(txq, qs, q);
1262 if (should_restart_tx(q) &&
1263 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1265 netif_tx_start_queue(txq);
1270 q->unacked += ndesc;
1271 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
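/* The COMPL bit comes from bit 3 of the running unacked count above, so a
 * completion/credit update is requested periodically rather than for every
 * packet. */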
1275 if (q->pidx >= q->size) {
1280 /* update port statistics */
1281 if (skb->ip_summed == CHECKSUM_PARTIAL)
1282 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1283 if (skb_shinfo(skb)->gso_size)
1284 qs->port_stats[SGE_PSTAT_TSO]++;
1285 if (vlan_tx_tag_present(skb))
1286 qs->port_stats[SGE_PSTAT_VLANINS]++;
1289 * We do not use Tx completion interrupts to free DMAd Tx packets.
1290 * This is good for performance but means that we rely on new Tx
1291 * packets arriving to run the destructors of completed packets,
1292 * which open up space in their sockets' send queues. Sometimes
1293 * we do not get such new packets causing Tx to stall. A single
1294 * UDP transmitter is a good example of this situation. We have
1295 * a clean up timer that periodically reclaims completed packets
1296 * but it doesn't run often enough (nor do we want it to) to prevent
1297 * lengthy stalls. A solution to this problem is to run the
1298 * destructor early, after the packet is queued but before it's DMAd.
 * The downside is that we lie to socket memory accounting, but the amount
1300 * of extra memory is reasonable (limited by the number of Tx
1301 * descriptors), the packets do actually get freed quickly by new
1302 * packets almost always, and for protocols like TCP that wait for
1303 * acks to really free up the data the extra memory is even less.
1304 * On the positive side we run the destructors on the sending CPU
1305 * rather than on a potentially different completing CPU, usually a
1306 * good thing. We also run them without holding our Tx queue lock,
1307 * unlike what reclaim_completed_tx() would otherwise do.
1309 * Run the destructor before telling the DMA engine about the packet
1310 * to make sure it doesn't complete and get freed prematurely.
1312 if (likely(!skb_shared(skb)))
1315 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1316 check_ring_tx_db(adap, q);
1317 return NETDEV_TX_OK;
1321 * write_imm - write a packet into a Tx descriptor as immediate data
1322 * @d: the Tx descriptor to write
1324 * @len: the length of packet data to write as immediate data
1325 * @gen: the generation bit value to write
1327 * Writes a packet as immediate data into a Tx descriptor. The packet
1328 * contains a work request at its beginning. We must write the packet
1329 * carefully so the SGE doesn't read it accidentally before it's written
1332 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1333 unsigned int len, unsigned int gen)
1335 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1336 struct work_request_hdr *to = (struct work_request_hdr *)d;
1338 if (likely(!skb->data_len))
1339 memcpy(&to[1], &from[1], len - sizeof(*from));
1341 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1343 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1344 V_WR_BCNTLFLT(len & 7));
1346 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1347 V_WR_LEN((len + 7) / 8));
1353 * check_desc_avail - check descriptor availability on a send queue
1354 * @adap: the adapter
1355 * @q: the send queue
1356 * @skb: the packet needing the descriptors
1357 * @ndesc: the number of Tx descriptors needed
1358 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1360 * Checks if the requested number of Tx descriptors is available on an
1361 * SGE send queue. If the queue is already suspended or not enough
1362 * descriptors are available the packet is queued for later transmission.
1363 * Must be called with the Tx queue locked.
1365 * Returns 0 if enough descriptors are available, 1 if there aren't
1366 * enough descriptors and the packet has been queued, and 2 if the caller
1367 * needs to retry because there weren't enough descriptors at the
1368 * beginning of the call but some freed up in the mean time.
1370 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1371 struct sk_buff *skb, unsigned int ndesc,
1374 if (unlikely(!skb_queue_empty(&q->sendq))) {
1375 addq_exit:__skb_queue_tail(&q->sendq, skb);
1378 if (unlikely(q->size - q->in_use < ndesc)) {
1379 struct sge_qset *qs = txq_to_qset(q, qid);
1381 set_bit(qid, &qs->txq_stopped);
1382 smp_mb__after_clear_bit();
1384 if (should_restart_tx(q) &&
1385 test_and_clear_bit(qid, &qs->txq_stopped))
1395 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1396 * @q: the SGE control Tx queue
1398 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1399 * that send only immediate data (presently just the control queues) and
1400 * thus do not have any sk_buffs to release.
1402 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1404 unsigned int reclaim = q->processed - q->cleaned;
1406 q->in_use -= reclaim;
1407 q->cleaned += reclaim;
1410 static inline int immediate(const struct sk_buff *skb)
1412 return skb->len <= WR_LEN;
1416 * ctrl_xmit - send a packet through an SGE control Tx queue
1417 * @adap: the adapter
1418 * @q: the control queue
1421 * Send a packet through an SGE control Tx queue. Packets sent through
1422 * a control queue must fit entirely as immediate data in a single Tx
1423 * descriptor and have no page fragments.
1425 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1426 struct sk_buff *skb)
1429 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1431 if (unlikely(!immediate(skb))) {
1434 return NET_XMIT_SUCCESS;
1437 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1438 wrp->wr_lo = htonl(V_WR_TID(q->token));
1440 spin_lock(&q->lock);
1441 again:reclaim_completed_tx_imm(q);
1443 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1444 if (unlikely(ret)) {
1446 spin_unlock(&q->lock);
1452 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1455 if (++q->pidx >= q->size) {
1459 spin_unlock(&q->lock);
1461 t3_write_reg(adap, A_SG_KDOORBELL,
1462 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1463 return NET_XMIT_SUCCESS;
1467 * restart_ctrlq - restart a suspended control queue
 * @qs: the queue set containing the control queue
1470 * Resumes transmission on a suspended Tx control queue.
1472 static void restart_ctrlq(unsigned long data)
1474 struct sk_buff *skb;
1475 struct sge_qset *qs = (struct sge_qset *)data;
1476 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1478 spin_lock(&q->lock);
1479 again:reclaim_completed_tx_imm(q);
1481 while (q->in_use < q->size &&
1482 (skb = __skb_dequeue(&q->sendq)) != NULL) {
1484 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1486 if (++q->pidx >= q->size) {
1493 if (!skb_queue_empty(&q->sendq)) {
1494 set_bit(TXQ_CTRL, &qs->txq_stopped);
1495 smp_mb__after_clear_bit();
1497 if (should_restart_tx(q) &&
1498 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1503 spin_unlock(&q->lock);
1505 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1506 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1510 * Send a management message through control queue 0
1512 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1516 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1523 * deferred_unmap_destructor - unmap a packet when it is freed
1526 * This is the packet destructor used for Tx packets that need to remain
1527 * mapped until they are freed rather than until their Tx descriptors are
1530 static void deferred_unmap_destructor(struct sk_buff *skb)
1533 const dma_addr_t *p;
1534 const struct skb_shared_info *si;
1535 const struct deferred_unmap_info *dui;
1537 dui = (struct deferred_unmap_info *)skb->head;
1540 if (skb_tail_pointer(skb) - skb_transport_header(skb))
1541 pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) -
1542 skb_transport_header(skb), PCI_DMA_TODEVICE);
1544 si = skb_shinfo(skb);
1545 for (i = 0; i < si->nr_frags; i++)
1546 pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
1550 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1551 const struct sg_ent *sgl, int sgl_flits)
1554 struct deferred_unmap_info *dui;
1556 dui = (struct deferred_unmap_info *)skb->head;
1558 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1559 *p++ = be64_to_cpu(sgl->addr[0]);
1560 *p++ = be64_to_cpu(sgl->addr[1]);
1563 *p = be64_to_cpu(sgl->addr[0]);
1567 * write_ofld_wr - write an offload work request
1568 * @adap: the adapter
1569 * @skb: the packet to send
1571 * @pidx: index of the first Tx descriptor to write
1572 * @gen: the generation value to use
1573 * @ndesc: number of descriptors the packet will occupy
1575 * Write an offload work request to send the supplied packet. The packet
1576 * data already carry the work request with most fields populated.
1578 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1579 struct sge_txq *q, unsigned int pidx,
1580 unsigned int gen, unsigned int ndesc)
1582 unsigned int sgl_flits, flits;
1583 struct work_request_hdr *from;
1584 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1585 struct tx_desc *d = &q->desc[pidx];
1587 if (immediate(skb)) {
1588 q->sdesc[pidx].skb = NULL;
1589 write_imm(d, skb, skb->len, gen);
1593 /* Only TX_DATA builds SGLs */
1595 from = (struct work_request_hdr *)skb->data;
1596 memcpy(&d->flit[1], &from[1],
1597 skb_transport_offset(skb) - sizeof(*from));
1599 flits = skb_transport_offset(skb) / 8;
1600 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1601 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1602 skb->tail - skb->transport_header,
1604 if (need_skb_unmap()) {
1605 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1606 skb->destructor = deferred_unmap_destructor;
1609 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1610 gen, from->wr_hi, from->wr_lo);
1614 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1617 * Returns the number of Tx descriptors needed for the given offload
1618 * packet. These packets are already fully constructed.
1620 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1622 unsigned int flits, cnt;
1624 if (skb->len <= WR_LEN)
1625 return 1; /* packet fits as immediate data */
1627 flits = skb_transport_offset(skb) / 8; /* headers */
1628 cnt = skb_shinfo(skb)->nr_frags;
1629 if (skb_tail_pointer(skb) != skb_transport_header(skb))
1631 return flits_to_desc(flits + sgl_len(cnt));
1635 * ofld_xmit - send a packet through an offload queue
1636 * @adap: the adapter
1637 * @q: the Tx offload queue
1640 * Send an offload packet through an SGE offload queue.
1642 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1643 struct sk_buff *skb)
1646 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1648 spin_lock(&q->lock);
1649 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1651 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1652 if (unlikely(ret)) {
1654 skb->priority = ndesc; /* save for restart */
1655 spin_unlock(&q->lock);
1665 if (q->pidx >= q->size) {
1669 spin_unlock(&q->lock);
1671 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1672 check_ring_tx_db(adap, q);
1673 return NET_XMIT_SUCCESS;
1677 * restart_offloadq - restart a suspended offload queue
 * @qs: the queue set containing the offload queue
1680 * Resumes transmission on a suspended Tx offload queue.
1682 static void restart_offloadq(unsigned long data)
1684 struct sk_buff *skb;
1685 struct sge_qset *qs = (struct sge_qset *)data;
1686 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1687 const struct port_info *pi = netdev_priv(qs->netdev);
1688 struct adapter *adap = pi->adapter;
1690 spin_lock(&q->lock);
1691 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1693 while ((skb = skb_peek(&q->sendq)) != NULL) {
1694 unsigned int gen, pidx;
1695 unsigned int ndesc = skb->priority;
1697 if (unlikely(q->size - q->in_use < ndesc)) {
1698 set_bit(TXQ_OFLD, &qs->txq_stopped);
1699 smp_mb__after_clear_bit();
1701 if (should_restart_tx(q) &&
1702 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1712 if (q->pidx >= q->size) {
1716 __skb_unlink(skb, &q->sendq);
1717 spin_unlock(&q->lock);
1719 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1720 spin_lock(&q->lock);
1722 spin_unlock(&q->lock);
1725 set_bit(TXQ_RUNNING, &q->flags);
1726 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1729 t3_write_reg(adap, A_SG_KDOORBELL,
1730 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1734 * queue_set - return the queue set a packet should use
1737 * Maps a packet to the SGE queue set it should use. The desired queue
1738 * set is carried in bits 1-3 in the packet's priority.
1740 static inline int queue_set(const struct sk_buff *skb)
1742 return skb->priority >> 1;
1746 * is_ctrl_pkt - return whether an offload packet is a control packet
1749 * Determines whether an offload packet should use an OFLD or a CTRL
1750 * Tx queue. This is indicated by bit 0 in the packet's priority.
1752 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1754 return skb->priority & 1;
1758 * t3_offload_tx - send an offload packet
1759 * @tdev: the offload device to send to
1762 * Sends an offload packet. We use the packet priority to select the
1763 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1764 * should be sent as regular or control, bits 1-3 select the queue set.
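 * For example, a priority of 5 (binary 101) selects queue set 2 and the
 * control queue, while a priority of 4 selects queue set 2 and the offload
 * queue.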
1766 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1768 struct adapter *adap = tdev2adap(tdev);
1769 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1771 if (unlikely(is_ctrl_pkt(skb)))
1772 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1774 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1778 * offload_enqueue - add an offload packet to an SGE offload receive queue
1779 * @q: the SGE response queue
1782 * Add a new offload packet to an SGE response queue's offload packet
1783 * queue. If the packet is the first on the queue it schedules the RX
1784 * softirq to process the queue.
1786 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1788 int was_empty = skb_queue_empty(&q->rx_queue);
1790 __skb_queue_tail(&q->rx_queue, skb);
1793 struct sge_qset *qs = rspq_to_qset(q);
1795 napi_schedule(&qs->napi);
1800 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1801 * @tdev: the offload device that will be receiving the packets
1802 * @q: the SGE response queue that assembled the bundle
1803 * @skbs: the partial bundle
1804 * @n: the number of packets in the bundle
1806 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1808 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1810 struct sk_buff *skbs[], int n)
1813 q->offload_bundles++;
1814 tdev->recv(tdev, skbs, n);
1819 * ofld_poll - NAPI handler for offload packets in interrupt mode
1820 * @dev: the network device doing the polling
1821 * @budget: polling budget
1823 * The NAPI handler for offload packets when a response queue is serviced
1824 * by the hard interrupt handler, i.e., when it's operating in non-polling
1825 * mode. Creates small packet batches and sends them through the offload
1826 * receive handler. Batches need to be of modest size as we do prefetches
1827 * on the packets in each.
1829 static int ofld_poll(struct napi_struct *napi, int budget)
1831 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1832 struct sge_rspq *q = &qs->rspq;
1833 struct adapter *adapter = qs->adap;
1836 while (work_done < budget) {
1837 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1838 struct sk_buff_head queue;
1841 spin_lock_irq(&q->lock);
1842 __skb_queue_head_init(&queue);
1843 skb_queue_splice_init(&q->rx_queue, &queue);
1844 if (skb_queue_empty(&queue)) {
1845 napi_complete(napi);
1846 spin_unlock_irq(&q->lock);
1849 spin_unlock_irq(&q->lock);
1852 skb_queue_walk_safe(&queue, skb, tmp) {
1853 if (work_done >= budget)
1857 __skb_unlink(skb, &queue);
1858 prefetch(skb->data);
1859 skbs[ngathered] = skb;
1860 if (++ngathered == RX_BUNDLE_SIZE) {
1861 q->offload_bundles++;
1862 adapter->tdev.recv(&adapter->tdev, skbs,
1867 if (!skb_queue_empty(&queue)) {
1868 /* splice remaining packets back onto Rx queue */
1869 spin_lock_irq(&q->lock);
1870 skb_queue_splice(&queue, &q->rx_queue);
1871 spin_unlock_irq(&q->lock);
1873 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1880 * rx_offload - process a received offload packet
1881 * @tdev: the offload device receiving the packet
1882 * @rq: the response queue that received the packet
1884 * @rx_gather: a gather list of packets if we are building a bundle
1885 * @gather_idx: index of the next available slot in the bundle
 * Process an ingress offload packet and add it to the offload ingress
1888 * queue. Returns the index of the next available slot in the bundle.
1890 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1891 struct sk_buff *skb, struct sk_buff *rx_gather[],
1892 unsigned int gather_idx)
1894 skb_reset_mac_header(skb);
1895 skb_reset_network_header(skb);
1896 skb_reset_transport_header(skb);
1899 rx_gather[gather_idx++] = skb;
1900 if (gather_idx == RX_BUNDLE_SIZE) {
1901 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1903 rq->offload_bundles++;
1906 offload_enqueue(rq, skb);
1912 * restart_tx - check whether to restart suspended Tx queues
1913 * @qs: the queue set to resume
1915 * Restarts suspended Tx queues of an SGE queue set if they have enough
1916 * free resources to resume operation.
1918 static void restart_tx(struct sge_qset *qs)
1920 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1921 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1922 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1923 qs->txq[TXQ_ETH].restarts++;
1924 if (netif_running(qs->netdev))
1925 netif_tx_wake_queue(qs->tx_q);
1928 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1929 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1930 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1931 qs->txq[TXQ_OFLD].restarts++;
1932 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1934 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1935 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1936 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1937 qs->txq[TXQ_CTRL].restarts++;
1938 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1943 * cxgb3_arp_process - process an ARP request probing a private IP address
1944 * @adapter: the adapter
1945 * @skb: the skbuff containing the ARP request
 * Check if the ARP request is probing the private IP address
 * dedicated to iSCSI and generate an ARP reply if so.
1950 static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
1952 struct net_device *dev = skb->dev;
1954 unsigned char *arp_ptr;
1961 skb_reset_network_header(skb);
1964 if (arp->ar_op != htons(ARPOP_REQUEST))
1967 arp_ptr = (unsigned char *)(arp + 1);
1969 arp_ptr += dev->addr_len;
1970 memcpy(&sip, arp_ptr, sizeof(sip));
1971 arp_ptr += sizeof(sip);
1972 arp_ptr += dev->addr_len;
1973 memcpy(&tip, arp_ptr, sizeof(tip));
1975 if (tip != pi->iscsi_ipv4addr)
1978 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1979 pi->iscsic.mac_addr, sha);
1983 static inline int is_arp(struct sk_buff *skb)
1985 return skb->protocol == htons(ETH_P_ARP);
1988 static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
1989 struct sk_buff *skb)
1992 cxgb3_arp_process(pi, skb);
1996 if (pi->iscsic.recv)
1997 pi->iscsic.recv(pi, skb);
2002 * rx_eth - process an ingress ethernet packet
2003 * @adap: the adapter
2004 * @rq: the response queue that received the packet
2006 * @pad: amount of padding at the start of the buffer
 * Process an ingress Ethernet packet and deliver it to the stack.
2009 * The padding is 2 if the packet was delivered in an Rx buffer and 0
2010 * if it was immediate data in a response.
2012 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2013 struct sk_buff *skb, int pad, int lro)
2015 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
2016 struct sge_qset *qs = rspq_to_qset(rq);
2017 struct port_info *pi;
2019 skb_pull(skb, sizeof(*p) + pad);
2020 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2021 pi = netdev_priv(skb->dev);
2022 if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
2023 p->csum == htons(0xffff) && !p->fragment) {
2024 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2025 skb->ip_summed = CHECKSUM_UNNECESSARY;
2027 skb_checksum_none_assert(skb);
2028 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2030 if (p->vlan_valid) {
2031 qs->port_stats[SGE_PSTAT_VLANEX]++;
2032 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
2036 napi_gro_receive(&qs->napi, skb);
2038 if (unlikely(pi->iscsic.flags))
2039 cxgb3_process_iscsi_prov_pack(pi, skb);
2040 netif_receive_skb(skb);
2046 static inline int is_eth_tcp(u32 rss)
2048 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2052 * lro_add_page - add a page chunk to an LRO session
2053 * @adap: the adapter
2054 * @qs: the associated queue set
2055 * @fl: the free list containing the page chunk to add
2056 * @len: packet length
2057 * @complete: Indicates the last fragment of a frame
2059 * Add a received packet contained in a page chunk to an existing LRO
2062 static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2063 struct sge_fl *fl, int len, int complete)
2065 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2066 struct port_info *pi = netdev_priv(qs->netdev);
2067 struct sk_buff *skb = NULL;
2068 struct cpl_rx_pkt *cpl;
2069 struct skb_frag_struct *rx_frag;
2074 skb = napi_get_frags(&qs->napi);
2080 pci_dma_sync_single_for_cpu(adap->pdev,
2081 dma_unmap_addr(sd, dma_addr),
2082 fl->buf_size - SGE_PG_RSVD,
2083 PCI_DMA_FROMDEVICE);
2085 (*sd->pg_chunk.p_cnt)--;
2086 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2087 pci_unmap_page(adap->pdev,
2088 sd->pg_chunk.mapping,
2090 PCI_DMA_FROMDEVICE);
2093 put_page(sd->pg_chunk.page);
2099 rx_frag = skb_shinfo(skb)->frags;
2100 nr_frags = skb_shinfo(skb)->nr_frags;
2103 offset = 2 + sizeof(struct cpl_rx_pkt);
2104 cpl = qs->lro_va = sd->pg_chunk.va + 2;
2106 if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2107 cpl->csum_valid && cpl->csum == htons(0xffff)) {
2108 skb->ip_summed = CHECKSUM_UNNECESSARY;
2109 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2111 skb->ip_summed = CHECKSUM_NONE;
2117 rx_frag += nr_frags;
2118 __skb_frag_set_page(rx_frag, sd->pg_chunk.page);
2119 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2120 skb_frag_size_set(rx_frag, len);
2123 skb->data_len += len;
2124 skb->truesize += len;
2125 skb_shinfo(skb)->nr_frags++;
2130 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2132 if (cpl->vlan_valid) {
2133 qs->port_stats[SGE_PSTAT_VLANEX]++;
2134 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
2136 napi_gro_frags(&qs->napi);
2140 * handle_rsp_cntrl_info - handles control information in a response
2141 * @qs: the queue set corresponding to the response
2142 * @flags: the response control flags
2144 * Handles the control information of an SGE response, such as GTS
2145 * indications and completion credits for the queue set's Tx queues.
 * HW coalesces credits, so we don't do any extra SW coalescing.
2148 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2150 unsigned int credits;
2153 if (flags & F_RSPD_TXQ0_GTS)
2154 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2157 credits = G_RSPD_TXQ0_CR(flags);
2159 qs->txq[TXQ_ETH].processed += credits;
2161 credits = G_RSPD_TXQ2_CR(flags);
2163 qs->txq[TXQ_CTRL].processed += credits;
2166 if (flags & F_RSPD_TXQ1_GTS)
2167 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2169 credits = G_RSPD_TXQ1_CR(flags);
2171 qs->txq[TXQ_OFLD].processed += credits;
2175 * check_ring_db - check if we need to ring any doorbells
2176 * @adapter: the adapter
2177 * @qs: the queue set whose Tx queues are to be examined
2178 * @sleeping: indicates which Tx queue sent GTS
2180 * Checks if some of a queue set's Tx queues need to ring their doorbells
2181 * to resume transmission after idling while they still have unprocessed
2184 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2185 unsigned int sleeping)
2187 if (sleeping & F_RSPD_TXQ0_GTS) {
2188 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2190 if (txq->cleaned + txq->in_use != txq->processed &&
2191 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2192 set_bit(TXQ_RUNNING, &txq->flags);
2193 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2194 V_EGRCNTX(txq->cntxt_id));
2198 if (sleeping & F_RSPD_TXQ1_GTS) {
2199 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2201 if (txq->cleaned + txq->in_use != txq->processed &&
2202 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2203 set_bit(TXQ_RUNNING, &txq->flags);
2204 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2205 V_EGRCNTX(txq->cntxt_id));
2211 * is_new_response - check if a response is newly written
2212 * @r: the response descriptor
2213 * @q: the response queue
2215 * Returns true if a response descriptor contains a yet unprocessed response.
2218 static inline int is_new_response(const struct rsp_desc *r,
2219 const struct sge_rspq *q)
2221 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2224 static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2227 q->rx_recycle_buf = 0;
2230 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2231 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2232 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2233 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2234 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2236 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2237 #define NOMEM_INTR_DELAY 2500
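/* 2500 * 0.1 us = 250 us between interrupts while buffers are scarce. */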
2240 * process_responses - process responses from an SGE response queue
2241 * @adap: the adapter
2242 * @qs: the queue set to which the response queue belongs
2243 * @budget: how many responses can be processed in this round
2245 * Process responses from an SGE response queue up to the supplied budget.
2246 * Responses include received packets as well as credits and other events
2247 * for the queues that belong to the response queue's queue set.
2248 * A negative budget is effectively unlimited.
2250 * Additionally choose the interrupt holdoff time for the next interrupt
2251 * on this queue. If the system is under memory shortage, use a fairly
2252 * long delay to help recovery.
2254 static int process_responses(struct adapter *adap, struct sge_qset *qs,
2257 struct sge_rspq *q = &qs->rspq;
2258 struct rsp_desc *r = &q->desc[q->cidx];
2259 int budget_left = budget;
2260 unsigned int sleeping = 0;
2261 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2264 q->next_holdoff = q->holdoff_tmr;
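/* Start from the configured holdoff; bump it if we hit memory shortage. */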
2266 while (likely(budget_left && is_new_response(r, q))) {
2267 int packet_complete, eth, ethpad = 2;
2268 int lro = !!(qs->netdev->features & NETIF_F_GRO);
2269 struct sk_buff *skb = NULL;
2271 __be32 rss_hi, rss_lo;
2274 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2275 rss_hi = *(const __be32 *)r;
2276 rss_lo = r->rss_hdr.rss_hash_val;
2277 flags = ntohl(r->flags);
2279 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2280 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2284 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2285 skb->data[0] = CPL_ASYNC_NOTIF;
2286 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2288 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2289 skb = get_imm_packet(r);
2290 if (unlikely(!skb)) {
2292 q->next_holdoff = NOMEM_INTR_DELAY;
2294 /* consume one credit since we tried */
2300 } else if ((len = ntohl(r->len_cq)) != 0) {
2303 lro &= eth && is_eth_tcp(rss_hi);
2305 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2306 if (fl->use_pages) {
2307 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2310 #if L1_CACHE_BYTES < 128
2311 prefetch(addr + L1_CACHE_BYTES);
2313 __refill_fl(adap, fl);
2315 lro_add_page(adap, qs, fl,
2317 flags & F_RSPD_EOP);
2321 skb = get_packet_pg(adap, fl, q,
2324 SGE_RX_DROP_THRES : 0);
2327 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2328 eth ? SGE_RX_DROP_THRES : 0);
2329 if (unlikely(!skb)) {
2333 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2336 if (++fl->cidx == fl->size)
2341 if (flags & RSPD_CTRL_MASK) {
2342 sleeping |= flags & RSPD_GTS_MASK;
2343 handle_rsp_cntrl_info(qs, flags);
2347 if (unlikely(++q->cidx == q->size)) {
2354 if (++q->credits >= (q->size / 4)) {
2355 refill_rspq(adap, q, q->credits);
2359 packet_complete = flags &
2360 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2361 F_RSPD_ASYNC_NOTIF);
2363 if (skb != NULL && packet_complete) {
2365 rx_eth(adap, q, skb, ethpad, lro);
2368 /* Preserve the RSS info in csum & priority */
2369 skb->csum = rss_hi;
2370 skb->priority = rss_lo;
2371 ngathered = rx_offload(&adap->tdev, q, skb,
2376 if (flags & F_RSPD_EOP)
2377 clear_rspq_bufstate(q);
2382 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2385 check_ring_db(adap, qs, sleeping);
2387 smp_mb(); /* commit Tx queue .processed updates */
2388 if (unlikely(qs->txq_stopped != 0))
2391 budget -= budget_left;
2395 static inline int is_pure_response(const struct rsp_desc *r)
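/*
 * A pure response carries no async notification, no immediate data and
 * no free-list payload (len_cq == 0); it only returns credits/GTS info.
 */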
2397 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2399 return (n | r->len_cq) == 0;
2403 * napi_rx_handler - the NAPI handler for Rx processing
2404 * @napi: the napi instance
2405 * @budget: how many packets we can process in this round
2407 * Handler for new data events when using NAPI.
2409 static int napi_rx_handler(struct napi_struct *napi, int budget)
2411 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2412 struct adapter *adap = qs->adap;
2413 int work_done = process_responses(adap, qs, budget);
2415 if (likely(work_done < budget)) {
2416 napi_complete(napi);
2419 * Because we don't atomically flush the following
2420 * write it is possible that in very rare cases it can
2421 * reach the device in a way that races with a new
2422 * response being written plus an error interrupt
2423 * causing the NAPI interrupt handler below to return
2424 * unhandled status to the OS. To protect against
2425 * this would require flushing the write and doing
2426 * both the write and the flush with interrupts off.
2427 * Way too expensive and unjustifiable given the
2428 * rarity of the race.
2430 * The race cannot happen at all with MSI-X.
2432 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2433 V_NEWTIMER(qs->rspq.next_holdoff) |
2434 V_NEWINDEX(qs->rspq.cidx));
2440 * Returns true if the device is already scheduled for polling.
2442 static inline int napi_is_scheduled(struct napi_struct *napi)
2444 return test_bit(NAPI_STATE_SCHED, &napi->state);
2448 * process_pure_responses - process pure responses from a response queue
2449 * @adap: the adapter
2450 * @qs: the queue set owning the response queue
2451 * @r: the first pure response to process
2453 * A simpler version of process_responses() that handles only pure (i.e.,
2454 * non-data-carrying) responses. Such responses are too lightweight to
2455 * justify calling a softirq under NAPI, so we handle them specially in
2456 * the interrupt handler. The function is called with a pointer to a
2457 * response, which the caller must ensure is a valid pure response.
2459 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2461 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2464 struct sge_rspq *q = &qs->rspq;
2465 unsigned int sleeping = 0;
2468 u32 flags = ntohl(r->flags);
2471 if (unlikely(++q->cidx == q->size)) {
2478 if (flags & RSPD_CTRL_MASK) {
2479 sleeping |= flags & RSPD_GTS_MASK;
2480 handle_rsp_cntrl_info(qs, flags);
2484 if (++q->credits >= (q->size / 4)) {
2485 refill_rspq(adap, q, q->credits);
2488 if (!is_new_response(r, q))
2491 } while (is_pure_response(r));
2494 check_ring_db(adap, qs, sleeping);
2496 smp_mb(); /* commit Tx queue .processed updates */
2497 if (unlikely(qs->txq_stopped != 0))
2500 return is_new_response(r, q);
2504 * handle_responses - decide what to do with new responses in NAPI mode
2505 * @adap: the adapter
2506 * @q: the response queue
2508 * This is used by the NAPI interrupt handlers to decide what to do with
2509 * new SGE responses. If there are no new responses it returns -1. If
2510 * there are new responses and they are pure (i.e., non-data carrying)
2511 * it handles them straight in hard interrupt context as they are very
2512 * cheap and don't deliver any packets. Finally, if there are any data
2513 * signaling responses it schedules the NAPI handler. Returns 1 if it
2514 * schedules NAPI, 0 if all new responses were pure.
2516 * The caller must ascertain NAPI is not already running.
2518 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2520 struct sge_qset *qs = rspq_to_qset(q);
2521 struct rsp_desc *r = &q->desc[q->cidx];
2523 if (!is_new_response(r, q))
2526 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
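/*
 * All new responses were pure and were consumed here in hard-IRQ context:
 * ack them and re-arm the holdoff timer instead of scheduling NAPI.
 */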
2527 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2528 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2531 napi_schedule(&qs->napi);
2536 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2537 * (i.e., response queue serviced in hard interrupt).
2539 static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2541 struct sge_qset *qs = cookie;
2542 struct adapter *adap = qs->adap;
2543 struct sge_rspq *q = &qs->rspq;
2545 spin_lock(&q->lock);
2546 if (process_responses(adap, qs, -1) == 0)
2547 q->unhandled_irqs++;
2548 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2549 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2550 spin_unlock(&q->lock);
2555 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2556 * (i.e., response queue serviced by NAPI polling).
2558 static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2560 struct sge_qset *qs = cookie;
2561 struct sge_rspq *q = &qs->rspq;
2563 spin_lock(&q->lock);
2565 if (handle_responses(qs->adap, q) < 0)
2566 q->unhandled_irqs++;
2567 spin_unlock(&q->lock);
2572 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2573 * SGE response queues as well as error and other async events as they all use
2574 * the same MSI vector. We use one SGE response queue per port in this mode
2575 * and protect all response queues with queue 0's lock.
2577 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2579 int new_packets = 0;
2580 struct adapter *adap = cookie;
2581 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2583 spin_lock(&q->lock);
2585 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2586 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2587 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2591 if (adap->params.nports == 2 &&
2592 process_responses(adap, &adap->sge.qs[1], -1)) {
2593 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2595 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2596 V_NEWTIMER(q1->next_holdoff) |
2597 V_NEWINDEX(q1->cidx));
2601 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2602 q->unhandled_irqs++;
2604 spin_unlock(&q->lock);
2608 static int rspq_check_napi(struct sge_qset *qs)
2610 struct sge_rspq *q = &qs->rspq;
2612 if (!napi_is_scheduled(&qs->napi) &&
2613 is_new_response(&q->desc[q->cidx], q)) {
2614 napi_schedule(&qs->napi);
2621 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2622 * by NAPI polling). Handles data events from SGE response queues as well as
2623 * error and other async events as they all use the same MSI vector. We use
2624 * one SGE response queue per port in this mode and protect all response
2625 * queues with queue 0's lock.
2627 static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2630 struct adapter *adap = cookie;
2631 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2633 spin_lock(&q->lock);
2635 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2636 if (adap->params.nports == 2)
2637 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2638 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2639 q->unhandled_irqs++;
2641 spin_unlock(&q->lock);
2646 * A helper function that processes responses and issues GTS.
2648 static inline int process_responses_gts(struct adapter *adap,
2649 struct sge_rspq *rq)
2653 work = process_responses(adap, rspq_to_qset(rq), -1);
2654 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2655 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2660 * The legacy INTx interrupt handler. This needs to handle data events from
2661 * SGE response queues as well as error and other async events as they all use
2662 * the same interrupt pin. We use one SGE response queue per port in this mode
2663 * and protect all response queues with queue 0's lock.
2665 static irqreturn_t t3_intr(int irq, void *cookie)
2667 int work_done, w0, w1;
2668 struct adapter *adap = cookie;
2669 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2670 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2672 spin_lock(&q0->lock);
2674 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2675 w1 = adap->params.nports == 2 &&
2676 is_new_response(&q1->desc[q1->cidx], q1);
2678 if (likely(w0 | w1)) {
2679 t3_write_reg(adap, A_PL_CLI, 0);
2680 t3_read_reg(adap, A_PL_CLI); /* flush */
2683 process_responses_gts(adap, q0);
2686 process_responses_gts(adap, q1);
2688 work_done = w0 | w1;
2690 work_done = t3_slow_intr_handler(adap);
2692 spin_unlock(&q0->lock);
2693 return IRQ_RETVAL(work_done != 0);
2697 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2698 * Handles data events from SGE response queues as well as error and other
2699 * async events as they all use the same interrupt pin. We use one SGE
2700 * response queue per port in this mode and protect all response queues with queue 0's lock.
2703 static irqreturn_t t3b_intr(int irq, void *cookie)
2706 struct adapter *adap = cookie;
2707 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2709 t3_write_reg(adap, A_PL_CLI, 0);
2710 map = t3_read_reg(adap, A_SG_DATA_INTR);
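/*
 * A_SG_DATA_INTR is a bitmap: bit n is set when response queue n has new
 * entries, and F_ERRINTR flags slow-path/error events.
 */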
2712 if (unlikely(!map)) /* shared interrupt, most likely */
2715 spin_lock(&q0->lock);
2717 if (unlikely(map & F_ERRINTR))
2718 t3_slow_intr_handler(adap);
2720 if (likely(map & 1))
2721 process_responses_gts(adap, q0);
2724 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2726 spin_unlock(&q0->lock);
2731 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2732 * Handles data events from SGE response queues as well as error and other
2733 * async events as they all use the same interrupt pin. We use one SGE
2734 * response queue per port in this mode and protect all response queues with queue 0's lock.
2737 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2740 struct adapter *adap = cookie;
2741 struct sge_qset *qs0 = &adap->sge.qs[0];
2742 struct sge_rspq *q0 = &qs0->rspq;
2744 t3_write_reg(adap, A_PL_CLI, 0);
2745 map = t3_read_reg(adap, A_SG_DATA_INTR);
2747 if (unlikely(!map)) /* shared interrupt, most likely */
2750 spin_lock(&q0->lock);
2752 if (unlikely(map & F_ERRINTR))
2753 t3_slow_intr_handler(adap);
2755 if (likely(map & 1))
2756 napi_schedule(&qs0->napi);
2759 napi_schedule(&adap->sge.qs[1].napi);
2761 spin_unlock(&q0->lock);
2766 * t3_intr_handler - select the top-level interrupt handler
2767 * @adap: the adapter
2768 * @polling: whether using NAPI to service response queues
2770 * Selects the top-level interrupt handler based on the type of interrupts
2771 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the response queues.
2774 irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2776 if (adap->flags & USING_MSIX)
2777 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2778 if (adap->flags & USING_MSI)
2779 return polling ? t3_intr_msi_napi : t3_intr_msi;
2780 if (adap->params.rev > 0)
2781 return polling ? t3b_intr_napi : t3b_intr;
2785 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2786 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2787 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2788 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2789 F_HIRCQPARITYERROR)
2790 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2791 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2792 F_RSPQDISABLED)
2795 * t3_sge_err_intr_handler - SGE async event interrupt handler
2796 * @adapter: the adapter
2798 * Interrupt handler for SGE asynchronous (non-data) events.
2800 void t3_sge_err_intr_handler(struct adapter *adapter)
2802 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2805 if (status & SGE_PARERR)
2806 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2807 status & SGE_PARERR);
2808 if (status & SGE_FRAMINGERR)
2809 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2810 status & SGE_FRAMINGERR);
2812 if (status & F_RSPQCREDITOVERFOW)
2813 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2815 if (status & F_RSPQDISABLED) {
2816 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2819 "packet delivered to disabled response queue "
2820 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2823 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2824 queue_work(cxgb3_wq, &adapter->db_drop_task);
2826 if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2827 queue_work(cxgb3_wq, &adapter->db_full_task);
2829 if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2830 queue_work(cxgb3_wq, &adapter->db_empty_task);
2832 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2833 if (status & SGE_FATALERR)
2834 t3_fatal_err(adapter);
2838 * sge_timer_tx - perform periodic maintenance of an SGE qset
2839 * @data: the SGE queue set to maintain
2841 * Runs periodically from a timer to perform maintenance of an SGE queue
2842 * set. It performs a single task:
2844 * Cleans up any completed Tx descriptors that may still be pending.
2845 * Normal descriptor cleanup happens when new packets are added to a Tx
2846 * queue so this timer is relatively infrequent and does any cleanup only
2847 * if the Tx queue has not seen any new packets in a while. We make a
2848 * best effort attempt to reclaim descriptors, in that we don't wait
2849 * around if we cannot get a queue's lock (which most likely is because
2850 * someone else is queueing new packets and so will also handle the clean
2851 * up). Since control queues use immediate data exclusively we don't
2852 * bother cleaning them up here.
2855 static void sge_timer_tx(unsigned long data)
2857 struct sge_qset *qs = (struct sge_qset *)data;
2858 struct port_info *pi = netdev_priv(qs->netdev);
2859 struct adapter *adap = pi->adapter;
2860 unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2861 unsigned long next_period;
2863 if (__netif_tx_trylock(qs->tx_q)) {
2864 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2865 TX_RECLAIM_TIMER_CHUNK);
2866 __netif_tx_unlock(qs->tx_q);
2869 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2870 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2871 TX_RECLAIM_TIMER_CHUNK);
2872 spin_unlock(&qs->txq[TXQ_OFLD].lock);
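/*
 * Reclaim sooner when this pass found work: each full TX_RECLAIM_TIMER_CHUNK
 * of descriptors reclaimed halves the base TX_RECLAIM_PERIOD for the next run.
 */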
2875 next_period = TX_RECLAIM_PERIOD >>
2876 (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2877 TX_RECLAIM_TIMER_CHUNK);
2878 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2882 * sge_timer_rx - perform periodic maintenance of an SGE qset
2883 * @data: the SGE queue set to maintain
2885 * a) Replenishes Rx queues that have run out due to memory shortage.
2886 * Normally new Rx buffers are added when existing ones are consumed but
2887 * when out of memory a queue can become empty. We try to add only a few
2888 * buffers here; the queue will be replenished fully as these new buffers
2889 * are used up if memory shortage has subsided.
2891 * b) Returns coalesced response queue credits in case a response queue is starved.
2895 static void sge_timer_rx(unsigned long data)
2898 struct sge_qset *qs = (struct sge_qset *)data;
2899 struct port_info *pi = netdev_priv(qs->netdev);
2900 struct adapter *adap = pi->adapter;
2903 lock = adap->params.rev > 0 ?
2904 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2906 if (!spin_trylock_irq(lock))
2909 if (napi_is_scheduled(&qs->napi))
2912 if (adap->params.rev < 4) {
2913 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2915 if (status & (1 << qs->rspq.cntxt_id)) {
2917 if (qs->rspq.credits) {
2919 refill_rspq(adap, &qs->rspq, 1);
2920 qs->rspq.restarted++;
2921 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2922 1 << qs->rspq.cntxt_id);
2927 if (qs->fl[0].credits < qs->fl[0].size)
2928 __refill_fl(adap, &qs->fl[0]);
2929 if (qs->fl[1].credits < qs->fl[1].size)
2930 __refill_fl(adap, &qs->fl[1]);
2933 spin_unlock_irq(lock);
2935 mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
2939 * t3_update_qset_coalesce - update coalescing settings for a queue set
2940 * @qs: the SGE queue set
2941 * @p: new queue set parameters
2943 * Update the coalescing settings for an SGE queue set. Nothing is done
2944 * if the queue set is not initialized yet.
2946 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2948 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2949 qs->rspq.polling = p->polling;
2950 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
2954 * t3_sge_alloc_qset - initialize an SGE queue set
2955 * @adapter: the adapter
2956 * @id: the queue set id
2957 * @nports: how many Ethernet ports will be using this queue set
2958 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2959 * @p: configuration parameters for this queue set
2960 * @ntxq: number of Tx queues for the queue set
2961 * @dev: net device associated with this queue set
2962 * @netdevq: net device TX queue associated with this queue set
2964 * Allocate resources and initialize an SGE queue set. A queue set
2965 * comprises a response queue, two Rx free-buffer queues, and up to 3
2966 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2967 * queue, offload queue, and control queue.
2969 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2970 int irq_vec_idx, const struct qset_params *p,
2971 int ntxq, struct net_device *dev,
2972 struct netdev_queue *netdevq)
2974 int i, avail, ret = -ENOMEM;
2975 struct sge_qset *q = &adapter->sge.qs[id];
2977 init_qset_cntxt(q, id);
2978 setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
2979 setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
2981 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2982 sizeof(struct rx_desc),
2983 sizeof(struct rx_sw_desc),
2984 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2988 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2989 sizeof(struct rx_desc),
2990 sizeof(struct rx_sw_desc),
2991 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2995 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2996 sizeof(struct rsp_desc), 0,
2997 &q->rspq.phys_addr, NULL);
3001 for (i = 0; i < ntxq; ++i) {
3003 * The control queue always uses immediate data so does not
3004 * need to keep track of any sk_buffs.
3006 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
3008 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3009 sizeof(struct tx_desc), sz,
3010 &q->txq[i].phys_addr,
3012 if (!q->txq[i].desc)
3016 q->txq[i].size = p->txq_size[i];
3017 spin_lock_init(&q->txq[i].lock);
3018 skb_queue_head_init(&q->txq[i].sendq);
3021 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
3023 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
3026 q->fl[0].gen = q->fl[1].gen = 1;
3027 q->fl[0].size = p->fl_size;
3028 q->fl[1].size = p->jumbo_size;
3031 q->rspq.size = p->rspq_size;
3032 spin_lock_init(&q->rspq.lock);
3033 skb_queue_head_init(&q->rspq.rx_queue);
3035 q->txq[TXQ_ETH].stop_thres = nports *
3036 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
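/*
 * Stop the Ethernet Tx queue when fewer descriptors remain than a
 * maximally-fragmented packet could need from each port sharing the set.
 */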
3038 #if FL0_PG_CHUNK_SIZE > 0
3039 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3041 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3043 #if FL1_PG_CHUNK_SIZE > 0
3044 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3046 q->fl[1].buf_size = is_offload(adapter) ?
3047 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
3048 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
3051 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3052 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3053 q->fl[0].order = FL0_PG_ORDER;
3054 q->fl[1].order = FL1_PG_ORDER;
3055 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3056 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
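/*
 * FL0 supplies small buffers (page chunks or small sk_buffs) and FL1
 * supplies large/jumbo buffers; when use_pages is set a free list hands
 * out chunks carved from pages of the configured allocation order.
 */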
3058 spin_lock_irq(&adapter->sge.reg_lock);
3060 /* FL threshold comparison uses < */
3061 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
3062 q->rspq.phys_addr, q->rspq.size,
3063 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3067 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
3068 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3069 q->fl[i].phys_addr, q->fl[i].size,
3070 q->fl[i].buf_size - SGE_PG_RSVD,
3071 p->cong_thres, 1, 0);
3076 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3077 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
3078 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3084 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3085 USE_GTS, SGE_CNTXT_OFLD, id,
3086 q->txq[TXQ_OFLD].phys_addr,
3087 q->txq[TXQ_OFLD].size, 0, 1, 0);
3093 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3095 q->txq[TXQ_CTRL].phys_addr,
3096 q->txq[TXQ_CTRL].size,
3097 q->txq[TXQ_CTRL].token, 1, 0);
3102 spin_unlock_irq(&adapter->sge.reg_lock);
3107 t3_update_qset_coalesce(q, p);
3109 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3110 GFP_KERNEL | __GFP_COMP);
3112 CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3115 if (avail < q->fl[0].size)
3116 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3119 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3120 GFP_KERNEL | __GFP_COMP);
3121 if (avail < q->fl[1].size)
3122 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3124 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3126 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3127 V_NEWTIMER(q->rspq.holdoff_tmr));
3132 spin_unlock_irq(&adapter->sge.reg_lock);
3134 t3_free_qset(adapter, q);
3139 * t3_start_sge_timers - start SGE timer callbacks
3140 * @adap: the adapter
3142 * Starts each SGE queue set's timer callbacks.
3144 void t3_start_sge_timers(struct adapter *adap)
3148 for (i = 0; i < SGE_QSETS; ++i) {
3149 struct sge_qset *q = &adap->sge.qs[i];
3151 if (q->tx_reclaim_timer.function)
3152 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
3154 if (q->rx_reclaim_timer.function)
3155 mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3160 * t3_stop_sge_timers - stop SGE timer callbacks
3161 * @adap: the adapter
3163 * Stops each SGE queue set's timer callbacks.
3165 void t3_stop_sge_timers(struct adapter *adap)
3169 for (i = 0; i < SGE_QSETS; ++i) {
3170 struct sge_qset *q = &adap->sge.qs[i];
3172 if (q->tx_reclaim_timer.function)
3173 del_timer_sync(&q->tx_reclaim_timer);
3174 if (q->rx_reclaim_timer.function)
3175 del_timer_sync(&q->rx_reclaim_timer);
3180 * t3_free_sge_resources - free SGE resources
3181 * @adap: the adapter
3183 * Frees resources used by the SGE queue sets.
3185 void t3_free_sge_resources(struct adapter *adap)
3189 for (i = 0; i < SGE_QSETS; ++i)
3190 t3_free_qset(adap, &adap->sge.qs[i]);
3194 * t3_sge_start - enable SGE
3195 * @adap: the adapter
3197 * Enables the SGE for DMAs. This is the last step in starting packet transfers.
3200 void t3_sge_start(struct adapter *adap)
3202 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3206 * t3_sge_stop - disable SGE operation
3207 * @adap: the adapter
3209 * Disables the DMA engine. This can be called in emergencies (e.g.,
3210 * from error interrupts) or from normal process context. In the latter
3211 * case it also disables any pending queue restart tasklets. Note that
3212 * if it is called in interrupt context it cannot disable the restart
3213 * tasklets as it cannot wait, however the tasklets will have no effect
3214 * since the doorbells are disabled and the driver will call this again
3215 * later from process context, at which time the tasklets will be stopped
3216 * if they are still running.
3218 void t3_sge_stop(struct adapter *adap)
3220 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3221 if (!in_interrupt()) {
3224 for (i = 0; i < SGE_QSETS; ++i) {
3225 struct sge_qset *qs = &adap->sge.qs[i];
3227 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
3228 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
3234 * t3_sge_init - initialize SGE
3235 * @adap: the adapter
3236 * @p: the SGE parameters
3238 * Performs SGE initialization needed every time after a chip reset.
3239 * We do not initialize any of the queue sets here, instead the driver
3240 * top-level must request those individually. We also do not enable DMA
3241 * here, that should be done after the queues have been set up.
3243 void t3_sge_init(struct adapter *adap, struct sge_params *p)
3245 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3247 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3248 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3249 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3250 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3251 #if SGE_NUM_GENBITS == 1
3252 ctrl |= F_EGRGENCTRL;
3254 if (adap->params.rev > 0) {
3255 if (!(adap->flags & (USING_MSIX | USING_MSI)))
3256 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3258 t3_write_reg(adap, A_SG_CONTROL, ctrl);
3259 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3260 V_LORCQDRBTHRSH(512));
3261 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3262 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3263 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3264 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3265 adap->params.rev < T3_REV_C ? 1000 : 500);
3266 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3267 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3268 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3269 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3270 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3274 * t3_sge_prep - one-time SGE initialization
3275 * @adap: the associated adapter
3276 * @p: SGE parameters
3278 * Performs one-time initialization of SGE SW state. Includes determining
3279 * defaults for the assorted SGE parameters, which admins can change until
3280 * they are used to initialize the SGE.
3282 void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3286 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3287 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
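/*
 * Cap the Rx payload so a frame plus its CPL_RX_DATA header and the
 * skb_shared_info appended by the skb allocator still fits in a 16KB
 * FL1 buffer.
 */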
3289 for (i = 0; i < SGE_QSETS; ++i) {
3290 struct qset_params *q = p->qset + i;
3292 q->polling = adap->params.rev > 0;
3293 q->coalesce_usecs = 5;
3294 q->rspq_size = 1024;
3296 q->jumbo_size = 512;
3297 q->txq_size[TXQ_ETH] = 1024;
3298 q->txq_size[TXQ_OFLD] = 1024;
3299 q->txq_size[TXQ_CTRL] = 256;
3303 spin_lock_init(&adap->sge.reg_lock);