/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include <net/arp.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "firmware_exports.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

#define SGE_RX_COPY_THRES  256
#define SGE_RX_PULL_LEN    128

#define SGE_PG_RSVD SMP_CACHE_BYTES
/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE  2048
#define FL0_PG_ORDER 0
#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)

#define SGE_RX_DROP_THRES 16
#define RX_RECLAIM_PERIOD (HZ/4)

/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 16U
/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)
#define TX_RECLAIM_TIMER_CHUNK 64U
#define TX_RECLAIM_CHUNK 16U

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	__be64 flit[TX_DESC_FLITS];
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	u8 eop;       /* set if last descriptor for packet */
	u8 addr_idx;  /* buffer index of first SGL entry in descriptor */
	u8 fragidx;   /* first page fragment associated with descriptor */
	s8 sflit;     /* start flit of first SGL entry in descriptor */
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct fl_pg_chunk pg_chunk;
	};
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 *	desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
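
/*
 * Worked example of the table above (assuming WR_FLITS == 16, which is
 * implied by the table layout but defined elsewhere): with
 * SGE_NUM_GENBITS == 2 each descriptor loses one flit to generation bits,
 * so 1-15 flits fit in one descriptor, 16-29 need two, 30-43 three, and
 * 44-57 four; e.g. 17 flits map to 2 descriptors.
 */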

static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 * refill_rspq - replenish an SGE response queue
 * @adapter: the adapter
 * @q: the response queue to replenish
 * @credits: how many new responses to make available
 *
 * Replenishes a response queue by making the supplied number of responses
 * available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 * Returns true if the platform needs sk_buff unmapping.  The result is a
 * compile-time constant, so the compiler optimizes away unnecessary
 * unmapping code when it is false.
 */
static inline int need_skb_unmap(void)
{
	/*
	 * This structure is used to tell if the platform needs buffer
	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
	 */
	struct dummy {
		DECLARE_PCI_UNMAP_ADDR(addr);
	};

	return sizeof(struct dummy) != 0;
}

/**
 * unmap_skb - unmap a packet main body and its page fragments
 * @skb: the packet
 * @q: the Tx queue containing Tx descriptors for the packet
 * @cidx: index of Tx descriptor
 * @pdev: the PCI device
 *
 * Unmap the main body of an sk_buff and its page fragments, if any.
 * Because of the fairly complicated structure of our SGLs and the desire
 * to conserve space for metadata, the information necessary to unmap an
 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 * descriptors (the physical addresses of the various data buffers), and
 * the SW descriptor state (assorted indices).  The send functions
 * initialize the indices for the first packet descriptor so we can unmap
 * the buffers held in the first Tx descriptor here, and we have enough
 * information at this point to set the state for the next Tx descriptor.
 *
 * Note that it is possible to clean up the first descriptor of a packet
 * before the send routines have written the next descriptors, but this
 * race does not cause any problem.  We just end up writing the unmapping
 * info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct tx_sw_desc *d = &q->sdesc[cidx];
	int nfrags, frag_idx, curflit, j = d->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
	frag_idx = d->fragidx;

	if (frag_idx == 0 && skb_headlen(skb)) {
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		j = 1;
	}

	curflit = d->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_shinfo(skb)->frags[frag_idx].size,
			       PCI_DMA_TODEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {	/* SGL continues into next Tx descriptor */
		d = cidx + 1 == q->size ? q->sdesc : d + 1;
		d->fragidx = frag_idx;
		d->addr_idx = j;
		d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
	}
}

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adapter: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->eop)
				kfree_skb(d->skb);
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 * reclaim_completed_tx - reclaims completed Tx descriptors
 * @adapter: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 * @chunk: maximum number of descriptors to reclaim
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  Called with the Tx
 * queue locked.
 */
static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
						struct sge_txq *q,
						unsigned int chunk)
{
	unsigned int reclaim = q->processed - q->cleaned;

	reclaim = min(chunk, reclaim);
	free_tx_desc(adapter, q, reclaim);
	q->cleaned += reclaim;
	q->in_use -= reclaim;

	return q->processed - q->cleaned;
}

/**
 * should_restart_tx - are there enough resources to restart a Tx queue?
 * @q: the Tx queue
 *
 * Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}
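
/*
 * Illustration of the restart test: with a 1024-entry queue, 700
 * descriptors in use, and 300 of those already processed by the SGE
 * (r == 300), the queue is restartable because 700 - 300 == 400 < 512
 * (half the queue size).
 */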

static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
			  struct rx_sw_desc *d)
{
	if (q->use_pages && d->pg_chunk.page) {
		(*d->pg_chunk.p_cnt)--;
		if (!*d->pg_chunk.p_cnt)
			pci_unmap_page(pdev,
				       pci_unmap_addr(&d->pg_chunk, mapping),
				       q->alloc_size, PCI_DMA_FROMDEVICE);

		put_page(d->pg_chunk.page);
		d->pg_chunk.page = NULL;
	} else {
		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);
		kfree_skb(d->skb);
		d->skb = NULL;
	}
}

/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @pdev: the PCI device associated with the adapter
 * @q: the SGE free list to clean up
 *
 * Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 * this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		clear_rx_desc(pdev, q, d);
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->pg_chunk.page) {
		__free_pages(q->pg_chunk.page, q->order);
		q->pg_chunk.page = NULL;
	}
}

/**
 * add_one_rx_buf - add a packet buffer to a free-buffer list
 * @va: buffer start VA
 * @len: the buffer length
 * @d: the HW Rx descriptor to write
 * @sd: the SW Rx descriptor to write
 * @gen: the generation bit value
 * @pdev: the PCI device associated with the adapter
 *
 * Add a buffer of the given length to the supplied HW and SW Rx
 * descriptors.
 */
static inline int add_one_rx_buf(void *va, unsigned int len,
				 struct rx_desc *d, struct rx_sw_desc *sd,
				 unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
		return -ENOMEM;

	pci_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
				   unsigned int gen)
{
	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
			  struct rx_sw_desc *sd, gfp_t gfp,
			  unsigned int order)
{
	if (!q->pg_chunk.page) {
		dma_addr_t mapping;

		q->pg_chunk.page = alloc_pages(gfp, order);
		if (unlikely(!q->pg_chunk.page))
			return -ENOMEM;
		q->pg_chunk.va = page_address(q->pg_chunk.page);
		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
				    SGE_PG_RSVD;
		q->pg_chunk.offset = 0;
		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
		pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
	}
	sd->pg_chunk = q->pg_chunk;

	prefetch(sd->pg_chunk.p_cnt);

	q->pg_chunk.offset += q->buf_size;
	if (q->pg_chunk.offset == (PAGE_SIZE << order))
		q->pg_chunk.page = NULL;
	else {
		q->pg_chunk.va += q->buf_size;
		get_page(q->pg_chunk.page);
	}

	if (sd->pg_chunk.offset == 0)
		*sd->pg_chunk.p_cnt = 1;
	else
		*sd->pg_chunk.p_cnt += 1;

	return 0;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= q->credits / 4) {
		q->pend_cred = 0;
		wmb();
		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
	}
}

/**
 * refill_fl - refill an SGE free-buffer list
 * @adap: the adapter
 * @q: the free-list to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for allocating new buffers
 *
 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must assure that
 * @n does not exceed the queue's capacity.
 */
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	unsigned int count = 0;

	while (n--) {
		dma_addr_t mapping;
		int err;

		if (q->use_pages) {
			if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
						    q->order))) {
nomem:
				q->alloc_failed++;
				break;
			}
			mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
				  sd->pg_chunk.offset;
			pci_unmap_addr_set(sd, dma_addr, mapping);

			add_one_rx_chunk(mapping, d, q->gen);
			pci_dma_sync_single_for_device(adap->pdev, mapping,
						q->buf_size - SGE_PG_RSVD,
						PCI_DMA_FROMDEVICE);
		} else {
			void *buf_start;

			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
			if (!skb)
				goto nomem;

			sd->skb = skb;
			buf_start = skb->data;
			err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
					     q->gen, adap->pdev);
			if (unlikely(err)) {
				clear_rx_desc(adap->pdev, q, sd);
				break;
			}
		}

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		count++;
	}

	q->credits += count;
	q->pend_cred += count;
	ring_fl_db(adap, q);

	return count;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
		  GFP_ATOMIC | __GFP_COMP);
}

/**
 * recycle_rx_buf - recycle a receive buffer
 * @adap: the adapter
 * @q: the SGE free list
 * @idx: index of buffer to recycle
 *
 * Recycles the specified buffer on the given free list by adding it at
 * the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}

	q->credits++;
	q->pend_cred++;
	ring_fl_db(adap, q);
}

/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @pdev: the PCI device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the physical address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size && metadata) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
		*(void **)metadata = s;
	}
	memset(p, 0, len);
	return p;
}

/**
 * t3_reset_qset - reset a sge qset
 * @q: the queue set
 *
 * Reset the qset structure.  The NAPI structure is preserved in the event
 * of the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
{
	if (q->adap &&
	    !(q->adap->flags & NAPI_INIT)) {
		memset(q, 0, sizeof(*q));
		return;
	}

	q->adap = NULL;
	memset(&q->rspq, 0, sizeof(q->rspq));
	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
	q->txq_stopped = 0;
	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
	q->rx_reclaim_timer.function = NULL;
	q->nomem = 0;
	napi_free_frags(&q->napi);
}

/**
 * free_qset - free the resources of an SGE queue set
 * @adapter: the adapter owning the queue set
 * @q: the queue set
 *
 * Release the HW and SW resources associated with an SGE queue set, such
 * as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 * queue set must be quiesced prior to calling this.
 */
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock_irq(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock_irq(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock_irq(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock_irq(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	t3_reset_qset(q);
}

/**
 * init_qset_cntxt - initialize an SGE queue set context info
 * @qs: the queue set
 * @id: the queue set id
 *
 * Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
	return (3 * n) / 2 + (n & 1);
}
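
/*
 * Example: each pair of SGL entries occupies 3 flits (two 8-byte addresses
 * plus two 4-byte lengths, as make_sgl() below packs them), so
 * sgl_len(4) == 6 while sgl_len(5) == (3 * 5) / 2 + 1 == 8 flits.
 */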

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Calculates the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}

/**
 * get_packet - return the next ingress packet buffer from a free list
 * @adap: the adapter that received the packet
 * @fl: the SGE free list holding the packet
 * @len: the packet length including any SGE padding
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list and complete setup of the
 * sk_buff.  If the packet is small we make a copy and recycle the
 * original buffer, otherwise we use the original buffer itself.  If a
 * positive drop threshold is supplied packets are dropped and their
 * buffers recycled if (a) the number of remaining buffers is under the
 * threshold and the packet is too big to copy, or (b) the packet should
 * be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->skb->data);
	fl->credits--;

	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
					    pci_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
			memcpy(skb->data, sd->skb->data, len);
			pci_dma_sync_single_for_device(adap->pdev,
					    pci_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;
recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	if (unlikely(fl->credits < drop_thres) &&
	    refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
		      GFP_ATOMIC | __GFP_COMP) == 0)
		goto recycle;

use_orig_buf:
	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
}

/**
 * get_packet_pg - return the next ingress packet buffer from a free list
 * @adap: the adapter that received the packet
 * @fl: the SGE free list holding the packet
 * @q: the response queue
 * @len: the packet length including any SGE padding
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list populated with page chunks.
 * If the packet is small we make a copy and recycle the original buffer,
 * otherwise we attach the original buffer as a page fragment to a fresh
 * sk_buff.  If a positive drop threshold is supplied packets are dropped
 * and their buffers recycled if (a) the number of remaining buffers is
 * under the threshold and the packet is too big to copy, or (b) there's
 * no system memory.
 *
 * Note: this function is similar to @get_packet but deals with Rx buffers
 * that are page chunks rather than sk_buffs.
 */
static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
				     struct sge_rspq *q, unsigned int len,
				     unsigned int drop_thres)
{
	struct sk_buff *newskb, *skb;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);

	newskb = skb = q->pg_skb;
	if (!skb && (len <= SGE_RX_COPY_THRES)) {
		newskb = alloc_skb(len, GFP_ATOMIC);
		if (likely(newskb != NULL)) {
			__skb_put(newskb, len);
			pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
					    PCI_DMA_FROMDEVICE);
			memcpy(newskb->data, sd->pg_chunk.va, len);
			pci_dma_sync_single_for_device(adap->pdev, dma_addr,
					    len, PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			return NULL;
recycle:
		fl->credits--;
		recycle_rx_buf(adap, fl, fl->cidx);
		q->rx_recycle_buf++;
		return newskb;
	}

	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
		goto recycle;

	prefetch(sd->pg_chunk.p_cnt);

	if (!skb)
		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);

	if (unlikely(!newskb)) {
		if (!drop_thres)
			return NULL;
		goto recycle;
	}

	pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
				    PCI_DMA_FROMDEVICE);
	(*sd->pg_chunk.p_cnt)--;
	if (!*sd->pg_chunk.p_cnt)
		pci_unmap_page(adap->pdev,
			       pci_unmap_addr(&sd->pg_chunk, mapping),
			       fl->alloc_size, PCI_DMA_FROMDEVICE);
	if (!skb) {
		__skb_put(newskb, SGE_RX_PULL_LEN);
		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
				   len - SGE_RX_PULL_LEN);
		newskb->len = len;
		newskb->data_len = len - SGE_RX_PULL_LEN;
		newskb->truesize += newskb->data_len;
	} else {
		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
				   sd->pg_chunk.page,
				   sd->pg_chunk.offset, len);
		newskb->len += len;
		newskb->data_len += len;
		newskb->truesize += len;
	}

	fl->credits--;
	/*
	 * We do not refill FLs here; we let the caller do it so the refill
	 * overlaps a prefetch.
	 */
	return newskb;
}

/**
 * get_imm_packet - return the next ingress packet buffer from a response
 * @resp: the response descriptor containing the packet data
 *
 * Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 * calc_tx_descs - calculate the number of Tx descriptors for a packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given Ethernet
 * packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}
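
/*
 * Worked example: a non-GSO packet with a linear area and two page
 * fragments needs sgl_len(2 + 1) + 2 == 7 flits, which flit_desc_map
 * turns into a single Tx descriptor; GSO adds one more flit for the LSO
 * header.
 */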

/**
 * make_sgl - populate a scatter/gather list for a packet
 * @skb: the packet
 * @sgp: the SGL to populate
 * @start: start address of skb main body data to include in the SGL
 * @len: length of skb main body data to include in the SGL
 * @pdev: the PCI device
 *
 * Generates a scatter/gather list for the buffers that make up a packet
 * and returns the SGL size in 8-byte words.  The caller must size the SGL
 * appropriately.
 */
static inline unsigned int make_sgl(const struct sk_buff *skb,
				    struct sg_ent *sgp, unsigned char *start,
				    unsigned int len, struct pci_dev *pdev)
{
	dma_addr_t mapping;
	unsigned int i, j = 0, nfrags;

	if (len) {
		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[0] = cpu_to_be64(mapping);
		j = 1;
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = pci_map_page(pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		sgp->len[j] = cpu_to_be32(frag->size);
		sgp->addr[j] = cpu_to_be64(mapping);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}
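
/*
 * Example: an skb with a mapped linear area and two fragments yields a
 * 3-entry SGL, so make_sgl() returns (3 * 3) / 2 + 1 == 5 flits, in
 * agreement with sgl_len(3).
 */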

/**
 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 *
 * Ring the doorbell if a Tx queue is asleep.  There is a natural race
 * where the HW may go to sleep just after our check; in that case the
 * interrupt handler will detect the outstanding Tx packet and ring the
 * doorbell for us.
 *
 * When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	wmb();			/* write descriptors before telling HW */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}

static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}

/**
 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
 * @ndesc: number of Tx descriptors spanned by the SGL
 * @skb: the packet corresponding to the WR
 * @d: first Tx descriptor to be written
 * @pidx: index of above descriptors
 * @q: the SGE Tx queue
 * @sgl: the SGL
 * @flits: number of flits to the start of the SGL in the first descriptor
 * @sgl_flits: the SGL size in flits
 * @gen: the Tx descriptor generation
 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 * Write a work request header and an associated SGL.  If the SGL is
 * small enough to fit into one Tx descriptor it has already been written
 * and we just need to write the WR header.  Otherwise we distribute the
 * SGL across the number of descriptors it spans.
 */
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
			     struct tx_desc *d, unsigned int pidx,
			     const struct sge_txq *q,
			     const struct sg_ent *sgl,
			     unsigned int flits, unsigned int sgl_flits,
			     unsigned int gen, __be32 wr_hi,
			     __be32 wr_lo)
{
	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
	struct tx_sw_desc *sd = &q->sdesc[pidx];

	sd->skb = skb;
	if (need_skb_unmap()) {
		sd->fragidx = 0;
		sd->addr_idx = 0;
		sd->sflit = flits;
	}

	if (likely(ndesc == 1)) {
		sd->eop = 1;
		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;
		wmb();
		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
				   V_WR_GEN(gen)) | wr_lo;
		wr_gen2(d, gen);
	} else {
		unsigned int ogen = gen;
		const u64 *fp = (const u64 *)sgl;
		struct work_request_hdr *wp = wrp;

		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;

		while (sgl_flits) {
			unsigned int avail = WR_FLITS - flits;

			if (avail > sgl_flits)
				avail = sgl_flits;
			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
			sgl_flits -= avail;
			ndesc--;
			if (!sgl_flits)
				break;

			fp += avail;
			d++;
			sd->eop = 0;
			sd++;
			if (++pidx == q->size) {
				pidx = 0;
				gen ^= 1;
				d = q->desc;
				sd = q->sdesc;
			}

			sd->skb = skb;
			wrp = (struct work_request_hdr *)d;
			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
					   V_WR_SGLSFLT(1)) | wr_hi;
			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
							sgl_flits + 1)) |
					   V_WR_GEN(gen)) | wr_lo;
			wr_gen2(d, gen);
			flits = 1;
		}
		sd->eop = 1;
		wrp->wr_hi |= htonl(F_WR_EOP);
		wmb();
		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
		wr_gen2((struct tx_desc *)wp, ogen);
		WARN_ON(ndesc != 0);
	}
}

/**
 * write_tx_pkt_wr - write a TX_PKT work request
 * @adap: the adapter
 * @skb: the packet to send
 * @pi: the egress interface
 * @pidx: index of the first Tx descriptor to write
 * @gen: the generation value to use
 * @q: the Tx queue
 * @ndesc: number of descriptors the packet will occupy
 * @compl: the value of the COMPL bit to use
 *
 * Generate a TX_PKT work request to send the supplied packet.
 */
static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
			    const struct port_info *pi,
			    unsigned int pidx, unsigned int gen,
			    struct sge_txq *q, unsigned int ndesc,
			    unsigned int compl)
{
	unsigned int flits, sgl_flits, cntrl, tso_info;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];
	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;

	cpl->len = htonl(skb->len);
	cntrl = V_TXPKT_INTF(pi->port_id);

	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));

	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
	if (tso_info) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;

		d->flit[2] = 0;
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
		hdr->cntrl = htonl(cntrl);
		eth_type = skb_network_offset(skb) == ETH_HLEN ?
		    CPL_ETH_II : CPL_ETH_II_VLAN;
		tso_info |= V_LSO_ETH_TYPE(eth_type) |
		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
		hdr->lso_info = htonl(tso_info);
		flits = 3;
	} else {
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
		cpl->cntrl = htonl(cntrl);

		if (skb->len <= WR_LEN - sizeof(*cpl)) {
			q->sdesc[pidx].skb = NULL;
			if (!skb->data_len)
				skb_copy_from_linear_data(skb, &d->flit[2],
							  skb->len);
			else
				skb_copy_bits(skb, 0, &d->flit[2], skb->len);

			flits = (skb->len + 7) / 8 + 2;
			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
					      | F_WR_SOP | F_WR_EOP | compl);
			wmb();
			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
					      V_WR_TID(q->token));
			wr_gen2(d, gen);
			kfree_skb(skb);
			return;
		}

		flits = 2;
	}

	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
			 htonl(V_WR_TID(q->token)));
}

static inline void t3_stop_tx_queue(struct netdev_queue *txq,
				    struct sge_qset *qs, struct sge_txq *q)
{
	netif_tx_stop_queue(txq);
	set_bit(TXQ_ETH, &qs->txq_stopped);
}

/**
 * eth_xmit - add a packet to the Ethernet Tx queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
 */
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int qidx;
	unsigned int ndesc, pidx, credits, gen, compl;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct netdev_queue *txq;
	struct sge_qset *qs;
	struct sge_txq *q;

	/*
	 * The chip min packet length is 9 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	qidx = skb_get_queue_mapping(skb);
	qs = &pi->qs[qidx];
	q = &qs->txq[TXQ_ETH];
	txq = netdev_get_tx_queue(dev, qidx);

	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

	credits = q->size - q->in_use;
	ndesc = calc_tx_descs(skb);

	if (unlikely(credits < ndesc)) {
		t3_stop_tx_queue(txq, qs, q);
		dev_err(&adap->pdev->dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, q->cntxt_id & 7);
		return NETDEV_TX_BUSY;
	}

	q->in_use += ndesc;
	if (unlikely(credits - ndesc < q->stop_thres)) {
		t3_stop_tx_queue(txq, qs, q);

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
			q->restarts++;
			netif_tx_wake_queue(txq);
		}
	}

	gen = q->gen;
	q->unacked += ndesc;
	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
	q->unacked &= 7;

	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}

	/* update port statistics */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
	if (skb_shinfo(skb)->gso_size)
		qs->port_stats[SGE_PSTAT_TSO]++;
	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		qs->port_stats[SGE_PSTAT_VLANINS]++;

	/*
	 * We do not use Tx completion interrupts to free DMAd Tx packets.
	 * This is good for performance but means that we rely on new Tx
	 * packets arriving to run the destructors of completed packets,
	 * which open up space in their sockets' send queues.  Sometimes
	 * we do not get such new packets causing Tx to stall.  A single
	 * UDP transmitter is a good example of this situation.  We have
	 * a cleanup timer that periodically reclaims completed packets
	 * but it doesn't run often enough (nor do we want it to) to prevent
	 * lengthy stalls.  A solution to this problem is to run the
	 * destructor early, after the packet is queued but before it's DMAd.
	 * A downside is that we lie to socket memory accounting, but the
	 * amount of extra memory is reasonable (limited by the number of Tx
	 * descriptors), the packets do actually get freed quickly by new
	 * packets almost always, and for protocols like TCP that wait for
	 * acks to really free up the data the extra memory is even less of
	 * a concern.  On the positive side we run the destructors on the
	 * sending CPU rather than on a potentially different completing
	 * CPU, usually a good thing.  We also run them without holding our
	 * Tx queue lock, unlike what reclaim_completed_tx() would otherwise
	 * do.
	 *
	 * Run the destructor before telling the DMA engine about the packet
	 * to make sure it doesn't complete and get freed prematurely.
	 */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
	check_ring_tx_db(adap, q);
	return NETDEV_TX_OK;
}

/**
 * write_imm - write a packet into a Tx descriptor as immediate data
 * @d: the Tx descriptor to write
 * @skb: the packet
 * @len: the length of packet data to write as immediate data
 * @gen: the generation bit value to write
 *
 * Writes a packet as immediate data into a Tx descriptor.  The packet
 * contains a work request at its beginning.  We must write the packet
 * carefully so the SGE doesn't read it accidentally before it's written
 * in its entirety.
 */
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
			     unsigned int len, unsigned int gen)
{
	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
	struct work_request_hdr *to = (struct work_request_hdr *)d;

	if (likely(!skb->data_len))
		memcpy(&to[1], &from[1], len - sizeof(*from));
	else
		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));

	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
					V_WR_BCNTLFLT(len & 7));
	wmb();
	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
					V_WR_LEN((len + 7) / 8));
	wr_gen2(d, gen);
	kfree_skb(skb);
}

/**
 * check_desc_avail - check descriptor availability on a send queue
 * @adap: the adapter
 * @q: the send queue
 * @skb: the packet needing the descriptors
 * @ndesc: the number of Tx descriptors needed
 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 * Checks if the requested number of Tx descriptors is available on an
 * SGE send queue.  If the queue is already suspended or not enough
 * descriptors are available the packet is queued for later transmission.
 * Must be called with the Tx queue locked.
 *
 * Returns 0 if enough descriptors are available, 1 if there aren't
 * enough descriptors and the packet has been queued, and 2 if the caller
 * needs to retry because there weren't enough descriptors at the
 * beginning of the call but some freed up in the meantime.
 */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
				   struct sk_buff *skb, unsigned int ndesc,
				   unsigned int qid)
{
	if (unlikely(!skb_queue_empty(&q->sendq))) {
addq_exit:
		__skb_queue_tail(&q->sendq, skb);
		return 1;
	}
	if (unlikely(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		set_bit(qid, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			return 2;

		q->stops++;
		goto addq_exit;
	}
	return 0;
}

/**
 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 * @q: the SGE control Tx queue
 *
 * This is a variant of reclaim_completed_tx() that is used for Tx queues
 * that send only immediate data (presently just the control queues) and
 * thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	q->in_use -= reclaim;
	q->cleaned += reclaim;
}

static inline int immediate(const struct sk_buff *skb)
{
	return skb->len <= WR_LEN;
}

/**
 * ctrl_xmit - send a packet through an SGE control Tx queue
 * @adap: the adapter
 * @q: the control queue
 * @skb: the packet
 *
 * Send a packet through an SGE control Tx queue.  Packets sent through
 * a control queue must fit entirely as immediate data in a single Tx
 * descriptor and have no page fragments.
 */
static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;

	if (unlikely(!immediate(skb))) {
		WARN_ON(1);
		kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
	wrp->wr_lo = htonl(V_WR_TID(q->token));

	spin_lock(&q->lock);
again:	reclaim_completed_tx_imm(q);

	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
	if (unlikely(ret)) {
		if (ret == 1) {
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

	q->in_use++;
	if (++q->pidx >= q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	return NET_XMIT_SUCCESS;
}

/**
 * restart_ctrlq - restart a suspended control queue
 * @qs: the queue set containing the control queue
 *
 * Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_CTRL];

	spin_lock(&q->lock);
again:	reclaim_completed_tx_imm(q);

	while (q->in_use < q->size &&
	       (skb = __skb_dequeue(&q->sendq)) != NULL) {

		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

		if (++q->pidx >= q->size) {
			q->pidx = 0;
			q->gen ^= 1;
		}
		q->in_use++;
	}

	if (!skb_queue_empty(&q->sendq)) {
		set_bit(TXQ_CTRL, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
			goto again;
		q->stops++;
	}

	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(qs->adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/*
 * Send a management message through control queue 0
 */
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
	local_bh_enable();

	return ret;
}

/**
 * deferred_unmap_destructor - unmap a packet when it is freed
 * @skb: the packet
 *
 * This is the packet destructor used for Tx packets that need to remain
 * mapped until they are freed rather than until their Tx descriptors are
 * freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	int i;
	const dma_addr_t *p;
	const struct skb_shared_info *si;
	const struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	p = dui->addr;

	if (skb->tail - skb->transport_header)
		pci_unmap_single(dui->pdev, *p++,
				 skb->tail - skb->transport_header,
				 PCI_DMA_TODEVICE);

	si = skb_shinfo(skb);
	for (i = 0; i < si->nr_frags; i++)
		pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
			       PCI_DMA_TODEVICE);
}

static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
				     const struct sg_ent *sgl, int sgl_flits)
{
	dma_addr_t *p;
	struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	dui->pdev = pdev;
	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
		*p++ = be64_to_cpu(sgl->addr[0]);
		*p++ = be64_to_cpu(sgl->addr[1]);
	}
	if (sgl_flits)
		*p = be64_to_cpu(sgl->addr[0]);
}

/**
 * write_ofld_wr - write an offload work request
 * @adap: the adapter
 * @skb: the packet to send
 * @q: the Tx queue
 * @pidx: index of the first Tx descriptor to write
 * @gen: the generation value to use
 * @ndesc: number of descriptors the packet will occupy
 *
 * Write an offload work request to send the supplied packet.  The packet
 * data already carry the work request with most fields populated.
 */
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
			  struct sge_txq *q, unsigned int pidx,
			  unsigned int gen, unsigned int ndesc)
{
	unsigned int sgl_flits, flits;
	struct work_request_hdr *from;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];

	if (immediate(skb)) {
		q->sdesc[pidx].skb = NULL;
		write_imm(d, skb, skb->len, gen);
		return;
	}

	/* Only TX_DATA builds SGLs */

	from = (struct work_request_hdr *)skb->data;
	memcpy(&d->flit[1], &from[1],
	       skb_transport_offset(skb) - sizeof(*from));

	flits = skb_transport_offset(skb) / 8;
	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
			     skb->tail - skb->transport_header,
			     adap->pdev);
	if (need_skb_unmap()) {
		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
		skb->destructor = deferred_unmap_destructor;
	}

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
			 gen, from->wr_hi, from->wr_lo);
}

/**
 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given offload
 * packet.  These packets are already fully constructed.
 */
static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (skb->len <= WR_LEN)
		return 1;	/* packet fits as immediate data */

	flits = skb_transport_offset(skb) / 8;	/* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb->tail != skb->transport_header)
		cnt++;
	return flits_to_desc(flits + sgl_len(cnt));
}

/**
 * ofld_xmit - send a packet through an offload queue
 * @adap: the adapter
 * @q: the Tx offload queue
 * @skb: the packet
 *
 * Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;

	spin_lock(&q->lock);
again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
	if (unlikely(ret)) {
		if (ret == 1) {
			skb->priority = ndesc;	/* save for restart */
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	gen = q->gen;
	q->in_use += ndesc;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);

	write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
	check_ring_tx_db(adap, q);
	return NET_XMIT_SUCCESS;
}

/**
 * restart_offloadq - restart a suspended offload queue
 * @qs: the queue set containing the offload queue
 *
 * Resumes transmission on a suspended Tx offload queue.
 */
static void restart_offloadq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_OFLD];
	const struct port_info *pi = netdev_priv(qs->netdev);
	struct adapter *adap = pi->adapter;

	spin_lock(&q->lock);
again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

	while ((skb = skb_peek(&q->sendq)) != NULL) {
		unsigned int gen, pidx;
		unsigned int ndesc = skb->priority;

		if (unlikely(q->size - q->in_use < ndesc)) {
			set_bit(TXQ_OFLD, &qs->txq_stopped);
			smp_mb__after_clear_bit();

			if (should_restart_tx(q) &&
			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
				goto again;
			q->stops++;
			break;
		}

		gen = q->gen;
		q->in_use += ndesc;
		pidx = q->pidx;
		q->pidx += ndesc;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->gen ^= 1;
		}
		__skb_unlink(skb, &q->sendq);
		spin_unlock(&q->lock);

		write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
		spin_lock(&q->lock);
	}
	spin_unlock(&q->lock);

#if USE_GTS
	set_bit(TXQ_RUNNING, &q->flags);
	set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
	wmb();
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/**
 * queue_set - return the queue set a packet should use
 * @skb: the packet
 *
 * Maps a packet to the SGE queue set it should use.  The desired queue
 * set is carried in bits 1-3 in the packet's priority.
 */
static inline int queue_set(const struct sk_buff *skb)
{
	return skb->priority >> 1;
}

/**
 * is_ctrl_pkt - return whether an offload packet is a control packet
 * @skb: the packet
 *
 * Determines whether an offload packet should use an OFLD or a CTRL
 * Tx queue.  This is indicated by bit 0 in the packet's priority.
 */
static inline int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->priority & 1;
}
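
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): a caller would encode its queue selection into
 * skb->priority so that queue_set() and is_ctrl_pkt() above can decode it.
 */
static inline void t3_set_offload_prio(struct sk_buff *skb,
				       unsigned int qset, int ctrl)
{
	/* bits 1-3: queue set index; bit 0: CTRL (1) vs. OFLD (0) queue */
	skb->priority = (qset << 1) | (ctrl & 1);
}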

/**
 * t3_offload_tx - send an offload packet
 * @tdev: the offload device to send to
 * @skb: the packet
 *
 * Sends an offload packet.  We use the packet priority to select the
 * appropriate Tx queue as follows: bit 0 indicates whether the packet
 * should be sent as regular or control, bits 1-3 select the queue set.
 */
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct adapter *adap = tdev2adap(tdev);
	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];

	if (unlikely(is_ctrl_pkt(skb)))
		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);

	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
}

/**
 * offload_enqueue - add an offload packet to an SGE offload receive queue
 * @q: the SGE response queue
 * @skb: the packet
 *
 * Add a new offload packet to an SGE response queue's offload packet
 * queue.  If the packet is the first on the queue it schedules the RX
 * softirq to process the queue.
 */
static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
{
	int was_empty = skb_queue_empty(&q->rx_queue);

	__skb_queue_tail(&q->rx_queue, skb);

	if (was_empty) {
		struct sge_qset *qs = rspq_to_qset(q);

		napi_schedule(&qs->napi);
	}
}

/**
 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
 * @tdev: the offload device that will be receiving the packets
 * @q: the SGE response queue that assembled the bundle
 * @skbs: the partial bundle
 * @n: the number of packets in the bundle
 *
 * Delivers a (partial) bundle of Rx offload packets to an offload device.
 */
static inline void deliver_partial_bundle(struct t3cdev *tdev,
					  struct sge_rspq *q,
					  struct sk_buff *skbs[], int n)
{
	if (n) {
		q->offload_bundles++;
		tdev->recv(tdev, skbs, n);
	}
}

/**
 * ofld_poll - NAPI handler for offload packets in interrupt mode
 * @napi: the napi instance doing the polling
 * @budget: polling budget
 *
 * The NAPI handler for offload packets when a response queue is serviced
 * by the hard interrupt handler, i.e., when it's operating in non-polling
 * mode.  Creates small packet batches and sends them through the offload
 * receive handler.  Batches need to be of modest size as we do prefetches
 * on the packets in each.
 */
static int ofld_poll(struct napi_struct *napi, int budget)
{
	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
	struct sge_rspq *q = &qs->rspq;
	struct adapter *adapter = qs->adap;
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
		struct sk_buff_head queue;
		int ngathered;

		spin_lock_irq(&q->lock);
		__skb_queue_head_init(&queue);
		skb_queue_splice_init(&q->rx_queue, &queue);
		if (skb_queue_empty(&queue)) {
			napi_complete(napi);
			spin_unlock_irq(&q->lock);
			return work_done;
		}
		spin_unlock_irq(&q->lock);

		ngathered = 0;
		skb_queue_walk_safe(&queue, skb, tmp) {
			if (work_done >= budget)
				break;
			work_done++;

			__skb_unlink(skb, &queue);
			prefetch(skb->data);
			skbs[ngathered] = skb;
			if (++ngathered == RX_BUNDLE_SIZE) {
				q->offload_bundles++;
				adapter->tdev.recv(&adapter->tdev, skbs,
						   ngathered);
				ngathered = 0;
			}
		}
		if (!skb_queue_empty(&queue)) {
			/* splice remaining packets back onto Rx queue */
			spin_lock_irq(&q->lock);
			skb_queue_splice(&queue, &q->rx_queue);
			spin_unlock_irq(&q->lock);
		}
		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
	}

	return work_done;
}

/**
 * rx_offload - process a received offload packet
 * @tdev: the offload device receiving the packet
 * @rq: the response queue that received the packet
 * @skb: the packet
 * @rx_gather: a gather list of packets if we are building a bundle
 * @gather_idx: index of the next available slot in the bundle
 *
 * Process an ingress offload packet and add it to the offload ingress
 * queue.  Returns the index of the next available slot in the bundle.
 */
static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
			     struct sk_buff *skb, struct sk_buff *rx_gather[],
			     unsigned int gather_idx)
{
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (rq->polling) {
		rx_gather[gather_idx++] = skb;
		if (gather_idx == RX_BUNDLE_SIZE) {
			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
			gather_idx = 0;
			rq->offload_bundles++;
		}
	} else
		offload_enqueue(rq, skb);

	return gather_idx;
}

/**
 * restart_tx - check whether to restart suspended Tx queues
 * @qs: the queue set to resume
 *
 * Restarts suspended Tx queues of an SGE queue set if they have enough
 * free resources to resume operation.
 */
static void restart_tx(struct sge_qset *qs)
{
	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
		qs->txq[TXQ_ETH].restarts++;
		if (netif_running(qs->netdev))
			netif_tx_wake_queue(qs->tx_q);
	}

	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
		qs->txq[TXQ_OFLD].restarts++;
		tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
	}
	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
		qs->txq[TXQ_CTRL].restarts++;
		tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
	}
}

/**
 * cxgb3_arp_process - process an ARP request probing a private IP address
 * @adapter: the adapter
 * @skb: the skbuff containing the ARP request
 *
 * Check if the ARP request is probing the private IP address
 * dedicated to iSCSI, generate an ARP reply if so.
 */
static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct port_info *pi;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	unsigned char *sha;
	__be32 sip, tip;

	if (!dev)
		return;

	skb_reset_network_header(skb);
	arp = arp_hdr(skb);

	if (arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp + 1);
	sha = arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, sizeof(sip));
	arp_ptr += sizeof(sip);
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, sizeof(tip));

	pi = netdev_priv(dev);
	if (tip != pi->iscsi_ipv4addr)
		return;

	arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
		 dev->dev_addr, sha);
}

static inline int is_arp(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_ARP);
}

/**
 * rx_eth - process an ingress ethernet packet
 * @adap: the adapter
 * @rq: the response queue that received the packet
 * @skb: the packet
 * @pad: amount of padding at the start of the buffer
 * @lro: whether to deliver the packet via GRO
 *
 * Process an ingress ethernet packet and deliver it to the stack.
 * The padding is 2 if the packet was delivered in an Rx buffer and 0
 * if it was immediate data in a response.
 */
static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
		   struct sk_buff *skb, int pad, int lro)
{
	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
	struct sge_qset *qs = rspq_to_qset(rq);
	struct port_info *pi;

	skb_pull(skb, sizeof(*p) + pad);
	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
	pi = netdev_priv(skb->dev);
	if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid &&
	    p->csum == htons(0xffff) && !p->fragment) {
		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;
	skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);

	if (unlikely(p->vlan_valid)) {
		struct vlan_group *grp = pi->vlan_grp;

		qs->port_stats[SGE_PSTAT_VLANEX]++;
		if (likely(grp))
			if (lro)
				vlan_gro_receive(&qs->napi, grp,
						 ntohs(p->vlan), skb);
			else {
				if (unlikely(pi->iscsi_ipv4addr &&
				    is_arp(skb))) {
					unsigned short vtag = ntohs(p->vlan) &
							      VLAN_VID_MASK;
					skb->dev = vlan_group_get_device(grp,
									 vtag);
					cxgb3_arp_process(adap, skb);
				}
				__vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
						  rq->polling);
			}
		else
			dev_kfree_skb_any(skb);
	} else if (rq->polling) {
		if (lro)
			napi_gro_receive(&qs->napi, skb);
		else {
			if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
				cxgb3_arp_process(adap, skb);
			netif_receive_skb(skb);
		}
	} else
		netif_rx(skb);
}

static inline int is_eth_tcp(u32 rss)
{
	return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
}

/**
 * lro_add_page - add a page chunk to an LRO session
 * @adap: the adapter
 * @qs: the associated queue set
 * @fl: the free list containing the page chunk to add
 * @len: packet length
 * @complete: indicates the last fragment of a frame
 *
 * Add a received packet contained in a page chunk to an existing LRO
 * session.
 */
static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
			 struct sge_fl *fl, int len, int complete)
{
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
	struct sk_buff *skb = NULL;
	struct cpl_rx_pkt *cpl;
	struct skb_frag_struct *rx_frag;
	int nr_frags;
	int offset = 0;

	if (!qs->nomem) {
		skb = napi_get_frags(&qs->napi);
		qs->nomem = !skb;
	}

	fl->credits--;

	pci_dma_sync_single_for_cpu(adap->pdev,
				    pci_unmap_addr(sd, dma_addr),
				    fl->buf_size - SGE_PG_RSVD,
				    PCI_DMA_FROMDEVICE);

	(*sd->pg_chunk.p_cnt)--;
	if (!*sd->pg_chunk.p_cnt)
		pci_unmap_page(adap->pdev,
			       pci_unmap_addr(&sd->pg_chunk, mapping),
			       fl->alloc_size,
			       PCI_DMA_FROMDEVICE);

	if (!skb) {
		put_page(sd->pg_chunk.page);
		if (complete)
			qs->nomem = 0;
		return;
	}

	rx_frag = skb_shinfo(skb)->frags;
	nr_frags = skb_shinfo(skb)->nr_frags;

	if (!nr_frags) {
		offset = 2 + sizeof(struct cpl_rx_pkt);
		qs->lro_va = sd->pg_chunk.va + 2;
	}
	len -= offset;

	prefetch(qs->lro_va);

	rx_frag += nr_frags;
	rx_frag->page = sd->pg_chunk.page;
	rx_frag->page_offset = sd->pg_chunk.offset + offset;
	rx_frag->size = len;

	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;
	skb_shinfo(skb)->nr_frags++;

	if (!complete)
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	cpl = qs->lro_va;

	if (unlikely(cpl->vlan_valid)) {
		struct net_device *dev = qs->netdev;
		struct port_info *pi = netdev_priv(dev);
		struct vlan_group *grp = pi->vlan_grp;

		if (likely(grp != NULL)) {
			vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan));
			return;
		}
	}
	napi_gro_frags(&qs->napi);
}

/**
 * handle_rsp_cntrl_info - handles control information in a response
 * @qs: the queue set corresponding to the response
 * @flags: the response control flags
 *
 * Handles the control information of an SGE response, such as GTS
 * indications and completion credits for the queue set's Tx queues.
 * HW coalesces credits, we don't do any extra SW coalescing.
 */
static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
{
	unsigned int credits;

#if USE_GTS
	if (flags & F_RSPD_TXQ0_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
#endif

	credits = G_RSPD_TXQ0_CR(flags);
	if (credits)
		qs->txq[TXQ_ETH].processed += credits;

	credits = G_RSPD_TXQ2_CR(flags);
	if (credits)
		qs->txq[TXQ_CTRL].processed += credits;

#if USE_GTS
	if (flags & F_RSPD_TXQ1_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
#endif
	credits = G_RSPD_TXQ1_CR(flags);
	if (credits)
		qs->txq[TXQ_OFLD].processed += credits;
}

/**
 * check_ring_db - check if we need to ring any doorbells
 * @adap: the adapter
 * @qs: the queue set whose Tx queues are to be examined
 * @sleeping: indicates which Tx queue sent GTS
 *
 * Checks if some of a queue set's Tx queues need to ring their doorbells
 * to resume transmission after idling while they still have unprocessed
 * descriptors.
 */
static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
			  unsigned int sleeping)
{
	if (sleeping & F_RSPD_TXQ0_GTS) {
		struct sge_txq *txq = &qs->txq[TXQ_ETH];

		if (txq->cleaned + txq->in_use != txq->processed &&
		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
			set_bit(TXQ_RUNNING, &txq->flags);
			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
				     V_EGRCNTX(txq->cntxt_id));
		}
	}

	if (sleeping & F_RSPD_TXQ1_GTS) {
		struct sge_txq *txq = &qs->txq[TXQ_OFLD];

		if (txq->cleaned + txq->in_use != txq->processed &&
		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
			set_bit(TXQ_RUNNING, &txq->flags);
			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
				     V_EGRCNTX(txq->cntxt_id));
		}
	}
}
/**
 *	is_new_response - check if a response is newly written
 *	@r: the response descriptor
 *	@q: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
static inline int is_new_response(const struct rsp_desc *r,
				  const struct sge_rspq *q)
{
	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
}

static inline void clear_rspq_bufstate(struct sge_rspq * const q)
{
	q->pg_skb = NULL;
	q->rx_recycle_buf = 0;
}

#define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))

/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
#define NOMEM_INTR_DELAY 2500

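/*
 * Worked example: the value above is in units of 0.1us, so 2500 delays the
 * next interrupt by 250us when buffer allocation fails, giving the system
 * some time to free up memory.
 */
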
/**
 *	process_responses - process responses from an SGE response queue
 *	@adap: the adapter
 *	@qs: the queue set to which the response queue belongs
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied budget.
 *	Responses include received packets as well as credits and other events
 *	for the queues that belong to the response queue's queue set.
 *	A negative budget is effectively unlimited.
 *
 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage use a fairly
 *	long delay to help recovery.
 */
static int process_responses(struct adapter *adap, struct sge_qset *qs,
			     int budget)
{
	struct sge_rspq *q = &qs->rspq;
	struct rsp_desc *r = &q->desc[q->cidx];
	int budget_left = budget;
	unsigned int sleeping = 0;
	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
	int ngathered = 0;

	q->next_holdoff = q->holdoff_tmr;

	while (likely(budget_left && is_new_response(r, q))) {
		int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
		struct sk_buff *skb = NULL;
		u32 len, flags = ntohl(r->flags);
		__be32 rss_hi = *(const __be32 *)r,
		       rss_lo = r->rss_hdr.rss_hash_val;

		eth = r->rss_hdr.opcode == CPL_RX_PKT;

		if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
			if (!skb)
				goto no_mem;

			memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
			skb->data[0] = CPL_ASYNC_NOTIF;
			rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
			q->async_notif++;
		} else if (flags & F_RSPD_IMM_DATA_VALID) {
			skb = get_imm_packet(r);
			if (unlikely(!skb)) {
no_mem:
				q->next_holdoff = NOMEM_INTR_DELAY;
				q->nomem++;
				/* consume one credit since we tried */
				budget_left--;
				break;
			}
			q->imm_data++;
			ethpad = 0;
		} else if ((len = ntohl(r->len_cq)) != 0) {
			struct sge_fl *fl;

			lro &= eth && is_eth_tcp(rss_hi);

			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
			if (fl->use_pages) {
				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;

				prefetch(addr);
#if L1_CACHE_BYTES < 128
				prefetch(addr + L1_CACHE_BYTES);
#endif
				__refill_fl(adap, fl);
				if (lro > 0) {
					lro_add_page(adap, qs, fl,
						     G_RSPD_LEN(len),
						     flags & F_RSPD_EOP);
					goto next_fl;
				}

				skb = get_packet_pg(adap, fl, q,
						    G_RSPD_LEN(len),
						    eth ?
						    SGE_RX_DROP_THRES : 0);
				q->pg_skb = skb;
			} else
				skb = get_packet(adap, fl, G_RSPD_LEN(len),
						 eth ? SGE_RX_DROP_THRES : 0);
			if (unlikely(!skb)) {
				if (!eth)
					goto no_mem;
				q->rx_drops++;
			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
				__skb_pull(skb, 2);
next_fl:
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
		} else
			q->pure_rsps++;

		if (flags & RSPD_CTRL_MASK) {
			sleeping |= flags & RSPD_GTS_MASK;
			handle_rsp_cntrl_info(qs, flags);
		}

		r++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->gen ^= 1;
			r = q->desc;
		}
		prefetch(r);

		if (++q->credits >= (q->size / 4)) {
			refill_rspq(adap, q, q->credits);
			q->credits = 0;
		}

		packet_complete = flags &
				  (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
				   F_RSPD_ASYNC_NOTIF);

		if (skb != NULL && packet_complete) {
			if (eth)
				rx_eth(adap, q, skb, ethpad, lro);
			else {
				q->offload_pkts++;
				/* Preserve the RSS info in csum & priority */
				skb->csum = rss_hi;
				skb->priority = rss_lo;
				ngathered = rx_offload(&adap->tdev, q, skb,
						       offload_skbs,
						       ngathered);
			}

			if (flags & F_RSPD_EOP)
				clear_rspq_bufstate(q);
		}
		--budget_left;
	}

	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);

	if (sleeping)
		check_ring_db(adap, qs, sleeping);

	smp_mb();		/* commit Tx queue .processed updates */
	if (unlikely(qs->txq_stopped != 0))
		restart_tx(qs);

	budget -= budget_left;
	return budget;
}

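/*
 * Illustrative example of the budget accounting above: called with
 * budget = 64 while 10 responses are pending, the loop exits with
 * budget_left = 54 and the function returns 64 - 54 = 10, i.e. the number
 * of responses actually processed.
 */
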
static inline int is_pure_response(const struct rsp_desc *r)
{
	__be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);

	return (n | r->len_cq) == 0;
}

/**
 *	napi_rx_handler - the NAPI handler for Rx processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.
 */
static int napi_rx_handler(struct napi_struct *napi, int budget)
{
	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
	struct adapter *adap = qs->adap;
	int work_done = process_responses(adap, qs, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);

		/*
		 * Because we don't atomically flush the following
		 * write it is possible that in very rare cases it can
		 * reach the device in a way that races with a new
		 * response being written plus an error interrupt
		 * causing the NAPI interrupt handler below to return
		 * unhandled status to the OS.  To protect against
		 * this would require flushing the write and doing
		 * both the write and the flush with interrupts off.
		 * Way too expensive and unjustifiable given the
		 * rarity of the race.
		 *
		 * The race cannot happen at all with MSI-X.
		 */
		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
			     V_NEWTIMER(qs->rspq.next_holdoff) |
			     V_NEWINDEX(qs->rspq.cidx));
	}
	return work_done;
}

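/*
 * Note (illustrative, not from the original source): the A_SG_GTS write
 * above packs three fields into one doorbell: which response queue
 * (V_RSPQ), the next interrupt holdoff timer in 0.1us units (V_NEWTIMER)
 * and the new consumer index (V_NEWINDEX).  For example, re-enabling
 * queue 0 with a 5us holdoff at index 100 would be
 *
 *	t3_write_reg(adap, A_SG_GTS,
 *		     V_RSPQ(0) | V_NEWTIMER(50) | V_NEWINDEX(100));
 */
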
/*
 * Returns true if the device is already scheduled for polling.
 */
static inline int napi_is_scheduled(struct napi_struct *napi)
{
	return test_bit(NAPI_STATE_SCHED, &napi->state);
}

/**
 *	process_pure_responses - process pure responses from a response queue
 *	@adap: the adapter
 *	@qs: the queue set owning the response queue
 *	@r: the first pure response to process
 *
 *	A simpler version of process_responses() that handles only pure (i.e.,
 *	non data-carrying) responses.  Such responses are too light-weight to
 *	justify calling a softirq under NAPI, so we handle them specially in
 *	the interrupt handler.  The function is called with a pointer to a
 *	response, which the caller must ensure is a valid pure response.
 *
 *	Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
				  struct rsp_desc *r)
{
	struct sge_rspq *q = &qs->rspq;
	unsigned int sleeping = 0;

	do {
		u32 flags = ntohl(r->flags);

		r++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->gen ^= 1;
			r = q->desc;
		}
		prefetch(r);

		if (flags & RSPD_CTRL_MASK) {
			sleeping |= flags & RSPD_GTS_MASK;
			handle_rsp_cntrl_info(qs, flags);
		}

		q->pure_rsps++;
		if (++q->credits >= (q->size / 4)) {
			refill_rspq(adap, q, q->credits);
			q->credits = 0;
		}
	} while (is_new_response(r, q) && is_pure_response(r));

	if (sleeping)
		check_ring_db(adap, qs, sleeping);

	smp_mb();		/* commit Tx queue .processed updates */
	if (unlikely(qs->txq_stopped != 0))
		restart_tx(qs);

	return is_new_response(r, q);
}

/**
 *	handle_responses - decide what to do with new responses in NAPI mode
 *	@adap: the adapter
 *	@q: the response queue
 *
 *	This is used by the NAPI interrupt handlers to decide what to do with
 *	new SGE responses.  If there are no new responses it returns -1.  If
 *	there are new responses and they are pure (i.e., non-data carrying)
 *	it handles them straight in hard interrupt context as they are very
 *	cheap and don't deliver any packets.  Finally, if there are any data
 *	signaling responses it schedules the NAPI handler.  Returns 1 if it
 *	schedules NAPI, 0 if all new responses were pure.
 *
 *	The caller must ascertain NAPI is not already running.
 */
static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
{
	struct sge_qset *qs = rspq_to_qset(q);
	struct rsp_desc *r = &q->desc[q->cidx];

	if (!is_new_response(r, q))
		return -1;
	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
		return 0;
	}
	napi_schedule(&qs->napi);
	return 1;
}

/*
 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
 * (i.e., response queue serviced in hard interrupt).
 */
irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
{
	struct sge_qset *qs = cookie;
	struct adapter *adap = qs->adap;
	struct sge_rspq *q = &qs->rspq;

	spin_lock(&q->lock);
	if (process_responses(adap, qs, -1) == 0)
		q->unhandled_irqs++;
	t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
		     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}

/*
 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
 * (i.e., response queue serviced by NAPI polling).
 */
static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
{
	struct sge_qset *qs = cookie;
	struct sge_rspq *q = &qs->rspq;

	spin_lock(&q->lock);

	if (handle_responses(qs->adap, q) < 0)
		q->unhandled_irqs++;
	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}

/*
 * The non-NAPI MSI interrupt handler.  This needs to handle data events from
 * SGE response queues as well as error and other async events as they all use
 * the same MSI vector.  We use one SGE response queue per port in this mode
 * and protect all response queues with queue 0's lock.
 */
static irqreturn_t t3_intr_msi(int irq, void *cookie)
{
	int new_packets = 0;
	struct adapter *adap = cookie;
	struct sge_rspq *q = &adap->sge.qs[0].rspq;

	spin_lock(&q->lock);

	if (process_responses(adap, &adap->sge.qs[0], -1)) {
		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
		new_packets = 1;
	}

	if (adap->params.nports == 2 &&
	    process_responses(adap, &adap->sge.qs[1], -1)) {
		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;

		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
			     V_NEWTIMER(q1->next_holdoff) |
			     V_NEWINDEX(q1->cidx));
		new_packets = 1;
	}

	if (!new_packets && t3_slow_intr_handler(adap) == 0)
		q->unhandled_irqs++;

	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}

static int rspq_check_napi(struct sge_qset *qs)
{
	struct sge_rspq *q = &qs->rspq;

	if (!napi_is_scheduled(&qs->napi) &&
	    is_new_response(&q->desc[q->cidx], q)) {
		napi_schedule(&qs->napi);
		return 1;
	}
	return 0;
}

/*
 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
 * by NAPI polling).  Handles data events from SGE response queues as well as
 * error and other async events as they all use the same MSI vector.  We use
 * one SGE response queue per port in this mode and protect all response
 * queues with queue 0's lock.
 */
static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
{
	int new_packets;
	struct adapter *adap = cookie;
	struct sge_rspq *q = &adap->sge.qs[0].rspq;

	spin_lock(&q->lock);

	new_packets = rspq_check_napi(&adap->sge.qs[0]);
	if (adap->params.nports == 2)
		new_packets += rspq_check_napi(&adap->sge.qs[1]);
	if (!new_packets && t3_slow_intr_handler(adap) == 0)
		q->unhandled_irqs++;

	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}

/*
 * A helper function that processes responses and issues GTS.
 */
static inline int process_responses_gts(struct adapter *adap,
					struct sge_rspq *rq)
{
	int work;

	work = process_responses(adap, rspq_to_qset(rq), -1);
	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
		     V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
	return work;
}

/*
 * The legacy INTx interrupt handler.  This needs to handle data events from
 * SGE response queues as well as error and other async events as they all use
 * the same interrupt pin.  We use one SGE response queue per port in this mode
 * and protect all response queues with queue 0's lock.
 */
static irqreturn_t t3_intr(int irq, void *cookie)
{
	int work_done, w0, w1;
	struct adapter *adap = cookie;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;

	spin_lock(&q0->lock);

	w0 = is_new_response(&q0->desc[q0->cidx], q0);
	w1 = adap->params.nports == 2 &&
	     is_new_response(&q1->desc[q1->cidx], q1);

	if (likely(w0 | w1)) {
		t3_write_reg(adap, A_PL_CLI, 0);
		t3_read_reg(adap, A_PL_CLI);	/* flush */

		if (likely(w0))
			process_responses_gts(adap, q0);

		if (w1)
			process_responses_gts(adap, q1);

		work_done = w0 | w1;
	} else
		work_done = t3_slow_intr_handler(adap);

	spin_unlock(&q0->lock);
	return IRQ_RETVAL(work_done != 0);
}

/*
 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt pin.  We use one SGE
 * response queue per port in this mode and protect all response queues with
 * queue 0's lock.
 */
static irqreturn_t t3b_intr(int irq, void *cookie)
{
	u32 map;
	struct adapter *adap = cookie;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;

	t3_write_reg(adap, A_PL_CLI, 0);
	map = t3_read_reg(adap, A_SG_DATA_INTR);

	if (unlikely(!map))	/* shared interrupt, most likely */
		return IRQ_NONE;

	spin_lock(&q0->lock);

	if (unlikely(map & F_ERRINTR))
		t3_slow_intr_handler(adap);

	if (likely(map & 1))
		process_responses_gts(adap, q0);

	if (map & 2)
		process_responses_gts(adap, &adap->sge.qs[1].rspq);

	spin_unlock(&q0->lock);
	return IRQ_HANDLED;
}

/*
 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt pin.  We use one SGE
 * response queue per port in this mode and protect all response queues with
 * queue 0's lock.
 */
static irqreturn_t t3b_intr_napi(int irq, void *cookie)
{
	u32 map;
	struct adapter *adap = cookie;
	struct sge_qset *qs0 = &adap->sge.qs[0];
	struct sge_rspq *q0 = &qs0->rspq;

	t3_write_reg(adap, A_PL_CLI, 0);
	map = t3_read_reg(adap, A_SG_DATA_INTR);

	if (unlikely(!map))	/* shared interrupt, most likely */
		return IRQ_NONE;

	spin_lock(&q0->lock);

	if (unlikely(map & F_ERRINTR))
		t3_slow_intr_handler(adap);

	if (likely(map & 1))
		napi_schedule(&qs0->napi);

	if (map & 2)
		napi_schedule(&adap->sge.qs[1].napi);

	spin_unlock(&q0->lock);
	return IRQ_HANDLED;
}

/**
 *	t3_intr_handler - select the top-level interrupt handler
 *	@adap: the adapter
 *	@polling: whether using NAPI to service response queues
 *
 *	Selects the top-level interrupt handler based on the type of interrupts
 *	(MSI-X, MSI, or legacy) and whether NAPI will be used to service the
 *	response queues.
 */
irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
{
	if (adap->flags & USING_MSIX)
		return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
	if (adap->flags & USING_MSI)
		return polling ? t3_intr_msi_napi : t3_intr_msi;
	if (adap->params.rev > 0)
		return polling ? t3b_intr_napi : t3b_intr;
	return t3_intr;
}

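/*
 * A minimal usage sketch (hypothetical, error handling omitted): the
 * top-level driver picks the handler once and hands it to request_irq(),
 * e.g. for a non-MSI-X setup:
 *
 *	err = request_irq(adap->pdev->irq,
 *			  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
 *			  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
 *			  "cxgb3", adap);
 */
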
#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		    F_HIRCQPARITYERROR)
#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
		      F_RSPQDISABLED)

/**
 *	t3_sge_err_intr_handler - SGE async event interrupt handler
 *	@adapter: the adapter
 *
 *	Interrupt handler for SGE asynchronous (non-data) events.
 */
void t3_sge_err_intr_handler(struct adapter *adapter)
{
	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
				 ~F_FLEMPTY;

	if (status & SGE_PARERR)
		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
			 status & SGE_PARERR);
	if (status & SGE_FRAMINGERR)
		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
			 status & SGE_FRAMINGERR);

	if (status & F_RSPQCREDITOVERFOW)
		CH_ALERT(adapter, "SGE response queue credit overflow\n");

	if (status & F_RSPQDISABLED) {
		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);

		CH_ALERT(adapter,
			 "packet delivered to disabled response queue "
			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
	}

	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
		CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
			 status & F_HIPIODRBDROPERR ? "high" : "low");

	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
	if (status & SGE_FATALERR)
		t3_fatal_err(adapter);
}

/**
 *	sge_timer_tx - perform periodic maintenance of an SGE qset
 *	@data: the SGE queue set to maintain
 *
 *	Runs periodically from a timer to perform maintenance of an SGE queue
 *	set.  It cleans up any completed Tx descriptors that may still be
 *	pending.  Normal descriptor cleanup happens when new packets are added
 *	to a Tx queue, so this timer is relatively infrequent and does any
 *	cleanup only if the Tx queue has not seen any new packets in a while.
 *	We make a best effort attempt to reclaim descriptors, in that we don't
 *	wait around if we cannot get a queue's lock (which most likely is
 *	because someone else is queueing new packets and so will also handle
 *	the clean up).  Since control queues use immediate data exclusively we
 *	don't bother cleaning them up here.
 */
static void sge_timer_tx(unsigned long data)
{
	struct sge_qset *qs = (struct sge_qset *)data;
	struct port_info *pi = netdev_priv(qs->netdev);
	struct adapter *adap = pi->adapter;
	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
	unsigned long next_period;

	if (__netif_tx_trylock(qs->tx_q)) {
		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
						    TX_RECLAIM_TIMER_CHUNK);
		__netif_tx_unlock(qs->tx_q);
	}

	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
						     TX_RECLAIM_TIMER_CHUNK);
		spin_unlock(&qs->txq[TXQ_OFLD].lock);
	}

	next_period = TX_RECLAIM_PERIOD >>
		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
		       TX_RECLAIM_TIMER_CHUNK);
	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
}

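/*
 * Worked example of the adaptive period above: with TX_RECLAIM_PERIOD at
 * HZ/4 and TX_RECLAIM_TIMER_CHUNK at 64, reclaiming fewer than 64
 * descriptors keeps the next period at HZ/4, while reclaiming a full chunk
 * of 64 halves it to HZ/8, so a busy queue is revisited sooner.
 */
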
/**
 *	sge_timer_rx - perform periodic maintenance of an SGE qset
 *	@data: the SGE queue set to maintain
 *
 *	a) Replenishes Rx queues that have run out due to memory shortage.
 *	Normally new Rx buffers are added when existing ones are consumed but
 *	when out of memory a queue can become empty.  We try to add only a few
 *	buffers here, the queue will be replenished fully as these new buffers
 *	are used up if memory shortage has subsided.
 *
 *	b) Return coalesced response queue credits in case a response queue is
 *	starved.
 */
static void sge_timer_rx(unsigned long data)
{
	spinlock_t *lock;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct port_info *pi = netdev_priv(qs->netdev);
	struct adapter *adap = pi->adapter;
	u32 status;

	lock = adap->params.rev > 0 ?
	       &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;

	if (!spin_trylock_irq(lock))
		goto out;

	if (napi_is_scheduled(&qs->napi))
		goto unlock;

	if (adap->params.rev < 4) {
		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);

		if (status & (1 << qs->rspq.cntxt_id)) {
			qs->rspq.starved++;
			if (qs->rspq.credits) {
				qs->rspq.credits--;
				refill_rspq(adap, &qs->rspq, 1);
				qs->rspq.restarted++;
				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
					     1 << qs->rspq.cntxt_id);
			}
		}
	}

	if (qs->fl[0].credits < qs->fl[0].size)
		__refill_fl(adap, &qs->fl[0]);
	if (qs->fl[1].credits < qs->fl[1].size)
		__refill_fl(adap, &qs->fl[1]);

unlock:
	spin_unlock_irq(lock);
out:
	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
}

/**
 *	t3_update_qset_coalesce - update coalescing settings for a queue set
 *	@qs: the SGE queue set
 *	@p: new queue set parameters
 *
 *	Update the coalescing settings for an SGE queue set.  Nothing is done
 *	if the queue set is not initialized yet.
 */
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
{
	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);	/* can't be 0 */
	qs->rspq.polling = p->polling;
	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
}

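/*
 * Worked example: the holdoff timer is kept in 0.1us units, so the default
 * coalesce_usecs of 5 yields holdoff_tmr = max(5 * 10, 1) = 50, and a
 * coalesce_usecs of 0 is clamped to the minimum value of 1.
 */
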
/**
 *	t3_sge_alloc_qset - initialize an SGE queue set
 *	@adapter: the adapter
 *	@id: the queue set id
 *	@nports: how many Ethernet ports will be using this queue set
 *	@irq_vec_idx: the IRQ vector index for response queue interrupts
 *	@p: configuration parameters for this queue set
 *	@ntxq: number of Tx queues for the queue set
 *	@dev: net device associated with this queue set
 *	@netdevq: net device TX queue associated with this queue set
 *
 *	Allocate resources and initialize an SGE queue set.  A queue set
 *	comprises a response queue, two Rx free-buffer queues, and up to 3
 *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
 *	queue, offload queue, and control queue.
 */
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
		      int irq_vec_idx, const struct qset_params *p,
		      int ntxq, struct net_device *dev,
		      struct netdev_queue *netdevq)
{
	int i, avail, ret = -ENOMEM;
	struct sge_qset *q = &adapter->sge.qs[id];

	init_qset_cntxt(q, id);
	setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
	setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);

	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
	if (!q->fl[0].desc)
		goto err;

	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
	if (!q->fl[1].desc)
		goto err;

	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
				  sizeof(struct rsp_desc), 0,
				  &q->rspq.phys_addr, NULL);
	if (!q->rspq.desc)
		goto err;

	for (i = 0; i < ntxq; ++i) {
		/*
		 * The control queue always uses immediate data so does not
		 * need to keep track of any sk_buffs.
		 */
		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);

		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
					    sizeof(struct tx_desc), sz,
					    &q->txq[i].phys_addr,
					    &q->txq[i].sdesc);
		if (!q->txq[i].desc)
			goto err;

		q->txq[i].gen = 1;
		q->txq[i].size = p->txq_size[i];
		spin_lock_init(&q->txq[i].lock);
		skb_queue_head_init(&q->txq[i].sendq);
	}

	tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
		     (unsigned long)q);
	tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
		     (unsigned long)q);

	q->fl[0].gen = q->fl[1].gen = 1;
	q->fl[0].size = p->fl_size;
	q->fl[1].size = p->jumbo_size;

	q->rspq.gen = 1;
	q->rspq.size = p->rspq_size;
	spin_lock_init(&q->rspq.lock);
	skb_queue_head_init(&q->rspq.rx_queue);

	q->txq[TXQ_ETH].stop_thres = nports *
	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);

#if FL0_PG_CHUNK_SIZE > 0
	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
#else
	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
#endif
#if FL1_PG_CHUNK_SIZE > 0
	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
#else
	q->fl[1].buf_size = is_offload(adapter) ?
		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
#endif

	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
	q->fl[0].order = FL0_PG_ORDER;
	q->fl[1].order = FL1_PG_ORDER;
	q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
	q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;

	spin_lock_irq(&adapter->sge.reg_lock);

	/* FL threshold comparison uses < */
	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
				   q->rspq.phys_addr, q->rspq.size,
				   q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
	if (ret)
		goto err_unlock;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
					  q->fl[i].phys_addr, q->fl[i].size,
					  q->fl[i].buf_size - SGE_PG_RSVD,
					  p->cong_thres, 1, 0);
		if (ret)
			goto err_unlock;
	}

	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
				 1, 0);
	if (ret)
		goto err_unlock;

	if (ntxq > 1) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
					 USE_GTS, SGE_CNTXT_OFLD, id,
					 q->txq[TXQ_OFLD].phys_addr,
					 q->txq[TXQ_OFLD].size, 0, 1, 0);
		if (ret)
			goto err_unlock;
	}

	if (ntxq > 2) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
					 SGE_CNTXT_CTRL, id,
					 q->txq[TXQ_CTRL].phys_addr,
					 q->txq[TXQ_CTRL].size,
					 q->txq[TXQ_CTRL].token, 1, 0);
		if (ret)
			goto err_unlock;
	}

	spin_unlock_irq(&adapter->sge.reg_lock);

	q->adap = adapter;
	q->netdev = dev;
	q->tx_q = netdevq;
	t3_update_qset_coalesce(q, p);

	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
			  GFP_KERNEL | __GFP_COMP);
	if (!avail) {
		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
		goto err;
	}
	if (avail < q->fl[0].size)
		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
			avail);

	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
			  GFP_KERNEL | __GFP_COMP);
	if (avail < q->fl[1].size)
		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
			avail);
	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);

	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
		     V_NEWTIMER(q->rspq.holdoff_tmr));

	return 0;

err_unlock:
	spin_unlock_irq(&adapter->sge.reg_lock);
err:
	t3_free_qset(adapter, q);
	return ret;
}

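/*
 * A minimal caller sketch (hypothetical values, locking and error handling
 * omitted): allocating queue set 0 with all three Tx queues for a single
 * port, using that port's net device and its first Tx queue:
 *
 *	err = t3_sge_alloc_qset(adap, 0, 1, irq_vec_idx,
 *				&adap->params.sge.qset[0], SGE_TXQ_PER_SET,
 *				dev, netdev_get_tx_queue(dev, 0));
 */
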
/**
 *	t3_start_sge_timers - start SGE timer callbacks
 *	@adap: the adapter
 *
 *	Starts each SGE queue set's timer callbacks.
 */
void t3_start_sge_timers(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i) {
		struct sge_qset *q = &adap->sge.qs[i];

		if (q->tx_reclaim_timer.function)
			mod_timer(&q->tx_reclaim_timer,
				  jiffies + TX_RECLAIM_PERIOD);

		if (q->rx_reclaim_timer.function)
			mod_timer(&q->rx_reclaim_timer,
				  jiffies + RX_RECLAIM_PERIOD);
	}
}

/**
 *	t3_stop_sge_timers - stop SGE timer callbacks
 *	@adap: the adapter
 *
 *	Stops each SGE queue set's timer callbacks.
 */
void t3_stop_sge_timers(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i) {
		struct sge_qset *q = &adap->sge.qs[i];

		if (q->tx_reclaim_timer.function)
			del_timer_sync(&q->tx_reclaim_timer);
		if (q->rx_reclaim_timer.function)
			del_timer_sync(&q->rx_reclaim_timer);
	}
}

/**
 *	t3_free_sge_resources - free SGE resources
 *	@adap: the adapter
 *
 *	Frees resources used by the SGE queue sets.
 */
void t3_free_sge_resources(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i)
		t3_free_qset(adap, &adap->sge.qs[i]);
}

/**
 *	t3_sge_start - enable SGE
 *	@adap: the adapter
 *
 *	Enables the SGE for DMAs.  This is the last step in starting packet
 *	transfers.
 */
void t3_sge_start(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
}

/**
 *	t3_sge_stop - disable SGE operation
 *	@adap: the adapter
 *
 *	Disables the DMA engine.  This can be called in emergencies (e.g.,
 *	from error interrupts) or from normal process context.  In the latter
 *	case it also disables any pending queue restart tasklets.  Note that
 *	if it is called in interrupt context it cannot disable the restart
 *	tasklets as it cannot wait, however the tasklets will have no effect
 *	since the doorbells are disabled and the driver will call this again
 *	later from process context, at which time the tasklets will be stopped
 *	if they are still running.
 */
void t3_sge_stop(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
	if (!in_interrupt()) {
		int i;

		for (i = 0; i < SGE_QSETS; ++i) {
			struct sge_qset *qs = &adap->sge.qs[i];

			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
		}
	}
}

/**
 *	t3_sge_init - initialize SGE
 *	@adap: the adapter
 *	@p: the SGE parameters
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queue sets here, instead the driver
 *	top-level must request those individually.  We also do not enable DMA
 *	here, that should be done after the queues have been set up.
 */
void t3_sge_init(struct adapter *adap, struct sge_params *p)
{
	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);

	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	       F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
	       V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	       V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
		     adap->params.rev < T3_REV_C ? 1000 : 500);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}

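/*
 * Illustrative note (not from the original source): the
 * V_HOSTPAGESIZE(PAGE_SHIFT - 11) term above encodes the host page size
 * for the hardware; with the common 4KB pages PAGE_SHIFT is 12, so the
 * field is written as 12 - 11 = 1.
 */
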
/**
 *	t3_sge_prep - one-time SGE initialization
 *	@adap: the associated adapter
 *	@p: SGE parameters
 *
 *	Performs one-time initialization of SGE SW state.  Includes determining
 *	defaults for the assorted SGE parameters, which admins can change until
 *	they are used to initialize the SGE.
 */
void t3_sge_prep(struct adapter *adap, struct sge_params *p)
{
	int i;

	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = p->qset + i;

		q->polling = adap->params.rev > 0;
		q->coalesce_usecs = 5;
		q->rspq_size = 1024;
		q->fl_size = 1024;
		q->jumbo_size = 512;
		q->txq_size[TXQ_ETH] = 1024;
		q->txq_size[TXQ_OFLD] = 1024;
		q->txq_size[TXQ_CTRL] = 256;
		q->cong_thres = 0;
	}

	spin_lock_init(&adap->sge.reg_lock);
}

/**
 *	t3_get_desc - dump an SGE descriptor for debugging purposes
 *	@qs: the queue set
 *	@qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
 *	@idx: the descriptor index in the queue
 *	@data: where to dump the descriptor contents
 *
 *	Dumps the contents of a HW descriptor of an SGE queue.  Returns the
 *	size of the descriptor.
 */
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
		unsigned char *data)
{
	if (qnum >= 6)
		return -EINVAL;

	if (qnum < 3) {
		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
			return -EINVAL;
		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
		return sizeof(struct tx_desc);
	}

	if (qnum == 3) {
		if (!qs->rspq.desc || idx >= qs->rspq.size)
			return -EINVAL;
		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
		return sizeof(struct rsp_desc);
	}

	qnum -= 4;
	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
		return -EINVAL;
	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
	return sizeof(struct rx_desc);
}

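/*
 * A minimal usage sketch (hypothetical): dumping descriptor 0 of the
 * Ethernet Tx queue (qnum 0) of queue set 0 into a local buffer:
 *
 *	unsigned char buf[sizeof(struct tx_desc)];
 *	int len = t3_get_desc(&adap->sge.qs[0], 0, 0, buf);
 *
 *	if (len > 0)
 *		print_hex_dump(KERN_DEBUG, "txd: ", DUMP_PREFIX_OFFSET,
 *			       16, 1, buf, len, 0);
 */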