/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx)		((_efx)->txq_entries / 2u)

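/*
 * For example, with a 1024-entry descriptor ring this threshold is 512:
 * a stopped queue is only restarted once fewer than half of the ring's
 * descriptors are still outstanding, which avoids toggling the queue on
 * and off for every completion.
 */
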
/* We need to be able to nest calls to netif_tx_stop_queue(), partly
 * because of the 2 hardware queues associated with each core queue,
 * but also so that we can inhibit TX for reasons other than a full
 * hardware queue. */
void efx_stop_queue(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);

	if (!tx_queue)
		return;

	spin_lock_bh(&channel->tx_stop_lock);
	netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");

	atomic_inc(&channel->tx_stop_count);
	netif_tx_stop_queue(
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES));

	spin_unlock_bh(&channel->tx_stop_lock);
}

/* Decrement core TX queue stop count and wake it if the count is 0 */
void efx_wake_queue(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);

	if (!tx_queue)
		return;

	local_bh_disable();
	if (atomic_dec_and_lock(&channel->tx_stop_count,
				&channel->tx_stop_lock)) {
		netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
		netif_tx_wake_queue(
			netdev_get_tx_queue(efx->net_dev,
					    tx_queue->queue / EFX_TXQ_TYPES));
		spin_unlock(&channel->tx_stop_lock);
	}
	local_bh_enable();
}

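/* Unmap a buffer's DMA mapping (single or page, whichever was used when it
 * was queued) and free the skb attached to the final fragment of a packet.
 * Used on the completion path, on the error-unwind path and on teardown. */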
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr,
					 buffer->unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr,
				       buffer->unmap_len, PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}

static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & 0xfff) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}

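/*
 * For example, a fragment mapped at a DMA address ending in 0xff8 gives
 * len = (~0xff8 & 0xfff) + 1 = 8 above, so only 8 bytes go into the first
 * descriptor and the remainder continues in a new descriptor starting on
 * the 4K boundary.
 */
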
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			/* We'll just drop the packet */
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->txq_entries - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				++tx_queue->stopped;
				/* This memory barrier protects the
				 * change of stopped from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					*(volatile unsigned *)
					&tx_queue->read_count;
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0))
					goto stop;
				smp_mb();
				--tx_queue->stopped;
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc = NETDEV_TX_BUSY;
	if (tx_queue->stopped == 1)
		efx_stop_queue(tx_queue->channel);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;

	if (unlikely(efx->port_inhibited))
		return NETDEV_TX_BUSY;

	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
				    skb->ip_summed == CHECKSUM_PARTIAL ?
				    EFX_TXQ_TYPE_OFFLOAD : 0);

	return efx_enqueue_skb(tx_queue, skb);
}

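/* Handle a TX completion: release every buffer up to and including @index,
 * then restart the core netif queue if it was stopped and the ring has
 * drained below EFX_TXQ_THRESHOLD(). */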
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct netdev_queue *queue;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of
	 * stopped. */
	smp_mb();
	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

			/* Do this under netif_tx_lock(), to avoid racing
			 * with efx_xmit(). */
			queue = netdev_get_tx_queue(
				efx->net_dev,
				tx_queue->queue / EFX_TXQ_TYPES);
			__netif_tx_lock(queue, smp_processor_id());
			if (tx_queue->stopped) {
				tx_queue->stopped = 0;
				efx_wake_queue(tx_queue->channel);
			}
			__netif_tx_unlock(queue);
		}
	}
}

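/* Allocate the software and hardware rings for a TX queue.  The ring size
 * is rounded up to a power of two so that the insert/read counters can be
 * wrapped with ptr_mask rather than a modulus. */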
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int i, rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= tx_queue->ptr_mask; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

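/* Reset the queue's counters and program the hardware descriptor ring;
 * normally called when the interface is brought up. */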
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	BUG_ON(tx_queue->stopped);

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count &
					   tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);

	/* Release queue's stop on port, if any */
	if (tx_queue->stopped) {
		tx_queue->stopped = 0;
		efx_wake_queue(tx_queue->channel);
	}
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}

/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than the GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len)					\
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128

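/*
 * A plain Ethernet + IPv4 + TCP header is 54 bytes, so on a 64-bit build
 * TSOH_SIZE(54) comes to roughly 16 + TSOH_OFFSET + 54 bytes and fits
 * comfortably in one TSOH_STD_SIZE block; only unusually long headers
 * (e.g. many TCP options) fall back to efx_tsoh_heap_alloc().
 */
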
#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};

/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
		else if (protocol == htons(ETH_P_IPV6))
			skb_set_transport_header(skb, sizeof(*veh) +
						 sizeof(struct ipv6hdr));
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}

/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
			  "Unable to allocate page for TSO headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}

/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

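/* Allocate and DMA-map a single header buffer from the heap; used only when
 * TSOH_SIZE(header_len) is too large for a free-list block. */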
static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;
	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}
	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue is full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->txq_entries - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			++tx_queue->stopped;
			/* This memory barrier protects the change of
			 * stopped from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				*(volatile unsigned *)&tx_queue->read_count;
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->txq_entries - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			--tx_queue->stopped;
		}

		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}

/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}

/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}

/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	if (st->protocol == htons(ETH_P_IP))
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	else
		st->ipv4_id = 0;
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}

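/* Map the current paged fragment for DMA and record it as the input
 * position for segmentation. */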
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
				      frag->page_offset, frag->size,
				      PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = frag->size;
		st->in_len = frag->size;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

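/* Map the part of the linear skb area that follows the headers; only needed
 * when the skb carries payload in its head buffer. */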
static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}

/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph =
			(struct iphdr *)(header + SKB_IPV4_OFF(skb));

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
	}

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}

/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc))
			goto stop;

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc2 = NETDEV_TX_BUSY;

	/* Stop the queue if it wasn't stopped before. */
	if (tx_queue->stopped == 1)
		efx_stop_queue(tx_queue->channel);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}

/*
 * Free up all TSO datastructures associated with tx_queue. This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= tx_queue->ptr_mask; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}