1 /*******************************************************************************
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2014 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************/
27 #include <linux/prefetch.h>
28 #include <net/busy_poll.h>
30 #include "i40e_prototype.h"
32 static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
35 return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
36 ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
37 ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
38 ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
39 ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
42 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
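/* build_ctob() above packs the second quad word of a Tx data descriptor:
 * the data DTYPE plus the command, header-offset, buffer-size and L2 tag
 * fields, each shifted into place. For example, build_ctob(I40E_TXD_CMD, 0,
 * 256, 0) describes a 256-byte buffer that ends a packet (EOP) and asks the
 * hardware to report completion status (RS).
 */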
43 #define I40E_FD_CLEAN_DELAY 10
45 * i40e_program_fdir_filter - Program a Flow Director filter
46 * @fdir_data: Packet data that will be filter parameters
47 * @raw_packet: the pre-allocated packet buffer for FDir
48 * @pf: The PF pointer
49 * @add: True for add/update, False for remove
51 int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
52 struct i40e_pf *pf, bool add)
54 struct i40e_filter_program_desc *fdir_desc;
55 struct i40e_tx_buffer *tx_buf, *first;
56 struct i40e_tx_desc *tx_desc;
57 struct i40e_ring *tx_ring;
58 unsigned int fpt, dcc;
66 /* find existing FDIR VSI */
68 for (i = 0; i < pf->num_alloc_vsi; i++)
69 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
74 tx_ring = vsi->tx_rings[0];
77 /* we need two descriptors to add/del a filter and we can wait */
79 if (I40E_DESC_UNUSED(tx_ring) > 1)
81 msleep_interruptible(1);
83 } while (delay < I40E_FD_CLEAN_DELAY);
85 if (!(I40E_DESC_UNUSED(tx_ring) > 1))
88 dma = dma_map_single(dev, raw_packet,
89 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
90 if (dma_mapping_error(dev, dma))
93 /* grab the next descriptor */
94 i = tx_ring->next_to_use;
95 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
96 first = &tx_ring->tx_bi[i];
97 memset(first, 0, sizeof(struct i40e_tx_buffer));
99 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
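/* Advance next_to_use with wrap-around; this is (i + 1) % count without the
 * division, e.g. index 511 wraps back to 0 on a 512-entry ring.
 */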
101 fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
102 I40E_TXD_FLTR_QW0_QINDEX_MASK;
104 fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
105 I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
107 fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
108 I40E_TXD_FLTR_QW0_PCTYPE_MASK;
110 /* Use LAN VSI Id if not programmed by user */
111 if (fdir_data->dest_vsi == 0)
112 fpt |= (pf->vsi[pf->lan_vsi]->id) <<
113 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
115 fpt |= ((u32)fdir_data->dest_vsi <<
116 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
117 I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
119 dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
122 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
123 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
125 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
126 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
128 dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
129 I40E_TXD_FLTR_QW1_DEST_MASK;
131 dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
132 I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
134 if (fdir_data->cnt_index != 0) {
135 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
136 dcc |= ((u32)fdir_data->cnt_index <<
137 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
138 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
141 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
142 fdir_desc->rsvd = cpu_to_le32(0);
143 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
144 fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
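/* The programming descriptor now carries the match/steering fields (queue
 * index, flex offset, PCTYPE, destination VSI) in 'fpt' and the add/remove
 * command, destination and counter setup in 'dcc'. fd_id is echoed back in
 * the programming status descriptor, which is how i40e_fd_handle_status()
 * below ties a reported failure to a specific rule.
 */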
146 /* Now program a dummy descriptor */
147 i = tx_ring->next_to_use;
148 tx_desc = I40E_TX_DESC(tx_ring, i);
149 tx_buf = &tx_ring->tx_bi[i];
151 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
153 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
155 /* record length, and DMA address */
156 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
157 dma_unmap_addr_set(tx_buf, dma, dma);
159 tx_desc->buffer_addr = cpu_to_le64(dma);
160 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
162 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
163 tx_buf->raw_buf = (void *)raw_packet;
165 tx_desc->cmd_type_offset_bsz =
166 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
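/* The dummy data descriptor points at the caller-supplied raw_packet
 * template; the hardware parses that frame to extract the fields the filter
 * should match, which is why the helpers below craft minimal per-protocol
 * dummy packets.
 */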
168 /* Force memory writes to complete before letting h/w
169 * know there are new descriptors to fetch.
173 /* Mark the data descriptor to be watched */
174 first->next_to_watch = tx_desc;
176 writel(tx_ring->next_to_use, tx_ring->tail);
183 #define IP_HEADER_OFFSET 14
184 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
186 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
187 * @vsi: pointer to the targeted VSI
188 * @fd_data: the flow director data required for the FDir descriptor
189 * @add: true adds a filter, false removes it
191 * Returns 0 if the filters were successfully added or removed
193 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
194 struct i40e_fdir_filter *fd_data,
197 struct i40e_pf *pf = vsi->back;
203 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
204 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
205 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
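/* The template above is a 42-byte UDP/IPv4 frame: a 14-byte Ethernet header
 * (ethertype 0x0800), a 20-byte IPv4 header (0x45 = version 4, IHL 5; total
 * length 0x1c = 28; protocol 0x11 = UDP) and an 8-byte UDP header. Addresses
 * and ports are patched in below.
 */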
207 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
210 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
212 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
213 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
214 + sizeof(struct iphdr));
216 ip->daddr = fd_data->dst_ip[0];
217 udp->dest = fd_data->dst_port;
218 ip->saddr = fd_data->src_ip[0];
219 udp->source = fd_data->src_port;
221 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
222 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
224 dev_info(&pf->pdev->dev,
225 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
226 fd_data->pctype, fd_data->fd_id, ret);
228 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
230 dev_info(&pf->pdev->dev,
231 "Filter OK for PCTYPE %d loc = %d\n",
232 fd_data->pctype, fd_data->fd_id);
234 dev_info(&pf->pdev->dev,
235 "Filter deleted for PCTYPE %d loc = %d\n",
236 fd_data->pctype, fd_data->fd_id);
238 return err ? -EOPNOTSUPP : 0;
241 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
243 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
244 * @vsi: pointer to the targeted VSI
245 * @fd_data: the flow director data required for the FDir descriptor
246 * @add: true adds a filter, false removes it
248 * Returns 0 if the filters were successfully added or removed
250 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
251 struct i40e_fdir_filter *fd_data,
254 struct i40e_pf *pf = vsi->back;
261 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
262 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
263 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
264 0x0, 0x72, 0, 0, 0, 0};
266 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
269 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
271 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
272 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
273 + sizeof(struct iphdr));
275 ip->daddr = fd_data->dst_ip[0];
276 tcp->dest = fd_data->dst_port;
277 ip->saddr = fd_data->src_ip[0];
278 tcp->source = fd_data->src_port;
282 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
283 if (I40E_DEBUG_FD & pf->hw.debug_mask)
284 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
285 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
288 pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
289 (pf->fd_tcp_rule - 1) : 0;
290 if (pf->fd_tcp_rule == 0) {
291 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
292 if (I40E_DEBUG_FD & pf->hw.debug_mask)
293 dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
297 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
298 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
301 dev_info(&pf->pdev->dev,
302 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
303 fd_data->pctype, fd_data->fd_id, ret);
305 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
307 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
308 fd_data->pctype, fd_data->fd_id);
310 dev_info(&pf->pdev->dev,
311 "Filter deleted for PCTYPE %d loc = %d\n",
312 fd_data->pctype, fd_data->fd_id);
315 return err ? -EOPNOTSUPP : 0;
319 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
320 * a specific flow spec
321 * @vsi: pointer to the targeted VSI
322 * @fd_data: the flow director data required for the FDir descriptor
323 * @add: true adds a filter, false removes it
325 * Always returns -EOPNOTSUPP
327 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
328 struct i40e_fdir_filter *fd_data,
334 #define I40E_IP_DUMMY_PACKET_LEN 34
336 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
337 * a specific flow spec
338 * @vsi: pointer to the targeted VSI
339 * @fd_data: the flow director data required for the FDir descriptor
340 * @add: true adds a filter, false removes it
342 * Returns 0 if the filters were successfully added or removed
344 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
345 struct i40e_fdir_filter *fd_data,
348 struct i40e_pf *pf = vsi->back;
354 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
355 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
358 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
359 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
360 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
363 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
364 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
366 ip->saddr = fd_data->src_ip[0];
367 ip->daddr = fd_data->dst_ip[0];
371 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
374 dev_info(&pf->pdev->dev,
375 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
376 fd_data->pctype, fd_data->fd_id, ret);
378 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
380 dev_info(&pf->pdev->dev,
381 "Filter OK for PCTYPE %d loc = %d\n",
382 fd_data->pctype, fd_data->fd_id);
384 dev_info(&pf->pdev->dev,
385 "Filter deleted for PCTYPE %d loc = %d\n",
386 fd_data->pctype, fd_data->fd_id);
390 return err ? -EOPNOTSUPP : 0;
394 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
395 * @vsi: pointer to the targeted VSI
396 * @input: the flow director filter to add or delete
397 * @add: true adds a filter, false removes it
400 int i40e_add_del_fdir(struct i40e_vsi *vsi,
401 struct i40e_fdir_filter *input, bool add)
403 struct i40e_pf *pf = vsi->back;
406 switch (input->flow_type & ~FLOW_EXT) {
408 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
411 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
414 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
417 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
420 switch (input->ip4_proto) {
422 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
425 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
428 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
431 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
436 dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
441 /* The buffer allocated here is freed by the i40e_clean_tx_ring() */
446 * i40e_fd_handle_status - check the Programming Status for FD
447 * @rx_ring: the Rx ring for this descriptor
448 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
449 * @prog_id: the id originally used for programming
451 * This is used to verify if the FD programming or invalidation
452 * requested by SW to the HW is successful or not and take actions accordingly.
454 static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
455 union i40e_rx_desc *rx_desc, u8 prog_id)
457 struct i40e_pf *pf = rx_ring->vsi->back;
458 struct pci_dev *pdev = pf->pdev;
459 u32 fcnt_prog, fcnt_avail;
463 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
464 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
465 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
467 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
468 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
469 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
470 (I40E_DEBUG_FD & pf->hw.debug_mask))
471 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
474 /* Check if the programming error is for ATR.
475 * If so, auto disable ATR and set a state for
476 * flush in progress. Next time we come here if flush is in
477 * progress do nothing, once flush is complete the state will
480 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
484 /* store the current atr filter count */
485 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
487 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
488 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
489 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
490 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
493 /* filter programming failed most likely due to table full */
494 fcnt_prog = i40e_get_global_fd_count(pf);
495 fcnt_avail = pf->fdir_pf_filter_count;
496 /* If ATR is running fcnt_prog can quickly change,
497 * if we are very close to full, it makes sense to disable
498 * FD ATR/SB and then re-enable it when there is room.
500 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
501 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
502 !(pf->auto_disable_flags &
503 I40E_FLAG_FD_SB_ENABLED)) {
504 if (I40E_DEBUG_FD & pf->hw.debug_mask)
505 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
506 pf->auto_disable_flags |=
507 I40E_FLAG_FD_SB_ENABLED;
511 "FD filter programming failed due to incorrect filter parameters\n");
513 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
514 if (I40E_DEBUG_FD & pf->hw.debug_mask)
515 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
516 rx_desc->wb.qword0.hi_dword.fd_id);
521 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
522 * @ring: the ring that owns the buffer
523 * @tx_buffer: the buffer to free
525 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
526 struct i40e_tx_buffer *tx_buffer)
528 if (tx_buffer->skb) {
529 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
530 kfree(tx_buffer->raw_buf);
532 dev_kfree_skb_any(tx_buffer->skb);
534 if (dma_unmap_len(tx_buffer, len))
535 dma_unmap_single(ring->dev,
536 dma_unmap_addr(tx_buffer, dma),
537 dma_unmap_len(tx_buffer, len),
539 } else if (dma_unmap_len(tx_buffer, len)) {
540 dma_unmap_page(ring->dev,
541 dma_unmap_addr(tx_buffer, dma),
542 dma_unmap_len(tx_buffer, len),
545 tx_buffer->next_to_watch = NULL;
546 tx_buffer->skb = NULL;
547 dma_unmap_len_set(tx_buffer, len, 0);
548 /* tx_buffer must be completely set up in the transmit path */
552 * i40e_clean_tx_ring - Free all Tx buffers in a ring
553 * @tx_ring: ring to be cleaned
555 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
557 unsigned long bi_size;
560 /* ring already cleared, nothing to do */
564 /* Free all the Tx ring sk_buffs */
565 for (i = 0; i < tx_ring->count; i++)
566 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
568 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
569 memset(tx_ring->tx_bi, 0, bi_size);
571 /* Zero out the descriptor ring */
572 memset(tx_ring->desc, 0, tx_ring->size);
574 tx_ring->next_to_use = 0;
575 tx_ring->next_to_clean = 0;
577 if (!tx_ring->netdev)
580 /* cleanup Tx queue statistics */
581 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
582 tx_ring->queue_index));
586 * i40e_free_tx_resources - Free Tx resources per queue
587 * @tx_ring: Tx descriptor ring for a specific queue
589 * Free all transmit software resources
591 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
593 i40e_clean_tx_ring(tx_ring);
594 kfree(tx_ring->tx_bi);
595 tx_ring->tx_bi = NULL;
598 dma_free_coherent(tx_ring->dev, tx_ring->size,
599 tx_ring->desc, tx_ring->dma);
600 tx_ring->desc = NULL;
605 * i40e_get_tx_pending - how many tx descriptors not processed
606 * @ring: the ring of descriptors
608 * Since there is no access to the ring head register
609 * in XL710, we need to use our local copies
611 u32 i40e_get_tx_pending(struct i40e_ring *ring)
615 head = i40e_get_head(ring);
616 tail = readl(ring->tail);
619 return (head < tail) ?
620 tail - head : (tail + ring->count - head);
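/* e.g. on a 512-entry ring with head = 500 and tail = 10, the pending count
 * is 10 + 512 - 500 = 22 descriptors.
 */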
625 #define WB_STRIDE 0x3
628 * i40e_clean_tx_irq - Reclaim resources after transmit completes
629 * @tx_ring: tx ring to clean
630 * @budget: how many cleans we're allowed
632 * Returns true if there's any budget left (i.e. the clean is finished)
634 static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
636 u16 i = tx_ring->next_to_clean;
637 struct i40e_tx_buffer *tx_buf;
638 struct i40e_tx_desc *tx_head;
639 struct i40e_tx_desc *tx_desc;
640 unsigned int total_packets = 0;
641 unsigned int total_bytes = 0;
643 tx_buf = &tx_ring->tx_bi[i];
644 tx_desc = I40E_TX_DESC(tx_ring, i);
647 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
650 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
652 /* if next_to_watch is not set then there is no work pending */
656 /* prevent any other reads prior to eop_desc */
657 read_barrier_depends();
659 /* we have caught up to head, no work left to do */
660 if (tx_head == tx_desc)
663 /* clear next_to_watch to prevent false hangs */
664 tx_buf->next_to_watch = NULL;
666 /* update the statistics for this packet */
667 total_bytes += tx_buf->bytecount;
668 total_packets += tx_buf->gso_segs;
671 dev_consume_skb_any(tx_buf->skb);
673 /* unmap skb header data */
674 dma_unmap_single(tx_ring->dev,
675 dma_unmap_addr(tx_buf, dma),
676 dma_unmap_len(tx_buf, len),
679 /* clear tx_buffer data */
681 dma_unmap_len_set(tx_buf, len, 0);
683 /* unmap remaining buffers */
684 while (tx_desc != eop_desc) {
691 tx_buf = tx_ring->tx_bi;
692 tx_desc = I40E_TX_DESC(tx_ring, 0);
695 /* unmap any remaining paged data */
696 if (dma_unmap_len(tx_buf, len)) {
697 dma_unmap_page(tx_ring->dev,
698 dma_unmap_addr(tx_buf, dma),
699 dma_unmap_len(tx_buf, len),
701 dma_unmap_len_set(tx_buf, len, 0);
705 /* move us one more past the eop_desc for start of next pkt */
711 tx_buf = tx_ring->tx_bi;
712 tx_desc = I40E_TX_DESC(tx_ring, 0);
717 /* update budget accounting */
719 } while (likely(budget));
722 tx_ring->next_to_clean = i;
723 u64_stats_update_begin(&tx_ring->syncp);
724 tx_ring->stats.bytes += total_bytes;
725 tx_ring->stats.packets += total_packets;
726 u64_stats_update_end(&tx_ring->syncp);
727 tx_ring->q_vector->tx.total_bytes += total_bytes;
728 tx_ring->q_vector->tx.total_packets += total_packets;
730 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
733 /* check to see if there are < 4 descriptors
734 * waiting to be written back, then kick the hardware to force
735 * them to be written back in case we stay in NAPI.
736 * In this mode on X722 we do not enable Interrupt.
738 j = i40e_get_tx_pending(tx_ring);
741 ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
742 !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
743 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
744 tx_ring->arm_wb = true;
747 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
748 tx_ring->queue_index),
749 total_packets, total_bytes);
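/* netdev_tx_completed_queue() feeds these totals into the stack's byte queue
 * limits (BQL) accounting, which sizes how much data may be queued on this
 * Tx queue.
 */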
751 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
752 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
753 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
754 /* Make sure that anybody stopping the queue after this
755 * sees the new next_to_clean.
758 if (__netif_subqueue_stopped(tx_ring->netdev,
759 tx_ring->queue_index) &&
760 !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
761 netif_wake_subqueue(tx_ring->netdev,
762 tx_ring->queue_index);
763 ++tx_ring->tx_stats.restart_queue;
771 * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
772 * @vsi: the VSI we care about
773 * @q_vector: the vector on which to force writeback
776 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
778 u16 flags = q_vector->tx.ring[0].flags;
780 if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
783 if (q_vector->arm_wb_state)
786 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;
789 I40E_PFINT_DYN_CTLN(q_vector->v_idx +
790 vsi->base_vector - 1),
792 q_vector->arm_wb_state = true;
793 } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
794 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
795 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
796 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
797 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
798 /* allow 00 to be written to the index */
801 I40E_PFINT_DYN_CTLN(q_vector->v_idx +
802 vsi->base_vector - 1), val);
804 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
805 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
806 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
807 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
808 /* allow 00 to be written to the index */
810 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
815 * i40e_set_new_dynamic_itr - Find new ITR level
816 * @rc: structure containing ring performance data
818 * Stores a new ITR value based on packets and byte counts during
819 * the last interrupt. The advantage of per interrupt computation
820 * is faster updates and more accurate ITR for the current traffic
821 * pattern. Constants in this function were computed based on
822 * theoretical maximum wire speed and thresholds were set based on
823 * testing data as well as attempting to minimize response time
824 * while increasing bulk throughput.
826 static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
828 enum i40e_latency_range new_latency_range = rc->latency_range;
829 u32 new_itr = rc->itr;
832 if (rc->total_packets == 0 || !rc->itr)
835 /* simple throttlerate management
836 * 0-10MB/s lowest (100000 ints/s)
837 * 10-20MB/s low (20000 ints/s)
838 * 20-1249MB/s bulk (8000 ints/s)
840 bytes_per_int = rc->total_bytes / rc->itr;
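/* bytes_per_int is a rough rate proxy: bytes seen since the last adjustment
 * divided by the current ITR register value. The 10/20 thresholds below
 * approximate the 10 MB/s and 20 MB/s boundaries in the table above
 * (assuming the usual 2-usec ITR granularity).
 */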
841 switch (new_latency_range) {
842 case I40E_LOWEST_LATENCY:
843 if (bytes_per_int > 10)
844 new_latency_range = I40E_LOW_LATENCY;
846 case I40E_LOW_LATENCY:
847 if (bytes_per_int > 20)
848 new_latency_range = I40E_BULK_LATENCY;
849 else if (bytes_per_int <= 10)
850 new_latency_range = I40E_LOWEST_LATENCY;
852 case I40E_BULK_LATENCY:
853 if (bytes_per_int <= 20)
854 new_latency_range = I40E_LOW_LATENCY;
857 if (bytes_per_int <= 20)
858 new_latency_range = I40E_LOW_LATENCY;
861 rc->latency_range = new_latency_range;
863 switch (new_latency_range) {
864 case I40E_LOWEST_LATENCY:
865 new_itr = I40E_ITR_100K;
867 case I40E_LOW_LATENCY:
868 new_itr = I40E_ITR_20K;
870 case I40E_BULK_LATENCY:
871 new_itr = I40E_ITR_8K;
877 if (new_itr != rc->itr)
881 rc->total_packets = 0;
885 * i40e_clean_programming_status - clean the programming status descriptor
886 * @rx_ring: the rx ring that has this descriptor
887 * @rx_desc: the rx descriptor written back by HW
889 * Flow director should handle FD_FILTER_STATUS to check its filter programming
890 * status being successful or not and take actions accordingly. FCoE should
891 * handle its context/filter programming/invalidation status and take actions.
894 static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
895 union i40e_rx_desc *rx_desc)
900 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
901 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
902 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
904 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
905 i40e_fd_handle_status(rx_ring, rx_desc, id);
907 else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
908 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
909 i40e_fcoe_handle_status(rx_ring, rx_desc, id);
914 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
915 * @tx_ring: the tx ring to set up
917 * Return 0 on success, negative on error
919 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
921 struct device *dev = tx_ring->dev;
927 /* warn if we are about to overwrite the pointer */
928 WARN_ON(tx_ring->tx_bi);
929 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
930 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
934 /* round up to nearest 4K */
935 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
936 /* add u32 for head writeback, align after this takes care of
937 * guaranteeing this is at least one cache line in size
939 tx_ring->size += sizeof(u32);
940 tx_ring->size = ALIGN(tx_ring->size, 4096);
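/* e.g. a 512-entry ring needs 512 * 16 = 8192 bytes of descriptors plus
 * 4 bytes for the head write-back word, which rounds up to a 12288-byte
 * (three page) DMA allocation.
 */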
941 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
942 &tx_ring->dma, GFP_KERNEL);
943 if (!tx_ring->desc) {
944 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
949 tx_ring->next_to_use = 0;
950 tx_ring->next_to_clean = 0;
954 kfree(tx_ring->tx_bi);
955 tx_ring->tx_bi = NULL;
960 * i40e_clean_rx_ring - Free Rx buffers
961 * @rx_ring: ring to be cleaned
963 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
965 struct device *dev = rx_ring->dev;
966 struct i40e_rx_buffer *rx_bi;
967 unsigned long bi_size;
970 /* ring already cleared, nothing to do */
974 if (ring_is_ps_enabled(rx_ring)) {
975 int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
977 rx_bi = &rx_ring->rx_bi[0];
978 if (rx_bi->hdr_buf) {
979 dma_free_coherent(dev,
983 for (i = 0; i < rx_ring->count; i++) {
984 rx_bi = &rx_ring->rx_bi[i];
986 rx_bi->hdr_buf = NULL;
990 /* Free all the Rx ring sk_buffs */
991 for (i = 0; i < rx_ring->count; i++) {
992 rx_bi = &rx_ring->rx_bi[i];
994 dma_unmap_single(dev,
1001 dev_kfree_skb(rx_bi->skb);
1005 if (rx_bi->page_dma) {
1010 rx_bi->page_dma = 0;
1012 __free_page(rx_bi->page);
1014 rx_bi->page_offset = 0;
1018 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1019 memset(rx_ring->rx_bi, 0, bi_size);
1021 /* Zero out the descriptor ring */
1022 memset(rx_ring->desc, 0, rx_ring->size);
1024 rx_ring->next_to_clean = 0;
1025 rx_ring->next_to_use = 0;
1029 * i40e_free_rx_resources - Free Rx resources
1030 * @rx_ring: ring to clean the resources from
1032 * Free all receive software resources
1034 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1036 i40e_clean_rx_ring(rx_ring);
1037 kfree(rx_ring->rx_bi);
1038 rx_ring->rx_bi = NULL;
1040 if (rx_ring->desc) {
1041 dma_free_coherent(rx_ring->dev, rx_ring->size,
1042 rx_ring->desc, rx_ring->dma);
1043 rx_ring->desc = NULL;
1048 * i40e_alloc_rx_headers - allocate rx header buffers
1049 * @rx_ring: ring to alloc buffers
1051 * Allocate rx header buffers for the entire ring. As these are static,
1052 * this is only called when setting up a new ring.
1054 void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
1056 struct device *dev = rx_ring->dev;
1057 struct i40e_rx_buffer *rx_bi;
1063 if (rx_ring->rx_bi[0].hdr_buf)
1065 /* Make sure the buffers don't cross cache line boundaries. */
1066 buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
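/* One coherent block of buf_size * count bytes is allocated and carved into
 * per-descriptor header slots below; rounding each slot up to a multiple of
 * 256 keeps adjacent slots from sharing a cache line.
 */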
1067 buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
1071 for (i = 0; i < rx_ring->count; i++) {
1072 rx_bi = &rx_ring->rx_bi[i];
1073 rx_bi->dma = dma + (i * buf_size);
1074 rx_bi->hdr_buf = buffer + (i * buf_size);
1079 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1080 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1082 * Returns 0 on success, negative on failure
1084 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1086 struct device *dev = rx_ring->dev;
1089 /* warn if we are about to overwrite the pointer */
1090 WARN_ON(rx_ring->rx_bi);
1091 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1092 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1093 if (!rx_ring->rx_bi)
1096 u64_stats_init(&rx_ring->syncp);
1098 /* Round up to nearest 4K */
1099 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
1100 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
1101 : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1102 rx_ring->size = ALIGN(rx_ring->size, 4096);
1103 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1104 &rx_ring->dma, GFP_KERNEL);
1106 if (!rx_ring->desc) {
1107 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1112 rx_ring->next_to_clean = 0;
1113 rx_ring->next_to_use = 0;
1117 kfree(rx_ring->rx_bi);
1118 rx_ring->rx_bi = NULL;
1123 * i40e_release_rx_desc - Store the new tail and head values
1124 * @rx_ring: ring to bump
1125 * @val: new tail index (the next descriptor the driver will fill)
1127 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1129 rx_ring->next_to_use = val;
1130 /* Force memory writes to complete before letting h/w
1131 * know there are new descriptors to fetch. (Only
1132 * applicable for weak-ordered memory model archs,
1136 writel(val, rx_ring->tail);
1140 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
1141 * @rx_ring: ring to place buffers on
1142 * @cleaned_count: number of buffers to replace
1144 void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
1146 u16 i = rx_ring->next_to_use;
1147 union i40e_rx_desc *rx_desc;
1148 struct i40e_rx_buffer *bi;
1150 /* do nothing if no valid netdev defined */
1151 if (!rx_ring->netdev || !cleaned_count)
1154 while (cleaned_count--) {
1155 rx_desc = I40E_RX_DESC(rx_ring, i);
1156 bi = &rx_ring->rx_bi[i];
1158 if (bi->skb) /* desc is in use */
1161 bi->page = alloc_page(GFP_ATOMIC);
1163 rx_ring->rx_stats.alloc_page_failed++;
1168 if (!bi->page_dma) {
1169 /* use a half page if we're re-using */
1170 bi->page_offset ^= PAGE_SIZE / 2;
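/* Packet-split pages are used half a page at a time: XOR-ing the offset
 * flips between 0 and PAGE_SIZE/2 (2048 on 4K-page systems), so a page whose
 * other half is still in flight can back a new descriptor.
 */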
1171 bi->page_dma = dma_map_page(rx_ring->dev,
1176 if (dma_mapping_error(rx_ring->dev,
1178 rx_ring->rx_stats.alloc_page_failed++;
1184 dma_sync_single_range_for_device(rx_ring->dev,
1187 rx_ring->rx_hdr_len,
1189 /* Refresh the desc even if buffer_addrs didn't change
1190 * because each write-back erases this info.
1192 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1193 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1195 if (i == rx_ring->count)
1200 if (rx_ring->next_to_use != i)
1201 i40e_release_rx_desc(rx_ring, i);
1205 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
1206 * @rx_ring: ring to place buffers on
1207 * @cleaned_count: number of buffers to replace
1209 void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
1211 u16 i = rx_ring->next_to_use;
1212 union i40e_rx_desc *rx_desc;
1213 struct i40e_rx_buffer *bi;
1214 struct sk_buff *skb;
1216 /* do nothing if no valid netdev defined */
1217 if (!rx_ring->netdev || !cleaned_count)
1220 while (cleaned_count--) {
1221 rx_desc = I40E_RX_DESC(rx_ring, i);
1222 bi = &rx_ring->rx_bi[i];
1226 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1227 rx_ring->rx_buf_len);
1229 rx_ring->rx_stats.alloc_buff_failed++;
1232 /* initialize queue mapping */
1233 skb_record_rx_queue(skb, rx_ring->queue_index);
1238 bi->dma = dma_map_single(rx_ring->dev,
1240 rx_ring->rx_buf_len,
1242 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
1243 rx_ring->rx_stats.alloc_buff_failed++;
1249 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1250 rx_desc->read.hdr_addr = 0;
1252 if (i == rx_ring->count)
1257 if (rx_ring->next_to_use != i)
1258 i40e_release_rx_desc(rx_ring, i);
1262 * i40e_receive_skb - Send a completed packet up the stack
1263 * @rx_ring: rx ring in play
1264 * @skb: packet to send up
1265 * @vlan_tag: vlan tag for packet
1267 static void i40e_receive_skb(struct i40e_ring *rx_ring,
1268 struct sk_buff *skb, u16 vlan_tag)
1270 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1271 struct i40e_vsi *vsi = rx_ring->vsi;
1272 u64 flags = vsi->back->flags;
1274 if (vlan_tag & VLAN_VID_MASK)
1275 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1277 if (flags & I40E_FLAG_IN_NETPOLL)
1280 napi_gro_receive(&q_vector->napi, skb);
1284 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1285 * @vsi: the VSI we care about
1286 * @skb: skb currently being received and modified
1287 * @rx_status: status value of last descriptor in packet
1288 * @rx_error: error value of last descriptor in packet
1289 * @rx_ptype: ptype value of last descriptor in packet
1291 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1292 struct sk_buff *skb,
1297 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1298 bool ipv4 = false, ipv6 = false;
1299 bool ipv4_tunnel, ipv6_tunnel;
1304 ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1305 (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
1306 ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1307 (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
1309 skb->ip_summed = CHECKSUM_NONE;
1311 /* Rx csum enabled and ip headers found? */
1312 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1315 /* did the hardware decode the packet and checksum? */
1316 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1319 /* both known and outer_ip must be set for the below code to work */
1320 if (!(decoded.known && decoded.outer_ip))
1323 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1324 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
1326 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1327 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
1331 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1332 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1335 /* likely incorrect csum if alternate IP extension headers found */
1337 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1338 /* don't increment checksum err here, non-fatal err */
1341 /* there was some L4 error, count error and punt packet to the stack */
1342 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1345 /* handle packets that were not able to be checksummed due
1346 * to arrival speed, in this case the stack can compute
1349 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1352 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1353 * it in the driver, hardware does not do it for us.
1354 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1355 * so the total length of IPv4 header is IHL*4 bytes
1356 * The UDP_0 bit *may* be set if the *inner* header is UDP
1358 if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
1360 skb->transport_header = skb->mac_header +
1361 sizeof(struct ethhdr) +
1362 (ip_hdr(skb)->ihl * 4);
1364 /* Add 4 bytes for VLAN tagged packets */
1365 skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
1366 skb->protocol == htons(ETH_P_8021AD))
1369 if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
1370 (udp_hdr(skb)->check != 0)) {
1371 rx_udp_csum = udp_csum(skb);
1373 csum = csum_tcpudp_magic(
1374 iph->saddr, iph->daddr,
1375 (skb->len - skb_transport_offset(skb)),
1376 IPPROTO_UDP, rx_udp_csum);
1378 if (udp_hdr(skb)->check != csum)
1381 } /* else it's GRE and so there is no outer UDP header */
1384 skb->ip_summed = CHECKSUM_UNNECESSARY;
1385 skb->csum_level = ipv4_tunnel || ipv6_tunnel;
1390 vsi->back->hw_csum_rx_error++;
1394 * i40e_rx_hash - returns the hash value from the Rx descriptor
1395 * @ring: descriptor ring
1396 * @rx_desc: specific descriptor
1398 static inline u32 i40e_rx_hash(struct i40e_ring *ring,
1399 union i40e_rx_desc *rx_desc)
1401 const __le64 rss_mask =
1402 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1403 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1405 if ((ring->netdev->features & NETIF_F_RXHASH) &&
1406 (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
1407 return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1413 * i40e_ptype_to_hash - get a hash type
1414 * @ptype: the ptype value from the descriptor
1416 * Returns a hash type to be used by skb_set_hash
1418 static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
1420 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1423 return PKT_HASH_TYPE_NONE;
1425 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1426 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1427 return PKT_HASH_TYPE_L4;
1428 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1429 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1430 return PKT_HASH_TYPE_L3;
1432 return PKT_HASH_TYPE_L2;
1436 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
1437 * @rx_ring: rx ring to clean
1438 * @budget: how many cleans we're allowed
1440 * Returns the number of packets cleaned
1442 static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
1444 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1445 u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
1446 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1447 const int current_node = numa_mem_id();
1448 struct i40e_vsi *vsi = rx_ring->vsi;
1449 u16 i = rx_ring->next_to_clean;
1450 union i40e_rx_desc *rx_desc;
1451 u32 rx_error, rx_status;
1459 struct i40e_rx_buffer *rx_bi;
1460 struct sk_buff *skb;
1462 /* return some buffers to hardware, one at a time is too slow */
1463 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1464 i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
1468 i = rx_ring->next_to_clean;
1469 rx_desc = I40E_RX_DESC(rx_ring, i);
1470 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1471 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1472 I40E_RXD_QW1_STATUS_SHIFT;
1474 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
1477 /* This memory barrier is needed to keep us from reading
1478 * any other fields out of the rx_desc until we know the
1482 if (i40e_rx_is_programming_status(qword)) {
1483 i40e_clean_programming_status(rx_ring, rx_desc);
1484 I40E_RX_INCREMENT(rx_ring, i);
1487 rx_bi = &rx_ring->rx_bi[i];
1490 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1491 rx_ring->rx_hdr_len);
1493 rx_ring->rx_stats.alloc_buff_failed++;
1497 /* initialize queue mapping */
1498 skb_record_rx_queue(skb, rx_ring->queue_index);
1499 /* we are reusing so sync this buffer for CPU use */
1500 dma_sync_single_range_for_cpu(rx_ring->dev,
1503 rx_ring->rx_hdr_len,
1506 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1507 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1508 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
1509 I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
1510 rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
1511 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
1513 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1514 I40E_RXD_QW1_ERROR_SHIFT;
1515 rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1516 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1518 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1519 I40E_RXD_QW1_PTYPE_SHIFT;
1520 prefetch(rx_bi->page);
1523 if (rx_hbo || rx_sph) {
1527 len = I40E_RX_HDR_SIZE;
1529 len = rx_header_len;
1530 memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
1531 } else if (skb->len == 0) {
1534 len = (rx_packet_len > skb_headlen(skb) ?
1535 skb_headlen(skb) : rx_packet_len);
1536 memcpy(__skb_put(skb, len),
1537 rx_bi->page + rx_bi->page_offset,
1539 rx_bi->page_offset += len;
1540 rx_packet_len -= len;
1543 /* Get the rest of the data if this was a header split */
1544 if (rx_packet_len) {
1545 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1550 skb->len += rx_packet_len;
1551 skb->data_len += rx_packet_len;
1552 skb->truesize += rx_packet_len;
1554 if ((page_count(rx_bi->page) == 1) &&
1555 (page_to_nid(rx_bi->page) == current_node))
1556 get_page(rx_bi->page);
1560 dma_unmap_page(rx_ring->dev,
1564 rx_bi->page_dma = 0;
1566 I40E_RX_INCREMENT(rx_ring, i);
1569 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1570 struct i40e_rx_buffer *next_buffer;
1572 next_buffer = &rx_ring->rx_bi[i];
1573 next_buffer->skb = skb;
1574 rx_ring->rx_stats.non_eop_descs++;
1578 /* ERR_MASK will only have valid bits if EOP set */
1579 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1580 dev_kfree_skb_any(skb);
1584 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1585 i40e_ptype_to_hash(rx_ptype));
1586 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1587 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1588 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1589 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1590 rx_ring->last_rx_timestamp = jiffies;
1593 /* probably a little skewed due to removing CRC */
1594 total_rx_bytes += skb->len;
1597 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1599 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1601 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1602 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1605 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1606 dev_kfree_skb_any(skb);
1610 skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
1611 i40e_receive_skb(rx_ring, skb, vlan_tag);
1613 rx_desc->wb.qword1.status_error_len = 0;
1615 } while (likely(total_rx_packets < budget));
1617 u64_stats_update_begin(&rx_ring->syncp);
1618 rx_ring->stats.packets += total_rx_packets;
1619 rx_ring->stats.bytes += total_rx_bytes;
1620 u64_stats_update_end(&rx_ring->syncp);
1621 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1622 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1624 return total_rx_packets;
1628 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
1629 * @rx_ring: rx ring to clean
1630 * @budget: how many cleans we're allowed
1632 * Returns number of packets cleaned
1634 static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
1636 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1637 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1638 struct i40e_vsi *vsi = rx_ring->vsi;
1639 union i40e_rx_desc *rx_desc;
1640 u32 rx_error, rx_status;
1647 struct i40e_rx_buffer *rx_bi;
1648 struct sk_buff *skb;
1650 /* return some buffers to hardware, one at a time is too slow */
1651 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1652 i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
1656 i = rx_ring->next_to_clean;
1657 rx_desc = I40E_RX_DESC(rx_ring, i);
1658 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1659 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1660 I40E_RXD_QW1_STATUS_SHIFT;
1662 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
1665 /* This memory barrier is needed to keep us from reading
1666 * any other fields out of the rx_desc until we know the
1671 if (i40e_rx_is_programming_status(qword)) {
1672 i40e_clean_programming_status(rx_ring, rx_desc);
1673 I40E_RX_INCREMENT(rx_ring, i);
1676 rx_bi = &rx_ring->rx_bi[i];
1677 skb = rx_bi->skb;
1678 prefetch(skb->data);
1680 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1681 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1683 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1684 I40E_RXD_QW1_ERROR_SHIFT;
1685 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1687 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1688 I40E_RXD_QW1_PTYPE_SHIFT;
1692 /* Get the header and possibly the whole packet
1693 * If this is an skb from previous receive dma will be 0
1695 skb_put(skb, rx_packet_len);
1696 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
1700 I40E_RX_INCREMENT(rx_ring, i);
1703 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1704 rx_ring->rx_stats.non_eop_descs++;
1708 /* ERR_MASK will only have valid bits if EOP set */
1709 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1710 dev_kfree_skb_any(skb);
1714 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1715 i40e_ptype_to_hash(rx_ptype));
1716 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1717 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1718 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1719 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1720 rx_ring->last_rx_timestamp = jiffies;
1723 /* probably a little skewed due to removing CRC */
1724 total_rx_bytes += skb->len;
1727 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1729 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1731 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1732 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1735 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1736 dev_kfree_skb_any(skb);
1740 i40e_receive_skb(rx_ring, skb, vlan_tag);
1742 rx_desc->wb.qword1.status_error_len = 0;
1743 } while (likely(total_rx_packets < budget));
1745 u64_stats_update_begin(&rx_ring->syncp);
1746 rx_ring->stats.packets += total_rx_packets;
1747 rx_ring->stats.bytes += total_rx_bytes;
1748 u64_stats_update_end(&rx_ring->syncp);
1749 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1750 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1752 return total_rx_packets;
1756 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1757 * @vsi: the VSI we care about
1758 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1761 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1762 struct i40e_q_vector *q_vector)
1764 struct i40e_hw *hw = &vsi->back->hw;
1769 vector = (q_vector->v_idx + vsi->base_vector);
1770 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
1771 old_itr = q_vector->rx.itr;
1772 i40e_set_new_dynamic_itr(&q_vector->rx);
1773 if (old_itr != q_vector->rx.itr) {
1774 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1775 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1777 I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1778 (q_vector->rx.itr <<
1779 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1781 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1782 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1784 I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1786 if (!test_bit(__I40E_DOWN, &vsi->state))
1787 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
1789 i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
1791 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
1792 old_itr = q_vector->tx.itr;
1793 i40e_set_new_dynamic_itr(&q_vector->tx);
1794 if (old_itr != q_vector->tx.itr) {
1795 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1796 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1798 I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1799 (q_vector->tx.itr <<
1800 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1802 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1803 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1805 I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1807 if (!test_bit(__I40E_DOWN, &vsi->state))
1808 wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
1809 vsi->base_vector - 1), val);
1811 i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
1816 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1817 * @napi: napi struct with our devices info in it
1818 * @budget: amount of work driver is allowed to do this pass, in packets
1820 * This function will clean all queues associated with a q_vector.
1822 * Returns the amount of work done
1824 int i40e_napi_poll(struct napi_struct *napi, int budget)
1826 struct i40e_q_vector *q_vector =
1827 container_of(napi, struct i40e_q_vector, napi);
1828 struct i40e_vsi *vsi = q_vector->vsi;
1829 struct i40e_ring *ring;
1830 bool clean_complete = true;
1831 bool arm_wb = false;
1832 int budget_per_ring;
1835 if (test_bit(__I40E_DOWN, &vsi->state)) {
1836 napi_complete(napi);
1840 /* Since the actual Tx work is minimal, we can give the Tx a larger
1841 * budget and be more aggressive about cleaning up the Tx descriptors.
1843 i40e_for_each_ring(ring, q_vector->tx) {
1844 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
1845 arm_wb |= ring->arm_wb;
1846 ring->arm_wb = false;
1849 /* We attempt to distribute budget to each Rx queue fairly, but don't
1850 * allow the budget to go below 1 because that would exit polling early.
1852 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
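/* e.g. a NAPI budget of 64 across 4 ring pairs gives each Rx ring a per-pass
 * budget of 16 packets.
 */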
1854 i40e_for_each_ring(ring, q_vector->rx) {
1855 if (ring_is_ps_enabled(ring))
1856 cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
1858 cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
1859 /* if we didn't clean as many as budgeted, we must be done */
1860 clean_complete &= (budget_per_ring != cleaned);
1863 /* If work not completed, return budget and polling will return */
1864 if (!clean_complete) {
1866 i40e_force_wb(vsi, q_vector);
1870 if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
1871 q_vector->arm_wb_state = false;
1873 /* Work is done so exit the polling mode and re-enable the interrupt */
1874 napi_complete(napi);
1875 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1876 i40e_update_enable_itr(vsi, q_vector);
1877 } else { /* Legacy mode */
1878 struct i40e_hw *hw = &vsi->back->hw;
1879 /* We re-enable the queue 0 cause, but
1880 * don't worry about dynamic_enable
1881 * because we left it on for the other
1882 * possible interrupts during napi
1884 u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
1885 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1887 wr32(hw, I40E_QINT_RQCTL(0), qval);
1888 qval = rd32(hw, I40E_QINT_TQCTL(0)) |
1889 I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1890 wr32(hw, I40E_QINT_TQCTL(0), qval);
1891 i40e_irq_dynamic_enable_icr0(vsi->back);
1897 * i40e_atr - Add a Flow Director ATR filter
1898 * @tx_ring: ring to add programming descriptor to
1899 * @skb: send buffer
1900 * @tx_flags: send tx flags
1901 * @protocol: wire protocol
1903 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1904 u32 tx_flags, __be16 protocol)
1906 struct i40e_filter_program_desc *fdir_desc;
1907 struct i40e_pf *pf = tx_ring->vsi->back;
1909 unsigned char *network;
1911 struct ipv6hdr *ipv6;
1915 u32 flex_ptype, dtype_cmd;
1918 /* make sure ATR is enabled */
1919 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
1922 if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1925 /* if sampling is disabled do nothing */
1926 if (!tx_ring->atr_sample_rate)
1929 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
1932 if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
1933 /* snag network header to get L4 type and address */
1934 hdr.network = skb_network_header(skb);
1936 /* Currently only IPv4/IPv6 with TCP is supported
1937 * access ihl as u8 to avoid unaligned access on ia64
1939 if (tx_flags & I40E_TX_FLAGS_IPV4)
1940 hlen = (hdr.network[0] & 0x0F) << 2;
1941 else if (protocol == htons(ETH_P_IPV6))
1942 hlen = sizeof(struct ipv6hdr);
1946 hdr.network = skb_inner_network_header(skb);
1947 hlen = skb_inner_network_header_len(skb);
1950 /* Currently only IPv4/IPv6 with TCP is supported
1951 * Note: tx_flags gets modified to reflect inner protocols in
1952 * tx_enable_csum function if encap is enabled.
1954 if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
1955 (hdr.ipv4->protocol != IPPROTO_TCP))
1957 else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
1958 (hdr.ipv6->nexthdr != IPPROTO_TCP))
1961 th = (struct tcphdr *)(hdr.network + hlen);
1963 /* Due to lack of space, no more new filters can be programmed */
1964 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1966 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
1967 /* HW ATR eviction will take care of removing filters on FIN
1970 if (th->fin || th->rst)
1974 tx_ring->atr_count++;
1976 /* sample on all syn/fin/rst packets or once every atr sample rate */
1980 (tx_ring->atr_count < tx_ring->atr_sample_rate))
1983 tx_ring->atr_count = 0;
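/* At this point the flow is being sampled: a filter programming descriptor
 * is placed inline in the Tx ring so the hardware adds (or, on FIN/RST,
 * removes) an ATR filter steering the peer's replies back to this queue.
 */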
1985 /* grab the next descriptor */
1986 i = tx_ring->next_to_use;
1987 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
1989 i++;
1990 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1992 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1993 I40E_TXD_FLTR_QW0_QINDEX_MASK;
1994 flex_ptype |= (protocol == htons(ETH_P_IP)) ?
1995 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1996 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1997 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1998 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2000 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2002 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2004 dtype_cmd |= (th->fin || th->rst) ?
2005 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2006 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2007 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2008 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2010 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2011 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2013 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2014 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2016 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2017 if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
2019 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2020 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2021 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2024 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2025 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2026 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2028 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
2029 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2031 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2032 fdir_desc->rsvd = cpu_to_le32(0);
2033 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2034 fdir_desc->fd_id = cpu_to_le32(0);
2038 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2040 * @tx_ring: ring to send buffer on
2041 * @flags: the tx flags to be set
2043 * Checks the skb and set up correspondingly several generic transmit flags
2044 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2046 * Returns an error code to indicate the frame should be dropped upon error,
2047 * otherwise returns 0 to indicate the flags have been set properly.
2049 #ifdef I40E_FCOE
2050 inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2051 struct i40e_ring *tx_ring,
2052 u32 *flags)
2053 #else
2054 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2055 struct i40e_ring *tx_ring,
2056 u32 *flags)
2057 #endif
2059 __be16 protocol = skb->protocol;
2062 if (protocol == htons(ETH_P_8021Q) &&
2063 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2064 /* When HW VLAN acceleration is turned off by the user the
2065 * stack sets the protocol to 8021q so that the driver
2066 * can take any steps required to support the SW only
2067 * VLAN handling. In our case the driver doesn't need
2068 * to take any further steps so just set the protocol
2069 * to the encapsulated ethertype.
2071 skb->protocol = vlan_get_protocol(skb);
2075 /* if we have a HW VLAN tag being added, default to the HW one */
2076 if (skb_vlan_tag_present(skb)) {
2077 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2078 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2079 /* else if it is a SW VLAN, check the next protocol and store the tag */
2080 } else if (protocol == htons(ETH_P_8021Q)) {
2081 struct vlan_hdr *vhdr, _vhdr;
2083 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2087 protocol = vhdr->h_vlan_encapsulated_proto;
2088 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2089 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2092 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2095 /* Insert 802.1p priority into VLAN header */
2096 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2097 (skb->priority != TC_PRIO_CONTROL)) {
2098 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2099 tx_flags |= (skb->priority & 0x7) <<
2100 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2101 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2102 struct vlan_ethhdr *vhdr;
2105 rc = skb_cow_head(skb, 0);
2108 vhdr = (struct vlan_ethhdr *)skb->data;
2109 vhdr->h_vlan_TCI = htons(tx_flags >>
2110 I40E_TX_FLAGS_VLAN_SHIFT);
2112 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2122 * i40e_tso - set up the tso context descriptor
2123 * @tx_ring: ptr to the ring to send
2124 * @skb: ptr to the skb we're sending
2125 * @hdr_len: ptr to the size of the packet header
2126 * @cd_tunneling: ptr to context descriptor bits
2128 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2130 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
2131 u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
2134 u32 cd_cmd, cd_tso_len, cd_mss;
2135 struct ipv6hdr *ipv6h;
2136 struct tcphdr *tcph;
2141 if (!skb_is_gso(skb))
2144 err = skb_cow_head(skb, 0);
2148 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
2149 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
2151 if (iph->version == 4) {
2152 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
2155 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
2157 } else if (ipv6h->version == 6) {
2158 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
2159 ipv6h->payload_len = 0;
2160 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
2164 l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
2165 *hdr_len = (skb->encapsulation
2166 ? (skb_inner_transport_header(skb) - skb->data)
2167 : skb_transport_offset(skb)) + l4len;
2169 /* find the field values */
2170 cd_cmd = I40E_TX_CTX_DESC_TSO;
2171 cd_tso_len = skb->len - *hdr_len;
2172 cd_mss = skb_shinfo(skb)->gso_size;
2173 *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				((u64)cd_tso_len <<
				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2176 ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
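
	/* Illustrative packing of the TSO context descriptor QW1
	 * (hypothetical values): for a GSO skb with a 66-byte header and an
	 * MSS of 1448, cd_tso_len = skb->len - 66 and cd_mss = 1448, i.e.
	 *
	 *	qw1 = ((u64)I40E_TX_CTX_DESC_TSO << I40E_TXD_CTX_QW1_CMD_SHIFT) |
	 *	      ((u64)cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
	 *	      ((u64)1448 << I40E_TXD_CTX_QW1_MSS_SHIFT);
	 *
	 * so the hardware re-segments the payload into MSS-sized frames and
	 * replicates the 66-byte header for each segment.
	 */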
2181 * i40e_tsyn - set up the tsyn context descriptor
2182 * @tx_ring: ptr to the ring to send
2183 * @skb: ptr to the skb we're sending
2184 * @tx_flags: the collected send information
2186 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2188 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2189 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;
2196 /* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;
2200 /* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
2203 pf = i40e_netdev_to_pf(tx_ring->netdev);
2204 if (!(pf->flags & I40E_FLAG_PTP))
	if (pf->ptp_tx &&
	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
2209 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2210 pf->ptp_tx_skb = skb_get(skb);
2215 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2216 I40E_TXD_CTX_QW1_CMD_SHIFT;
2222 * i40e_tx_enable_csum - Enable Tx checksum offloads
2224 * @tx_flags: pointer to Tx flags currently set
2225 * @td_cmd: Tx descriptor command bits to set
2226 * @td_offset: Tx descriptor header offsets to set
2227 * @cd_tunneling: ptr to context desc bits
2229 static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2230 u32 *td_cmd, u32 *td_offset,
2231 struct i40e_ring *tx_ring,
2234 struct ipv6hdr *this_ipv6_hdr;
2235 unsigned int this_tcp_hdrlen;
2236 struct iphdr *this_ip_hdr;
2237 u32 network_hdr_len;
	u8 l4_hdr = 0;
	struct udphdr *oudph;
	struct iphdr *oiph;
	u32 l4_tunnel = 0;
2243 if (skb->encapsulation) {
2244 switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			oudph = udp_hdr(skb);
			oiph = ip_hdr(skb);
			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
			break;
		case IPPROTO_GRE:
			l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
			break;
		default:
			return;
		}
2257 network_hdr_len = skb_inner_network_header_len(skb);
2258 this_ip_hdr = inner_ip_hdr(skb);
2259 this_ipv6_hdr = inner_ipv6_hdr(skb);
2260 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
2262 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2263 if (*tx_flags & I40E_TX_FLAGS_TSO) {
2264 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
				ip_hdr(skb)->check = 0;
			} else {
				*cd_tunneling |=
					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
			}
2270 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2271 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
2272 if (*tx_flags & I40E_TX_FLAGS_TSO)
2273 ip_hdr(skb)->check = 0;
2276 /* Now set the ctx descriptor fields */
2277 *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
				   I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT      |
				   l4_tunnel                             |
				   ((skb_inner_network_offset(skb) -
2281 skb_transport_offset(skb)) >> 1) <<
2282 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
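
		/* Illustrative field contents (hypothetical VXLAN frame): a
		 * 20-byte outer IPv4 header gives EXT_IPLEN = 20 >> 2 = 5
		 * (4-byte words), l4_tunnel selects UDP tunneling, and outer
		 * UDP (8) + VXLAN (8) + inner Ethernet (14) headers give
		 * NATLEN = 30 >> 1 = 15 (2-byte words).
		 */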
2283 if (this_ip_hdr->version == 6) {
2284 *tx_flags &= ~I40E_TX_FLAGS_IPV4;
2285 *tx_flags |= I40E_TX_FLAGS_IPV6;
2287 if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
2288 (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
2289 (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
					oiph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, 0);
			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
		}
	} else {
2297 network_hdr_len = skb_network_header_len(skb);
2298 this_ip_hdr = ip_hdr(skb);
2299 this_ipv6_hdr = ipv6_hdr(skb);
2300 this_tcp_hdrlen = tcp_hdrlen(skb);
2303 /* Enable IP checksum offloads */
2304 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2305 l4_hdr = this_ip_hdr->protocol;
2306 /* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
2309 if (*tx_flags & I40E_TX_FLAGS_TSO) {
2310 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
2311 this_ip_hdr->check = 0;
		} else {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		}
2315 /* Now set the td_offset for IP header length */
2316 *td_offset = (network_hdr_len >> 2) <<
2317 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2318 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2319 l4_hdr = this_ipv6_hdr->nexthdr;
2320 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
2321 /* Now set the td_offset for IP header length */
2322 *td_offset = (network_hdr_len >> 2) <<
2323 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2325 /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
2326 *td_offset |= (skb_network_offset(skb) >> 1) <<
2327 I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
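
	/* Illustrative td_offset packing (hypothetical untagged IPv4/TCP
	 * frame): a 14-byte Ethernet header gives MACLEN = 14 >> 1 = 7 words,
	 * a 20-byte IPv4 header gives IPLEN = 20 >> 2 = 5 dwords, and the
	 * switch below adds L4LEN = 20 >> 2 = 5 dwords for a bare TCP header:
	 *
	 *	td_off = (7 << I40E_TX_DESC_LENGTH_MACLEN_SHIFT) |
	 *		 (5 << I40E_TX_DESC_LENGTH_IPLEN_SHIFT) |
	 *		 (5 << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT);
	 */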
	/* Enable L4 checksum offloads */
	switch (l4_hdr) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		*td_offset |= (this_tcp_hdrlen >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		*td_offset |= (sizeof(struct udphdr) >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		break;
	}
 * i40e_create_tx_ctx - Build the Tx context descriptor
2356 * @tx_ring: ring to create the descriptor on
2357 * @cd_type_cmd_tso_mss: Quad Word 1
2358 * @cd_tunneling: Quad Word 0 - bits 0-31
2359 * @cd_l2tag2: Quad Word 0 - bits 32-63
2361 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2362 const u64 cd_type_cmd_tso_mss,
2363 const u32 cd_tunneling, const u32 cd_l2tag2)
2365 struct i40e_tx_context_desc *context_desc;
2366 int i = tx_ring->next_to_use;
2368 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;
2372 /* grab the next descriptor */
2373 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2378 /* cpu_to_le32 and assign to struct fields */
2379 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2380 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2381 context_desc->rsvd = cpu_to_le16(0);
2382 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2386 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2387 * @tx_ring: the ring to be checked
2388 * @size: the size buffer we want to assure is available
2390 * Returns -EBUSY if a stop is needed, else 0
2392 static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2394 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;
2402 /* A reprieve! - use start_queue because it doesn't call schedule */
2403 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}
2409 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
2410 * @tx_ring: the ring to be checked
2411 * @size: the size buffer we want to assure is available
2413 * Returns 0 if stop is not needed
 **/
#ifdef I40E_FCOE
inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
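
/* The split above keeps the hot path cheap: i40e_maybe_stop_tx() only reads
 * the ring counters, while __i40e_maybe_stop_tx() stops the queue, issues a
 * barrier and re-checks, so a completion racing with the stop either sees the
 * stopped queue or the re-check finds room and restarts it. A typical call,
 * assuming DESC_NEEDED is sized for a worst-case (maximally fragmented) skb:
 *
 *	if (i40e_maybe_stop_tx(tx_ring, DESC_NEEDED))
 *		return NETDEV_TX_BUSY;
 */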
2427 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
2429 * @tx_flags: collected send information
2431 * Note: Our HW can't scatter-gather more than 8 fragments to build
2432 * a packet on the wire and so we need to figure out the cases where we
2433 * need to linearize the skb.
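 *
 * Illustrative numbers: a TSO skb with gso_segs = 3 and nr_frags = 18 passes
 * the quick DIV_ROUND_UP((18 + 3), 3) = 7 check below, but the per-segment
 * walk can still find a single gso_size worth of data spread over
 * I40E_MAX_BUFFER_TXD or more fragments, in which case the caller linearizes
 * the skb before mapping it.
 **/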
2435 static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
2437 struct skb_frag_struct *frag;
2438 bool linearize = false;
2439 unsigned int size = 0;
2443 num_frags = skb_shinfo(skb)->nr_frags;
2444 gso_segs = skb_shinfo(skb)->gso_segs;
2446 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
2449 if (num_frags < (I40E_MAX_BUFFER_TXD))
2450 goto linearize_chk_done;
2451 /* try the simple math, if we have too many frags per segment */
		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
		    I40E_MAX_BUFFER_TXD) {
			linearize = true;
			goto linearize_chk_done;
		}
2457 frag = &skb_shinfo(skb)->frags[0];
2458 /* we might still have more fragments per segment */
2460 size += skb_frag_size(frag);
2462 if ((size >= skb_shinfo(skb)->gso_size) &&
2463 (j < I40E_MAX_BUFFER_TXD)) {
2464 size = (size % skb_shinfo(skb)->gso_size);
2467 if (j == I40E_MAX_BUFFER_TXD) {
2472 } while (num_frags);
2474 if (num_frags >= I40E_MAX_BUFFER_TXD)
2483 * i40e_tx_map - Build the Tx descriptor
2484 * @tx_ring: ring to send buffer on
2486 * @first: first buffer info buffer to use
2487 * @tx_flags: collected send information
2488 * @hdr_len: size of the packet header
2489 * @td_cmd: the command field in the descriptor
2490 * @td_offset: offset for checksum or crc
 **/
#ifdef I40E_FCOE
inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			struct i40e_tx_buffer *first, u32 tx_flags,
			const u8 hdr_len, u32 td_cmd, u32 td_offset)
#else
static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			       struct i40e_tx_buffer *first, u32 tx_flags,
			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
2502 unsigned int data_len = skb->data_len;
2503 unsigned int size = skb_headlen(skb);
2504 struct skb_frag_struct *frag;
2505 struct i40e_tx_buffer *tx_bi;
2506 struct i40e_tx_desc *tx_desc;
2507 u16 i = tx_ring->next_to_use;
2512 bool tail_bump = true;
2515 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2516 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2517 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2518 I40E_TX_FLAGS_VLAN_SHIFT;
	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;
2526 /* multiply data chunks by size of headers */
2527 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
2528 first->gso_segs = gso_segs;
2530 first->tx_flags = tx_flags;
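
	/* Illustrative accounting (hypothetical TSO frame): an skb of 7066
	 * bytes with hdr_len = 66 and gso_segs = 5 records a bytecount of
	 * 7066 - 66 + (5 * 66) = 7330, i.e. the payload plus one replicated
	 * header per segment that will actually hit the wire; this is what
	 * gets reported to the BQL accounting below.
	 */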
2532 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2534 tx_desc = I40E_TX_DESC(tx_ring, i);
2537 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;
2541 /* record length, and DMA address */
2542 dma_unmap_len_set(tx_bi, len, size);
2543 dma_unmap_addr_set(tx_bi, dma, dma);
2545 tx_desc->buffer_addr = cpu_to_le64(dma);
2547 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
2548 tx_desc->cmd_type_offset_bsz =
2549 build_ctob(td_cmd, td_offset,
2550 I40E_MAX_DATA_PER_TXD, td_tag);
2556 if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}
2561 dma += I40E_MAX_DATA_PER_TXD;
2562 size -= I40E_MAX_DATA_PER_TXD;
2564 tx_desc->buffer_addr = cpu_to_le64(dma);
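
		/* Illustrative chunking: a mapped area larger than
		 * I40E_MAX_DATA_PER_TXD is emitted as several descriptors
		 * pointing into the same DMA mapping at increasing offsets,
		 * e.g. (2 * I40E_MAX_DATA_PER_TXD + 100) bytes become two
		 * full-size descriptors from this loop plus a final 100-byte
		 * descriptor built after it.
		 */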
		if (likely(!data_len))
			break;
		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);
2577 if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}
		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
2588 tx_bi = &tx_ring->tx_bi[i];
2591 /* set next_to_watch value indicating a packet is present */
2592 first->next_to_watch = tx_desc;
	if (i == tx_ring->count)
		i = 0;
2598 tx_ring->next_to_use = i;
2600 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
						 tx_ring->queue_index),
			     first->bytecount);
2603 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
2605 /* Algorithm to optimize tail and RS bit setting:
2606 * if xmit_more is supported
2607 * if xmit_more is true
2608 * do not update tail and do not mark RS bit.
2609 * if xmit_more is false and last xmit_more was false
2610 * if every packet spanned less than 4 desc
	 *			then set RS bit on 4th packet and update tail
	 *			on every packet
	 *		else
	 *			update tail and set RS bit on every packet.
2615 * if xmit_more is false and last_xmit_more was true
2616 * update tail and set RS bit.
2618 * Optimization: wmb to be issued only in case of tail update.
	 * Also optimize the Descriptor WB path for RS bit with the same
	 * algorithm.
	 *
	 * Note: If there are fewer than 4 packets
	 * pending and interrupts were disabled, the service task will
	 * trigger a force WB.
	 */
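
	/* Illustrative behaviour: a burst of packets queued with
	 * skb->xmit_more set leaves the tail register untouched and RS unset;
	 * the last packet of the burst (xmit_more clear) bumps the tail, and
	 * RS is only requested once packet_stride/desc_count reach WB_STRIDE
	 * or an xmit_more burst has just ended, so descriptor write-backs
	 * arrive in batches rather than per packet.
	 */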
2626 if (skb->xmit_more &&
2627 !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2628 tx_ring->queue_index))) {
2629 tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2631 } else if (!skb->xmit_more &&
2632 !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2633 tx_ring->queue_index)) &&
2634 (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
2635 (tx_ring->packet_stride < WB_STRIDE) &&
2636 (desc_count < WB_STRIDE)) {
2637 tx_ring->packet_stride++;
2639 tx_ring->packet_stride = 0;
2640 tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2644 tx_ring->packet_stride = 0;
2646 tx_desc->cmd_type_offset_bsz =
2647 build_ctob(td_cmd, td_offset, size, td_tag) |
2648 cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
2649 I40E_TX_DESC_CMD_EOP) <<
2650 I40E_TXD_QW1_CMD_SHIFT);
2652 /* notify HW of packet */
2654 prefetchw(tx_desc + 1);
2657 /* Force memory writes to complete before letting h/w
2658 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, tx_ring->tail);
dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");
2671 /* clear dma mappings for failed tx_bi map */
2673 tx_bi = &tx_ring->tx_bi[i];
2674 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
2682 tx_ring->next_to_use = i;
2686 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
2688 * @tx_ring: ring to send buffer on
 * Returns the number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since we
 * need at least one descriptor.
 **/
#ifdef I40E_FCOE
inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
				      struct i40e_ring *tx_ring)
#else
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
					     struct i40e_ring *tx_ring)
#endif
{
	unsigned int f;
	int count = 0;
2705 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
2706 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
2707 * + 4 desc gap to avoid the cache line where head is,
2708 * + 1 desc for context descriptor,
	 * otherwise try next time
	 */
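
	/* Worked example, assuming I40E_MAX_DATA_PER_TXD is at least one
	 * page: a TSO skb with a 200-byte linear header area and 16
	 * page-sized fragments counts 1 + 16 = 17 data descriptors, so the
	 * check below requires 17 + 4 + 1 = 22 free descriptors before the
	 * frame is accepted.
	 */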
2711 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2712 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2714 count += TXD_USE_COUNT(skb_headlen(skb));
2715 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return 0;
	}
	return count;
}
2723 * i40e_xmit_frame_ring - Sends buffer on Tx ring
2725 * @tx_ring: ring to send buffer on
2727 * Returns NETDEV_TX_OK if sent, else an error code
2729 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2730 struct i40e_ring *tx_ring)
2732 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2733 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2734 struct i40e_tx_buffer *first;
2743 if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
2744 return NETDEV_TX_BUSY;
2746 /* prepare the xmit flags */
2747 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2750 /* obtain protocol of skb */
2751 protocol = vlan_get_protocol(skb);
2753 /* record the location of the first descriptor for this packet */
2754 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2756 /* setup IPv4/IPv6 offloads */
2757 if (protocol == htons(ETH_P_IP))
2758 tx_flags |= I40E_TX_FLAGS_IPV4;
2759 else if (protocol == htons(ETH_P_IPV6))
2760 tx_flags |= I40E_TX_FLAGS_IPV6;
2762 tso = i40e_tso(tx_ring, skb, &hdr_len,
2763 &cd_type_cmd_tso_mss, &cd_tunneling);
	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;
2770 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;
2775 if (i40e_chk_linearize(skb, tx_flags)) {
		if (skb_linearize(skb))
			goto out_drop;
2778 tx_ring->tx_stats.tx_linearize++;
2780 skb_tx_timestamp(skb);
2782 /* always enable CRC insertion offload */
2783 td_cmd |= I40E_TX_DESC_CMD_ICRC;
2785 /* Always offload the checksum, since it's in the data descriptor */
2786 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2787 tx_flags |= I40E_TX_FLAGS_CSUM;
2789 i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2790 tx_ring, &cd_tunneling);
2793 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2794 cd_tunneling, cd_l2tag2);
2796 /* Add Flow Director ATR if it's enabled.
	 * NOTE: this must always be directly before the data descriptor.
	 */
2800 i40e_atr(tx_ring, skb, tx_flags, protocol);
2802 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2805 return NETDEV_TX_OK;
out_drop:
	dev_kfree_skb_any(skb);
2809 return NETDEV_TX_OK;
2813 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2815 * @netdev: network interface device structure
2817 * Returns NETDEV_TX_OK if sent, else an error code
2819 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2821 struct i40e_netdev_priv *np = netdev_priv(netdev);
2822 struct i40e_vsi *vsi = np->vsi;
2823 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
2828 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
2829 return NETDEV_TX_OK;
	return i40e_xmit_frame_ring(skb, tx_ring);
}