/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
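/* Illustration (not from the original source): the final fragment of a
 * frame, carrying one 256-byte buffer with no offloads or VLAN tag, would
 * be encoded with I40E_TXD_CMD (defined just below) as
 *
 *	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TXD_CMD, 0, 256, 0);
 *
 * i.e. DTYPE_DATA plus EOP|RS in the command field, zero header offsets,
 * the buffer size, and an empty L2 tag, all packed into one __le64.
 */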
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	first = &tx_ring->tx_bi[i];
	memset(first, 0, sizeof(struct i40e_tx_buffer));

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
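/* Note: programming a sideband filter consumes two descriptors — the
 * filter program descriptor above and a dummy data descriptor pointing at
 * the raw packet whose headers supply the match fields. The raw_packet
 * buffer is deliberately not freed here: tagging it with
 * I40E_TX_FLAGS_FD_SB lets i40e_unmap_and_free_tx_resource() kfree() it
 * later (see the matching note in i40e_add_del_fdir() below).
 */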
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;
	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	return err ? -EOPNOTSUPP : 0;
}
#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;
	if (add) {
		pf->fd_tcp_rule++;
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Always returns -EOPNOTSUPP
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}
#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}
/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
							I40E_FLAG_FD_SB_ENABLED;
			}
		} else {
			dev_info(&pdev->dev,
				 "FD filter programming failed due to incorrect filter parameters\n");
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}
/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);

		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}
/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 head, tail;

	head = i40e_get_head(ring);
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
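/* Example: with count = 512, head = 510 and tail = 2 the ring has wrapped,
 * so the pending count is tail + count - head = 2 + 512 - 510 = 4
 * descriptors still owned by hardware.
 */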
#define WB_STRIDE 0x3
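/* With WB_STRIDE 0x3, the test "(j / (WB_STRIDE + 1)) == 0" in
 * i40e_clean_tx_irq() below holds only while fewer than four descriptors
 * are still waiting to be written back.
 */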
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_packets = 0;
	unsigned int total_bytes = 0;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		dev_consume_skb_any(tx_buf->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		unsigned int j = 0;

		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		j = i40e_get_tx_pending(tx_ring);

		if (budget &&
		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
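/* Note the two exit conditions of the clean loop above: reaching the
 * head write-back position (tx_head == tx_desc) means hardware has
 * completed nothing further, while running out of budget merely defers
 * the remaining completions to the next NAPI poll.
 */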
/**
 * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;

	if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		u32 val;

		if (q_vector->arm_wb_state)
			return;

		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1),
		     val);
		q_vector->arm_wb_state = true;
	} else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}
/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	u32 new_itr = rc->itr;
	int bytes_per_int;

	if (rc->total_packets == 0 || !rc->itr)
		return;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (100000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (8000 ints/s)
	 */
	bytes_per_int = rc->total_bytes / rc->itr;
	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}
	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_100K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	if (new_itr != rc->itr)
		rc->itr = new_itr;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}
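/* Example walk-through: a ring container currently in I40E_LOWEST_LATENCY
 * that observes bytes_per_int = 15 moves to I40E_LOW_LATENCY, so the next
 * interrupt throttle rate becomes I40E_ITR_20K; if traffic then drops to
 * bytes_per_int <= 10 it falls back to I40E_LOWEST_LATENCY/I40E_ITR_100K.
 */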
/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}
/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}
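/* The extra sizeof(u32) reserved above is the head write-back location:
 * the hardware periodically writes the ring head there, and
 * i40e_get_head()/i40e_clean_tx_irq() read it instead of a head register
 * to see how far completion has progressed.
 */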
/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (ring_is_ps_enabled(rx_ring)) {
		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;

		rx_bi = &rx_ring->rx_bi[0];
		if (rx_bi->hdr_buf) {
			dma_free_coherent(dev,
					  bufsz,
					  rx_bi->hdr_buf,
					  rx_bi->dma);
			for (i = 0; i < rx_ring->count; i++) {
				rx_bi = &rx_ring->rx_bi[i];
				rx_bi->dma = 0;
				rx_bi->hdr_buf = NULL;
			}
		}
	}
	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		if (rx_bi->dma) {
			dma_unmap_single(dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (rx_bi->page) {
			if (rx_bi->page_dma) {
				dma_unmap_page(dev,
					       rx_bi->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
				rx_bi->page_dma = 0;
			}
			__free_page(rx_bi->page);
			rx_bi->page = NULL;
			rx_bi->page_offset = 0;
		}
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
/**
 * i40e_alloc_rx_headers - allocate rx header buffers
 * @rx_ring: ring to alloc buffers
 *
 * Allocate rx header buffers for the entire ring. As these are static,
 * this is only called when setting up a new ring.
 **/
void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	dma_addr_t dma;
	void *buffer;
	int buf_size;
	int i;

	if (rx_ring->rx_bi[0].hdr_buf)
		return;
	/* Make sure the buffers don't cross cache line boundaries. */
	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
				    &dma, GFP_KERNEL);
	if (!buffer)
		return;
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		rx_bi->dma = dma + (i * buf_size);
		rx_bi->hdr_buf = buffer + (i * buf_size);
	}
}
/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
/**
 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];

		if (bi->skb) /* desc is in use */
			goto no_buffers;
		if (!bi->page) {
			bi->page = alloc_page(GFP_ATOMIC);
			if (!bi->page) {
				rx_ring->rx_stats.alloc_page_failed++;
				goto no_buffers;
			}
		}

		if (!bi->page_dma) {
			/* use a half page if we're re-using */
			bi->page_offset ^= PAGE_SIZE / 2;
			bi->page_dma = dma_map_page(rx_ring->dev,
						    bi->page,
						    bi->page_offset,
						    PAGE_SIZE / 2,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev,
					      bi->page_dma)) {
				rx_ring->rx_stats.alloc_page_failed++;
				bi->page_dma = 0;
				goto no_buffers;
			}
		}

		dma_sync_single_range_for_device(rx_ring->dev,
						 bi->dma,
						 0,
						 rx_ring->rx_hdr_len,
						 DMA_FROM_DEVICE);
		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}
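/* Each receive page is used in two half-page chunks: the
 * "bi->page_offset ^= PAGE_SIZE / 2" flip above alternates halves on
 * reuse, so the half that may still be referenced by an skb up the stack
 * is never handed straight back to hardware.
 */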
/**
 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	struct sk_buff *skb;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];
		skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_buff_failed++;
				bi->dma = 0;
				goto no_buffers;
			}
		}

		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		rx_desc->read.hdr_addr = 0;
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}
/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;
	struct i40e_vsi *vsi = rx_ring->vsi;
	u64 flags = vsi->back->flags;

	if (vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	if (flags & I40E_FLAG_IN_NETPOLL)
		netif_rx(skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}
/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_status: status value of last descriptor in packet
 * @rx_error: error value of last descriptor in packet
 * @rx_ptype: ptype value of last descriptor in packet
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    u32 rx_status,
				    u32 rx_error,
				    u16 rx_ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
	bool ipv4 = false, ipv6 = false;
	bool ipv4_tunnel, ipv6_tunnel;
	__wsum rx_udp_csum;
	struct iphdr *iph;
	__sum16 csum;

	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);

	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
		ipv4 = true;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
		ipv6 = true;

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
	 * it in the driver, hardware does not do it for us.
	 * Since L3L4P bit was set we assume a valid IHL value (>=5)
	 * so the total length of IPv4 header is IHL*4 bytes
	 * The UDP_0 bit *may* be set if the *inner* header is UDP
	 */
	if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
	    (ipv4_tunnel)) {
		skb->transport_header = skb->mac_header +
					sizeof(struct ethhdr) +
					(ip_hdr(skb)->ihl * 4);

		/* Add 4 bytes for VLAN tagged packets */
		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
					  skb->protocol == htons(ETH_P_8021AD))
					  ? VLAN_HLEN : 0;

		if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
		    (udp_hdr(skb)->check != 0)) {
			rx_udp_csum = udp_csum(skb);
			iph = ip_hdr(skb);
			csum = csum_tcpudp_magic(
					iph->saddr, iph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, rx_udp_csum);

			if (udp_hdr(skb)->check != csum)
				goto checksum_fail;

		} /* else its GRE and so no outer UDP header */
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = ipv4_tunnel || ipv6_tunnel;

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}
/**
 * i40e_rx_hash - returns the hash value from the Rx descriptor
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 **/
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
			       union i40e_rx_desc *rx_desc)
{
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if ((ring->netdev->features & NETIF_F_RXHASH) &&
	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
	else
		return 0;
}
/**
 * i40e_ptype_to_hash - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}
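/* Example: a decoded TCP-over-IPv4 ptype reports payload layer PAY4 and
 * so hashes as PKT_HASH_TYPE_L4, while a fragmented IPv4 ptype (PAY3)
 * maps to PKT_HASH_TYPE_L3; anything the decoder does not recognize
 * stays PKT_HASH_TYPE_NONE.
 */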
/**
 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
 * @rx_ring: rx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns number of packets cleaned
 **/
static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	const int current_node = numa_node_id();
	struct i40e_vsi *vsi = rx_ring->vsi;
	u16 i = rx_ring->next_to_clean;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u8 rx_ptype;
	u64 qword;

	if (budget <= 0)
		return 0;

	do {
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();
		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_INCREMENT(rx_ring, i);
			continue;
		}
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		if (likely(!skb)) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_hdr_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				break;
			}

			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			/* we are reusing so sync this buffer for CPU use */
			dma_sync_single_range_for_cpu(rx_ring->dev,
						      rx_bi->dma,
						      0,
						      rx_ring->rx_hdr_len,
						      DMA_FROM_DEVICE);
		}
		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		prefetch(rx_bi->page);
		rx_bi->skb = NULL;
		cleaned_count++;
		if (rx_hbo || rx_sph) {
			int len;

			if (rx_hbo)
				len = I40E_RX_HDR_SIZE;
			else
				len = rx_header_len;
			memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
		} else if (skb->len == 0) {
			int len;

			len = (rx_packet_len > skb_headlen(skb) ?
				skb_headlen(skb) : rx_packet_len);
			memcpy(__skb_put(skb, len),
			       rx_bi->page + rx_bi->page_offset,
			       len);
			rx_bi->page_offset += len;
			rx_packet_len -= len;
		}

		/* Get the rest of the data if this was a header split */
		if (rx_packet_len) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_bi->page,
					   rx_bi->page_offset,
					   rx_packet_len);

			skb->len += rx_packet_len;
			skb->data_len += rx_packet_len;
			skb->truesize += rx_packet_len;

			if ((page_count(rx_bi->page) == 1) &&
			    (page_to_nid(rx_bi->page) == current_node))
				get_page(rx_bi->page);
			else
				rx_bi->page = NULL;

			dma_unmap_page(rx_ring->dev,
				       rx_bi->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			rx_bi->page_dma = 0;
		}
		I40E_RX_INCREMENT(rx_ring, i);

		if (unlikely(
		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			struct i40e_rx_buffer *next_buffer;

			next_buffer = &rx_ring->rx_bi[i];
			next_buffer->skb = skb;
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
#ifdef I40E_FCOE
		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;

	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
/**
 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
 * @rx_ring: rx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns number of packets cleaned
 **/
static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	struct i40e_vsi *vsi = rx_ring->vsi;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u16 rx_packet_len;
	u8 rx_ptype;
	u64 qword;
	u16 i;

	do {
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_INCREMENT(rx_ring, i);
			continue;
		}
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		prefetch(skb->data);

		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		rx_bi->skb = NULL;
		cleaned_count++;

		/* Get the header and possibly the whole packet
		 * If this is an skb from previous receive dma will be 0
		 */
		skb_put(skb, rx_packet_len);
		dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
				 DMA_FROM_DEVICE);
		rx_bi->dma = 0;

		I40E_RX_INCREMENT(rx_ring, i);

		if (unlikely(
		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			/* TODO: shouldn't we increment a counter indicating the
			 * drop?
			 */
			continue;
		}

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
#ifdef I40E_FCOE
		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;
	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
/**
 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 *
 **/
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
					  struct i40e_q_vector *q_vector)
{
	struct i40e_hw *hw = &vsi->back->hw;
	u16 old_itr;
	int vector;
	u32 val;

	vector = (q_vector->v_idx + vsi->base_vector);
	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
		old_itr = q_vector->rx.itr;
		i40e_set_new_dynamic_itr(&q_vector->rx);
		if (old_itr != q_vector->rx.itr) {
			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
			      (I40E_RX_ITR <<
			       I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
			      (q_vector->rx.itr <<
			       I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
		} else {
			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
			      (I40E_ITR_NONE <<
			       I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
		}
		if (!test_bit(__I40E_DOWN, &vsi->state))
			wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
	} else {
		i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
	}
	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
		old_itr = q_vector->tx.itr;
		i40e_set_new_dynamic_itr(&q_vector->tx);
		if (old_itr != q_vector->tx.itr) {
			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
			      (I40E_TX_ITR <<
			       I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
			      (q_vector->tx.itr <<
			       I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
		} else {
			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
			      (I40E_ITR_NONE <<
			       I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
		}
		if (!test_bit(__I40E_DOWN, &vsi->state))
			wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
			     vsi->base_vector - 1), val);
	} else {
		i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
	}
}
/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	bool arm_wb = false;
	int budget_per_ring;
	int cleaned;

	if (test_bit(__I40E_DOWN, &vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx) {
		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
		arm_wb |= ring->arm_wb;
		ring->arm_wb = false;
	}

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);

	i40e_for_each_ring(ring, q_vector->rx) {
		if (ring_is_ps_enabled(ring))
			cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
		else
			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
		/* if we didn't clean as many as budgeted, we must be done */
		clean_complete &= (budget_per_ring != cleaned);
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		if (arm_wb)
			i40e_force_wb(vsi, q_vector);
		return budget;
	}

	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
		q_vector->arm_wb_state = false;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete(napi);
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		i40e_update_enable_itr(vsi, q_vector);
	} else { /* Legacy mode */
		struct i40e_hw *hw = &vsi->back->hw;
		/* We re-enable the queue 0 cause, but
		 * don't worry about dynamic_enable
		 * because we left it on for the other
		 * possible interrupts during napi
		 */
		u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
			   I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		wr32(hw, I40E_QINT_RQCTL(0), qval);
		qval = rd32(hw, I40E_QINT_TQCTL(0)) |
		       I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(0), qval);
		i40e_irq_dynamic_enable_icr0(vsi->back);
	}
	return 0;
}
/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring: ring to add programming descriptor to
 * @skb: send buffer
 * @tx_flags: send tx flags
 * @protocol: wire protocol
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, __be16 protocol)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	/* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)
		return;

	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
		return;

	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
		/* snag network header to get L4 type and address */
		hdr.network = skb_network_header(skb);

		/* Currently only IPv4/IPv6 with TCP is supported
		 * access ihl as u8 to avoid unaligned access on ia64
		 */
		if (tx_flags & I40E_TX_FLAGS_IPV4)
			hlen = (hdr.network[0] & 0x0F) << 2;
		else if (protocol == htons(ETH_P_IPV6))
			hlen = sizeof(struct ipv6hdr);
		else
			return;
	} else {
		hdr.network = skb_inner_network_header(skb);
		hlen = skb_inner_network_header_len(skb);
	}

	/* Currently only IPv4/IPv6 with TCP is supported
	 * Note: tx_flags gets modified to reflect inner protocols in
	 * tx_enable_csum function if encap is enabled.
	 */
	if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
	    (hdr.ipv4->protocol != IPPROTO_TCP))
		return;
	else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
		 (hdr.ipv6->nexthdr != IPPROTO_TCP))
		return;

	th = (struct tcphdr *)(hdr.network + hlen);

	/* Due to lack of space, no more new filters can be programmed */
	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;
	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
		/* HW ATR eviction will take care of removing filters on FIN
		 * and RST packets.
		 */
		if (th->fin || th->rst)
			return;
	}

	tx_ring->atr_count++;

	/* sample on all syn/fin/rst packets or once every atr sample rate */
	if (!th->fin &&
	    !th->syn &&
	    !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;

	tx_ring->atr_count = 0;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
		     I40E_TXD_FLTR_QW0_QINDEX_MASK;
	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= (th->fin || th->rst) ?
		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
		     I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
		dtype_cmd |=
			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
			 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	else
		dtype_cmd |=
			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
			 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;

	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(0);
}
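/* ATR in one sentence: by sampling outbound TCP (SYN/FIN/RST always, data
 * packets once per atr_sample_rate) and programming a filter keyed to this
 * ring's queue_index, the return traffic of the flow is steered to the
 * same queue pair, and FIN/RST samples remove the filter again.
 */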
/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 * @flags: the tx flags to be set
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped upon error,
 * and otherwise returns 0 to indicate the flags have been set properly.
 **/
#ifdef I40E_FCOE
inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
				      struct i40e_ring *tx_ring,
				      u32 *flags)
#else
static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
					     struct i40e_ring *tx_ring,
					     u32 *flags)
#endif
{
	__be16 protocol = skb->protocol;
	u32 tx_flags = 0;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling.  In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;
		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
		goto out;

	/* Insert 802.1p priority into VLAN header */
	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	    (skb->priority != TC_PRIO_CONTROL)) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
			    I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}

out:
	*flags = tx_flags;
	return 0;
}
2118 * i40e_tso - set up the tso context descriptor
2119 * @tx_ring: ptr to the ring to send
2120 * @skb: ptr to the skb we're sending
2121 * @hdr_len: ptr to the size of the packet header
2122 * @cd_tunneling: ptr to context descriptor bits
2124 * Returns 0 if no TSO can happen, 1 if tso is going, or error
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
		    u32 *cd_tunneling)
{
	u32 cd_cmd, cd_tso_len, cd_mss;
	struct ipv6hdr *ipv6h;
	struct tcphdr *tcph;
	struct iphdr *iph;
	u32 l4len;
	int err;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	if (iph->version == 4) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
	} else if (ipv6h->version == 6) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		ipv6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					       0, IPPROTO_TCP, 0);
	}

	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
	*hdr_len = (skb->encapsulation
		    ? (skb_inner_transport_header(skb) - skb->data)
		    : skb_transport_offset(skb)) + l4len;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				((u64)cd_tso_len <<
				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}
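
/* Editorial sketch (not part of the driver): the three TSO fields computed
 * above share context-descriptor Quad Word 1. For a 65226-byte GSO skb with
 * 66 bytes of headers and an MSS of 1448, cd_tso_len is 65160 and the
 * hardware emits 45 wire segments (65160 / 1448). The helper name is
 * hypothetical; the shift macros are the driver's own.
 */
static inline u64 example_tso_qw1(u32 tso_len, u32 mss)
{
	return ((u64)I40E_TX_CTX_DESC_TSO << I40E_TXD_CTX_QW1_CMD_SHIFT) |
	       ((u64)tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
	       ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
}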

/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (!(pf->flags & I40E_FLAG_PTP))
		return 0;

	if (pf->ptp_tx &&
	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;

	return 1;
}
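
/* Editorial sketch (not part of the driver): the hardware latches only one
 * outgoing timestamp at a time, so i40e_tsyn() uses test_and_set_bit_lock()
 * as a try-lock on the PF state. A minimal model of that pattern, with a
 * hypothetical helper name:
 */
static inline bool example_try_claim_tx_tstamp(unsigned long *state)
{
	/* non-zero return means another packet already owns the slot */
	if (test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, state))
		return false;
	return true;	/* claimed; released later via clear_bit_unlock() */
}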

/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
				u32 *td_cmd, u32 *td_offset,
				struct i40e_ring *tx_ring,
				u32 *cd_tunneling)
{
	struct ipv6hdr *this_ipv6_hdr;
	unsigned int this_tcp_hdrlen;
	struct iphdr *this_ip_hdr;
	u32 network_hdr_len;
	u8 l4_hdr = 0;
	struct udphdr *oudph;
	struct iphdr *oiph;
	u32 l4_tunnel = 0;

	if (skb->encapsulation) {
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			oudph = udp_hdr(skb);
			oiph = ip_hdr(skb);
			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
			break;
		case IPPROTO_GRE:
			l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
			break;
		default:
			return;
		}
		network_hdr_len = skb_inner_network_header_len(skb);
		this_ip_hdr = inner_ip_hdr(skb);
		this_ipv6_hdr = inner_ipv6_hdr(skb);
		this_tcp_hdrlen = inner_tcp_hdrlen(skb);

		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
			if (*tx_flags & I40E_TX_FLAGS_TSO) {
				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
				ip_hdr(skb)->check = 0;
			} else {
				*cd_tunneling |=
					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
			}
		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
			if (*tx_flags & I40E_TX_FLAGS_TSO)
				ip_hdr(skb)->check = 0;
		}

		/* Now set the ctx descriptor fields */
		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
				   I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT      |
				   l4_tunnel                             |
				   ((skb_inner_network_offset(skb) -
				     skb_transport_offset(skb)) >> 1) <<
				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
		if (this_ip_hdr->version == 6) {
			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
			*tx_flags |= I40E_TX_FLAGS_IPV6;
		}
		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING)        &&
		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
					oiph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, 0);
			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
		}
	} else {
		network_hdr_len = skb_network_header_len(skb);
		this_ip_hdr = ip_hdr(skb);
		this_ipv6_hdr = ipv6_hdr(skb);
		this_tcp_hdrlen = tcp_hdrlen(skb);
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_hdr = this_ip_hdr->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (*tx_flags & I40E_TX_FLAGS_TSO) {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
			this_ip_hdr->check = 0;
		} else {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		}
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		l4_hdr = this_ipv6_hdr->nexthdr;
		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	}
	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
	*td_offset |= (skb_network_offset(skb) >> 1) <<
		       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_hdr) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		*td_offset |= (this_tcp_hdrlen >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		*td_offset |= (sizeof(struct udphdr) >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		break;
	}
}
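
/* Editorial sketch (not part of the driver): a worked td_offset for an
 * untagged TCP/IPv4 frame. MACLEN is counted in 2-byte words, IPLEN and
 * L4LEN in 4-byte dwords, exactly as the shifts above imply:
 *
 *   MACLEN = 14 >> 1 = 7   (Ethernet header)
 *   IPLEN  = 20 >> 2 = 5   (IPv4 header, no options)
 *   L4LEN  = 20 >> 2 = 5   (TCP header, no options)
 */
static inline u32 example_td_offset_tcpv4(void)
{
	return (7 << I40E_TX_DESC_LENGTH_MACLEN_SHIFT) |
	       (5 << I40E_TX_DESC_LENGTH_IPLEN_SHIFT) |
	       (5 << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT);
}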

/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
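
/* Editorial note: the context descriptor is two quad words. QW0 carries the
 * tunneling parameters (bits 0-31) and L2TAG2 (bits 32-63); QW1 carries the
 * type/command/TSO-length/MSS bits assembled by i40e_tso() and i40e_tsyn().
 * The early return above skips the descriptor entirely when every field is
 * still at its default, saving one ring slot per packet in the common case.
 */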

/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the number of descriptors we want to assure are available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

/**
 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the number of descriptors we want to assure are available
 *
 * Returns 0 if stop is not needed
 **/
#ifdef I40E_FCOE
inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
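
/* Editorial sketch (not part of the driver): the two-level split keeps the
 * hot path to a single ring-space comparison; only when space looks short
 * do we pay for netif_stop_subqueue() plus the smp_mb()/re-check that
 * closes the race with a concurrent Tx cleaner. A hypothetical caller
 * showing the contract:
 */
static inline bool example_can_queue(struct i40e_ring *ring, int needed)
{
	return i40e_maybe_stop_tx(ring, needed) == 0;
}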

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @tx_flags: collected send information
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
{
	struct skb_frag_struct *frag;
	bool linearize = false;
	unsigned int size = 0;
	u16 num_frags;
	u16 gso_segs;

	num_frags = skb_shinfo(skb)->nr_frags;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
		u16 j = 0;

		if (num_frags < (I40E_MAX_BUFFER_TXD))
			goto linearize_chk_done;
		/* try the simple math, if we have too many frags per segment */
		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
		    I40E_MAX_BUFFER_TXD) {
			linearize = true;
			goto linearize_chk_done;
		}
		frag = &skb_shinfo(skb)->frags[0];
		/* we might still have more fragments per segment */
		do {
			size += skb_frag_size(frag);
			frag++; j++;
			if ((size >= skb_shinfo(skb)->gso_size) &&
			    (j < I40E_MAX_BUFFER_TXD)) {
				size = (size % skb_shinfo(skb)->gso_size);
				j = (size) ? 1 : 0;
			}
			if (j == I40E_MAX_BUFFER_TXD) {
				linearize = true;
				break;
			}
			num_frags--;
		} while (num_frags);
	} else {
		if (num_frags >= I40E_MAX_BUFFER_TXD)
			linearize = true;
	}

linearize_chk_done:
	return linearize;
}
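
/* Editorial sketch (not part of the driver): the quick check above is pure
 * arithmetic. With 24 frags and 2 GSO segments,
 * DIV_ROUND_UP(24 + 2, 2) = 13 exceeds I40E_MAX_BUFFER_TXD (8), so the skb
 * is linearized without walking the frag list at all.
 */
static inline bool example_too_many_frags_per_seg(u16 num_frags, u16 gso_segs)
{
	return DIV_ROUND_UP((u32)(num_frags + gso_segs), gso_segs) >
	       I40E_MAX_BUFFER_TXD;
}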

/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
#ifdef I40E_FCOE
inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			struct i40e_tx_buffer *first, u32 tx_flags,
			const u8 hdr_len, u32 td_cmd, u32 td_offset)
#else
static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			       struct i40e_tx_buffer *first, u32 tx_flags,
			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 gso_segs;
	u16 desc_count = 0;
	bool tail_bump = true;
	bool do_rs = false;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;

	/* multiply data chunks by size of headers */
	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
	first->gso_segs = gso_segs;
	first->skb = skb;
	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   I40E_MAX_DATA_PER_TXD, td_tag);

			tx_desc++;
			i++;
			desc_count++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += I40E_MAX_DATA_PER_TXD;
			size -= I40E_MAX_DATA_PER_TXD;

			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		desc_count++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
						 tx_ring->queue_index),
						 first->bytecount);
	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* Algorithm to optimize tail and RS bit setting:
	 * if xmit_more is supported
	 *	if xmit_more is true
	 *		do not update tail and do not mark RS bit.
	 *	if xmit_more is false and last xmit_more was false
	 *		if every packet spanned less than 4 desc
	 *			then set RS bit on 4th packet and update tail
	 *			on every packet
	 *		else
	 *			update tail and set RS bit on every packet.
	 *	if xmit_more is false and last_xmit_more was true
	 *		update tail and set RS bit.
	 *
	 * Optimization: wmb to be issued only in case of tail update.
	 * Also optimize the Descriptor WB path for RS bit with the same
	 * algorithm.
	 *
	 * Note: If there are fewer than 4 packets
	 * pending and interrupts were disabled the service task will
	 * trigger a force WB.
	 */
	if (skb->xmit_more &&
	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						    tx_ring->queue_index))) {
		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		tail_bump = false;
	} else if (!skb->xmit_more &&
		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						       tx_ring->queue_index)) &&
		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
		   (tx_ring->packet_stride < WB_STRIDE) &&
		   (desc_count < WB_STRIDE)) {
		tx_ring->packet_stride++;
	} else {
		tx_ring->packet_stride = 0;
		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		do_rs = true;
	}
	if (do_rs)
		tx_ring->packet_stride = 0;

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
						  I40E_TX_DESC_CMD_EOP) <<
						  I40E_TXD_QW1_CMD_SHIFT);

	/* notify HW of packet */
	if (!tail_bump)
		prefetchw(tx_desc + 1);

	if (tail_bump) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, tx_ring->tail);
	}

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
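
/* Editorial note: the tail/RS policy above reduces to three outcomes per
 * packet:
 *
 *   xmit_more set, queue running       -> no tail write, EOP only
 *   packet_stride/desc_count < stride  -> tail write, EOP only (defer WB)
 *   otherwise                          -> tail write, EOP + RS (request WB)
 *
 * Deferring the RS bit batches descriptor write-backs; the service task's
 * forced write-back covers the case where only a few packets are pending.
 */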

/**
 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns the number of data descriptors needed for this skb. Returns 0
 * if there are not enough descriptors available in this ring, since we
 * need at least one descriptor.
 **/
#ifdef I40E_FCOE
inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
				      struct i40e_ring *tx_ring)
#else
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
					     struct i40e_ring *tx_ring)
#endif
{
	unsigned int f;
	int count = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	count += TXD_USE_COUNT(skb_headlen(skb));
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return 0;
	}
	return count;
}
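
/* Editorial sketch (not part of the driver): a worked count. For a non-TSO
 * skb with a 256-byte linear head and three 4 KB frags, each piece fits in
 * one data descriptor (TXD_USE_COUNT() rounds up against
 * I40E_MAX_DATA_PER_TXD, which is larger than 4 KB), so count = 1 + 3 = 4
 * and the ring must hold count + 4 + 1 = 9 free descriptors to proceed.
 */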

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tsyn;
	int tso;

	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(tx_ring, skb, &hdr_len,
		       &cd_type_cmd_tso_mss, &cd_tunneling);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	if (i40e_chk_linearize(skb, tx_flags))
		if (skb_linearize(skb))
			goto out_drop;

	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	/* Always offload the checksum, since it's in the data descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_flags |= I40E_TX_FLAGS_CSUM;

		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				    tx_ring, &cd_tunneling);
	}

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags, protocol);

	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
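
/* Editorial note: the ordering in i40e_xmit_frame_ring() is dictated by the
 * hardware: VLAN flags and the TSO/TSYN results feed the context descriptor,
 * the context descriptor (and any ATR filter descriptor) must precede the
 * data descriptors, and i40e_tx_map() comes last because it bumps the tail
 * and hands the whole chain to hardware.
 */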

/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}