/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"
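
/* build_ctob - build the cmd_type_offset_bsz qword of a Tx data descriptor
 * from the command, offset, buffer size and L2 tag values
 */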
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;

	/* find existing FDIR VSI */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)

	tx_ring = vsi->tx_rings[0];

	/* we need two descriptors to add/del a filter and we can wait */
		if (I40E_DESC_UNUSED(tx_ring) > 1)
		msleep_interruptible(1);
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	first = &tx_ring->tx_bi[i];
	memset(first, 0, sizeof(struct i40e_tx_buffer));

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
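/* Dummy-packet geometry used when building the raw FDir programming frames:
 * IP_HEADER_OFFSET is the Ethernet header length, and the 42-byte UDP/IP
 * template below is 14 (Ethernet) + 20 (IPv4) + 8 (UDP) bytes.
 */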
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);

	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);

	return err ? -EOPNOTSUPP : 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
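/* 54-byte TCP/IP template: 14 (Ethernet) + 20 (IPv4) + 20 (TCP) bytes */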
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);

	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;

		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
			 fd_data->pctype, fd_data->fd_id);
		dev_info(&pf->pdev->dev,
			 "Filter deleted for PCTYPE %d loc = %d\n",
			 fd_data->pctype, fd_data->fd_id);

	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Always returns -EOPNOTSUPP
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
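/* 34-byte IPv4-only template: 14 (Ethernet) + 20 (IPv4) bytes, no L4 header */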
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);

		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];

		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);

	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter to add or remove
 * @add: true adds a filter, false removes it
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;

	switch (input->flow_type & ~FLOW_EXT) {
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		switch (input->ip4_proto) {
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify whether the FD programming or invalidation
 * requested by SW to the HW was successful, and to take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))

		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
							I40E_FLAG_FD_SB_ENABLED;
				"FD filter programming failed due to incorrect filter parameters\n");
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;

	/* ring already cleared, nothing to do */

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}
/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
}

/**
 * i40e_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	head = i40e_get_head(ring);
	tail = readl(ring->tail);

	return (head < tail) ?
		tail - head : (tail + ring->count - head);
}

#define WB_STRIDE 0x3
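/* Writeback stride: with WB_ON_ITR, a descriptor writeback is requested once
 * fewer than (WB_STRIDE + 1) descriptors are still pending on the ring.
 */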
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_packets = 0;
	unsigned int total_bytes = 0;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		dev_consume_skb_any(tx_buf->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {

				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
				dma_unmap_len_set(tx_buf, len, 0);

		/* move us one more past the eop_desc for start of next pkt */

			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);

		/* update budget accounting */
	} while (likely(budget));

	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		j = i40e_get_tx_pending(tx_ring);

		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
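	/* Restart the stopped queue only once there is room for at least two
	 * worst-case frames (DESC_NEEDED descriptors each).
	 */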
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
/**
 * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;

	if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		if (q_vector->arm_wb_state)

		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;

		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1),
		q_vector->arm_wb_state = true;
	} else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
		/* allow 00 to be written to the index */

		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
		/* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt. The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern. Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	u32 new_itr = rc->itr;

	if (rc->total_packets == 0 || !rc->itr)

	/* simple throttlerate management
	 *   0-10MB/s   lowest (100000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (8000 ints/s)
	 */
	bytes_per_int = rc->total_bytes / rc->itr;
	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
	case I40E_BULK_LATENCY:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_100K;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_8K;

	if (new_itr != rc->itr)

	rc->total_packets = 0;
/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;

	/* ring already cleared, nothing to do */

	if (ring_is_ps_enabled(rx_ring)) {
		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;

		rx_bi = &rx_ring->rx_bi[0];
		if (rx_bi->hdr_buf) {
			dma_free_coherent(dev,

		for (i = 0; i < rx_ring->count; i++) {
			rx_bi = &rx_ring->rx_bi[i];
			rx_bi->hdr_buf = NULL;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
			dma_unmap_single(dev,

			dev_kfree_skb(rx_bi->skb);

		if (rx_bi->page_dma) {

			rx_bi->page_dma = 0;

			__free_page(rx_bi->page);

		rx_bi->page_offset = 0;

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
/**
 * i40e_alloc_rx_headers - allocate rx header buffers
 * @rx_ring: ring to alloc buffers
 *
 * Allocate rx header buffers for the entire ring. As these are static,
 * this is only called when setting up a new ring.
 **/
void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;

	if (rx_ring->rx_bi[0].hdr_buf)

	/* Make sure the buffers don't cross cache line boundaries. */
	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,

	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		rx_bi->dma = dma + (i * buf_size);
		rx_bi->hdr_buf = buffer + (i * buf_size);
}

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];

		if (bi->skb) /* desc is in use */

		bi->page = alloc_page(GFP_ATOMIC);
			rx_ring->rx_stats.alloc_page_failed++;

		if (!bi->page_dma) {
			/* use a half page if we're re-using */
			bi->page_offset ^= PAGE_SIZE / 2;
			bi->page_dma = dma_map_page(rx_ring->dev,

			if (dma_mapping_error(rx_ring->dev,
				rx_ring->rx_stats.alloc_page_failed++;

		dma_sync_single_range_for_device(rx_ring->dev,

						 rx_ring->rx_hdr_len,

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);

		if (i == rx_ring->count)

	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
/**
 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	struct sk_buff *skb;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
				rx_ring->rx_stats.alloc_buff_failed++;

			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);

			bi->dma = dma_map_single(rx_ring->dev,
						 rx_ring->rx_buf_len,
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_buff_failed++;

		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		rx_desc->read.hdr_addr = 0;

		if (i == rx_ring->count)

	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}

/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;
	struct i40e_vsi *vsi = rx_ring->vsi;
	u64 flags = vsi->back->flags;

	if (vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	if (flags & I40E_FLAG_IN_NETPOLL)

		napi_gro_receive(&q_vector->napi, skb);
/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_status: status value of last descriptor in packet
 * @rx_error: error value of last descriptor in packet
 * @rx_ptype: ptype value of last descriptor in packet
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    u32 rx_status, u32 rx_error, u16 rx_ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
	bool ipv4 = false, ipv6 = false;
	bool ipv4_tunnel, ipv6_tunnel;

	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);

	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)

	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))

	/* likely incorrect csum if alternate IP extension headers found */
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))

	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
	 * it in the driver, hardware does not do it for us.
	 * Since L3L4P bit was set we assume a valid IHL value (>=5)
	 * so the total length of IPv4 header is IHL*4 bytes
	 * The UDP_0 bit *may* be set if the *inner* header is UDP
	 */
	if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&

		skb->transport_header = skb->mac_header +
					sizeof(struct ethhdr) +
					(ip_hdr(skb)->ihl * 4);

		/* Add 4 bytes for VLAN tagged packets */
		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
					  skb->protocol == htons(ETH_P_8021AD))

		if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
		    (udp_hdr(skb)->check != 0)) {
			rx_udp_csum = udp_csum(skb);

			csum = csum_tcpudp_magic(
					iph->saddr, iph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, rx_udp_csum);

			if (udp_hdr(skb)->check != csum)

	} /* else it's GRE and so no outer UDP header */

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = ipv4_tunnel || ipv6_tunnel;

	vsi->back->hw_csum_rx_error++;
/**
 * i40e_rx_hash - returns the hash value from the Rx descriptor
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 **/
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
			       union i40e_rx_desc *rx_desc)
{
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if ((ring->netdev->features & NETIF_F_RXHASH) &&
	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
}

/**
 * i40e_ptype_to_hash - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;

	return PKT_HASH_TYPE_L2;
}
/**
 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
 * @rx_ring: rx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns number of packets cleaned
 **/
static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	const int current_node = numa_mem_id();
	struct i40e_vsi *vsi = rx_ring->vsi;
	u16 i = rx_ring->next_to_clean;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;

		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			    I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_INCREMENT(rx_ring, i);

		rx_bi = &rx_ring->rx_bi[i];

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_hdr_len);
				rx_ring->rx_stats.alloc_buff_failed++;

			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			/* we are reusing so sync this buffer for CPU use */
			dma_sync_single_range_for_cpu(rx_ring->dev,

						      rx_ring->rx_hdr_len,

		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		prefetch(rx_bi->page);

		if (rx_hbo || rx_sph) {

				len = I40E_RX_HDR_SIZE;
				len = rx_header_len;
			memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
		} else if (skb->len == 0) {

			len = (rx_packet_len > skb_headlen(skb) ?
			       skb_headlen(skb) : rx_packet_len);
			memcpy(__skb_put(skb, len),
			       rx_bi->page + rx_bi->page_offset,
			rx_bi->page_offset += len;
			rx_packet_len -= len;

		/* Get the rest of the data if this was a header split */
		if (rx_packet_len) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,

			skb->len += rx_packet_len;
			skb->data_len += rx_packet_len;
			skb->truesize += rx_packet_len;

			if ((page_count(rx_bi->page) == 1) &&
			    (page_to_nid(rx_bi->page) == current_node))
				get_page(rx_bi->page);

			dma_unmap_page(rx_ring->dev,

			rx_bi->page_dma = 0;

		I40E_RX_INCREMENT(rx_ring, i);

			   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			struct i40e_rx_buffer *next_buffer;

			next_buffer = &rx_ring->rx_bi[i];
			next_buffer->skb = skb;
			rx_ring->rx_stats.non_eop_descs++;

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					     I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			   ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)

		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);

		skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;

	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
/**
 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
 * @rx_ring: rx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns number of packets cleaned
 **/
static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	struct i40e_vsi *vsi = rx_ring->vsi;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;

		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			    I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_INCREMENT(rx_ring, i);

		rx_bi = &rx_ring->rx_bi[i];

		prefetch(skb->data);

		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;

		/* Get the header and possibly the whole packet
		 * If this is an skb from previous receive dma will be 0
		 */
		skb_put(skb, rx_packet_len);
		dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,

		I40E_RX_INCREMENT(rx_ring, i);

			   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			rx_ring->rx_stats.non_eop_descs++;

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			/* TODO: shouldn't we increment a counter indicating the
			 * drop?
			 */

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					     I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			   ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)

		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);

		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;
	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
/**
 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 **/
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
					  struct i40e_q_vector *q_vector)
{
	struct i40e_hw *hw = &vsi->back->hw;

	vector = (q_vector->v_idx + vsi->base_vector);
	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
		old_itr = q_vector->rx.itr;
		i40e_set_new_dynamic_itr(&q_vector->rx);
		if (old_itr != q_vector->rx.itr) {
			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
			       I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
			      (q_vector->rx.itr <<
			       I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);

			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
			       I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);

		if (!test_bit(__I40E_DOWN, &vsi->state))
			wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);

		i40e_irq_dynamic_enable(vsi, q_vector->v_idx);

	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
		old_itr = q_vector->tx.itr;
		i40e_set_new_dynamic_itr(&q_vector->tx);
		if (old_itr != q_vector->tx.itr) {
			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
			       I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
			      (q_vector->tx.itr <<
			       I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);

			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
			       I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);

		if (!test_bit(__I40E_DOWN, &vsi->state))
			wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
						     vsi->base_vector - 1), val);

		i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	bool arm_wb = false;
	int budget_per_ring;

	if (test_bit(__I40E_DOWN, &vsi->state)) {
		napi_complete(napi);

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx) {
		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
		arm_wb |= ring->arm_wb;
		ring->arm_wb = false;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);

	i40e_for_each_ring(ring, q_vector->rx) {
		if (ring_is_ps_enabled(ring))
			cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
		/* if we didn't clean as many as budgeted, we must be done */
		clean_complete &= (budget_per_ring != cleaned);

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
			i40e_force_wb(vsi, q_vector);

	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
		q_vector->arm_wb_state = false;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete(napi);
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		i40e_update_enable_itr(vsi, q_vector);
	} else { /* Legacy mode */
		struct i40e_hw *hw = &vsi->back->hw;
		/* We re-enable the queue 0 cause, but
		 * don't worry about dynamic_enable
		 * because we left it on for the other
		 * possible interrupts during napi
		 */
		u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
			   I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		wr32(hw, I40E_QINT_RQCTL(0), qval);
		qval = rd32(hw, I40E_QINT_TQCTL(0)) |
		       I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(0), qval);
		i40e_irq_dynamic_enable_icr0(vsi->back);
/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring: ring to add programming descriptor to
 * @tx_flags: send tx flags
 * @protocol: wire protocol
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, __be16 protocol)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;

		unsigned char *network;

		struct ipv6hdr *ipv6;

	u32 flex_ptype, dtype_cmd;

	/* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))

	if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))

	/* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)

	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))

	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
		/* snag network header to get L4 type and address */
		hdr.network = skb_network_header(skb);

		/* Currently only IPv4/IPv6 with TCP is supported
		 * access ihl as u8 to avoid unaligned access on ia64
		 */
		if (tx_flags & I40E_TX_FLAGS_IPV4)
			hlen = (hdr.network[0] & 0x0F) << 2;
		else if (protocol == htons(ETH_P_IPV6))
			hlen = sizeof(struct ipv6hdr);

		hdr.network = skb_inner_network_header(skb);
		hlen = skb_inner_network_header_len(skb);

	/* Currently only IPv4/IPv6 with TCP is supported
	 * Note: tx_flags gets modified to reflect inner protocols in
	 * tx_enable_csum function if encap is enabled.
	 */
	if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
	    (hdr.ipv4->protocol != IPPROTO_TCP))
	else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
		 (hdr.ipv6->nexthdr != IPPROTO_TCP))

	th = (struct tcphdr *)(hdr.network + hlen);

	/* Due to lack of space, no more new filters can be programmed */
	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
		/* HW ATR eviction will take care of removing filters on FIN
		 * and RST packets.
		 */
		if (th->fin || th->rst)

	tx_ring->atr_count++;

	/* sample on all syn/fin/rst packets or once every atr sample rate */

	    (tx_ring->atr_count < tx_ring->atr_sample_rate))

	tx_ring->atr_count = 0;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
		     I40E_TXD_FLTR_QW0_QINDEX_MASK;
	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= (th->fin || th->rst) ?
		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
		     I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
			 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
			 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;

	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(0);
}
/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 * @flags: the tx flags to be set
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped upon error,
 * otherwise returns 0 to indicate the flags have been set properly.
 **/
#ifdef I40E_FCOE
inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
				      struct i40e_ring *tx_ring,
				      u32 *flags)
#else
static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
					     struct i40e_ring *tx_ring,
					     u32 *flags)
#endif
{
	__be16 protocol = skb->protocol;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling. In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;

	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))

	/* Insert 802.1p priority into VLAN header */
	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	    (skb->priority != TC_PRIO_CONTROL)) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
			    I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;

			rc = skb_cow_head(skb, 0);

			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);

			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
/**
 * i40e_tso - set up the tso context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: ptr to context descriptor bits
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
		    u32 *cd_tunneling)
{
	u32 cd_cmd, cd_tso_len, cd_mss;
	struct ipv6hdr *ipv6h;
	struct tcphdr *tcph;
	struct iphdr *iph;
	u32 l4len;
	int err;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	if (iph->version == 4) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
	} else if (ipv6h->version == 6) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		ipv6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					       0, IPPROTO_TCP, 0);
	}

	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
	*hdr_len = (skb->encapsulation
		    ? (skb_inner_transport_header(skb) - skb->data)
		    : skb_transport_offset(skb)) + l4len;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				((u64)cd_tso_len <<
				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}
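
/* Editor's illustrative sketch, not part of the upstream driver: builds the
 * same context-descriptor Quad Word 1 layout that i40e_tso() fills in above.
 * For example, a 66000-byte TSO skb with a 66-byte header and an MSS of 1448
 * would yield tso_len = 65934 and mss = 1448. The helper name is hypothetical.
 */
static inline u64 i40e_example_tso_qw1(u32 tso_len, u32 mss)
{
	u64 qw1 = I40E_TX_DESC_DTYPE_CONTEXT;

	qw1 |= (u64)I40E_TX_CTX_DESC_TSO << I40E_TXD_CTX_QW1_CMD_SHIFT;
	qw1 |= (u64)tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT;
	qw1 |= (u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT;

	/* i40e_create_tx_ctx() writes this value into type_cmd_tso_mss */
	return qw1;
}
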
/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (!(pf->flags & I40E_FLAG_PTP))
		return 0;

	if (pf->ptp_tx &&
	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;

	return 1;
}
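
/* Editor's note (illustrative, not from the upstream driver): the TSYN command
 * set above shares Quad Word 1 with the TSO fields filled in by i40e_tso().
 * Because i40e_tsyn() bails out when I40E_TX_FLAGS_TSO is set, the TSO and
 * TSYN command bits are never combined in a single context descriptor.
 */
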
/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
				u32 *td_cmd, u32 *td_offset,
				struct i40e_ring *tx_ring,
				u32 *cd_tunneling)
{
	struct ipv6hdr *this_ipv6_hdr;
	unsigned int this_tcp_hdrlen;
	struct iphdr *this_ip_hdr;
	u32 network_hdr_len;
	u8 l4_hdr = 0;
	struct udphdr *oudph;
	struct iphdr *oiph;
	u32 l4_tunnel = 0;

	if (skb->encapsulation) {
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			oudph = udp_hdr(skb);
			oiph = ip_hdr(skb);
			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
			break;
		case IPPROTO_GRE:
			l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
			break;
		default:
			return;
		}
		network_hdr_len = skb_inner_network_header_len(skb);
		this_ip_hdr = inner_ip_hdr(skb);
		this_ipv6_hdr = inner_ipv6_hdr(skb);
		this_tcp_hdrlen = inner_tcp_hdrlen(skb);

		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
			if (*tx_flags & I40E_TX_FLAGS_TSO) {
				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
				ip_hdr(skb)->check = 0;
			} else {
				*cd_tunneling |=
					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
			}
		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
			if (*tx_flags & I40E_TX_FLAGS_TSO)
				ip_hdr(skb)->check = 0;
		}

		/* Now set the ctx descriptor fields */
		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
				   I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
				   l4_tunnel |
				   ((skb_inner_network_offset(skb) -
					skb_transport_offset(skb)) >> 1) <<
				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
		if (this_ip_hdr->version == 6) {
			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
			*tx_flags |= I40E_TX_FLAGS_IPV6;
		}

		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
					oiph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, 0);
			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
		}
	} else {
		network_hdr_len = skb_network_header_len(skb);
		this_ip_hdr = ip_hdr(skb);
		this_ipv6_hdr = ipv6_hdr(skb);
		this_tcp_hdrlen = tcp_hdrlen(skb);
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_hdr = this_ip_hdr->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (*tx_flags & I40E_TX_FLAGS_TSO) {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
			this_ip_hdr->check = 0;
		} else {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		}
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		l4_hdr = this_ipv6_hdr->nexthdr;
		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	}
	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
	*td_offset |= (skb_network_offset(skb) >> 1) <<
		       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_hdr) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		*td_offset |= (this_tcp_hdrlen >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		*td_offset |= (sizeof(struct udphdr) >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		break;
	}
}
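
/* Editor's illustrative sketch, not part of the upstream driver: shows the
 * td_offset packing used in i40e_tx_enable_csum() above for a plain
 * (non-tunneled) IPv4/TCP frame with a 14-byte Ethernet header, a 20-byte IP
 * header and a 20-byte TCP header. The helper name is hypothetical.
 */
static inline u32 i40e_example_td_offset(void)
{
	u32 td_offset = 0;

	/* MACLEN is counted in words, IPLEN and L4LEN in dwords */
	td_offset |= (14 >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
	td_offset |= (20 >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	td_offset |= (20 >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;

	return td_offset;
}
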
/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}
/**
 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
#ifdef I40E_FCOE
inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
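
/* Editor's note (illustrative, not from the upstream driver): callers request
 * more room than a single frame strictly needs, e.g.
 * i40e_xmit_descriptor_count() below asks for count + 4 + 1 descriptors, so
 * the queue is stopped early, and the smp_mb() plus re-check in
 * __i40e_maybe_stop_tx() closes the race with the Tx clean-up path
 * restarting the queue.
 */
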
/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @tx_flags: collected send information
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
{
	struct skb_frag_struct *frag;
	bool linearize = false;
	unsigned int size = 0;
	u16 num_frags;
	u16 gso_segs;

	num_frags = skb_shinfo(skb)->nr_frags;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
		u16 j = 0;

		if (num_frags < (I40E_MAX_BUFFER_TXD))
			goto linearize_chk_done;
		/* try the simple math, if we have too many frags per segment */
		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
		    I40E_MAX_BUFFER_TXD) {
			linearize = true;
			goto linearize_chk_done;
		}
		frag = &skb_shinfo(skb)->frags[0];
		/* we might still have more fragments per segment */
		do {
			size += skb_frag_size(frag);
			frag++; j++;
			if ((size >= skb_shinfo(skb)->gso_size) &&
			    (j < I40E_MAX_BUFFER_TXD)) {
				size = (size % skb_shinfo(skb)->gso_size);
				j = (size) ? 1 : 0;
			}
			if (j == I40E_MAX_BUFFER_TXD) {
				linearize = true;
				break;
			}
			num_frags--;
		} while (num_frags);
	} else {
		if (num_frags >= I40E_MAX_BUFFER_TXD)
			linearize = true;
	}

linearize_chk_done:
	return linearize;
}
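
/* Editor's worked example (illustrative, not from the upstream driver): a TSO
 * skb with num_frags = 17 and gso_segs = 2 is caught by the "simple math"
 * check above, since DIV_ROUND_UP(17 + 2, 2) = 10 exceeds I40E_MAX_BUFFER_TXD
 * (8), so the skb is flagged for linearization without walking the frag list.
 */
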
/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
#ifdef I40E_FCOE
inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			struct i40e_tx_buffer *first, u32 tx_flags,
			const u8 hdr_len, u32 td_cmd, u32 td_offset)
#else
static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			       struct i40e_tx_buffer *first, u32 tx_flags,
			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 gso_segs;
	u16 desc_count = 0;
	bool tail_bump = true;
	bool do_rs = false;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;

	/* multiply data chunks by size of headers */
	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
	first->gso_segs = gso_segs;

	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   I40E_MAX_DATA_PER_TXD, td_tag);

			tx_desc++;
			i++;
			desc_count++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += I40E_MAX_DATA_PER_TXD;
			size -= I40E_MAX_DATA_PER_TXD;

			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		desc_count++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
						 tx_ring->queue_index),
						 first->bytecount);
	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
	/* Algorithm to optimize tail and RS bit setting:
	 * if xmit_more is supported
	 *	if xmit_more is true
	 *		do not update tail and do not mark RS bit.
	 *	if xmit_more is false and last xmit_more was false
	 *		if every packet spanned less than 4 desc
	 *			then set RS bit on 4th packet and update tail
	 *			on every packet
	 *		else
	 *			update tail and set RS bit on every packet.
	 *	if xmit_more is false and last_xmit_more was true
	 *		update tail and set RS bit.
	 *
	 * Optimization: wmb to be issued only in case of tail update.
	 * Also optimize the Descriptor WB path for RS bit with the same
	 * algorithm.
	 *
	 * Note: If there are fewer than 4 packets pending and interrupts
	 * were disabled, the service task will trigger a force WB.
	 */
	if (skb->xmit_more &&
	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						    tx_ring->queue_index))) {
		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		tail_bump = false;
	} else if (!skb->xmit_more &&
		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						       tx_ring->queue_index)) &&
		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
		   (tx_ring->packet_stride < WB_STRIDE) &&
		   (desc_count < WB_STRIDE)) {
		tx_ring->packet_stride++;
	} else {
		tx_ring->packet_stride = 0;
		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		do_rs = true;
	}
	if (do_rs)
		tx_ring->packet_stride = 0;

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
						  I40E_TX_DESC_CMD_EOP) <<
						  I40E_TXD_QW1_CMD_SHIFT);

	/* notify HW of packet */
	if (!tail_bump)
		prefetchw(tx_desc + 1);

	if (tail_bump) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, tx_ring->tail);
	}

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
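
/* Editor's note (illustrative, not from the upstream driver): the while loop
 * in i40e_tx_map() above splits any chunk larger than I40E_MAX_DATA_PER_TXD
 * across several data descriptors. Assuming an 8 KB limit (an assumption, not
 * taken from this file), a 20 KB linear head would consume three descriptors
 * covering 8 KB, 8 KB and 4 KB of the mapping.
 */
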
/**
 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns the number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since we
 * need at least one descriptor.
 **/
#ifdef I40E_FCOE
inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
				      struct i40e_ring *tx_ring)
#else
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
					     struct i40e_ring *tx_ring)
#endif
{
	unsigned int f;
	int count = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	count += TXD_USE_COUNT(skb_headlen(skb));
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return 0;
	}
	return count;
}
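
/* Editor's worked example (illustrative, not from the upstream driver): a
 * frame whose linear head and three page fragments each fit in one descriptor
 * yields count = 4, so i40e_maybe_stop_tx() above is asked for
 * count + 4 + 1 = 9 free descriptors before the frame is accepted.
 */
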
/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tsyn;
	int tso;

	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(tx_ring, skb, &hdr_len,
		       &cd_type_cmd_tso_mss, &cd_tunneling);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	if (i40e_chk_linearize(skb, tx_flags))
		if (skb_linearize(skb))
			goto out_drop;

	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	/* Always offload the checksum, since it's in the data descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_flags |= I40E_TX_FLAGS_CSUM;

		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				    tx_ring, &cd_tunneling);
	}

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags, protocol);

	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}