/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 */
#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"
char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static const struct pci_device_id vmxnet3_pciid_table[] = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
static int enable_mq = 1;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
/*
 *    Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}

static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}
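/*
 * As the two helpers above show, each intr vector owns an 8-byte slot in
 * the BAR0 IMR register space: writing 0 to VMXNET3_REG_IMR + intr_idx * 8
 * unmasks the vector and writing 1 masks it.
 */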
/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}

static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}
static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}

static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}

static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}

static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
			    adapter->link_speed);
		netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		netdev_info(adapter->netdev, "NIC Link is Down\n");
		netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);

	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}
#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When CPU is big endian, the following routines are used to
 * correctly read and write into ABI.
 * The general technique used here is : double word bitfields are defined in
 * opposite order for big endian architecture. Then before reading them in
 * driver the complete double word is translated using le32_to_cpu. Similarly
 * after the driver writes into bitfields, cpu_to_le32 is used to translate the
 * double words into required format.
 * In order to avoid touching bits in shared structure more than once, temporary
 * descriptors are used. These are passed as srcDesc to following functions.
 */
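/*
 * A minimal sketch of the technique described above (the values are
 * illustrative only, not taken from the device spec): suppose a descriptor
 * dword carries a gen bit at position 31. On little endian the bitfield can
 * be read directly; on big endian the whole dword is byte-swapped first and
 * the bit is then extracted, e.g.:
 *
 *	u32 dword = le32_to_cpu(*(const __le32 *)desc_dword);
 *	u32 gen = (dword & (1u << 31)) >> 31;
 *
 * which is exactly what get_bitfield32() below generalizes for an arbitrary
 * (pos, size) pair.
 */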
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}

static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}

/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;

	return (temp & mask) >> pos;
}

#endif /* __BIG_ENDIAN_BITFIELD */
#ifdef __BIG_ENDIAN_BITFIELD

# define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
# define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
# define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
# define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else /* __BIG_ENDIAN_BITFIELD */

# define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
# define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
# define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
# define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */
static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}
static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}
static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
				  sizeof(struct Vmxnet3_TxDesc),
				  tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
				  sizeof(struct Vmxnet3_TxDataDesc),
				  tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
				  sizeof(struct Vmxnet3_TxCompDesc),
				  tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	if (tq->buf_info) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->tx_ring.size * sizeof(tq->buf_info[0]),
				  tq->buf_info, tq->buf_info_pa);
		tq->buf_info = NULL;
	}
}
/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}
static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	size_t sz;

	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
			&tq->tx_ring.basePA, GFP_KERNEL);
	if (!tq->tx_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
		goto err;
	}

	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
			&tq->data_ring.basePA, GFP_KERNEL);
	if (!tq->data_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate data ring\n");
		goto err;
	}

	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
			&tq->comp_ring.basePA, GFP_KERNEL);
	if (!tq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
		goto err;
	}

	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
	tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
					   &tq->buf_info_pa, GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}
static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}
/*
 *    starting from ring->next2fill, allocate rx buffers for the given ring
 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 *    are allocated or allocation fails
 */
static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
								       rbi->len,
								       GFP_KERNEL);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

				rbi->dma_addr = dma_map_single(
						&adapter->pdev->dev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);
			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = dma_map_page(
						&adapter->pdev->dev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}

	netdev_dbg(adapter->netdev,
		   "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
		   num_allocated, ring->next2fill, ring->next2comp);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
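/*
 * A short sketch of the convention the function above relies on: setting
 * the ring's current gen bit in dword[2] is what hands a descriptor to the
 * device, so the final buffer is written out but its gen bit is left stale.
 * That guarantees next2fill can never catch up to next2comp while buffers
 * are outstanding, which is how a completely full ring is distinguished
 * from an empty one (see the BUG_ON above).
 */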
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	frag->page_offset = 0;
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill,
			   le64_to_cpu(ctx->sop_txd->txd.addr),
			   ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
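			/* i.e. a full 16384-byte chunk deliberately leaves
			 * the 14-bit len field in dw2 at zero, which the
			 * device decodes as 2^14 = 16384.
			 */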
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 buf_size;

		buf_offset = 0;
		len = skb_frag_size(frag);
		while (len) {
			tbi = tq->buf_info + tq->tx_ring.next2fill;
			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
				buf_size = len;
				dw2 |= len;
			} else {
				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
				/* spec says that for TxDesc.len, 0 == 2^14 */
			}
			tbi->map_type = VMXNET3_MAP_PAGE;
			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
							 frag, buf_offset,
							 buf_size,
							 DMA_TO_DEVICE);

			tbi->len = buf_size;

			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
			gdesc->dword[2] = cpu_to_le32(dw2);
			gdesc->dword[3] = 0;

			netdev_dbg(adapter->netdev,
				   "txd[%u]: 0x%llx %u %u\n",
				   tq->tx_ring.next2fill,
				   le64_to_cpu(gdesc->txd.addr),
				   le32_to_cpu(gdesc->dword[2]),
				   gdesc->dword[3]);
			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}
/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}
/*
 *    parse and copy relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 *    Returns:
 *       -1:  error happens during parsing
 *        0:  protocol headers parsed, but too big to be copied
 *        1:  protocol headers parsed and copied
 *
 *    Other effects:
 *       1. related *ctx fields are updated.
 *       2. ctx->copy_size is # of bytes copied
 *       3. the portion copied is guaranteed to be in the linear part
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;
	u8 protocol = 0;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				const struct iphdr *iph = ip_hdr(skb);

				protocol = iph->protocol;
			} else if (ctx->ipv6) {
				const struct ipv6hdr *ipv6h = ipv6_hdr(skb);

				protocol = ipv6h->nexthdr;
			}

			switch (protocol) {
			case IPPROTO_TCP:
				ctx->l4_hdr_size = tcp_hdrlen(skb);
				break;
			case IPPROTO_UDP:
				ctx->l4_hdr_size = sizeof(struct udphdr);
				break;
			default:
				ctx->l4_hdr_size = 0;
				break;
			}

			ctx->copy_size = min(ctx->eth_ip_hdr_size +
					     ctx->l4_hdr_size, skb->len);
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE,
					     skb_headlen(skb));
		}

		if (skb->len <= VMXNET3_HDR_COPY_SIZE)
			ctx->copy_size = skb->len;

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	netdev_dbg(adapter->netdev,
		   "copy %u bytes to dataRing[%u]\n",
		   ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}
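/*
 * Worked example for the parser above (illustrative numbers only): for a
 * CHECKSUM_PARTIAL TCP/IPv4 packet with a 14-byte Ethernet header and a
 * 20-byte IP header, eth_ip_hdr_size is 34 and l4_hdr_size is tcp_hdrlen(),
 * e.g. 20 without options, so 54 bytes land in the data ring.  A packet
 * whose combined headers exceed VMXNET3_HDR_COPY_SIZE is left uncopied and
 * the function returns 0 instead.
 */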
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else if (ctx->ipv6) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}
static int txd_estimate(const struct sk_buff *skb)
{
	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
	}
	return count;
}
/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */
static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	count = txd_estimate(skb);

	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
	ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		netdev_dbg(adapter->netdev,
			   "tx queue stopped on %s, next2comp %u"
			   " next2fill %u\n", adapter->netdev->name,
			   tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto unlock_drop_pkt;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
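		/* e.g. a 64240-byte TSO skb with mss 1460 and 54 header
		 * bytes defers (64240 - 54 + 1459) / 1460 = 44 segments
		 * worth of work before the doorbell below is rung
		 * (illustrative numbers only).
		 */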
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = skb_vlan_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	netdev_dbg(adapter->netdev,
		   "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		   (u32)(ctx.sop_txd -
		   tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		   le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
							VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}
static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}
static u32
vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
		    union Vmxnet3_GenericDesc *gdesc)
{
	u32 hlen, maplen;
	union {
		void *ptr;
		struct ethhdr *eth;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		struct tcphdr *tcp;
	} hdr;

	BUG_ON(gdesc->rcd.tcp == 0);

	maplen = skb_headlen(skb);
	if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
		return 0;

	hdr.eth = eth_hdr(skb);
	if (gdesc->rcd.v4) {
		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
		hdr.ptr += sizeof(struct ethhdr);
		BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
		hlen = hdr.ipv4->ihl << 2;
		hdr.ptr += hdr.ipv4->ihl << 2;
	} else if (gdesc->rcd.v6) {
		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
		hdr.ptr += sizeof(struct ethhdr);

		/* Use an estimated value, since we also need to handle
		 * TSO case.
		 */
		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
			return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
		hlen = sizeof(struct ipv6hdr);
		hdr.ptr += sizeof(struct ipv6hdr);
	} else {
		/* Non-IP pkt, don't estimate header length */
		return 0;
	}

	if (hlen + sizeof(struct tcphdr) > maplen)
		return 0;

	return (hlen + (hdr.tcp->doff << 2));
}
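/*
 * Worked example for the estimator above (illustrative): a plain TCP/IPv4
 * frame with no IP or TCP options has ihl = 5 and doff = 5, so the function
 * returns (5 << 2) + (5 << 2) = 40 bytes of L3 + L4 header past the
 * Ethernet header.
 */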
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	};
	u32 num_pkts = 0;
	bool skip_page_frags = false;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
	u16 segCnt = 0, mss = 0;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb, *new_skb = NULL;
		struct page *new_page = NULL;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;
		struct vmxnet3_cmd_ring *ring = NULL;
		if (num_pkts >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}

		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
		ring = rq->rx_ring + ring_idx;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}
		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				netdev_dbg(adapter->netdev,
					   "rxRing[%u][%u] 0 length\n",
					   ring_idx, idx);
				goto rcd_done;
			}

			skip_page_frags = false;
			ctx->skb = rbi->skb;
			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
							    rbi->len);
			if (new_skb == NULL) {
				/* Skb allocation failed, do not handover this
				 * skb to stack. Reuse it. Drop the existing pkt
				 */
				rq->stats.rx_buf_alloc_failure++;
				ctx->skb = NULL;
				rq->stats.drop_total++;
				skip_page_frags = true;
				goto rcd_done;
			}

			dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
					 rbi->len,
					 PCI_DMA_FROMDEVICE);

#ifdef VMXNET3_RSS
			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
			    (adapter->netdev->features & NETIF_F_RXHASH))
				skb_set_hash(ctx->skb,
					     le32_to_cpu(rcd->rssHash),
					     PKT_HASH_TYPE_L3);
#endif
			skb_put(ctx->skb, rcd->len);

			/* Immediate refill */
			rbi->skb = new_skb;
			rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
						       rbi->skb->data, rbi->len,
						       PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;
			if (adapter->version == 2 &&
			    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
				struct Vmxnet3_RxCompDescExt *rcdlro;
				rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;

				segCnt = rcdlro->segCnt;
				BUG_ON(segCnt <= 1);
				mss = rcdlro->mss;
				if (unlikely(segCnt <= 1))
					segCnt = 0;
			} else {
				segCnt = 0;
			}
		} else {
			BUG_ON(ctx->skb == NULL && !skip_page_frags);

			/* non SOP buffer must be type 1 in most cases */
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

			/* If an sop buffer was dropped, skip all
			 * following non-sop fragments. They will be reused.
			 */
			if (skip_page_frags)
				goto rcd_done;

			new_page = alloc_page(GFP_ATOMIC);
			/* Replacement page frag could not be allocated.
			 * Reuse this page. Drop the pkt and free the
			 * skb which contained this page as a frag. Skip
			 * processing all the following non-sop frags.
			 */
			if (unlikely(!new_page)) {
				rq->stats.rx_buf_alloc_failure++;
				dev_kfree_skb(ctx->skb);
				ctx->skb = NULL;
				skip_page_frags = true;
				goto rcd_done;
			}

			dma_unmap_page(&adapter->pdev->dev,
				       rbi->dma_addr, rbi->len,
				       PCI_DMA_FROMDEVICE);

			vmxnet3_append_frag(ctx->skb, rcd, rbi);

			/* Immediate refill */
			rbi->page = new_page;
			rbi->dma_addr = dma_map_page(&adapter->pdev->dev,
						     rbi->page, 0, PAGE_SIZE,
						     PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;
		}
		skb = ctx->skb;
		if (rcd->eop) {
			u32 mtu = adapter->netdev->mtu;
			skb->len += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);
			if (!rcd->tcp || !adapter->lro)
				goto not_lro;

			if (segCnt != 0 && mss != 0) {
				skb_shinfo(skb)->gso_type = rcd->v4 ?
					SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
				skb_shinfo(skb)->gso_size = mss;
				skb_shinfo(skb)->gso_segs = segCnt;
			} else if (segCnt != 0 || skb->len > mtu) {
				u32 hlen;

				hlen = vmxnet3_get_hdr_len(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
				if (hlen == 0)
					goto not_lro;

				skb_shinfo(skb)->gso_type =
					rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
				if (segCnt != 0) {
					skb_shinfo(skb)->gso_segs = segCnt;
					skb_shinfo(skb)->gso_size =
						DIV_ROUND_UP(skb->len -
							     hlen, segCnt);
				} else {
					skb_shinfo(skb)->gso_size = mtu - hlen;
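					/* Worked example (illustrative): a
					 * 16000-byte LRO'd pkt with 54 header
					 * bytes and segCnt 11 gets gso_size
					 * DIV_ROUND_UP(16000 - 54, 11) = 1450;
					 * without a segCnt it falls back to
					 * mtu - hlen, e.g. 1500 - 54 = 1446.
					 */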
				}
			}
not_lro:
			if (unlikely(rcd->ts))
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
						       rcd->tci);

			if (adapter->netdev->features & NETIF_F_LRO)
				netif_receive_skb(skb);
			else
				napi_gro_receive(&rq->napi, skb);

			ctx->skb = NULL;
			num_pkts++;
		}

rcd_done:
		/* device may have skipped some rx descs */
		ring->next2comp = idx;
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
		ring = rq->rx_ring + ring_idx;
		while (num_to_alloc) {
			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
					  &rxCmdDesc);
			BUG_ON(!rxd->addr);

			/* Recv desc is ready to be used by the device */
			rxd->gen = ring->gen;
			vmxnet3_cmd_ring_adv_next2fill(ring);
			num_to_alloc--;
		}

		/* if needed, update the register */
		if (unlikely(rq->shared->updateRxProd)) {
			VMXNET3_WRITE_BAR0_REG(adapter,
					       rxprod_reg[ring_idx] + rq->qid * 8,
					       ring->next2fill);
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
			&rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
	}

	return num_pkts;
}
static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
					  &rq->rx_ring[ring_idx].base[i].rxd,
					  &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
			    rq->buf_info[ring_idx][i].skb) {
				dma_unmap_single(&adapter->pdev->dev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
				   rq->buf_info[ring_idx][i].page) {
				dma_unmap_page(&adapter->pdev->dev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
			rq->rx_ring[ring_idx].next2comp = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}
static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}
static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			       struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			dma_free_coherent(&adapter->pdev->dev,
					  rq->rx_ring[i].size
					  * sizeof(struct Vmxnet3_RxDesc),
					  rq->rx_ring[i].base,
					  rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
	}

	if (rq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
				  * sizeof(struct Vmxnet3_RxCompDesc),
				  rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}

	if (rq->buf_info[0]) {
		size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
			(rq->rx_ring[0].size + rq->rx_ring[1].size);
		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
				  rq->buf_info_pa);
	}

	/* clear buf_info only after its backing store has been freed */
	rq->buf_info[0] = rq->buf_info[1] = NULL;
}
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt is frag */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rx ctx to restart receiving */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}
static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev, "%s: failed to "
				"initialize rx queue%i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;
}
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = dma_alloc_coherent(
						&adapter->pdev->dev, sz,
						&rq->rx_ring[i].basePA,
						GFP_KERNEL);
		if (!rq->rx_ring[i].base) {
			netdev_err(adapter->netdev,
				   "failed to allocate rx ring %d\n", i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
						&rq->comp_ring.basePA,
						GFP_KERNEL);
	if (!rq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
				 GFP_KERNEL);
	if (!bi)
		goto err;

	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}
static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue%i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}
	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;
}
/* Multiple queue aware polling function for tx and rx */
static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					  struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}
/*
 * NAPI polling function for MSI-X mode with multiple Rx queues
 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
 */
static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rq = container_of(napi,
						struct vmxnet3_rx_queue, napi);
	struct vmxnet3_adapter *adapter = rq->adapter;
	int rxd_done;

	/* When sharing interrupt with corresponding tx queue, process
	 * tx completions in that queue as well
	 */
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
		struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
		vmxnet3_tq_tx_complete(tq, adapter);
	}

	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}
#ifdef CONFIG_PCI_MSI

/*
 * Handle completion interrupts on tx queues
 * Returns whether or not the intr is handled
 */
static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	/* Handle the case where only one irq is allocated for all tx queues */
	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
		int i;
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
			vmxnet3_tq_tx_complete(txq, adapter);
		}
	} else {
		vmxnet3_tq_tx_complete(tq, adapter);
	}
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

	return IRQ_HANDLED;
}
/*
 * Handle completion interrupts on rx queues. Returns whether or not the
 * intr is handled
 */
static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
	struct vmxnet3_rx_queue *rq = data;
	struct vmxnet3_adapter *adapter = rq->adapter;

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	napi_schedule(&rq->napi);

	return IRQ_HANDLED;
}
/*
 *----------------------------------------------------------------------------
 *
 * vmxnet3_msix_event --
 *
 *    vmxnet3 msix event intr handler
 *
 * Result:
 *    whether or not the intr is handled
 *
 *----------------------------------------------------------------------------
 */
static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
	struct net_device *dev = data;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);

	if (adapter->shared->ecr)
		vmxnet3_process_events(adapter);

	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */


/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (adapter->intr.type == VMXNET3_IT_INTX) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	napi_schedule(&adapter->rx_queue[0].napi);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER

/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	switch (adapter->intr.type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX: {
		int i;
		for (i = 0; i < adapter->num_rx_queues; i++)
			vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
	default:
		vmxnet3_intr(0, adapter->netdev);
		break;
	}
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	int err = 0, i;
	int vector = 0;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
					adapter->netdev->name, vector);
				err = request_irq(
					      intr->msix_entries[vector].vector,
					      vmxnet3_msix_tx, 0,
					      adapter->tx_queue[i].name,
					      &adapter->tx_queue[i]);
			} else {
				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			}
			if (err) {
				dev_err(&adapter->netdev->dev,
					"Failed to request irq for MSIX, %s, "
					"error %d\n",
					adapter->tx_queue[i].name, err);
				return err;
			}

			/* Handle the case where only 1 MSIx was allocated for
			 * all tx queues */
			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
				for (; i < adapter->num_tx_queues; i++)
					adapter->tx_queue[i].comp_ring.intr_idx
								= vector;
				vector++;
				break;
			} else {
				adapter->tx_queue[i].comp_ring.intr_idx
								= vector++;
			}
		}
		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
			vector = 0;

		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
					adapter->netdev->name, vector);
			else
				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			err = request_irq(intr->msix_entries[vector].vector,
					  vmxnet3_msix_rx, 0,
					  adapter->rx_queue[i].name,
					  &(adapter->rx_queue[i]));
			if (err) {
				netdev_err(adapter->netdev,
					   "Failed to request irq for MSIX, "
					   "%s, error %d\n",
					   adapter->rx_queue[i].name, err);
				return err;
			}

			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
		}

		sprintf(intr->event_msi_vector_name, "%s-event-%d",
			adapter->netdev->name, vector);
		err = request_irq(intr->msix_entries[vector].vector,
				  vmxnet3_msix_event, 0,
				  intr->event_msi_vector_name, adapter->netdev);
		intr->event_intr_idx = vector;

	} else if (intr->type == VMXNET3_IT_MSI) {
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
#endif
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
#ifdef CONFIG_PCI_MSI
	}
#endif
	intr->num_intrs = vector + 1;
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to request irq (intr type:%d), error %d\n",
			   intr->type, err);
	} else {
		/* Number of rx queues will not change after this */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
			rq->qid = i;
			rq->qid2 = i + adapter->num_rx_queues;
		}

		/* init our intr settings */
		for (i = 0; i < intr->num_intrs; i++)
			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
		if (adapter->intr.type != VMXNET3_IT_MSIX) {
			adapter->intr.event_intr_idx = 0;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_queue[i].comp_ring.intr_idx = 0;
			adapter->rx_queue[0].comp_ring.intr_idx = 0;
		}

		netdev_info(adapter->netdev,
			    "intr type %u, mode %u, %u vectors allocated\n",
			    intr->type, intr->mask_mode, intr->num_intrs);
	}

	return err;
}
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);

	switch (intr->type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i, vector = 0;

		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				free_irq(intr->msix_entries[vector++].vector,
					 &(adapter->tx_queue[i]));
				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
					break;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			free_irq(intr->msix_entries[vector++].vector,
				 &(adapter->rx_queue[i]));
		}

		free_irq(intr->msix_entries[vector].vector,
			 adapter->netdev);
		BUG_ON(vector >= intr->num_intrs);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG();
	}
}
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	u16 vid;

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
}
static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	set_bit(vid, adapter->active_vlans);

	return 0;
}
static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	clear_bit(vid, adapter->active_vlans);

	return 0;
}
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			struct netdev_hw_addr *ha;
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev)
				memcpy(buf + i++ * ETH_ALEN, ha->addr,
				       ETH_ALEN);
		}
	}
	return buf;
}
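/*
 * Since mfTableLen is a u16, the multicast table caps out at 65535 bytes,
 * i.e. 65535 / ETH_ALEN = 10922 addresses; with more than that the copy is
 * skipped, NULL is returned, and the caller below falls back to ALL_MULTI.
 */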
static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	dma_addr_t new_table_pa = 0;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));

		new_mode |= VMXNET3_RXM_PROMISC;
	} else {
		vmxnet3_restore_vlan(adapter);
	}

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else if (!netdev_mc_empty(netdev)) {
		new_table = vmxnet3_copy_mc(netdev);
		if (new_table) {
			rxConf->mfTableLen = cpu_to_le16(
				netdev_mc_count(netdev) * ETH_ALEN);
			new_table_pa = dma_map_single(
						&adapter->pdev->dev,
						new_table,
						rxConf->mfTableLen,
						PCI_DMA_TODEVICE);
		}

		if (new_table_pa) {
			new_mode |= VMXNET3_RXM_MCAST;
			rxConf->mfTablePA = cpu_to_le64(new_table_pa);
		} else {
			netdev_info(netdev,
				    "failed to copy mcast list, setting ALL_MULTI\n");
			new_mode |= VMXNET3_RXM_ALL_MULTI;
		}
	}

	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = cpu_to_le32(new_mode);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (new_table_pa)
		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
				 rxConf->mfTableLen, PCI_DMA_TODEVICE);
	kfree(new_table);
}
void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
}
2214 * Set up driver_shared based on settings in adapter.
2218 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2220 struct Vmxnet3_DriverShared *shared = adapter->shared;
2221 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2222 struct Vmxnet3_TxQueueConf *tqc;
2223 struct Vmxnet3_RxQueueConf *rqc;
2226 memset(shared, 0, sizeof(*shared));
2228 /* driver settings */
2229 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2230 devRead->misc.driverInfo.version = cpu_to_le32(
2231 VMXNET3_DRIVER_VERSION_NUM);
2232 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2233 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2234 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2235 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2236 *((u32 *)&devRead->misc.driverInfo.gos));
2237 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2238 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2240 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2241 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2243 /* set up feature flags */
2244 if (adapter->netdev->features & NETIF_F_RXCSUM)
2245 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2247 if (adapter->netdev->features & NETIF_F_LRO) {
2248 devRead->misc.uptFeatures |= UPT1_F_LRO;
2249 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2251 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2252 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2254 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2255 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2256 devRead->misc.queueDescLen = cpu_to_le32(
2257 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2258 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2260 /* tx queue settings */
2261 devRead->misc.numTxQueues = adapter->num_tx_queues;
2262 for (i = 0; i < adapter->num_tx_queues; i++) {
2263 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2264 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2265 tqc = &adapter->tqd_start[i].conf;
2266 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2267 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2268 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2269 tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
2270 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2271 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2272 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2273 tqc->ddLen = cpu_to_le32(
2274 sizeof(struct vmxnet3_tx_buf_info) *
2276 tqc->intrIdx = tq->comp_ring.intr_idx;
2279 /* rx queue settings */
2280 devRead->misc.numRxQueues = adapter->num_rx_queues;
2281 for (i = 0; i < adapter->num_rx_queues; i++) {
2282 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2283 rqc = &adapter->rqd_start[i].conf;
2284 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2285 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2286 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2287 rqc->ddPA = cpu_to_le64(rq->buf_info_pa);
2288 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2289 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2290 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2291 rqc->ddLen = cpu_to_le32(
2292 sizeof(struct vmxnet3_rx_buf_info) *
2293 (rqc->rxRingSize[0] +
2294 rqc->rxRingSize[1]));
2295 rqc->intrIdx = rq->comp_ring.intr_idx;
#ifdef VMXNET3_RSS
	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));

	if (adapter->rss) {
		struct UPT1_RSSConf *rssConf = adapter->rss_conf;

		devRead->misc.uptFeatures |= UPT1_F_RSS;
		devRead->misc.numRxQueues = adapter->num_rx_queues;
		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
				    UPT1_RSS_HASH_TYPE_IPV4 |
				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
				    UPT1_RSS_HASH_TYPE_IPV6;
		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
		netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));

		for (i = 0; i < rssConf->indTableSize; i++)
			rssConf->indTable[i] = ethtool_rxfh_indir_default(
				i, adapter->num_rx_queues);

		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
		devRead->rssConfDesc.confPA =
			cpu_to_le64(adapter->rss_conf_pa);
	}

#endif /* VMXNET3_RSS */
	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);
	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);

	/* the rest are already zeroed */
}
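
/*
 * Activate the device: initialize the tx/rx rings, request irqs, fill in
 * the shared area and pass its physical address to the device through the
 * DSAL/DSAH registers, then issue VMXNET3_CMD_ACTIVATE_DEV.  A non-zero
 * result read back from the command register means the device rejected the
 * configuration.  On success the rx producer registers are primed with the
 * initial fill levels and interrupts are enabled.
 */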
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err, i;
	u32 ret;
	unsigned long flags;

	netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
		" ring sizes %u %u %u\n", adapter->netdev->name,
		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
		adapter->tx_queue[0].tx_ring.size,
		adapter->rx_queue[0].rx_ring[0].size,
		adapter->rx_queue[0].rx_ring[1].size);

	vmxnet3_tq_init_all(adapter);
	err = vmxnet3_rq_init_all(adapter);
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to init rx queue error %d\n", err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to set up irq, error %d\n", err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
			       adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
			       adapter->shared_pa));
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (ret != 0) {
		netdev_err(adapter->netdev,
			   "Failed to activate dev: error %u\n", ret);
		err = -EINVAL;
		goto activate_err;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		VMXNET3_WRITE_BAR0_REG(adapter,
				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
				adapter->rx_queue[i].rx_ring[0].next2fill);
		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
				(i * VMXNET3_REG_ALIGN)),
				adapter->rx_queue[i].rx_ring[1].next2fill);
	}

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter, true);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup_all(adapter);
	return err;
}
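
/* Issue a device reset; callers are expected to have quiesced the device. */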
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	unsigned long flags;
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
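
/*
 * Quiesce the device: tell it to stop, disable interrupts and NAPI, stop
 * the tx queues, and release ring buffers and irqs.  The QUIESCED state
 * bit makes this idempotent, so a second caller returns immediately.
 */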
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_disable_all_intrs(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}
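
/*
 * Program the MAC address: the first four bytes go to the MACL register as
 * one 32-bit write, the remaining two bytes to the low half of MACH.
 */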
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}
static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}
/* ==================== initialization and cleanup routines ============ */
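
/*
 * Enable the PCI device, pick a 64-bit DMA mask when possible (falling
 * back to 32-bit), claim the BARs, and map BAR0 (rx producer and interrupt
 * mask registers) and BAR1 (command and configuration registers).  On
 * failure, everything acquired so far is released in reverse order.
 */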
static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed\n");
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			dev_err(&pdev->dev,
				"pci_set_dma_mask failed\n");
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"Failed to request region for adapter: error %d\n", err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		dev_err(&pdev->dev, "Failed to map bar0\n");
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		dev_err(&pdev->dev, "Failed to map bar1\n");
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}
static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}
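
/*
 * Size the rx buffers from the MTU.  An MTU that fits one skb buffer uses a
 * single buffer per packet; a larger MTU caps the first buffer at
 * VMXNET3_MAX_SKB_BUF_SIZE and spreads the remainder over page-sized ring-2
 * buffers, i.e. rx_buf_per_pkt = 1 + DIV_ROUND_UP(mtu +
 * VMXNET3_MAX_ETH_HDR_SIZE - VMXNET3_MAX_SKB_BUF_SIZE, PAGE_SIZE).  Ring
 * sizes are then rounded up to a multiple of rx_buf_per_pkt *
 * VMXNET3_RING_SIZE_ALIGN, clamped to the device maximums, and applied to
 * every rx queue.
 */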
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz, i, ring0_size, ring1_size, comp_size;
	struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[0];

	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
				    VMXNET3_MAX_ETH_HDR_SIZE) {
		adapter->skb_buf_size = adapter->netdev->mtu +
					VMXNET3_MAX_ETH_HDR_SIZE;
		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

		adapter->rx_buf_per_pkt = 1;
	} else {
		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
					    VMXNET3_MAX_ETH_HDR_SIZE;
		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
	ring0_size = (ring0_size + sz - 1) / sz * sz;
	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
			   sz * sz);
	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
	ring1_size = (ring1_size + sz - 1) / sz * sz;
	ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
			   sz * sz);
	comp_size = ring0_size + ring1_size;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rq = &adapter->rx_queue[i];
		rq->rx_ring[0].size = ring0_size;
		rq->rx_ring[1].size = ring1_size;
		rq->comp_ring.size = comp_size;
	}
}
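
/*
 * Allocate the tx and rx queue rings.  A tx queue creation failure is
 * fatal, since num_tx_queues has already been advertised; rx queue
 * creation may degrade gracefully to however many queues were created
 * successfully, as long as there is at least one.
 */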
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size)
{
	int err = 0, i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
		tq->tx_ring.size   = tx_ring_size;
		tq->data_ring.size = tx_ring_size;
		tq->comp_ring.size = tx_ring_size;
		tq->shared = &adapter->tqd_start[i].ctrl;
		tq->stopped = true;
		tq->adapter = adapter;
		tq->qid = i;
		err = vmxnet3_tq_create(tq, adapter);
		/*
		 * Too late to change num_tx_queues. We cannot do away with
		 * lesser number of queues than what we asked for
		 */
		if (err)
			goto queue_err;
	}

	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
		/* qid and qid2 for rx queues will be assigned later when num
		 * of rx queues is finalized after allocating intrs */
		rq->shared = &adapter->rqd_start[i].ctrl;
		rq->adapter = adapter;
		err = vmxnet3_rq_create(rq, adapter);
		if (err) {
			if (i == 0) {
				netdev_err(adapter->netdev,
					   "Could not allocate any rx queues. "
					   "Aborting.\n");
				goto queue_err;
			} else {
				netdev_info(adapter->netdev,
					    "Number of rx queues changed "
					    "to : %d.\n", i);
				adapter->num_rx_queues = i;
				err = 0;
				break;
			}
		}
	}
	return err;
queue_err:
	vmxnet3_tq_destroy_all(adapter);
	return err;
}
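
/* ndo_open: create the queues sized from the adapter defaults, then activate. */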
static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err, i;

	adapter = netdev_priv(netdev);

	for (i = 0; i < adapter->num_tx_queues; i++)
		spin_lock_init(&adapter->tx_queue[i].tx_lock);

	err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
				    adapter->rx_ring_size,
				    adapter->rx_ring2_size);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);
queue_err:
	return err;
}
static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);

	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	int i;

	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	dev_close(adapter->netdev);
}
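
/*
 * ndo_change_mtu: on a running device the rx rings must be rebuilt for the
 * new buffer geometry, so quiesce, reset, re-create the rx queues and
 * re-activate.  Any failure here forces the device closed.
 */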
static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy_all(adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		err = vmxnet3_rq_create_all(adapter);
		if (err) {
			netdev_err(netdev,
				   "failed to re-create rx queues, "
				   "error %d. Closing it.\n", err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			netdev_err(netdev,
				   "failed to re-activate, error %d. "
				   "Closing it\n", err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}
static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_LRO;
	if (dma64)
		netdev->hw_features |= NETIF_F_HIGHDMA;
	netdev->vlan_features = netdev->hw_features &
				~(NETIF_F_HW_VLAN_CTAG_TX |
				  NETIF_F_HW_VLAN_CTAG_RX);
	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
}
static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}
#ifdef CONFIG_PCI_MSI

/*
 * Enable MSIx vectors.
 * Returns :
 *	VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
 *	were enabled.
 *	number of vectors which were enabled otherwise (this number is greater
 *	than VMXNET3_LINUX_MIN_MSIX_VECT)
 */

static int
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
{
	int ret = pci_enable_msix_range(adapter->pdev,
					adapter->intr.msix_entries, nvec, nvec);

	if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
		dev_err(&adapter->netdev->dev,
			"Failed to enable %d MSI-X, trying %d\n",
			nvec, VMXNET3_LINUX_MIN_MSIX_VECT);

		ret = pci_enable_msix_range(adapter->pdev,
					    adapter->intr.msix_entries,
					    VMXNET3_LINUX_MIN_MSIX_VECT,
					    VMXNET3_LINUX_MIN_MSIX_VECT);
	}

	if (ret < 0) {
		dev_err(&adapter->netdev->dev,
			"Failed to enable MSI-X, error: %d\n", ret);
	}

	return ret;
}

#endif /* CONFIG_PCI_MSI */
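
/*
 * Query the device for its preferred interrupt type and mask mode, then
 * try MSI-X, MSI and INTx in that order.  The MSI-X request is one vector
 * per tx queue (or one shared tx vector), plus one per rx queue unless
 * tx/rx pairs buddy-share, plus one for link events, and never fewer than
 * VMXNET3_LINUX_MIN_MSIX_VECT.  Each fallback reduces the driver to a
 * single rx queue.
 */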
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;
	unsigned long flags;

	/* intr settings */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO) {
		adapter->intr.type = VMXNET3_IT_MSIX;
	}

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int i, nvec;

		nvec  = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
			1 : adapter->num_tx_queues;
		nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
			0 : adapter->num_rx_queues;
		nvec += 1;	/* for link event */
		nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
		       nvec : VMXNET3_LINUX_MIN_MSIX_VECT;

		for (i = 0; i < nvec; i++)
			adapter->intr.msix_entries[i].entry = i;

		nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
		if (nvec < 0)
			goto msix_err;

		/* If we cannot allocate one MSIx vector per queue
		 * then limit the number of rx queues to 1
		 */
		if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
			    || adapter->num_rx_queues != 1) {
				adapter->share_intr = VMXNET3_INTR_TXSHARE;
				netdev_err(adapter->netdev,
					   "Number of rx queues : 1\n");
				adapter->num_rx_queues = 1;
			}
		}

		adapter->intr.num_intrs = nvec;
		return;

msix_err:
		/* If we cannot allocate MSIx vectors use only one rx queue */
		dev_info(&adapter->pdev->dev,
			 "Failed to enable MSI-X, error %d. "
			 "Limiting #rx queues to 1, try MSI.\n", nvec);

		adapter->intr.type = VMXNET3_IT_MSI;
	}

	if (adapter->intr.type == VMXNET3_IT_MSI) {
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->num_rx_queues = 1;
			adapter->intr.num_intrs = 1;
			return;
		}
	}
#endif /* CONFIG_PCI_MSI */

	adapter->num_rx_queues = 1;
	dev_info(&adapter->netdev->dev,
		 "Using INTx interrupt, #Rx queues: 1.\n");
	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}
static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->intr.type == VMXNET3_IT_MSI)
		pci_disable_msi(adapter->pdev);
	else
		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}
static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	adapter->tx_timeout_count++;

	netdev_err(adapter->netdev, "tx hang\n");
	schedule_work(&adapter->work);
	netif_wake_queue(adapter->netdev);
}
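
/*
 * Reset worker scheduled from the tx watchdog and other error paths: under
 * rtnl_lock, quiesce, reset and re-activate a running device.  The
 * RESETTING state bit keeps this from racing with close and mtu changes.
 */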
static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	rtnl_lock();
	if (netif_running(adapter->netdev)) {
		netdev_notice(adapter->netdev, "resetting\n");
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		netdev_info(adapter->netdev, "already closed\n");
	}
	rtnl_unlock();

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
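
/*
 * PCI probe: size the queue counts from the online CPUs (rounded down to a
 * power of two), allocate the netdev and the DMA-coherent shared, queue
 * descriptor, wake-up and RSS configuration areas, map the BARs, negotiate
 * the device and UPT versions, set up interrupts and NAPI, and register
 * the netdev.  The error path unwinds in reverse order.
 */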
static int
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
{
	static const struct net_device_ops vmxnet3_netdev_ops = {
		.ndo_open = vmxnet3_open,
		.ndo_stop = vmxnet3_close,
		.ndo_start_xmit = vmxnet3_xmit_frame,
		.ndo_set_mac_address = vmxnet3_set_mac_addr,
		.ndo_change_mtu = vmxnet3_change_mtu,
		.ndo_set_features = vmxnet3_set_features,
		.ndo_get_stats64 = vmxnet3_get_stats64,
		.ndo_tx_timeout = vmxnet3_tx_timeout,
		.ndo_set_rx_mode = vmxnet3_set_mc,
		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller = vmxnet3_netpoll,
#endif
	};
	int err;
	bool dma64 = false; /* stupid gcc */
	u32 ver;
	struct net_device *netdev;
	struct vmxnet3_adapter *adapter;
	u8 mac[ETH_ALEN];
	int size;
	int num_tx_queues;
	int num_rx_queues;

	if (!pci_msi_enabled())
		enable_mq = 0;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	if (enable_mq)
		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
				    (int)num_online_cpus());
	else
		num_tx_queues = 1;

	num_tx_queues = rounddown_pow_of_two(num_tx_queues);
	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
				   max(num_tx_queues, num_rx_queues));
	dev_info(&pdev->dev,
		 "# of Tx queues : %d, # of Rx queues : %d\n",
		 num_tx_queues, num_rx_queues);

	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
	adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
	adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;

	spin_lock_init(&adapter->cmd_lock);
	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
					     sizeof(struct vmxnet3_adapter),
					     PCI_DMA_TODEVICE);
	adapter->shared = dma_alloc_coherent(
				&adapter->pdev->dev,
				sizeof(struct Vmxnet3_DriverShared),
				&adapter->shared_pa, GFP_KERNEL);
	if (!adapter->shared) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		err = -ENOMEM;
		goto err_alloc_shared;
	}

	adapter->num_rx_queues = num_rx_queues;
	adapter->num_tx_queues = num_tx_queues;
	adapter->rx_buf_per_pkt = 1;

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
	adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
						&adapter->queue_desc_pa,
						GFP_KERNEL);

	if (!adapter->tqd_start) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
							    adapter->num_tx_queues);

	adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
					      sizeof(struct Vmxnet3_PMConf),
					      &adapter->pm_conf_pa,
					      GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_pm;
	}

#ifdef VMXNET3_RSS

	adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
					       sizeof(struct UPT1_RSSConf),
					       &adapter->rss_conf_pa,
					       GFP_KERNEL);
	if (adapter->rss_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_rss;
	}
#endif /* VMXNET3_RSS */
	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 2) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 2);
		adapter->version = 2;
	} else if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
		adapter->version = 1;
	} else {
		dev_err(&pdev->dev,
			"Incompatible h/w version (0x%x) for adapter\n", ver);
		err = -EBUSY;
		goto err_ver;
	}
	dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		dev_err(&pdev->dev,
			"Incompatible upt version (0x%x) for adapter\n", ver);
		err = -EBUSY;
		goto err_ver;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	vmxnet3_declare_features(adapter, dma64);

	if (adapter->num_tx_queues == adapter->num_rx_queues)
		adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
	else
		adapter->share_intr = VMXNET3_INTR_DONTSHARE;

	vmxnet3_alloc_intr_resources(adapter);

#ifdef VMXNET3_RSS
	if (adapter->num_rx_queues > 1 &&
	    adapter->intr.type == VMXNET3_IT_MSIX) {
		adapter->rss = true;
		netdev->hw_features |= NETIF_F_RXHASH;
		netdev->features |= NETIF_F_RXHASH;
		dev_dbg(&pdev->dev, "RSS is enabled.\n");
	} else {
		adapter->rss = false;
	}
#endif

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	vmxnet3_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	INIT_WORK(&adapter->work, vmxnet3_reset_work);
	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int i;
		for (i = 0; i < adapter->num_rx_queues; i++) {
			netif_napi_add(adapter->netdev,
				       &adapter->rx_queue[i].napi,
				       vmxnet3_poll_rx_only, 64);
		}
	} else {
		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
			       vmxnet3_poll, 64);
	}

	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);

	netif_carrier_off(netdev);
	err = register_netdev(netdev);

	if (err) {
		dev_err(&pdev->dev, "Failed to register adapter\n");
		goto err_register;
	}

	vmxnet3_check_link(adapter, false);
	return 0;
err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
			  adapter->rss_conf, adapter->rss_conf_pa);
err_alloc_rss:
#endif
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
			  adapter->pm_conf, adapter->pm_conf_pa);
err_alloc_pm:
	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
			  adapter->queue_desc_pa);
err_alloc_queue_desc:
	dma_free_coherent(&adapter->pdev->dev,
			  sizeof(struct Vmxnet3_DriverShared),
			  adapter->shared, adapter->shared_pa);
err_alloc_shared:
	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
	free_netdev(netdev);
	return err;
}
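
/*
 * PCI remove: recompute the worst-case rx queue count used at probe time so
 * the queue descriptor area is freed with the same size it was allocated
 * with, then tear everything down in the reverse order of probe.
 */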
static void
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int size = 0;
	int num_rx_queues;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	cancel_work_sync(&adapter->work);

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
			  adapter->rss_conf, adapter->rss_conf_pa);
#endif
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
			  adapter->pm_conf, adapter->pm_conf_pa);

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
			  adapter->queue_desc_pa);
	dma_free_coherent(&adapter->pdev->dev,
			  sizeof(struct Vmxnet3_DriverShared),
			  adapter->shared, adapter->shared_pa);
	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
	free_netdev(netdev);
}
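
/*
 * PCI shutdown: quiesce the device and disable interrupts so it stops DMA
 * before the system goes down; no memory is freed on this path.
 */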
static void vmxnet3_shutdown_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	/* Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
			     &adapter->state)) {
		clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
		return;
	}
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_disable_all_intrs(adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
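
/*
 * Power management: on suspend, program the device's wake-up filters
 * (unicast match, ARP request for our address, magic packet) according to
 * adapter->wol, hand the PMConf descriptor to the device, and put the
 * function into the suspend power state.  Each mask bit selects one
 * pattern byte, so e.g. mask[0] = 0x3F covers the six bytes of the
 * destination MAC in the first filter.
 */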
#ifdef CONFIG_PM

static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	unsigned long flags;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_tx_stop_all_queues(netdev);
	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN +	/* Ethernet header */
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +			/* 2 Ethernet addresses */
			2 * sizeof(u32);		/* 2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;
	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA =
		cpu_to_le64(adapter->pm_conf_pa);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}
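
/*
 * On resume the device state was not preserved, so quiesce whatever state
 * the device is in, then reset and fully re-activate it before
 * re-attaching the netdev.
 */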
static int
vmxnet3_resume(struct device *device)
{
	int err;
	unsigned long flags;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	vmxnet3_alloc_intr_resources(adapter);

	/* During hibernate and suspend, device has to be reinitialized as the
	 * device state need not be preserved.
	 */

	/* Need not check adapter state as other reset tasks cannot run during
	 * vmxnet3_resume.
	 */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);

	vmxnet3_reset_dev(adapter);
	err = vmxnet3_activate_dev(adapter);
	if (err != 0) {
		netdev_err(netdev,
			   "failed to re-activate on resume, error: %d", err);
		vmxnet3_force_close(adapter);
		return err;
	}

	netif_device_attach(netdev);

	return 0;
}
static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
	.freeze = vmxnet3_suspend,
	.restore = vmxnet3_resume,
};
#endif /* CONFIG_PM */
static struct pci_driver vmxnet3_driver = {
	.name = vmxnet3_driver_name,
	.id_table = vmxnet3_pciid_table,
	.probe = vmxnet3_probe_device,
	.remove = vmxnet3_remove_device,
	.shutdown = vmxnet3_shutdown_device,
#ifdef CONFIG_PM
	.driver.pm = &vmxnet3_pm_ops,
#endif
};
static int __init
vmxnet3_init_module(void)
{
	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
		VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);


static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);