++ struct gmac_private *gmac = netdev_priv(dev);
++ struct toe_private *toe = gmac->toe;
++ void __iomem *rwptr_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE0_PTR_REG;
++ void __iomem *base_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE_BASE_REG;
++
++ unsigned n_txq = dev->num_tx_queues;
++ unsigned int r, i;
++
++ for (i = 0; i < n_txq; i++) {
++ r = readw(rwptr_reg);
++ rwptr_reg += 2;
++ writew(r, rwptr_reg);
++ rwptr_reg += 2;
++
++ gmac_clean_txq(dev, gmac->txq + i, r);
++ }
++ writel(0, base_reg);
++
++ kfree(gmac->txq->skb);
++ dma_free_coherent(toe->dev,
++ n_txq * sizeof(*gmac->txq->ring) << gmac->txq_order,
++ gmac->txq->ring, gmac->txq_dma_base);
++}
++
++/* Allocate the default RX descriptor ring for @dev and point the
++ * matching non-TOE queue header at it.
++ * Returns 0 on success or -ENOMEM if the ring cannot be allocated.
++ */
++static int gmac_setup_rxq(struct net_device *dev)
++{
++	struct gmac_private *gmac = netdev_priv(dev);
++	struct toe_private *toe = gmac->toe;
++	NONTOE_QHDR_T __iomem *q = toe->iomem + TOE_DEFAULT_Q_HDR_BASE(dev->dev_id);
++	size_t ring_sz = sizeof(*gmac->rxq_ring) << gmac->rxq_order;
++
++	gmac->rxq_rwptr = &q->word1;
++
++	/* Ring memory shared coherently with the DMA engine */
++	gmac->rxq_ring = dma_alloc_coherent(toe->dev, ring_sz,
++					    &gmac->rxq_dma_base, GFP_KERNEL);
++	if (!gmac->rxq_ring)
++		return -ENOMEM;
++
++	/* The queue header only stores the masked base-address bits */
++	BUG_ON(gmac->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK);
++
++	writel(gmac->rxq_dma_base | gmac->rxq_order, &q->word0);
++	writel(0, gmac->rxq_rwptr);
++	return 0;
++}
++
++/* Tear down the default RX queue of @dev.
++ *
++ * Stops the hardware queue, drops the page references still held by
++ * descriptors the stack never consumed, then frees the descriptor
++ * ring. Must only be called once RX is quiesced (interface down).
++ */
++static void gmac_cleanup_rxq(struct net_device *dev)
++{
++	struct gmac_private *gmac = netdev_priv(dev);
++	struct toe_private *toe = gmac->toe;
++
++	NONTOE_QHDR_T __iomem *qhdr = toe->iomem + TOE_DEFAULT_Q_HDR_BASE(dev->dev_id);
++	void __iomem *dma_reg = &qhdr->word0;
++	void __iomem *ptr_reg = &qhdr->word1;
++	GMAC_RXDESC_T *rxd = gmac->rxq_ring;
++	DMA_RWPTR_T rw;
++	unsigned int r, w;
++	unsigned int m = (1 << gmac->rxq_order) - 1;	/* ring index mask */
++	struct page *page;
++	dma_addr_t mapping;
++
++	rw.bits32 = readl(ptr_reg);
++	r = rw.bits.rptr;
++	w = rw.bits.wptr;
++	/* Park the write pointer at the read pointer: queue appears empty */
++	writew(r, ptr_reg + 2);
++
++	/* Clearing the base/size word disables this DMA queue */
++	writel(0, dma_reg);
++
++	/* Descriptor reads below must not be reordered before the
++	 * pointer read above.
++	 */
++	rmb();
++	while (r != w) {
++		mapping = rxd[r].word2.buf_adr;
++		r++;
++		r &= m;
++
++		if (!mapping)
++			continue;
++
++		/* Drop the reference the free queue took on this fragment */
++		page = pfn_to_page(dma_to_pfn(toe->dev, mapping));
++		put_page(page);
++	}
++
++	dma_free_coherent(toe->dev, sizeof(*gmac->rxq_ring) << gmac->rxq_order,
++			  gmac->rxq_ring, gmac->rxq_dma_base);
++}
++
++/* Allocate a fresh page, DMA-map it, and install its fragments into
++ * free-queue page-slot @pn.
++ *
++ * Writes one descriptor per fragment of the new page, then unmaps and
++ * releases the page previously occupying the slot (if any). Returns
++ * the new page, or NULL on allocation/mapping failure (the slot is
++ * left untouched in that case). May be called from atomic context
++ * (GFP_ATOMIC).
++ */
++static struct page *toe_freeq_alloc_map_page(struct toe_private *toe, int pn)
++{
++	unsigned int fpp_order = PAGE_SHIFT - toe->freeq_frag_order;
++	unsigned int frag_len = 1 << toe->freeq_frag_order;
++	GMAC_RXDESC_T *freeq_entry;
++	dma_addr_t mapping, old_mapping;
++	struct page *page;
++	int i;
++
++	page = alloc_page(__GFP_COLD | GFP_ATOMIC);
++	if (!page)
++		return NULL;
++
++	mapping = dma_map_single(toe->dev, page_address(page),
++				 PAGE_SIZE, DMA_FROM_DEVICE);
++
++	if (unlikely(dma_mapping_error(toe->dev, mapping) || !mapping)) {
++		put_page(page);
++		return NULL;
++	}
++
++	/* Save the previous page's mapping before the descriptor below is
++	 * overwritten with the new one, so the correct region is unmapped
++	 * further down (the original read it back after the overwrite and
++	 * thus unmapped the new page instead).
++	 */
++	old_mapping = toe->freeq_ring[pn << fpp_order].word2.buf_adr;
++
++	/* One descriptor per fragment, at frag_len strides into the page */
++	freeq_entry = toe->freeq_ring + (pn << fpp_order);
++	for (i = 1 << fpp_order; i > 0; --i) {
++		freeq_entry->word2.buf_adr = mapping;
++		freeq_entry++;
++		mapping += frag_len;
++	}
++
++	if (toe->freeq_page_tab[pn]) {
++		/* Unmap size must match the dma_map_single() size above */
++		dma_unmap_single(toe->dev, old_mapping, PAGE_SIZE,
++				 DMA_FROM_DEVICE);
++		put_page(toe->freeq_page_tab[pn]);
++	}
++
++	toe->freeq_page_tab[pn] = page;
++	return page;
++}
++
++/* Top up the software free queue with page fragments.
++ *
++ * @reset: when non-zero, republish starting from the hardware read
++ *         pointer (used right after (re)creating the queue); otherwise
++ *         continue from the current write pointer.
++ *
++ * Walks whole pages of fragments and republishes each page whose
++ * previous fragments have all been consumed; pages still referenced by
++ * the stack are replaced with freshly allocated ones once the queue
++ * runs low. Takes the freeq spinlock itself. Returns the number of
++ * fragments added.
++ */
++static unsigned int toe_fill_freeq(struct toe_private *toe, int reset)
++{
++	void __iomem *rwptr_reg = toe->iomem + GLOBAL_SWFQ_RWPTR_REG;
++
++	DMA_RWPTR_T rw;
++	unsigned int pn, epn;
++	unsigned int fpp_order = PAGE_SHIFT - toe->freeq_frag_order;	/* log2(frags per page) */
++	unsigned int m_pn = (1 << (toe->freeq_order - fpp_order)) - 1;	/* page-slot index mask */
++	struct page *page;
++	unsigned int count = 0;
++	unsigned long flags;
++
++	spin_lock_irqsave(&toe->freeq_lock, flags);
++
++	rw.bits32 = readl(rwptr_reg);
++	pn = (reset ? rw.bits.rptr : rw.bits.wptr) >> fpp_order;
++	/* Stop one page-slot short of the hardware read pointer */
++	epn = (rw.bits.rptr >> fpp_order) - 1;
++	epn &= m_pn;
++
++	while (pn != epn) {
++		page = toe->freeq_page_tab[pn];
++
++		/* Extra references mean fragments are still in flight */
++		if (atomic_read(&page->_count) > 1) {
++			unsigned int fl = (pn - epn) & m_pn;
++
++			/* Enough slack left: don't replace busy pages yet */
++			if (fl > 64 >> fpp_order)
++				break;
++
++			page = toe_freeq_alloc_map_page(toe, pn);
++			if (!page)
++				break;
++		}
++
++		/* Take one reference per fragment handed to the hardware */
++		atomic_add(1 << fpp_order, &page->_count);
++		count += 1 << fpp_order;
++		pn++;
++		pn &= m_pn;
++	}
++
++	/* Descriptor writes must reach memory before the pointer update */
++	wmb();
++	writew(pn << fpp_order, rwptr_reg + 2);
++
++	spin_unlock_irqrestore(&toe->freeq_lock, flags);
++	return count;
++}
++
++/* Allocate and publish the software free queue.
++ *
++ * Allocates the descriptor ring, backs every page-slot with a mapped
++ * page of fragments, primes the queue and programs the queue
++ * registers. Returns 0 on success or -ENOMEM, fully undoing its work
++ * on failure.
++ */
++static int toe_setup_freeq(struct toe_private *toe)
++{
++	void __iomem *dma_reg = toe->iomem + GLOBAL_SW_FREEQ_BASE_SIZE_REG;
++	QUEUE_THRESHOLD_T qt;
++	DMA_SKB_SIZE_T skbsz;
++	unsigned int filled;
++	unsigned int len = 1 << toe->freeq_order;
++	unsigned int fpp_order = PAGE_SHIFT - toe->freeq_frag_order;
++	unsigned int pages = len >> fpp_order;
++	dma_addr_t mapping;
++	unsigned int pn;
++
++	toe->freeq_ring = dma_alloc_coherent(toe->dev,
++		sizeof(*toe->freeq_ring) << toe->freeq_order,
++		&toe->freeq_dma_base, GFP_KERNEL);
++	if (!toe->freeq_ring)
++		return -ENOMEM;
++
++	BUG_ON(toe->freeq_dma_base & ~DMA_Q_BASE_MASK);
++
++	/* kcalloc: zeroed and overflow-checked n * size */
++	toe->freeq_page_tab = kcalloc(pages, sizeof(*toe->freeq_page_tab),
++				      GFP_KERNEL);
++	if (!toe->freeq_page_tab)
++		goto err_freeq;
++
++	for (pn = 0; pn < pages; pn++)
++		if (!toe_freeq_alloc_map_page(toe, pn))
++			goto err_freeq_alloc;
++
++	filled = toe_fill_freeq(toe, 1);
++	if (!filled)
++		goto err_freeq_alloc;
++
++	qt.bits32 = readl(toe->iomem + GLOBAL_QUEUE_THRESHOLD_REG);
++	qt.bits.swfq_empty = 32;
++	writel(qt.bits32, toe->iomem + GLOBAL_QUEUE_THRESHOLD_REG);
++
++	/* Read-modify-write so the other fields of the skb-size register
++	 * are preserved; the original wrote an uninitialized struct and
++	 * thus programmed them with stack garbage.
++	 */
++	skbsz.bits32 = readl(toe->iomem + GLOBAL_DMA_SKB_SIZE_REG);
++	skbsz.bits.sw_skb_size = 1 << toe->freeq_frag_order;
++	writel(skbsz.bits32, toe->iomem + GLOBAL_DMA_SKB_SIZE_REG);
++	writel(toe->freeq_dma_base | toe->freeq_order, dma_reg);
++
++	return 0;
++
++err_freeq_alloc:
++	while (pn > 0) {
++		--pn;
++		mapping = toe->freeq_ring[pn << fpp_order].word2.buf_adr;
++		/* Pages were mapped with dma_map_single(..., PAGE_SIZE, ...)
++		 * in toe_freeq_alloc_map_page(); unmap the same size.
++		 */
++		dma_unmap_single(toe->dev, mapping, PAGE_SIZE, DMA_FROM_DEVICE);
++		put_page(toe->freeq_page_tab[pn]);
++	}
++
++	/* The original leaked the page table on this path */
++	kfree(toe->freeq_page_tab);
++	toe->freeq_page_tab = NULL;
++
++err_freeq:
++	dma_free_coherent(toe->dev,
++		sizeof(*toe->freeq_ring) << toe->freeq_order,
++		toe->freeq_ring, toe->freeq_dma_base);
++	toe->freeq_ring = NULL;
++	return -ENOMEM;
++}
++
++/* Disable the software free queue and release all of its pages.
++ *
++ * Counterpart of toe_setup_freeq(); must only be called with all queue
++ * consumers stopped.
++ */
++static void toe_cleanup_freeq(struct toe_private *toe)
++{
++	void __iomem *dma_reg = toe->iomem + GLOBAL_SW_FREEQ_BASE_SIZE_REG;
++	void __iomem *ptr_reg = toe->iomem + GLOBAL_SWFQ_RWPTR_REG;
++
++	unsigned int len = 1 << toe->freeq_order;
++	unsigned int fpp_order = PAGE_SHIFT - toe->freeq_frag_order;
++	unsigned int pages = len >> fpp_order;
++	struct page *page;
++	dma_addr_t mapping;
++	unsigned int pn;
++
++	/* Empty the queue (wptr := rptr), then disable it */
++	writew(readw(ptr_reg), ptr_reg + 2);
++	writel(0, dma_reg);
++
++	for (pn = 0; pn < pages; pn++) {
++		mapping = toe->freeq_ring[pn << fpp_order].word2.buf_adr;
++		/* Pages were mapped with dma_map_single(..., PAGE_SIZE, ...)
++		 * in toe_freeq_alloc_map_page(); unmap the same size.
++		 */
++		dma_unmap_single(toe->dev, mapping, PAGE_SIZE, DMA_FROM_DEVICE);
++
++		/* Drop every reference we still hold, releasing the final
++		 * one last so the refcount is never read after the page has
++		 * been freed (the original looped `while (> 0)` and read the
++		 * count of an already-freed page).
++		 */
++		page = toe->freeq_page_tab[pn];
++		while (atomic_read(&page->_count) > 1)
++			put_page(page);
++		put_page(page);
++	}
++
++	kfree(toe->freeq_page_tab);
++
++	dma_free_coherent(toe->dev,
++		sizeof(*toe->freeq_ring) << toe->freeq_order,
++		toe->freeq_ring, toe->freeq_dma_base);
++}
++
++/* Recompute the free queue size from both ports' RX queue sizes and
++ * rebuild the queue if the required order changed.
++ *
++ * Only allowed while the other port is down (returns -EBUSY
++ * otherwise). Returns 0 if no resize was needed or on success, a
++ * negative errno on failure.
++ */
++static int toe_resize_freeq(struct toe_private *toe, int changing_dev_id)
++{
++	void __iomem *irqen_reg = toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG;
++	struct gmac_private *gmac;
++	struct net_device *other = toe->netdev[1 - changing_dev_id];
++	unsigned new_size = 0;
++	unsigned new_order;
++	int err;
++	unsigned long flags;
++	unsigned en;
++
++	if (other && netif_running(other))
++		return -EBUSY;
++
++	/* Budget two free-queue entries per RX descriptor of each port */
++	if (toe->netdev[0]) {
++		gmac = netdev_priv(toe->netdev[0]);
++		new_size = 1 << (gmac->rxq_order + 1);
++	}
++
++	if (toe->netdev[1]) {
++		gmac = netdev_priv(toe->netdev[1]);
++		new_size += 1 << (gmac->rxq_order + 1);
++	}
++
++	/* Round up to the next power-of-two order, capped at 15 */
++	new_order = min(15, ilog2(new_size - 1) + 1);
++	if (toe->freeq_order == new_order)
++		return 0;
++
++	/* Mask the free-queue-empty interrupt while the queue is gone.
++	 * NOTE(review): toe_setup_freeq() allocates with GFP_KERNEL and
++	 * may sleep, but is called below under a spinlock with IRQs
++	 * disabled -- verify (atomic allocation or restructuring needed?).
++	 */
++	spin_lock_irqsave(&toe->irq_lock, flags);
++	en = readl(irqen_reg);
++	en &= ~SWFQ_EMPTY_INT_BIT;
++	writel(en, irqen_reg);
++
++	if (toe->freeq_ring)
++		toe_cleanup_freeq(toe);
++
++	toe->freeq_order = new_order;
++	err = toe_setup_freeq(toe);
++
++	/* NOTE(review): the interrupt is re-enabled even when setup
++	 * failed and no free queue exists -- confirm this is intended.
++	 */
++	en |= SWFQ_EMPTY_INT_BIT;
++	writel(en, irqen_reg);
++	spin_unlock_irqrestore(&toe->irq_lock, flags);
++
++	return err;
++}
++
++/* Ack (when enabling) and mask/unmask the TX completion interrupt of
++ * queue @txq on @dev.
++ */
++static void gmac_tx_irq_enable(struct net_device *dev, unsigned txq, int en)
++{
++	struct gmac_private *gmac = netdev_priv(dev);
++	struct toe_private *toe = gmac->toe;
++	unsigned bit = GMAC0_IRQ0_TXQ0_INTS << (6 * dev->dev_id + txq);
++	unsigned cur;
++
++	/* Clear any stale status before unmasking */
++	if (en)
++		writel(bit, toe->iomem + GLOBAL_INTERRUPT_STATUS_0_REG);
++
++	cur = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);
++	if (en)
++		cur |= bit;
++	else
++		cur &= ~bit;
++	writel(cur, toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);
++}
++
++
++/* TX completion interrupt: mask further TX IRQs for the queue and let
++ * the stack resume transmitting on it.
++ */
++static void gmac_tx_irq(struct net_device *dev, unsigned txq_num)
++{
++	gmac_tx_irq_enable(dev, txq_num, 0);
++	netif_tx_wake_queue(netdev_get_tx_queue(dev, txq_num));
++}
++
++/* Fill TX descriptors for @skb (linear head plus page fragments)
++ * starting at ring slot *@desc of @txq.
++ *
++ * On success advances *@desc past the descriptors used and returns 0.
++ * On DMA mapping failure unmaps everything mapped so far, leaves
++ * *@desc untouched and returns -ENOMEM (the caller may linearize the
++ * skb and retry).
++ */
++static int gmac_map_tx_bufs(struct net_device *dev, struct sk_buff *skb,
++	struct gmac_txq *txq, unsigned short *desc)
++{
++	struct gmac_private *gmac = netdev_priv(dev);
++	struct toe_private *toe = gmac->toe;
++	struct skb_shared_info *skb_si = skb_shinfo(skb);
++	skb_frag_t *skb_frag;
++	short frag, last_frag = skb_si->nr_frags - 1;
++	unsigned short m = (1 << gmac->txq_order) - 1;	/* ring index mask */
++	unsigned short w = *desc;
++	unsigned word1, word3, buflen;
++	dma_addr_t mapping;
++	void *buffer;
++	unsigned short mtu;
++	GMAC_TXDESC_T *txd;
++
++	/* On-wire MTU for the hardware segmentation engine */
++	mtu = ETH_HLEN;
++	mtu += dev->mtu;
++	if (skb->protocol == htons(ETH_P_8021Q))
++		mtu += VLAN_HLEN;
++
++	word1 = skb->len;
++	word3 = SOF_BIT;
++
++	if (word1 > mtu) {
++		word1 |= TSS_MTU_ENABLE_BIT;
++		word3 += mtu;
++	}
++
++	/* Request hardware checksumming as appropriate for the protocol */
++	if (skb->ip_summed != CHECKSUM_NONE) {
++		int tcp = 0;
++
++		if (skb->protocol == htons(ETH_P_IP)) {
++			word1 |= TSS_IP_CHKSUM_BIT;
++			tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
++		} else { /* IPv6 */
++			word1 |= TSS_IPV6_ENABLE_BIT;
++			tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
++		}
++
++		word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
++	}
++
++	/* frag == -1 is the skb head; 0..last_frag are the page frags */
++	frag = -1;
++	while (frag <= last_frag) {
++		if (frag == -1) {
++			buffer = skb->data;
++			buflen = skb_headlen(skb);
++		} else {
++			skb_frag = skb_si->frags + frag;
++			buffer = page_address(skb_frag_page(skb_frag)) +
++				 skb_frag->page_offset;
++			buflen = skb_frag->size;
++		}
++
++		if (frag == last_frag) {
++			word3 |= EOF_BIT;
++			/* Remember the skb so completion can free it */
++			txq->skb[w] = skb;
++		}
++
++		mapping = dma_map_single(toe->dev, buffer, buflen,
++					 DMA_TO_DEVICE);
++		if (dma_mapping_error(toe->dev, mapping) ||
++		    !(mapping & PAGE_MASK))
++			goto map_error;
++
++		txd = txq->ring + w;
++		txd->word0.bits32 = buflen;
++		txd->word1.bits32 = word1;
++		txd->word2.buf_adr = mapping;
++		txd->word3.bits32 = word3;
++
++		/* Only the first descriptor carries SOF */
++		word3 &= MTU_SIZE_BIT_MASK;
++		w++;
++		w &= m;
++		frag++;
++	}
++
++	*desc = w;
++	return 0;
++
++map_error:
++	/* Unwind what was mapped so far. The buffers were created with
++	 * dma_map_single(), so the matching unmap primitive must be used
++	 * (the original mismatched with dma_unmap_page()).
++	 */
++	while (w != *desc) {
++		w--;
++		w &= m;
++
++		dma_unmap_single(toe->dev, txq->ring[w].word2.buf_adr,
++				 txq->ring[w].word0.bits.buffer_size,
++				 DMA_TO_DEVICE);
++	}
++	/* Negative errno per kernel convention (was positive ENOMEM) */
++	return -ENOMEM;
++}
++
++/* ndo_start_xmit: queue @skb on its mapped hardware TX queue.
++ *
++ * Reclaims completed descriptors when the ring looks full, linearizes
++ * the skb once if fragment mapping fails, and returns NETDEV_TX_BUSY
++ * (after arming a completion interrupt) when there is still no room.
++ */
++static int gmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++	struct gmac_private *gmac = netdev_priv(dev);
++
++	void __iomem *ptr_reg;
++	struct gmac_txq *txq;
++	struct netdev_queue *ntxq;
++	int txq_num, nfrags;
++	DMA_RWPTR_T rw;
++	unsigned short r, w, d;
++	unsigned short m = (1 << gmac->txq_order) - 1;	/* ring index mask */
++
++	SKB_FRAG_ASSERT(skb);
++
++	/* The hardware length fields are 16 bit wide */
++	if (unlikely(skb->len >= 0x10000))
++		goto out_drop_free;
++
++	txq_num = skb_get_queue_mapping(skb);
++	ptr_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE_PTR_REG(txq_num);
++	txq = &gmac->txq[txq_num];
++	ntxq = netdev_get_tx_queue(dev, txq_num);
++	nfrags = skb_shinfo(skb)->nr_frags;
++
++	rw.bits32 = readl(ptr_reg);
++	r = rw.bits.rptr;
++	w = rw.bits.wptr;
++
++	/* Free slots between the write pointer and our cleanup pointer */
++	d = txq->cptr - w - 1;
++	d &= m;
++
++	if (unlikely(d < nfrags + 2)) {
++		/* Reclaim completed descriptors and re-check */
++		gmac_clean_txq(dev, txq, r);
++		d = txq->cptr - w - 1;
++		d &= m;
++
++		if (unlikely(d < nfrags + 2)) {
++			netif_tx_stop_queue(ntxq);
++
++			/* Ask for an EOF interrupt a little ahead of the
++			 * cleanup pointer so the queue gets woken once
++			 * enough descriptors have completed.
++			 */
++			d = txq->cptr + nfrags + 16;
++			d &= m;
++			txq->ring[d].word3.bits.eofie = 1;
++			gmac_tx_irq_enable(dev, txq_num, 1);
++
++			/* NOTE(review): this bumps dev->stats while the
++			 * other paths in this file use gmac->stats under
++			 * the same syncp -- verify which struct is meant.
++			 */
++			u64_stats_update_begin(&gmac->tx_stats_syncp);
++			dev->stats.tx_fifo_errors++;
++			u64_stats_update_end(&gmac->tx_stats_syncp);
++			return NETDEV_TX_BUSY;
++		}
++	}
++
++	if (unlikely(gmac_map_tx_bufs(dev, skb, txq, &w))) {
++		/* Mapping failed: retry once with a linearized skb */
++		if (skb_linearize(skb))
++			goto out_drop;
++
++		if (unlikely(gmac_map_tx_bufs(dev, skb, txq, &w)))
++			goto out_drop_free;
++
++		u64_stats_update_begin(&gmac->tx_stats_syncp);
++		gmac->tx_frags_linearized++;
++		u64_stats_update_end(&gmac->tx_stats_syncp);
++	}
++
++	/* Publish the new write pointer to the hardware */
++	writew(w, ptr_reg + 2);
++
++	gmac_clean_txq(dev, txq, r);
++	return NETDEV_TX_OK;
++
++out_drop_free:
++	dev_kfree_skb(skb);
++out_drop:
++	u64_stats_update_begin(&gmac->tx_stats_syncp);
++	gmac->stats.tx_dropped++;
++	u64_stats_update_end(&gmac->tx_stats_syncp);
++	return NETDEV_TX_OK;
++}
++
++/* netdev watchdog callback: log the stall and dump the DMA/IRQ
++ * register state for post-mortem analysis. No recovery is attempted.
++ */
++static void gmac_tx_timeout(struct net_device *dev)
++{
++	netdev_err(dev, "Tx timeout\n");
++	gmac_dump_dma_state(dev);
++}
++
++/* Mask or unmask every interrupt source belonging to @dev: the
++ * per-port bits in global enable registers 0, 1 and 4.
++ */
++static void gmac_enable_irq(struct net_device *dev, int enable)
++{
++	struct gmac_private *gmac = netdev_priv(dev);
++	struct toe_private *toe = gmac->toe;
++	void __iomem *base = toe->iomem;
++	unsigned long flags;
++	unsigned cur, bits;
++
++	spin_lock_irqsave(&toe->irq_lock, flags);
++
++	bits = GMAC0_IRQ0_2 << (dev->dev_id * 2);
++	cur = readl(base + GLOBAL_INTERRUPT_ENABLE_0_REG);
++	if (enable)
++		cur |= bits;
++	else
++		cur &= ~bits;
++	writel(cur, base + GLOBAL_INTERRUPT_ENABLE_0_REG);
++
++	bits = DEFAULT_Q0_INT_BIT << dev->dev_id;
++	cur = readl(base + GLOBAL_INTERRUPT_ENABLE_1_REG);
++	if (enable)
++		cur |= bits;
++	else
++		cur &= ~bits;
++	writel(cur, base + GLOBAL_INTERRUPT_ENABLE_1_REG);
++
++	bits = GMAC0_IRQ4_8 << (dev->dev_id * 8);
++	cur = readl(base + GLOBAL_INTERRUPT_ENABLE_4_REG);
++	if (enable)
++		cur |= bits;
++	else
++		cur &= ~bits;
++	writel(cur, base + GLOBAL_INTERRUPT_ENABLE_4_REG);
++
++	spin_unlock_irqrestore(&toe->irq_lock, flags);
++}
++
++/* Mask or unmask the default RX queue interrupt of @dev. */
++static void gmac_enable_rx_irq(struct net_device *dev, int enable)
++{
++	struct gmac_private *gmac = netdev_priv(dev);
++	struct toe_private *toe = gmac->toe;
++	void __iomem *en_reg = toe->iomem + GLOBAL_INTERRUPT_ENABLE_1_REG;
++	unsigned bit = DEFAULT_Q0_INT_BIT << dev->dev_id;
++	unsigned long flags;
++	unsigned cur;
++
++	spin_lock_irqsave(&toe->irq_lock, flags);
++
++	cur = readl(en_reg);
++	if (enable)
++		cur |= bit;
++	else
++		cur &= ~bit;
++	writel(cur, en_reg);
++
++	spin_unlock_irqrestore(&toe->irq_lock, flags);
++}
++
++/* Validate a completed RX frame described by @word0 and, when good,
++ * hand back a fresh napi frags skb for it. Updates the RX statistics
++ * either way; returns NULL when the frame must be dropped or no skb
++ * is available.
++ */
++static struct sk_buff *gmac_skb_if_good_frame(struct gmac_private *gmac,
++	GMAC_RXDESC_0_T word0, unsigned frame_len)
++{
++	unsigned status = word0.bits.status;
++	unsigned csum = word0.bits.chksum_status;
++	struct sk_buff *skb;
++
++	gmac->rx_stats[status]++;
++	gmac->rx_csum_stats[csum]++;
++
++	if (word0.bits.derr || word0.bits.perr || status ||
++	    frame_len < ETH_ZLEN || csum >= RX_CHKSUM_IP_ERR_UNKNOWN) {
++		gmac->stats.rx_errors++;
++
++		if (frame_len < ETH_ZLEN || RX_ERROR_LENGTH(status))
++			gmac->stats.rx_length_errors++;
++		if (RX_ERROR_OVER(status))
++			gmac->stats.rx_over_errors++;
++		if (RX_ERROR_CRC(status))
++			gmac->stats.rx_crc_errors++;
++		if (RX_ERROR_FRAME(status))
++			gmac->stats.rx_frame_errors++;
++
++		return NULL;
++	}
++
++	skb = napi_get_frags(&gmac->napi);
++	if (!skb)
++		return NULL;
++
++	/* Hardware already verified the IP/TCP/UDP checksums */
++	if (csum == RX_CHKSUM_IP_UDP_TCP_OK)
++		skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++	gmac->stats.rx_packets++;
++	gmac->stats.rx_bytes += frame_len;
++	return skb;
++}
++
++/* Receive up to @budget frames from the default RX queue of @dev,
++ * assembling multi-descriptor frames into page-fragment skbs and
++ * feeding them to GRO. Returns the budget left over.
++ */
++static unsigned gmac_rx(struct net_device *dev, unsigned budget)
++{
++	struct gmac_private *gmac = netdev_priv(dev);
++	struct toe_private *toe = gmac->toe;
++	void __iomem *ptr_reg = gmac->rxq_rwptr;
++
++	/* Partially assembled frame carried over between poll calls.
++	 * NOTE(review): as a function-static this is shared by BOTH
++	 * ports' NAPI contexts; looks safe only if a single instance can
++	 * ever be mid-frame here -- verify, or move into gmac_private.
++	 */
++	static struct sk_buff *skb;
++
++	DMA_RWPTR_T rw;
++	unsigned short r, w;
++	unsigned short m = (1 << gmac->rxq_order) - 1;	/* ring index mask */
++	GMAC_RXDESC_T *rx = NULL;
++	struct page *page = NULL;
++	unsigned page_offs;
++	unsigned int frame_len, frag_len;
++	int frag_nr = 0;
++
++	GMAC_RXDESC_0_T word0;
++	GMAC_RXDESC_1_T word1;
++	dma_addr_t mapping;
++	GMAC_RXDESC_3_T word3;
++
++	rw.bits32 = readl(ptr_reg);
++	/* Reset interrupt as all packages until here are taken into account */
++	writel(DEFAULT_Q0_INT_BIT << dev->dev_id,
++	       toe->iomem + GLOBAL_INTERRUPT_STATUS_1_REG);
++	r = rw.bits.rptr;
++	w = rw.bits.wptr;
++
++	while (budget && w != r) {
++		rx = gmac->rxq_ring + r;
++		word0 = rx->word0;
++		word1 = rx->word1;
++		mapping = rx->word2.buf_adr;
++		word3 = rx->word3;
++
++		r++;
++		r &= m;
++
++		frag_len = word0.bits.buffer_size;
++		frame_len = word1.bits.byte_count;
++		page_offs = mapping & ~PAGE_MASK;
++
++		if (unlikely(!mapping)) {
++			netdev_err(dev, "rxq[%u]: HW BUG: zero DMA desc\n", r);
++			goto err_drop;
++		}
++
++		page = pfn_to_page(dma_to_pfn(toe->dev, mapping));
++
++		/* SOF: this descriptor starts a new frame */
++		if (word3.bits32 & SOF_BIT) {
++			if (unlikely(skb)) {
++				/* Previous frame never saw its EOF: drop it */
++				napi_free_frags(&gmac->napi);
++				gmac->stats.rx_dropped++;
++			}
++
++			skb = gmac_skb_if_good_frame(gmac, word0, frame_len);
++			if (unlikely(!skb))
++				goto err_drop;
++
++			/* Skip the alignment padding before the frame data */
++			page_offs += NET_IP_ALIGN;
++			frag_len -= NET_IP_ALIGN;
++			frag_nr = 0;
++
++		} else if (!skb) {
++			/* Continuation of a frame we already dropped */
++			put_page(page);
++			continue;
++		}
++
++		/* The last fragment carries the remainder of the frame */
++		if (word3.bits32 & EOF_BIT)
++			frag_len = frame_len - skb->len;
++
++		/* append page frag to skb */
++		if (unlikely(frag_nr == MAX_SKB_FRAGS))
++			goto err_drop;
++
++		if (frag_len == 0)
++			netdev_err(dev, "Received fragment with len = 0\n");
++
++		skb_fill_page_desc(skb, frag_nr, page, page_offs, frag_len);
++		skb->len += frag_len;
++		skb->data_len += frag_len;
++		skb->truesize += frag_len;
++		frag_nr++;
++
++		if (word3.bits32 & EOF_BIT) {
++			/* Frame complete: hand it to GRO */
++			napi_gro_frags(&gmac->napi);
++			skb = NULL;
++			--budget;
++		}
++		continue;
++
++err_drop:
++		if (skb) {
++			napi_free_frags(&gmac->napi);
++			skb = NULL;
++		}
++
++		if (mapping)
++			put_page(page);
++
++		gmac->stats.rx_dropped++;
++	}
++
++	/* Return the consumed descriptors to the hardware */
++	writew(r, ptr_reg);
++	return budget;
++}
++
++/* NAPI poll: receive up to @budget frames, then either complete and
++ * re-arm the RX interrupt (no work left) or stay scheduled. Also tops
++ * up the free queue once enough fragments have been consumed.
++ */
++static int gmac_napi_poll(struct napi_struct *napi, int budget)
++{
++	struct gmac_private *gmac = netdev_priv(napi->dev);
++	struct toe_private *toe = gmac->toe;
++	unsigned rx;
++	unsigned freeq_threshold = 1 << (toe->freeq_order - 1);	/* half the queue */
++
++	u64_stats_update_begin(&gmac->rx_stats_syncp);
++
++	/* gmac_rx() returns the budget left over */
++	rx = budget - gmac_rx(napi->dev, budget);
++
++	if (rx == 0) {
++		/* No packets processed: flush GRO, complete, re-arm IRQ */
++		napi_gro_flush(napi, false);
++		__napi_complete(napi);
++		gmac_enable_rx_irq(napi->dev, 1);
++		++gmac->rx_napi_exits;
++	}
++
++	gmac->freeq_refill += rx;
++	if (gmac->freeq_refill > freeq_threshold) {
++		gmac->freeq_refill -= freeq_threshold;
++		toe_fill_freeq(toe, 0);
++	}
++
++	u64_stats_update_end(&gmac->rx_stats_syncp);
++	return rx;
++}
++
++/* Dump the interrupt, RX/TX DMA and free-queue register state of @dev
++ * to the kernel log; used by the TX watchdog and the fatal-IRQ path
++ * for post-mortem debugging. Read-only except for the log output.
++ */
++static void gmac_dump_dma_state(struct net_device *dev)
++{
++	struct gmac_private *gmac = netdev_priv(dev);
++	struct toe_private *toe = gmac->toe;
++	void __iomem *ptr_reg;
++	unsigned reg[5];
++
++	/* Interrupt status */
++	reg[0] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_0_REG);
++	reg[1] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_1_REG);
++	reg[2] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_2_REG);
++	reg[3] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_3_REG);
++	reg[4] = readl(toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG);
++	netdev_err(dev, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
++		   reg[0], reg[1], reg[2], reg[3], reg[4]);
++
++	/* Interrupt enable */
++	reg[0] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG);
++	reg[1] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_1_REG);
++	reg[2] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_2_REG);
++	reg[3] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_3_REG);
++	reg[4] = readl(toe->iomem + GLOBAL_INTERRUPT_ENABLE_4_REG);
++	netdev_err(dev, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
++		   reg[0], reg[1], reg[2], reg[3], reg[4]);
++
++	/* RX DMA status */
++	reg[0] = readl(gmac->dma_iomem + GMAC_DMA_RX_FIRST_DESC_REG);
++	reg[1] = readl(gmac->dma_iomem + GMAC_DMA_RX_CURR_DESC_REG);
++	reg[2] = GET_RPTR(gmac->rxq_rwptr);
++	reg[3] = GET_WPTR(gmac->rxq_rwptr);
++	netdev_err(dev, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
++		   reg[0], reg[1], reg[2], reg[3]);
++
++	reg[0] = readl(gmac->dma_iomem + GMAC_DMA_RX_DESC_WORD0_REG);
++	reg[1] = readl(gmac->dma_iomem + GMAC_DMA_RX_DESC_WORD1_REG);
++	reg[2] = readl(gmac->dma_iomem + GMAC_DMA_RX_DESC_WORD2_REG);
++	reg[3] = readl(gmac->dma_iomem + GMAC_DMA_RX_DESC_WORD3_REG);
++	netdev_err(dev, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
++		   reg[0], reg[1], reg[2], reg[3]);
++
++	/* TX DMA status */
++	ptr_reg = gmac->dma_iomem + GMAC_SW_TX_QUEUE0_PTR_REG;
++
++	reg[0] = readl(gmac->dma_iomem + GMAC_DMA_TX_FIRST_DESC_REG);
++	reg[1] = readl(gmac->dma_iomem + GMAC_DMA_TX_CURR_DESC_REG);
++	reg[2] = GET_RPTR(ptr_reg);
++	reg[3] = GET_WPTR(ptr_reg);
++	netdev_err(dev, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
++		   reg[0], reg[1], reg[2], reg[3]);
++
++	reg[0] = readl(gmac->dma_iomem + GMAC_DMA_TX_DESC_WORD0_REG);
++	reg[1] = readl(gmac->dma_iomem + GMAC_DMA_TX_DESC_WORD1_REG);
++	reg[2] = readl(gmac->dma_iomem + GMAC_DMA_TX_DESC_WORD2_REG);
++	reg[3] = readl(gmac->dma_iomem + GMAC_DMA_TX_DESC_WORD3_REG);
++	netdev_err(dev, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
++		   reg[0], reg[1], reg[2], reg[3]);
++
++	/* FREE queues status */
++	ptr_reg = toe->iomem + GLOBAL_SWFQ_RWPTR_REG;
++
++	reg[0] = GET_RPTR(ptr_reg);
++	reg[1] = GET_WPTR(ptr_reg);
++
++	ptr_reg = toe->iomem + GLOBAL_HWFQ_RWPTR_REG;
++
++	reg[2] = GET_RPTR(ptr_reg);
++	reg[3] = GET_WPTR(ptr_reg);
++	netdev_err(dev, "FQ SW ptr: %u %u, HW ptr: %u %u\n",
++		   reg[0], reg[1], reg[2], reg[3]);
++}
++
++/* Fold the hardware MIB counters into the software statistics and ack
++ * the MIB interrupt for this port. The "+=" accumulation suggests the
++ * counters are clear-on-read -- presumed from usage, verify against
++ * the datasheet.
++ */
++static void gmac_update_hw_stats(struct net_device *dev)
++{
++	struct gmac_private *gmac = netdev_priv(dev);
++	struct toe_private *toe = gmac->toe;
++	unsigned long flags;
++	unsigned int rx_discards, rx_mcast, rx_bcast;
++
++	spin_lock_irqsave(&toe->irq_lock, flags);
++	u64_stats_update_begin(&gmac->ir_stats_syncp);
++
++	gmac->hw_stats[0] += rx_discards = readl(gmac->ctl_iomem + GMAC_IN_DISCARDS);
++	gmac->hw_stats[1] += readl(gmac->ctl_iomem + GMAC_IN_ERRORS);
++	gmac->hw_stats[2] += rx_mcast = readl(gmac->ctl_iomem + GMAC_IN_MCAST);
++	gmac->hw_stats[3] += rx_bcast = readl(gmac->ctl_iomem + GMAC_IN_BCAST);
++	gmac->hw_stats[4] += readl(gmac->ctl_iomem + GMAC_IN_MAC1);
++	gmac->hw_stats[5] += readl(gmac->ctl_iomem + GMAC_IN_MAC2);
++
++	gmac->stats.rx_missed_errors += rx_discards;
++	gmac->stats.multicast += rx_mcast;
++	/* NOTE(review): broadcast frames are folded into .multicast as
++	 * well -- presumably intentional, but confirm.
++	 */
++	gmac->stats.multicast += rx_bcast;
++
++	/* Ack the per-port MIB interrupt */
++	writel(GMAC0_MIB_INT_BIT << (dev->dev_id * 8),
++	       toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG);
++
++	u64_stats_update_end(&gmac->ir_stats_syncp);
++	spin_unlock_irqrestore(&toe->irq_lock, flags);
++}
++
++/* Return the pending-and-enabled interrupt bits of status bank @i. */
++static inline unsigned gmac_get_intr_flags(struct net_device *dev, int i)
++{
++	struct gmac_private *gmac = netdev_priv(dev);
++	struct toe_private *toe = gmac->toe;
++	unsigned stride = GLOBAL_INTERRUPT_STATUS_1_REG -
++			  GLOBAL_INTERRUPT_STATUS_0_REG;
++	void __iomem *status = toe->iomem + GLOBAL_INTERRUPT_STATUS_0_REG +
++			       i * stride;
++	void __iomem *enable = toe->iomem + GLOBAL_INTERRUPT_ENABLE_0_REG +
++			       i * stride;
++
++	return readl(status) & readl(enable);
++}
++
++/* RX coalescing timer expired: kick NAPI to process the held-back
++ * frames. One-shot timer, so never restarted from here.
++ */
++enum hrtimer_restart gmac_coalesce_delay_expired(struct hrtimer *timer)
++{
++	struct gmac_private *gmac = container_of(timer, struct gmac_private,
++						 rx_coalesce_timer);
++
++	napi_schedule(&gmac->napi);
++	return HRTIMER_NORESTART;
++}
++
++/* Top-level interrupt handler for one port: dispatches TX completion,
++ * RX (directly or via the coalescing hrtimer), MIB overflow and error
++ * sources. Returns IRQ_HANDLED if any enabled source was pending.
++ */
++static irqreturn_t gmac_irq(int irq, void *data)
++{
++	struct net_device *dev = data;
++	struct gmac_private *gmac = netdev_priv(dev);
++	struct toe_private *toe = gmac->toe;
++	unsigned val, orr = 0;
++
++	orr |= val = gmac_get_intr_flags(dev, 0);
++
++	if (unlikely(val & (GMAC0_IRQ0_2 << (dev->dev_id * 2)))) {
++		/* oh, crap. */
++		netdev_err(dev, "hw failure/sw bug\n");
++		gmac_dump_dma_state(dev);
++
++		/* don't know how to recover, just reduce losses */
++		gmac_enable_irq(dev, 0);
++		return IRQ_HANDLED;
++	}
++
++	if (val & (GMAC0_IRQ0_TXQ0_INTS << (dev->dev_id * 6)))
++		gmac_tx_irq(dev, 0);
++
++	orr |= val = gmac_get_intr_flags(dev, 1);
++
++	if (val & (DEFAULT_Q0_INT_BIT << dev->dev_id)) {
++		/* RX work pending: mask the IRQ, then poll now or after
++		 * the configured coalescing delay.
++		 */
++		gmac_enable_rx_irq(dev, 0);
++
++		if (!gmac->rx_coalesce_nsecs)
++			napi_schedule(&gmac->napi);
++		else {
++			ktime_t ktime;
++			ktime = ktime_set(0, gmac->rx_coalesce_nsecs);
++			hrtimer_start(&gmac->rx_coalesce_timer, ktime, HRTIMER_MODE_REL);
++		}
++	}
++
++	orr |= val = gmac_get_intr_flags(dev, 4);
++
++	if (unlikely(val & (GMAC0_MIB_INT_BIT << (dev->dev_id * 8))))
++		gmac_update_hw_stats(dev);
++
++	if (unlikely(val & (GMAC0_RX_OVERRUN_INT_BIT << (dev->dev_id * 8)))) {
++		/* NOTE(review): the condition tests the RX_OVERRUN bit but
++		 * the ack below writes the RXDERR bit -- verify which bit
++		 * is intended here.
++		 */
++		writel(GMAC0_RXDERR_INT_BIT << (dev->dev_id * 8),
++		       toe->iomem + GLOBAL_INTERRUPT_STATUS_4_REG);
++
++		spin_lock(&toe->irq_lock);
++		u64_stats_update_begin(&gmac->ir_stats_syncp);
++		++gmac->stats.rx_fifo_errors;
++		u64_stats_update_end(&gmac->ir_stats_syncp);
++		spin_unlock(&toe->irq_lock);
++	}
++
++	return orr ? IRQ_HANDLED : IRQ_NONE;
++}
++
++static void gmac_start_dma(struct gmac_private *gmac)
++{
++ void __iomem *dma_ctrl_reg = gmac->dma_iomem + GMAC_DMA_CTRL_REG;