amd-xgbe: Add BQL support
[firefly-linux-kernel-4.4.55.git] drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index c3533e104c61eda8f23981707dd765a3bd5fcc04..f9635281c7df8d819b8b980cf83fb98403db303e 100644
@@ -876,7 +876,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 static void xgbe_stop(struct xgbe_prv_data *pdata)
 {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_channel *channel;
        struct net_device *netdev = pdata->netdev;
+       struct netdev_queue *txq;
+       unsigned int i;
 
        DBGPR("-->xgbe_stop\n");
 
@@ -890,6 +893,15 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
        hw_if->disable_tx(pdata);
        hw_if->disable_rx(pdata);
 
+       channel = pdata->channel;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               if (!channel->tx_ring)
+                       continue;
+
+               txq = netdev_get_tx_queue(netdev, channel->queue_index);
+               netdev_tx_reset_queue(txq);
+       }
+
        DBGPR("<--xgbe_stop\n");
 }
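
Byte queue limits hang off three generic netdev calls, and this hunk supplies the third of them: resetting the per-queue accounting in the stop path so a restarted interface does not inherit stale in-flight byte counts. A minimal sketch of the full pattern, assuming one TX ring per queue index; the example_* names are placeholders, not xgbe helpers:

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>

    /* Transmit path: account the bytes handed to the hardware ring. */
    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *ndev)
    {
            struct netdev_queue *txq;

            txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));

            /* ... map buffers and fill descriptors here ... */

            netdev_tx_sent_queue(txq, skb->len);
            return NETDEV_TX_OK;
    }

    /* Completion path: report what the hardware actually finished. */
    static void example_tx_complete(struct net_device *ndev, unsigned int idx,
                                    unsigned int packets, unsigned int bytes)
    {
            netdev_tx_completed_queue(netdev_get_tx_queue(ndev, idx),
                                      packets, bytes);
    }

    /* Stop path: drop the in-flight accounting before a restart. */
    static void example_stop_queue(struct net_device *ndev, unsigned int idx)
    {
            netdev_tx_reset_queue(netdev_get_tx_queue(ndev, idx));
    }

The sent/completed pair is what lets the stack bound how many bytes sit unconsumed in the ring; the reset keeps that bound honest across xgbe_stop()/xgbe_start() cycles.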
 
@@ -1156,6 +1168,12 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
              packet->tcp_header_len, packet->tcp_payload_len);
        DBGPR("  packet->mss=%u\n", packet->mss);
 
+       /* Update the number of packets that will ultimately be transmitted
+        * along with the extra bytes for each extra packet
+        */
+       packet->tx_packets = skb_shinfo(skb)->gso_segs;
+       packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
+
        return 0;
 }
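
The extra arithmetic is needed because a TSO skb leaves the wire as gso_segs separate frames, each carrying its own copy of the protocol headers, while skb->len (used to seed tx_bytes in xgbe_packet_info() below) counts those headers only once. A worked sketch with illustrative numbers:

    /* header_len = 54 (14 Ethernet + 20 IP + 20 TCP), mss = 1460
     * payload    = 2920  ->  gso_segs = 2
     * skb->len   = 54 + 2920 = 2974
     * wire bytes = 2974 + (2 - 1) * 54 = 3028  (= 2 * 1514)
     */
    static unsigned int example_tso_wire_bytes(const struct sk_buff *skb,
                                               unsigned int header_len)
    {
            return skb->len + (skb_shinfo(skb)->gso_segs - 1) * header_len;
    }

Reporting the post-segmentation byte count keeps the BQL accounting exact for TSO traffic; otherwise every segment after the first would have its header bytes under-reported.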
 
@@ -1184,14 +1202,17 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
        context_desc = 0;
        packet->rdesc_count = 0;
 
+       packet->tx_packets = 1;
+       packet->tx_bytes = skb->len;
+
        if (xgbe_is_tso(skb)) {
-               /* TSO requires an extra desriptor if mss is different */
+               /* TSO requires an extra descriptor if mss is different */
                if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
                        context_desc = 1;
                        packet->rdesc_count++;
                }
 
-               /* TSO requires an extra desriptor for TSO header */
+               /* TSO requires an extra descriptor for TSO header */
                packet->rdesc_count++;
 
                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
@@ -1369,9 +1390,6 @@ static int xgbe_close(struct net_device *netdev)
        /* Free the ring descriptors and buffers */
        desc_if->free_ring_resources(pdata);
 
-       /* Free the channel and ring structures */
-       xgbe_free_channels(pdata);
-
        /* Release the interrupts */
        devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
        if (pdata->per_channel_irq) {
@@ -1380,6 +1398,9 @@ static int xgbe_close(struct net_device *netdev)
                        devm_free_irq(pdata->dev, channel->dma_irq, channel);
        }
 
+       /* Free the channel and ring structures */
+       xgbe_free_channels(pdata);
+
        /* Disable the clocks */
        clk_disable_unprepare(pdata->ptpclk);
        clk_disable_unprepare(pdata->sysclk);
@@ -1400,12 +1421,14 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_packet_data *packet;
+       struct netdev_queue *txq;
        unsigned long flags;
        int ret;
 
        DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
 
        channel = pdata->channel + skb->queue_mapping;
+       txq = netdev_get_tx_queue(netdev, channel->queue_index);
        ring = channel->tx_ring;
        packet = &ring->packet_data;
 
@@ -1447,6 +1470,9 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        xgbe_prep_tx_tstamp(pdata, skb, packet);
 
+       /* Report on the actual number of bytes (to be) sent */
+       netdev_tx_sent_queue(txq, packet->tx_bytes);
+
        /* Configure required descriptor fields for transmission */
        hw_if->dev_xmit(channel);
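
Note that the byte count handed to netdev_tx_sent_queue() above is packet->tx_bytes rather than skb->len: for TSO it already includes the replicated per-segment headers computed in xgbe_prep_tso(), so the xmit-side accounting matches what xgbe_tx_poll() will later report as completed.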
 
@@ -1661,12 +1687,21 @@ static int xgbe_set_features(struct net_device *netdev,
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       netdev_features_t rxcsum, rxvlan, rxvlan_filter;
+       netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+       int ret = 0;
 
+       rxhash = pdata->netdev_features & NETIF_F_RXHASH;
        rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
        rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
        rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
 
+       if ((features & NETIF_F_RXHASH) && !rxhash)
+               ret = hw_if->enable_rss(pdata);
+       else if (!(features & NETIF_F_RXHASH) && rxhash)
+               ret = hw_if->disable_rss(pdata);
+       if (ret)
+               return ret;
+
        if ((features & NETIF_F_RXCSUM) && !rxcsum)
                hw_if->enable_rx_csum(pdata);
        else if (!(features & NETIF_F_RXCSUM) && rxcsum)
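
The RXHASH handling follows the shape of the existing RXCSUM and VLAN toggles: compare the requested feature bit against the state cached in pdata->netdev_features and call into the hardware layer only on a transition, bailing out early if the RSS update fails. A common alternative idiom XORs the old and new feature masks instead of caching individual bits; a sketch, assuming equivalent enable/disable hooks exist:

    static int example_set_features(struct net_device *ndev,
                                    netdev_features_t features)
    {
            /* ndev->features still holds the old mask at this point */
            netdev_features_t changed = ndev->features ^ features;

            if (changed & NETIF_F_RXHASH) {
                    /* enable or disable hardware RSS accordingly */
            }

            return 0;
    }
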
@@ -1738,14 +1773,14 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
        u8 *packet;
        unsigned int copy_len;
 
-       skb = netdev_alloc_skb_ip_align(netdev, rdata->rx_hdr.dma_len);
+       skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
        if (!skb)
                return NULL;
 
-       packet = page_address(rdata->rx_hdr.pa.pages) +
-                rdata->rx_hdr.pa.pages_offset;
-       copy_len = (rdata->hdr_len) ? rdata->hdr_len : *len;
-       copy_len = min(rdata->rx_hdr.dma_len, copy_len);
+       packet = page_address(rdata->rx.hdr.pa.pages) +
+                rdata->rx.hdr.pa.pages_offset;
+       copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
+       copy_len = min(rdata->rx.hdr.dma_len, copy_len);
        skb_copy_to_linear_data(skb, packet, copy_len);
        skb_put(skb, copy_len);
 
@@ -1763,8 +1798,10 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        struct net_device *netdev = pdata->netdev;
+       struct netdev_queue *txq;
        unsigned long flags;
        int processed = 0;
+       unsigned int tx_packets = 0, tx_bytes = 0;
 
        DBGPR("-->xgbe_tx_poll\n");
 
@@ -1772,6 +1809,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
        if (!ring)
                return 0;
 
+       txq = netdev_get_tx_queue(netdev, channel->queue_index);
+
        spin_lock_irqsave(&ring->lock, flags);
 
        while ((processed < XGBE_TX_DESC_MAX_PROC) &&
@@ -1782,10 +1821,19 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
                if (!hw_if->tx_complete(rdesc))
                        break;
 
+               /* Make sure descriptor fields are read after reading the OWN
+                * bit */
+               rmb();
+
 #ifdef XGMAC_ENABLE_TX_DESC_DUMP
                xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
 #endif
 
+               if (hw_if->is_last_desc(rdesc)) {
+                       tx_packets += rdata->tx.packets;
+                       tx_bytes += rdata->tx.bytes;
+               }
+
                /* Free the SKB and reset the descriptor for re-use */
                desc_if->unmap_rdata(pdata, rdata);
                hw_if->tx_desc_reset(rdata);
@@ -1794,14 +1842,20 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
                ring->dirty++;
        }
 
+       if (!processed)
+               goto unlock;
+
+       netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
+
        if ((ring->tx.queue_stopped == 1) &&
            (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
                ring->tx.queue_stopped = 0;
-               netif_wake_subqueue(netdev, channel->queue_index);
+               netif_tx_wake_queue(txq);
        }
 
        DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
 
+unlock:
        spin_unlock_irqrestore(&ring->lock, flags);
 
        return processed;
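
Two details in this completion path are easy to miss. First, the rmb() orders the OWN-bit check against the reads of the remaining descriptor fields; the write side is expected to mirror it with a write barrier before granting the descriptor to hardware. Sketched as plain pseudocode (the producer half is an assumption about dev_xmit, not quoted from it):

    /* producer (xmit path):
     *         fill descriptor fields;
     *         wmb();            -- publish the fields before OWN
     *         set OWN bit;      -- hand the descriptor to hardware
     *
     * consumer (this poll loop):
     *         if (OWN still set)
     *                 break;
     *         rmb();            -- OWN read before the other fields
     *         read descriptor fields;
     */

Second, packets and bytes are accumulated only on last descriptors, so the totals passed to netdev_tx_completed_queue() balance one-for-one with what xgbe_xmit() reported via netdev_tx_sent_queue(), as BQL requires. The switch from netif_wake_subqueue() to netif_tx_wake_queue() is not a behavior change: the former is essentially a queue lookup followed by the latter, and the txq is already in hand here.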
@@ -1887,40 +1941,40 @@ read_again:
                }
 
                if (!context) {
-                       put_len = rdata->len - len;
+                       put_len = rdata->rx.len - len;
                        len += put_len;
 
                        if (!skb) {
                                dma_sync_single_for_cpu(pdata->dev,
-                                                       rdata->rx_hdr.dma,
-                                                       rdata->rx_hdr.dma_len,
+                                                       rdata->rx.hdr.dma,
+                                                       rdata->rx.hdr.dma_len,
                                                        DMA_FROM_DEVICE);
 
                                skb = xgbe_create_skb(pdata, rdata, &put_len);
                                if (!skb) {
                                        error = 1;
-                                       goto read_again;
+                                       goto skip_data;
                                }
                        }
 
                        if (put_len) {
                                dma_sync_single_for_cpu(pdata->dev,
-                                                       rdata->rx_buf.dma,
-                                                       rdata->rx_buf.dma_len,
+                                                       rdata->rx.buf.dma,
+                                                       rdata->rx.buf.dma_len,
                                                        DMA_FROM_DEVICE);
 
                                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-                                               rdata->rx_buf.pa.pages,
-                                               rdata->rx_buf.pa.pages_offset,
-                                               put_len, rdata->rx_buf.dma_len);
-                               rdata->rx_buf.pa.pages = NULL;
+                                               rdata->rx.buf.pa.pages,
+                                               rdata->rx.buf.pa.pages_offset,
+                                               put_len, rdata->rx.buf.dma_len);
+                               rdata->rx.buf.pa.pages = NULL;
                        }
                }
 
+skip_data:
                if (incomplete || context_next)
                        goto read_again;
 
-               /* Stray Context Descriptor? */
                if (!skb)
                        goto next_packet;
 
@@ -1960,6 +2014,11 @@ read_again:
                        hwtstamps->hwtstamp = ns_to_ktime(nsec);
                }
 
+               if (XGMAC_GET_BITS(packet->attributes,
+                                  RX_PACKET_ATTRIBUTES, RSS_HASH))
+                       skb_set_hash(skb, packet->rss_hash,
+                                    packet->rss_hash_type);
+
                skb->dev = netdev;
                skb->protocol = eth_type_trans(skb, netdev);
                skb_record_rx_queue(skb, channel->queue_index);
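
Recording the hardware RSS hash with skb_set_hash() lets the stack (RPS, flow steering, and so on) reuse it instead of recomputing a software hash per packet. The hash type matters: PKT_HASH_TYPE_L4 asserts the hash covers the transport 4-tuple, PKT_HASH_TYPE_L3 only the addresses; packet->rss_hash_type is presumably derived from the descriptor's hash-type field during RX descriptor parsing. A minimal sketch, with that parsing assumed done elsewhere:

    #include <linux/skbuff.h>

    /* Hand a hardware-computed hash to the stack. */
    static void example_record_rx_hash(struct sk_buff *skb, u32 hw_hash,
                                       bool l4_hash)
    {
            skb_set_hash(skb, hw_hash,
                         l4_hash ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
    }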