usb: gadget: u_ether: process rx in a workqueue and aggregate tx packets

Move rx frame processing out of the usb_request completion handler into a
dedicated workqueue, account unwrap errors in the rx stats, and add
multi-packet tx aggregation: skbs are copied into preallocated per-request
buffers, bounded by dl_max_pkts_per_xfer and TX_REQ_THRESHOLD. Pending rx
frames are flushed on disconnect, and the workqueue is created at module
init.
[firefly-linux-kernel-4.4.55.git] drivers/usb/gadget/function/u_ether.c
index b644248f4b8e31532e73b93bce50e7d14af01b58..e4920e5e1d647ff92c5118545309f1eb99396c36 100644
@@ -53,6 +53,8 @@
  * blocks and still have efficient handling. */
 #define GETHER_MAX_ETH_FRAME_LEN 15412
 
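+/* bottom-half workqueue: rx frame processing and deferred rx refill */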
+static struct workqueue_struct *uether_wq;
+
 struct eth_dev {
        /* lock is held while accessing port_usb
         */
@@ -64,19 +66,27 @@ struct eth_dev {
 
        spinlock_t              req_lock;       /* guard {rx,tx}_reqs */
        struct list_head        tx_reqs, rx_reqs;
-       atomic_t                tx_qlen;
+       unsigned                tx_qlen;
+/* Minimum number of TX USB requests queued to the UDC */
+#define TX_REQ_THRESHOLD       5
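+       /* no_tx_req_used: tx requests currently queued to the UDC;
+        * tx_skb_hold_count: skbs copied into the request being filled;
+        * tx_req_bufsize: size of each preallocated aggregation buffer
+        */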
+       int                     no_tx_req_used;
+       int                     tx_skb_hold_count;
+       u32                     tx_req_bufsize;
 
        struct sk_buff_head     rx_frames;
 
        unsigned                qmult;
 
        unsigned                header_len;
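+       /* ul_max_pkts_per_xfer scales rx (OUT) buffer size;
+        * dl_max_pkts_per_xfer bounds tx (IN) aggregation
+        */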
+       unsigned                ul_max_pkts_per_xfer;
+       unsigned                dl_max_pkts_per_xfer;
        struct sk_buff          *(*wrap)(struct gether *, struct sk_buff *skb);
        int                     (*unwrap)(struct gether *,
                                                struct sk_buff *skb,
                                                struct sk_buff_head *list);
 
        struct work_struct      work;
+       struct work_struct      rx_work;
 
        unsigned long           todo;
 #define        WORK_RX_MEMORY          0
@@ -230,9 +240,13 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
        size += out->maxpacket - 1;
        size -= size % out->maxpacket;
 
+       /* scale the buffer so one OUT transfer can carry several packets */
+       if (dev->ul_max_pkts_per_xfer)
+               size *= dev->ul_max_pkts_per_xfer;
+
        if (dev->port_usb->is_fixed)
                size = max_t(size_t, size, dev->port_usb->fixed_out_len);
 
+       DBG(dev, "%s: size: %zu\n", __func__, size);
        skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
        if (skb == NULL) {
                DBG(dev, "no rx skb\n");
@@ -258,18 +272,16 @@ enomem:
                DBG(dev, "rx submit --> %d\n", retval);
                if (skb)
                        dev_kfree_skb_any(skb);
-               spin_lock_irqsave(&dev->req_lock, flags);
-               list_add(&req->list, &dev->rx_reqs);
-               spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return retval;
 }
 
 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
 {
-       struct sk_buff  *skb = req->context, *skb2;
+       struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;
        int             status = req->status;
+       bool            queue = false;
 
        switch (status) {
 
@@ -285,6 +297,10 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
                                status = dev->unwrap(dev->port_usb,
                                                        skb,
                                                        &dev->rx_frames);
+                               if (status == -EINVAL)
+                                       dev->net->stats.rx_errors++;
+                               else if (status == -EOVERFLOW)
+                                       dev->net->stats.rx_over_errors++;
                        } else {
                                dev_kfree_skb_any(skb);
                                status = -ENOTCONN;
@@ -293,30 +309,8 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
                } else {
                        skb_queue_tail(&dev->rx_frames, skb);
                }
-               skb = NULL;
-
-               skb2 = skb_dequeue(&dev->rx_frames);
-               while (skb2) {
-                       if (status < 0
-                                       || ETH_HLEN > skb2->len
-                                       || skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
-                               dev->net->stats.rx_errors++;
-                               dev->net->stats.rx_length_errors++;
-                               DBG(dev, "rx length %d\n", skb2->len);
-                               dev_kfree_skb_any(skb2);
-                               goto next_frame;
-                       }
-                       skb2->protocol = eth_type_trans(skb2, dev->net);
-                       dev->net->stats.rx_packets++;
-                       dev->net->stats.rx_bytes += skb2->len;
-
-                       /* no buffer copies needed, unless hardware can't
-                        * use skb buffers.
-                        */
-                       status = netif_rx(skb2);
-next_frame:
-                       skb2 = skb_dequeue(&dev->rx_frames);
-               }
+               if (!status)
+                       queue = true;
                break;
 
        /* software-driven interface shutdown */
@@ -339,22 +333,20 @@ quiesce:
                /* FALLTHROUGH */
 
        default:
+               queue = true;
+               dev_kfree_skb_any(skb);
                dev->net->stats.rx_errors++;
                DBG(dev, "rx status %d\n", status);
                break;
        }
 
-       if (skb)
-               dev_kfree_skb_any(skb);
-       if (!netif_running(dev->net)) {
 clean:
-               spin_lock(&dev->req_lock);
-               list_add(&req->list, &dev->rx_reqs);
-               spin_unlock(&dev->req_lock);
-               req = NULL;
-       }
-       if (req)
-               rx_submit(dev, req, GFP_ATOMIC);
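+       /* always recycle the request; the rx workqueue refills the ring */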
+       spin_lock(&dev->req_lock);
+       list_add(&req->list, &dev->rx_reqs);
+       spin_unlock(&dev->req_lock);
+
+       if (queue)
+               queue_work(uether_wq, &dev->rx_work);
 }
 
 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@ -419,16 +411,24 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
 {
        struct usb_request      *req;
        unsigned long           flags;
+       int                     req_cnt = 0;
 
        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
        while (!list_empty(&dev->rx_reqs)) {
+               /* break the nexus of continuous completion and re-submission */
+               if (++req_cnt > qlen(dev->gadget, dev->qmult))
+                       break;
+
                req = container_of(dev->rx_reqs.next,
                                struct usb_request, list);
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);
 
                if (rx_submit(dev, req, gfp_flags) < 0) {
+                       spin_lock_irqsave(&dev->req_lock, flags);
+                       list_add(&req->list, &dev->rx_reqs);
+                       spin_unlock_irqrestore(&dev->req_lock, flags);
                        defer_kevent(dev, WORK_RX_MEMORY);
                        return;
                }
@@ -438,6 +438,36 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
        spin_unlock_irqrestore(&dev->req_lock, flags);
 }
 
+static void process_rx_w(struct work_struct *work)
+{
+       struct eth_dev  *dev = container_of(work, struct eth_dev, rx_work);
+       struct sk_buff  *skb;
+       int             status = 0;
+
+       if (!dev->port_usb)
+               return;
+
+       while ((skb = skb_dequeue(&dev->rx_frames))) {
+               if (status < 0
+                               || ETH_HLEN > skb->len
+                               || skb->len > GETHER_MAX_ETH_FRAME_LEN) {
+                       dev->net->stats.rx_errors++;
+                       dev->net->stats.rx_length_errors++;
+                       DBG(dev, "rx length %d\n", skb->len);
+                       dev_kfree_skb_any(skb);
+                       continue;
+               }
+               skb->protocol = eth_type_trans(skb, dev->net);
+               dev->net->stats.rx_packets++;
+               dev->net->stats.rx_bytes += skb->len;
+
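+               /* workqueue context, so the process-context variant is safe */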
+               status = netif_rx_ni(skb);
+       }
+
+       if (netif_running(dev->net))
+               rx_fill(dev, GFP_KERNEL);
+}
+
 static void eth_work(struct work_struct *work)
 {
        struct eth_dev  *dev = container_of(work, struct eth_dev, work);
@@ -455,6 +485,11 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
 {
        struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;
+       struct net_device *net = dev->net;
+       struct usb_request *new_req;
+       struct usb_ep *in;
+       int length;
+       int retval;
 
        switch (req->status) {
        default:
@@ -465,16 +500,74 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
        case -ESHUTDOWN:                /* disconnect etc */
                break;
        case 0:
-               dev->net->stats.tx_bytes += skb->len;
+               /* req->zero is cleared when a pad byte stood in for a zlp;
+                * don't count that pad byte as payload
+                */
+               if (!req->zero)
+                       dev->net->stats.tx_bytes += req->length - 1;
+               else
+                       dev->net->stats.tx_bytes += req->length;
        }
        dev->net->stats.tx_packets++;
 
        spin_lock(&dev->req_lock);
-       list_add(&req->list, &dev->tx_reqs);
-       spin_unlock(&dev->req_lock);
-       dev_kfree_skb_any(skb);
+       list_add_tail(&req->list, &dev->tx_reqs);
+
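+       /* multi-packet mode: this request goes back to the pool; if the
+        * next pooled request already holds aggregated data, queue it now
+        */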
+       if (dev->port_usb->multi_pkt_xfer) {
+               dev->no_tx_req_used--;
+               req->length = 0;
+               in = dev->port_usb->in_ep;
+
+               if (!list_empty(&dev->tx_reqs)) {
+                       new_req = container_of(dev->tx_reqs.next,
+                                       struct usb_request, list);
+                       list_del(&new_req->list);
+                       spin_unlock(&dev->req_lock);
+                       if (new_req->length > 0) {
+                               length = new_req->length;
+
+                               /* NCM requires no zlp if transfer is
+                                * dwNtbInMaxSize */
+                               if (dev->port_usb->is_fixed &&
+                                       length == dev->port_usb->fixed_in_len &&
+                                       (length % in->maxpacket) == 0)
+                                       new_req->zero = 0;
+                               else
+                                       new_req->zero = 1;
+
+                               /* use zlp framing on tx for strict CDC-Ether
+                                * conformance, though any robust network rx
+                                * path ignores extra padding. and some hardware
+                                * doesn't like to write zlps.
+                                */
+                               if (new_req->zero && !dev->zlp &&
+                                               (length % in->maxpacket) == 0) {
+                                       new_req->zero = 0;
+                                       length++;
+                               }
+
+                               new_req->length = length;
+                               retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
+                               switch (retval) {
+                               default:
+                                       DBG(dev, "tx queue err %d\n", retval);
+                                       /* put the request back so it is not
+                                        * leaked on a queueing failure
+                                        */
+                                       spin_lock(&dev->req_lock);
+                                       list_add_tail(&new_req->list,
+                                                       &dev->tx_reqs);
+                                       spin_unlock(&dev->req_lock);
+                                       break;
+                               case 0:
+                                       spin_lock(&dev->req_lock);
+                                       dev->no_tx_req_used++;
+                                       spin_unlock(&dev->req_lock);
+                                       net->trans_start = jiffies;
+                               }
+                       } else {
+                               spin_lock(&dev->req_lock);
+                               list_add(&new_req->list, &dev->tx_reqs);
+                               spin_unlock(&dev->req_lock);
+                       }
+               } else {
+                       spin_unlock(&dev->req_lock);
+               }
+       } else {
+               spin_unlock(&dev->req_lock);
+               dev_kfree_skb_any(skb);
+       }
 
-       atomic_dec(&dev->tx_qlen);
        if (netif_carrier_ok(dev->net))
                netif_wake_queue(dev->net);
 }
@@ -484,6 +577,26 @@ static inline int is_promisc(u16 cdc_filter)
        return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
 }
 
+/* Preallocate one aggregation buffer per tx request; called from the
+ * xmit path, hence GFP_ATOMIC.
+ */
+static int alloc_tx_buffer(struct eth_dev *dev)
+{
+       struct list_head        *act;
+       struct usb_request      *req;
+
+       dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
+                               (dev->net->mtu
+                               + sizeof(struct ethhdr)
+                               /* size of rndis_packet_msg_type */
+                               + 44
+                               + 22));
+
+       list_for_each(act, &dev->tx_reqs) {
+               req = container_of(act, struct usb_request, list);
+               if (!req->buf) {
+                       req->buf = kmalloc(dev->tx_req_bufsize,
+                                               GFP_ATOMIC);
+                       if (!req->buf) {
+                               dev->tx_req_bufsize = 0;
+                               return -ENOMEM;
+                       }
+               }
+       }
+       return 0;
+}
+
 static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                                        struct net_device *net)
 {
@@ -510,6 +623,10 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
+       /* Allocate memory for tx_reqs to support multi packet transfer;
+        * drop the frame if allocation fails
+        */
+       if (dev->port_usb->multi_pkt_xfer && !dev->tx_req_bufsize &&
+                       alloc_tx_buffer(dev) < 0) {
+               if (skb)
+                       dev_kfree_skb_any(skb);
+               dev->net->stats.tx_dropped++;
+               return NETDEV_TX_OK;
+       }
+
        /* apply outgoing CDC or RNDIS filters */
        if (skb && !is_promisc(cdc_filter)) {
                u8              *dest = skb->data;
@@ -572,9 +689,37 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                }
        }
 
-       length = skb->len;
-       req->buf = skb->data;
-       req->context = skb;
+       /* count this skb toward the aggregation threshold */
+       spin_lock_irqsave(&dev->req_lock, flags);
+       dev->tx_skb_hold_count++;
+       spin_unlock_irqrestore(&dev->req_lock, flags);
+
+       if (dev->port_usb->multi_pkt_xfer) {
+               memcpy(req->buf + req->length, skb->data, skb->len);
+               req->length = req->length + skb->len;
+               length = req->length;
+               dev_kfree_skb_any(skb);
+
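+               /* hold a partially filled request back for more packets
+                * while enough other requests are already in flight
+                */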
+               spin_lock_irqsave(&dev->req_lock, flags);
+               if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) {
+                       if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
+                               list_add(&req->list, &dev->tx_reqs);
+                               spin_unlock_irqrestore(&dev->req_lock, flags);
+                               goto success;
+                       }
+               }
+
+               dev->no_tx_req_used++;
+               dev->tx_skb_hold_count = 0;
+               spin_unlock_irqrestore(&dev->req_lock, flags);
+       } else {
+               length = skb->len;
+               req->buf = skb->data;
+               req->context = skb;
+       }
+
        req->complete = tx_complete;
 
        /* NCM requires no zlp if transfer is dwNtbInMaxSize */
@@ -589,18 +734,27 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
         * though any robust network rx path ignores extra padding.
         * and some hardware doesn't like to write zlps.
         */
-       if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
+       if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
+               req->zero = 0;
                length++;
+       }
 
        req->length = length;
 
-       /* throttle high/super speed IRQ rate back slightly */
-       if (gadget_is_dualspeed(dev->gadget))
-               req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
-                                      dev->gadget->speed == USB_SPEED_SUPER)) &&
-                                       !list_empty(&dev->tx_reqs))
-                       ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
-                       : 0;
+       /* throttle high speed IRQ rate back slightly: ask for a completion
+        * interrupt only on every (qmult/2)th queued request
+        */
+       if (gadget_is_dualspeed(dev->gadget) &&
+                        (dev->gadget->speed == USB_SPEED_HIGH) &&
+                        !list_empty(&dev->tx_reqs)) {
+               dev->tx_qlen++;
+               if (dev->tx_qlen == (dev->qmult / 2)) {
+                       req->no_interrupt = 0;
+                       dev->tx_qlen = 0;
+               } else {
+                       req->no_interrupt = 1;
+               }
+       } else {
+               req->no_interrupt = 0;
+       }
 
        retval = usb_ep_queue(in, req, GFP_ATOMIC);
        switch (retval) {
@@ -609,11 +763,11 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                break;
        case 0:
                net->trans_start = jiffies;
-               atomic_inc(&dev->tx_qlen);
        }
 
        if (retval) {
-               dev_kfree_skb_any(skb);
+               if (!dev->port_usb->multi_pkt_xfer)
+                       dev_kfree_skb_any(skb);
 drop:
                dev->net->stats.tx_dropped++;
 multiframe:
@@ -623,6 +777,7 @@ multiframe:
                list_add(&req->list, &dev->tx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
+success:
        return NETDEV_TX_OK;
 }
 
@@ -636,7 +791,7 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
        rx_fill(dev, gfp_flags);
 
        /* and open the tx floodgates */
-       atomic_set(&dev->tx_qlen, 0);
+       dev->tx_qlen = 0;
        netif_wake_queue(dev->net);
 }
 
@@ -782,6 +937,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
+       INIT_WORK(&dev->rx_work, process_rx_w);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);
 
@@ -844,6 +1000,7 @@ struct net_device *gether_setup_name_default(const char *netname)
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
+       INIT_WORK(&dev->rx_work, process_rx_w);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);
 
@@ -1078,8 +1235,13 @@ struct net_device *gether_connect(struct gether *link)
                dev->header_len = link->header_len;
                dev->unwrap = link->unwrap;
                dev->wrap = link->wrap;
+               dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
+               dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer;
 
                spin_lock(&dev->lock);
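+               /* reset tx aggregation state for this session */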
+               dev->tx_skb_hold_count = 0;
+               dev->no_tx_req_used = 0;
+               dev->tx_req_bufsize = 0;
                dev->port_usb = link;
                if (netif_running(dev->net)) {
                        if (link->open)
@@ -1124,6 +1286,7 @@ void gether_disconnect(struct gether *link)
 {
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;
+       struct sk_buff          *skb;
 
        WARN_ON(!dev);
        if (!dev)
@@ -1146,6 +1309,8 @@ void gether_disconnect(struct gether *link)
                list_del(&req->list);
 
                spin_unlock(&dev->req_lock);
+               if (link->multi_pkt_xfer)
+                       kfree(req->buf);
                usb_ep_free_request(link->in_ep, req);
                spin_lock(&dev->req_lock);
        }
@@ -1164,6 +1329,12 @@ void gether_disconnect(struct gether *link)
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
+
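+       /* drop any rx frames still parked for the workqueue */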
+       spin_lock(&dev->rx_frames.lock);
+       while ((skb = __skb_dequeue(&dev->rx_frames)))
+               dev_kfree_skb_any(skb);
+       spin_unlock(&dev->rx_frames.lock);
+
        link->out_ep->desc = NULL;
 
        /* finish forgetting about this USB link episode */
@@ -1177,5 +1348,23 @@ void gether_disconnect(struct gether *link)
 }
 EXPORT_SYMBOL_GPL(gether_disconnect);
 
-MODULE_LICENSE("GPL");
+static int __init gether_init(void)
+{
+       uether_wq  = create_singlethread_workqueue("uether");
+       if (!uether_wq) {
+               pr_err("%s: Unable to create workqueue: uether\n", __func__);
+               return -ENOMEM;
+       }
+       return 0;
+}
+module_init(gether_init);
+
+static void __exit gether_exit(void)
+{
+       destroy_workqueue(uether_wq);
+}
+module_exit(gether_exit);
 MODULE_AUTHOR("David Brownell");
+MODULE_DESCRIPTION("ethernet over USB driver");
+MODULE_LICENSE("GPL v2");
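
For context, the paths added above are opt-in: a function driver fills the
new gether fields before calling gether_connect(). A minimal sketch
(hypothetical function name and illustrative values; only the gether fields
and gether_connect() come from this file):

        /* sketch: an RNDIS-style function opting in to aggregation */
        static int my_rndis_enable_aggregation(struct gether *link)
        {
                struct net_device *net;

                link->ul_max_pkts_per_xfer = 3;  /* rx_submit() scales OUT buffers */
                link->dl_max_pkts_per_xfer = 10; /* max skbs copied per IN request */
                link->multi_pkt_xfer = true;     /* enable the tx aggregation path */

                net = gether_connect(link);
                return IS_ERR(net) ? PTR_ERR(net) : 0;
        }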