usb: gadget: net2280: physically disable endpoint on disable operation
[firefly-linux-kernel-4.4.55.git] / drivers / usb / gadget / udc / net2280.c
1 /*
2  * Driver for the PLX NET2280 USB device controller.
3  * Specs and errata are available from <http://www.plxtech.com>.
4  *
5  * PLX Technology Inc. (formerly NetChip Technology) supported the
6  * development of this driver.
7  *
8  *
9  * CODE STATUS HIGHLIGHTS
10  *
11  * This driver should work well with most "gadget" drivers, including
12  * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
13  * as well as Gadget Zero and Gadgetfs.
14  *
15  * DMA is enabled by default.
16  *
17  * MSI is enabled by default.  The legacy IRQ is used if MSI couldn't
18  * be enabled.
19  *
20  * Note that almost all the errata workarounds here are only needed for
21  * rev1 chips.  Rev1a silicon (0110) fixes almost all of them.
22  */
23
24 /*
25  * Copyright (C) 2003 David Brownell
26  * Copyright (C) 2003-2005 PLX Technology, Inc.
27  * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
28  *
29  * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
30  *      with 2282 chip
31  *
32  * Modified Ricardo Ribalda Qtechnology AS  to provide compatibility
33  *      with usb 338x chip. Based on PLX driver
34  *
35  * This program is free software; you can redistribute it and/or modify
36  * it under the terms of the GNU General Public License as published by
37  * the Free Software Foundation; either version 2 of the License, or
38  * (at your option) any later version.
39  */
40
41 #include <linux/module.h>
42 #include <linux/pci.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/kernel.h>
45 #include <linux/delay.h>
46 #include <linux/ioport.h>
47 #include <linux/slab.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/timer.h>
51 #include <linux/list.h>
52 #include <linux/interrupt.h>
53 #include <linux/moduleparam.h>
54 #include <linux/device.h>
55 #include <linux/usb/ch9.h>
56 #include <linux/usb/gadget.h>
57 #include <linux/prefetch.h>
58 #include <linux/io.h>
59
60 #include <asm/byteorder.h>
61 #include <asm/irq.h>
62 #include <asm/unaligned.h>
63
64 #define DRIVER_DESC             "PLX NET228x/USB338x USB Peripheral Controller"
65 #define DRIVER_VERSION          "2005 Sept 27/v3.0"
66
67 #define EP_DONTUSE              13      /* nonzero */
68
69 #define USE_RDK_LEDS            /* GPIO pins control three LEDs */
70
71
72 static const char driver_name[] = "net2280";
73 static const char driver_desc[] = DRIVER_DESC;
74
75 static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
76 static const char ep0name[] = "ep0";
77 static const char *const ep_name[] = {
78         ep0name,
79         "ep-a", "ep-b", "ep-c", "ep-d",
80         "ep-e", "ep-f", "ep-g", "ep-h",
81 };
82
83 /* Endpoint names for usb3380 advance mode */
84 static const char *const ep_name_adv[] = {
85         ep0name,
86         "ep1in", "ep2out", "ep3in", "ep4out",
87         "ep1out", "ep2in", "ep3out", "ep4in",
88 };
89
90 /* mode 0 == ep-{a,b,c,d} 1K fifo each
91  * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
92  * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
93  */
94 static ushort fifo_mode;
95
96 /* "modprobe net2280 fifo_mode=1" etc */
97 module_param(fifo_mode, ushort, 0644);
98
99 /* enable_suspend -- When enabled, the driver will respond to
100  * USB suspend requests by powering down the NET2280.  Otherwise,
101  * USB suspend requests will be ignored.  This is acceptable for
102  * self-powered devices
103  */
104 static bool enable_suspend;
105
106 /* "modprobe net2280 enable_suspend=1" etc */
107 module_param(enable_suspend, bool, 0444);
108
109 #define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
110
111 static char *type_string(u8 bmAttributes)
112 {
113         switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
114         case USB_ENDPOINT_XFER_BULK:    return "bulk";
115         case USB_ENDPOINT_XFER_ISOC:    return "iso";
116         case USB_ENDPOINT_XFER_INT:     return "intr";
117         }
118         return "control";
119 }
120
121 #include "net2280.h"
122
123 #define valid_bit       cpu_to_le32(BIT(VALID_BIT))
124 #define dma_done_ie     cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))
125
126 static void ep_clear_seqnum(struct net2280_ep *ep);
127
128 /*-------------------------------------------------------------------------*/
129 static inline void enable_pciirqenb(struct net2280_ep *ep)
130 {
131         u32 tmp = readl(&ep->dev->regs->pciirqenb0);
132
133         if (ep->dev->quirks & PLX_LEGACY)
134                 tmp |= BIT(ep->num);
135         else
136                 tmp |= BIT(ep_bit[ep->num]);
137         writel(tmp, &ep->dev->regs->pciirqenb0);
138
139         return;
140 }
141
/*
 * net2280_enable - configure and activate a non-control endpoint
 * @_ep: endpoint handed out by the gadget framework (must not be ep0)
 * @desc: USB endpoint descriptor selecting type, direction and maxpacket
 *
 * Programs ep_cfg (type, direction, number, fifo byte count), flushes the
 * fifo, sets the NAK-OUT policy, and unmasks either the per-packet pio
 * interrupts or the dma-completion interrupt, depending on whether the
 * endpoint has a dma channel.  Returns 0 on success or a negative errno;
 * all failures are logged through the print_err label.
 */
static int
net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct net2280		*dev;
	struct net2280_ep	*ep;
	u32			max;
	u32 tmp = 0;
	u32 type;
	unsigned long		flags;
	/* per-hw-endpoint table consulted in 338x enhanced mode below */
	static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };
	int ret = 0;

	/* NOTE: container_of is pointer arithmetic only; ep is not
	 * dereferenced unless _ep was non-NULL (checked first below)
	 */
	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name ||
			desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_err("%s: failed at line=%d\n", __func__, __LINE__);
		return -EINVAL;
	}
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
		ret = -ESHUTDOWN;
		goto print_err;
	}

	/* erratum 0119 workaround ties up an endpoint number */
	if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE) {
		ret = -EDOM;
		goto print_err;
	}

	if (dev->quirks & PLX_SUPERSPEED) {
		/* 338x: endpoint numbers 0x0c and above are not usable */
		if ((desc->bEndpointAddress & 0x0f) >= 0x0c) {
			ret = -EDOM;
			goto print_err;
		}
		ep->is_in = !!usb_endpoint_dir_in(desc);
		/* enhanced mode fixes each hw endpoint's direction; reject
		 * an IN descriptor on an endpoint keyed as OUT-only
		 */
		if (dev->enhanced_mode && ep->is_in && ep_key[ep->num]) {
			ret = -EINVAL;
			goto print_err;
		}
	}

	/* sanity check ep-e/ep-f since their fifos are small */
	max = usb_endpoint_maxp(desc) & 0x1fff;
	if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) {
		ret = -ERANGE;
		goto print_err;
	}

	spin_lock_irqsave(&dev->lock, flags);
	_ep->maxpacket = max & 0x7ff;
	ep->desc = desc;

	/* ep_reset() has already been called */
	ep->stopped = 0;
	ep->wedged = 0;
	ep->out_overflow = 0;

	/* set speed-dependent max packet; may kick in high bandwidth */
	set_max_speed(ep, max);

	/* set type, direction, address; reset fifo counters */
	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);

	if ((dev->quirks & PLX_SUPERSPEED) && dev->enhanced_mode) {
		tmp = readl(&ep->cfg->ep_cfg);
		/* If USB ep number doesn't match hardware ep number */
		if ((tmp & 0xf) != usb_endpoint_num(desc)) {
			ret = -EINVAL;
			spin_unlock_irqrestore(&dev->lock, flags);
			goto print_err;
		}
		/* clear only our direction's config bits; the opposite
		 * direction shares this ep_cfg register
		 */
		if (ep->is_in)
			tmp &= ~USB3380_EP_CFG_MASK_IN;
		else
			tmp &= ~USB3380_EP_CFG_MASK_OUT;
	}
	type = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
	if (type == USB_ENDPOINT_XFER_INT) {
		/* erratum 0105 workaround prevents hs NYET */
		if (dev->chiprev == 0100 &&
				dev->gadget.speed == USB_SPEED_HIGH &&
				!(desc->bEndpointAddress & USB_DIR_IN))
			writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
				&ep->regs->ep_rsp);
	} else if (type == USB_ENDPOINT_XFER_BULK) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
		    (dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			ret = -ERANGE;
			goto print_err;
		}
	}
	ep->is_iso = (type == USB_ENDPOINT_XFER_ISOC);
	/* Enable this endpoint */
	if (dev->quirks & PLX_LEGACY) {
		tmp |= type << ENDPOINT_TYPE;
		tmp |= desc->bEndpointAddress;
		/* default full fifo lines */
		tmp |= (4 << ENDPOINT_BYTE_COUNT);
		tmp |= BIT(ENDPOINT_ENABLE);
		ep->is_in = (tmp & USB_DIR_IN) != 0;
	} else {
		/* In Legacy mode, only OUT endpoints are used */
		if (dev->enhanced_mode && ep->is_in) {
			tmp |= type << IN_ENDPOINT_TYPE;
			tmp |= BIT(IN_ENDPOINT_ENABLE);
		} else {
			tmp |= type << OUT_ENDPOINT_TYPE;
			tmp |= BIT(OUT_ENDPOINT_ENABLE);
			tmp |= (ep->is_in << ENDPOINT_DIRECTION);
		}

		tmp |= (4 << ENDPOINT_BYTE_COUNT);
		if (!dev->enhanced_mode)
			tmp |= usb_endpoint_num(desc);
		tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
	}

	/* Make sure all the registers are written before ep_rsp*/
	wmb();

	/* for OUT transfers, block the rx fifo until a read is posted */
	if (!ep->is_in)
		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	else if (!(dev->quirks & PLX_2280)) {
		/* Added for 2282, Don't use nak packets on an in endpoint,
		 * this was ignored on 2280
		 */
		writel(BIT(CLEAR_NAK_OUT_PACKETS) |
			BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
	}

	if (dev->quirks & PLX_SUPERSPEED)
		ep_clear_seqnum(ep);
	writel(tmp, &ep->cfg->ep_cfg);

	/* enable irqs */
	if (!ep->dma) {				/* pio, per-packet */
		enable_pciirqenb(ep);

		tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
			BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
		if (dev->quirks & PLX_2280)
			tmp |= readl(&ep->regs->ep_irqenb);
		writel(tmp, &ep->regs->ep_irqenb);
	} else {				/* dma, per-request */
		tmp = BIT((8 + ep->num));	/* completion */
		tmp |= readl(&dev->regs->pciirqenb1);
		writel(tmp, &dev->regs->pciirqenb1);

		/* for short OUT transfers, dma completions can't
		 * advance the queue; do it pio-style, by hand.
		 * NOTE erratum 0112 workaround #2
		 */
		if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
			tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
			writel(tmp, &ep->regs->ep_irqenb);

			enable_pciirqenb(ep);
		}
	}

	tmp = desc->bEndpointAddress;
	ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
		_ep->name, tmp & 0x0f, DIR_STRING(tmp),
		type_string(desc->bmAttributes),
		ep->dma ? "dma" : "pio", max);

	/* pci writes may still be posted */
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;

print_err:
	dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
	return ret;
}
321
322 static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
323 {
324         u32     result;
325
326         do {
327                 result = readl(ptr);
328                 if (result == ~(u32)0)          /* "device unplugged" */
329                         return -ENODEV;
330                 result &= mask;
331                 if (result == done)
332                         return 0;
333                 udelay(1);
334                 usec--;
335         } while (usec > 0);
336         return -ETIMEDOUT;
337 }
338
339 static const struct usb_ep_ops net2280_ep_ops;
340
/*
 * ep_reset_228x - return a 228x endpoint to its power-on defaults
 * @regs: chip-wide register block (for the pciirqenb0/1 masks)
 * @ep: endpoint to quiesce
 *
 * Drops the software configuration (ep->desc, queue head), masks the
 * endpoint's dma or pio interrupts, restores the default NAK-OUT policy,
 * and scrubs all latched status bits.  Caller holds the device lock.
 */
static void ep_reset_228x(struct net2280_regs __iomem *regs,
			  struct net2280_ep *ep)
{
	u32		tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		/* stop the channel, then ack any pending dma events */
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
			BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
			BIT(DMA_ABORT),
			&ep->dma->dmastat);

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep->num);
		writel(tmp, &regs->pciirqenb0);
	} else {
		tmp = readl(&regs->pciirqenb1);
		tmp &= ~BIT((8 + ep->num));	/* completion */
		writel(tmp, &regs->pciirqenb1);
	}
	writel(0, &ep->regs->ep_irqenb);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read (+note erratum 0112)
	 */
	if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
		tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
		BIT(SET_NAK_OUT_PACKETS) |
		BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
		BIT(CLEAR_INTERRUPT_MODE);
	} else {
		/* added for 2282 */
		tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
		BIT(CLEAR_NAK_OUT_PACKETS) |
		BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
		BIT(CLEAR_INTERRUPT_MODE);
	}

	/* data endpoints additionally get their toggle and halt cleared */
	if (ep->num != 0) {
		tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
			BIT(CLEAR_ENDPOINT_HALT);
	}
	writel(tmp, &ep->regs->ep_rsp);

	/* scrub most status bits, and flush any fifo state */
	if (ep->dev->quirks & PLX_2280)
		tmp = BIT(FIFO_OVERFLOW) |
			BIT(FIFO_UNDERFLOW);
	else
		tmp = 0;

	writel(tmp | BIT(TIMEOUT) |
		BIT(USB_STALL_SENT) |
		BIT(USB_IN_NAK_SENT) |
		BIT(USB_IN_ACK_RCVD) |
		BIT(USB_OUT_PING_NAK_SENT) |
		BIT(USB_OUT_ACK_SENT) |
		BIT(FIFO_FLUSH) |
		BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
		BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
		BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
		BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_IN_TOKEN_INTERRUPT),
		&ep->regs->ep_stat);

	/* fifo size is handled separately */
}
416
/*
 * ep_reset_338x - return a usb338x endpoint to its power-on defaults
 * @regs: chip-wide register block (for the pciirqenb0/1 masks)
 * @ep: endpoint to quiesce
 *
 * Like ep_reset_228x() but for the 338x register layout: drops software
 * state, masks dma/pio interrupts, scrubs latched status, and finally
 * clears this direction's bits in ep_cfg so the endpoint is physically
 * disabled, not merely idle.  Caller holds the device lock.
 */
static void ep_reset_338x(struct net2280_regs __iomem *regs,
					struct net2280_ep *ep)
{
	u32 tmp, dmastat;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
		       BIT(DMA_PAUSE_DONE_INTERRUPT) |
		       BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
		       BIT(DMA_TRANSACTION_DONE_INTERRUPT),
		       /* | BIT(DMA_ABORT), */
		       &ep->dma->dmastat);

		/* hw quirk: dmastat can read back a stuck 0x5002 value;
		 * writing 0x5a here is the observed way to clear it
		 */
		dmastat = readl(&ep->dma->dmastat);
		if (dmastat == 0x5002) {
			ep_warn(ep->dev, "The dmastat return = %x!!\n",
			       dmastat);
			writel(0x5a, &ep->dma->dmastat);
		}

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep_bit[ep->num]);
		writel(tmp, &regs->pciirqenb0);
	} else {
		/* only eps 1..4 have completion bits in pciirqenb1 */
		if (ep->num < 5) {
			tmp = readl(&regs->pciirqenb1);
			tmp &= ~BIT((8 + ep->num));	/* completion */
			writel(tmp, &regs->pciirqenb1);
		}
	}
	writel(0, &ep->regs->ep_irqenb);

	/* ack every latched per-endpoint status bit */
	writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
	       BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
	       BIT(FIFO_OVERFLOW) |
	       BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
	       BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
	       BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
	       BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);

	/* clear only our direction's ep_cfg bits; the other direction
	 * shares this register and must keep its configuration
	 */
	tmp = readl(&ep->cfg->ep_cfg);
	if (ep->is_in)
		tmp &= ~USB3380_EP_CFG_MASK_IN;
	else
		tmp &= ~USB3380_EP_CFG_MASK_OUT;
	writel(tmp, &ep->cfg->ep_cfg);
}
472
473 static void nuke(struct net2280_ep *);
474
/*
 * net2280_disable - gadget-facing endpoint disable (usb_ep_ops.disable)
 * @_ep: endpoint to shut down (never ep0)
 *
 * Aborts (nukes) every request still queued on the endpoint, then resets
 * the endpoint hardware via the chip-specific ep_reset helper so it is
 * physically disabled.  Returns 0, or -EINVAL for a bad/unconfigured ep.
 */
static int net2280_disable(struct usb_ep *_ep)
{
	struct net2280_ep	*ep;
	unsigned long		flags;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !ep->desc || _ep->name == ep0name) {
		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
		return -EINVAL;
	}
	spin_lock_irqsave(&ep->dev->lock, flags);
	nuke(ep);

	if (ep->dev->quirks & PLX_SUPERSPEED)
		ep_reset_338x(ep->dev->regs, ep);
	else
		ep_reset_228x(ep->dev->regs, ep);

	ep_vdbg(ep->dev, "disabled %s %s\n",
			ep->dma ? "dma" : "pio", _ep->name);

	/* synch memory views with the device */
	(void)readl(&ep->cfg->ep_cfg);

	/* restore the default dma-channel mapping for eps 1..4 so a later
	 * enable can use dma again
	 */
	if (!ep->dma && ep->num >= 1 && ep->num <= 4)
		ep->dma = &ep->dev->dma[ep->num - 1];

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}
505
506 /*-------------------------------------------------------------------------*/
507
508 static struct usb_request
509 *net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
510 {
511         struct net2280_ep       *ep;
512         struct net2280_request  *req;
513
514         if (!_ep) {
515                 pr_err("%s: Invalid ep\n", __func__);
516                 return NULL;
517         }
518         ep = container_of(_ep, struct net2280_ep, ep);
519
520         req = kzalloc(sizeof(*req), gfp_flags);
521         if (!req)
522                 return NULL;
523
524         INIT_LIST_HEAD(&req->queue);
525
526         /* this dma descriptor may be swapped with the previous dummy */
527         if (ep->dma) {
528                 struct net2280_dma      *td;
529
530                 td = pci_pool_alloc(ep->dev->requests, gfp_flags,
531                                 &req->td_dma);
532                 if (!td) {
533                         kfree(req);
534                         return NULL;
535                 }
536                 td->dmacount = 0;       /* not VALID */
537                 td->dmadesc = td->dmaaddr;
538                 req->td = td;
539         }
540         return &req->req;
541 }
542
543 static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
544 {
545         struct net2280_ep       *ep;
546         struct net2280_request  *req;
547
548         ep = container_of(_ep, struct net2280_ep, ep);
549         if (!_ep || !_req) {
550                 dev_err(&ep->dev->pdev->dev, "%s: Inavlid ep=%p or req=%p\n",
551                                                         __func__, _ep, _req);
552                 return;
553         }
554
555         req = container_of(_req, struct net2280_request, req);
556         WARN_ON(!list_empty(&req->queue));
557         if (req->td)
558                 pci_pool_free(ep->dev->requests, req->td, req->td_dma);
559         kfree(req);
560 }
561
562 /*-------------------------------------------------------------------------*/
563
564 /* load a packet into the fifo we use for usb IN transfers.
565  * works for all endpoints.
566  *
567  * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
568  * at a time, but this code is simpler because it knows it only writes
569  * one packet.  ep-a..ep-d should use dma instead.
570  */
/*
 * write_fifo - pio-copy one IN packet from @req into the endpoint fifo
 * @ep: IN endpoint whose fifo is currently empty (caller's invariant)
 * @req: request supplying data, or NULL to send a zero-length packet
 *
 * Copies min(maxpacket, remaining) bytes in 32-bit little-endian words,
 * then validates any short tail via set_fifo_bytecount().
 */
static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
{
	struct net2280_ep_regs	__iomem *regs = ep->regs;
	u8			*buf;
	u32			tmp;
	unsigned		count, total;

	/* INVARIANT:  fifo is currently empty. (testable) */

	if (req) {
		buf = req->buf + req->actual;
		prefetch(buf);
		total = req->length - req->actual;
	} else {
		/* no request: emit a zero-length packet */
		total = 0;
		buf = NULL;
	}

	/* write just one packet at a time */
	count = ep->ep.maxpacket;
	if (count > total)	/* min() cannot be used on a bitfield */
		count = total;

	ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
			ep->ep.name, count,
			(count != ep->ep.maxpacket) ? " (short)" : "",
			req);
	while (count >= 4) {
		/* NOTE be careful if you try to align these. fifo lines
		 * should normally be full (4 bytes) and successive partial
		 * lines are ok only in certain cases.
		 */
		tmp = get_unaligned((u32 *)buf);
		cpu_to_le32s(&tmp);
		writel(tmp, &regs->ep_data);
		buf += 4;
		count -= 4;
	}

	/* last fifo entry is "short" unless we wrote a full packet.
	 * also explicitly validate last word in (periodic) transfers
	 * when maxpacket is not a multiple of 4 bytes.
	 */
	if (count || total < ep->ep.maxpacket) {
		/* the word read may include bytes past the valid tail;
		 * set_fifo_bytecount() limits how many the chip sends
		 */
		tmp = count ? get_unaligned((u32 *)buf) : count;
		cpu_to_le32s(&tmp);
		set_fifo_bytecount(ep, count & 0x03);
		writel(tmp, &regs->ep_data);
	}

	/* pci writes may still be posted */
}
623
624 /* work around erratum 0106: PCI and USB race over the OUT fifo.
625  * caller guarantees chiprev 0100, out endpoint is NAKing, and
626  * there's no real data in the fifo.
627  *
628  * NOTE:  also used in cases where that erratum doesn't apply:
629  * where the host wrote "too much" data to us.
630  */
/*
 * out_flush - discard the OUT fifo contents (erratum 0106 workaround)
 * @ep: OUT endpoint; caller guarantees it is NAKing and the fifo holds
 *      no real data (also used for host-wrote-too-much overflow cleanup)
 *
 * Re-asserts NAK-OUT if it was lost, acks the receive status bits, and
 * flushes the fifo; on full speed it then waits for the NAK handshake so
 * CLEAR_NAK_OUT_PACKETS is safe afterwards.
 */
static void out_flush(struct net2280_ep *ep)
{
	u32	__iomem *statp;
	u32	tmp;

	statp = &ep->regs->ep_stat;

	tmp = readl(statp);
	if (tmp & BIT(NAK_OUT_PACKETS)) {
		ep_dbg(ep->dev, "%s %s %08x !NAK\n",
			ep->ep.name, __func__, tmp);
		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	}

	writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT),
		statp);
	writel(BIT(FIFO_FLUSH), statp);
	/* Make sure that statp is written */
	mb();
	tmp = readl(statp);
	if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) &&
			/* high speed did bulk NYET; fifo isn't filling */
			ep->dev->gadget.speed == USB_SPEED_FULL) {
		unsigned	usec;

		usec = 50;		/* 64 byte bulk/interrupt */
		handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
				BIT(USB_OUT_PING_NAK_SENT), usec);
		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
	}
}
663
664 /* unload packet(s) from the fifo we use for usb OUT transfers.
665  * returns true iff the request completed, because of short packet
666  * or the request buffer having filled with full packets.
667  *
668  * for ep-a..ep-d this will read multiple packets out when they
669  * have been accepted.
670  */
/*
 * read_fifo - pio-copy available OUT fifo data into @req's buffer
 * @ep: OUT endpoint
 * @req: request to fill from the fifo
 *
 * Returns nonzero iff the request completed: a short packet arrived, or
 * the buffer filled with full packets (and req.zero is not set).  On
 * overflow the excess is flushed and req.req.status is set -EOVERFLOW.
 */
static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
{
	struct net2280_ep_regs	__iomem *regs = ep->regs;
	u8			*buf = req->req.buf + req->req.actual;
	unsigned		count, tmp, is_short;
	unsigned		cleanup = 0, prevent = 0;

	/* erratum 0106 ... packets coming in during fifo reads might
	 * be incompletely rejected.  not all cases have workarounds.
	 */
	if (ep->dev->chiprev == 0x0100 &&
			ep->dev->gadget.speed == USB_SPEED_FULL) {
		udelay(1);
		tmp = readl(&ep->regs->ep_stat);
		if ((tmp & BIT(NAK_OUT_PACKETS)))
			cleanup = 1;	/* flush fifo after the copy */
		else if ((tmp & BIT(FIFO_FULL))) {
			start_out_naking(ep);
			prevent = 1;	/* un-NAK after the copy */
		}
		/* else: hope we don't see the problem */
	}

	/* never overflow the rx buffer. the fifo reads packets until
	 * it sees a short one; we might not be ready for them all.
	 */
	prefetchw(buf);
	count = readl(&regs->ep_avail);
	if (unlikely(count == 0)) {
		udelay(1);
		tmp = readl(&ep->regs->ep_stat);
		count = readl(&regs->ep_avail);
		/* handled that data already? */
		if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
			return 0;
	}

	/* clamp to the space remaining in the request buffer */
	tmp = req->req.length - req->req.actual;
	if (count > tmp) {
		/* as with DMA, data overflow gets flushed */
		if ((tmp % ep->ep.maxpacket) != 0) {
			ep_err(ep->dev,
				"%s out fifo %d bytes, expected %d\n",
				ep->ep.name, count, tmp);
			req->req.status = -EOVERFLOW;
			cleanup = 1;
			/* NAK_OUT_PACKETS will be set, so flushing is safe;
			 * the next read will start with the next packet
			 */
		} /* else it's a ZLP, no worries */
		count = tmp;
	}
	req->req.actual += count;

	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);

	ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
			ep->ep.name, count, is_short ? " (short)" : "",
			cleanup ? " flush" : "", prevent ? " nak" : "",
			req, req->req.actual, req->req.length);

	/* drain in 32-bit words, then byte-by-byte for any tail */
	while (count >= 4) {
		tmp = readl(&regs->ep_data);
		cpu_to_le32s(&tmp);
		put_unaligned(tmp, (u32 *)buf);
		buf += 4;
		count -= 4;
	}
	if (count) {
		tmp = readl(&regs->ep_data);
		/* LE conversion is implicit here: */
		do {
			*buf++ = (u8) tmp;
			tmp >>= 8;
		} while (--count);
	}
	if (cleanup)
		out_flush(ep);
	if (prevent) {
		writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
		(void) readl(&ep->regs->ep_rsp);
	}

	return is_short || ((req->req.actual == req->req.length) &&
			!req->req.zero);
}
757
758 /* fill out dma descriptor to match a given request */
/*
 * fill_dma_desc - populate a request's dma descriptor
 * @ep: endpoint the transfer belongs to (sets direction/end-of-chain)
 * @req: request whose td describes the transfer
 * @valid: nonzero to mark the descriptor VALID so hw may start on it
 *
 * The dmacount word is written last, after a wmb(), because the chip may
 * already be polling VALID_BIT in this descriptor.
 */
static void fill_dma_desc(struct net2280_ep *ep,
					struct net2280_request *req, int valid)
{
	struct net2280_dma	*td = req->td;
	u32			dmacount = req->req.length;

	/* don't let DMA continue after a short OUT packet,
	 * so overruns can't affect the next transfer.
	 * in case of overruns on max-size packets, we can't
	 * stop the fifo from filling but we can flush it.
	 */
	if (ep->is_in)
		dmacount |= BIT(DMA_DIRECTION);
	if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
					!(ep->dev->quirks & PLX_2280))
		dmacount |= BIT(END_OF_CHAIN);

	req->valid = valid;
	if (valid)
		dmacount |= BIT(VALID_BIT);
	dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);

	/* td->dmadesc = previously set by caller */
	td->dmaaddr = cpu_to_le32 (req->req.dma);

	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
	wmb();
	td->dmacount = cpu_to_le32(dmacount);
}
788
/* Default DMACTL value used whenever a channel is (re)started: scatter/
 * gather with descriptor VALID-bit polling, per the erratum 0116 notes.
 */
static const u32 dmactl_default =
		BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
		BIT(DMA_CLEAR_COUNT_ENABLE) |
		/* erratum 0116 workaround part 1 (use POLLING) */
		(POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
		BIT(DMA_VALID_BIT_POLLING_ENABLE) |
		BIT(DMA_VALID_BIT_ENABLE) |
		BIT(DMA_SCATTER_GATHER_ENABLE) |
		/* erratum 0116 workaround part 2 (no AUTOSTART) */
		BIT(DMA_ENABLE);
799
/* Busy-wait (up to 50 us) for the channel's DMA_ENABLE bit to clear. */
static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
{
	handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
}
804
/* Request the channel to stop (clear DMA_ENABLE), then wait for it. */
static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
{
	writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
	spin_stop_dma(dma);
}
810
/*
 * Program the channel registers and kick off a scatter/gather DMA
 * chain whose first descriptor sits at bus address @td_dma, using the
 * control bits in @dmactl.  The register write order below matters
 * (erratum 0116 workaround part 3 included).
 */
static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
	struct net2280_dma_regs __iomem *dma = ep->dma;
	unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);

	if (!(ep->dev->quirks & PLX_2280))
		tmp |= BIT(END_OF_CHAIN);

	writel(tmp, &dma->dmacount);
	/* ack any stale dmastat bits (write-1-to-clear) */
	writel(readl(&dma->dmastat), &dma->dmastat);

	writel(td_dma, &dma->dmadesc);
	if (ep->dev->quirks & PLX_SUPERSPEED)
		dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
	writel(dmactl, &dma->dmactl);

	/* erratum 0116 workaround part 3:  pci arbiter away from net2280 */
	(void) readl(&ep->dev->pci->pcimstctl);

	writel(BIT(DMA_START), &dma->dmastat);

	if (!ep->is_in)
		stop_out_naking(ep);
}
835
/*
 * Start DMA for @req on an idle channel.  If an OUT endpoint already
 * has short-packet data sitting in the FIFO, drain it with a one-shot
 * (non-scatter/gather) transfer and fake the descriptor status so the
 * normal completion path works; otherwise build the request's
 * descriptor and start the scatter/gather queue.
 * Called with the device lock held; no DMA may be active yet.
 */
static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
{
	u32			tmp;
	struct net2280_dma_regs __iomem *dma = ep->dma;

	/* FIXME can't use DMA for ZLPs */

	/* on this path we "know" there's no dma active (yet) */
	WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
	writel(0, &ep->dma->dmactl);

	/* previous OUT packet might have been short */
	if (!ep->is_in && (readl(&ep->regs->ep_stat) &
				BIT(NAK_OUT_PACKETS))) {
		writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
			&ep->regs->ep_stat);

		tmp = readl(&ep->regs->ep_avail);
		if (tmp) {
			writel(readl(&dma->dmastat), &dma->dmastat);

			/* transfer all/some fifo data */
			writel(req->req.dma, &dma->dmaaddr);
			tmp = min(tmp, req->req.length);

			/* dma irq, faking scatterlist status */
			req->td->dmacount = cpu_to_le32(req->req.length - tmp);
			writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp,
					&dma->dmacount);
			req->td->dmadesc = 0;
			req->valid = 1;

			writel(BIT(DMA_ENABLE), &dma->dmactl);
			writel(BIT(DMA_START), &dma->dmastat);
			return;
		}
	}

	tmp = dmactl_default;

	/* force packet boundaries between dma requests, but prevent the
	 * controller from automagically writing a last "short" packet
	 * (zero length) unless the driver explicitly said to do that.
	 */
	if (ep->is_in) {
		if (likely((req->req.length % ep->ep.maxpacket) ||
							req->req.zero)){
			tmp |= BIT(DMA_FIFO_VALIDATE);
			ep->in_fifo_validate = 1;
		} else
			ep->in_fifo_validate = 0;
	}

	/* init req->td, pointing to the current dummy */
	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
	fill_dma_desc(ep, req, 1);

	req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));

	start_queue(ep, tmp, req->td_dma);
}
897
898 static inline void
899 queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
900 {
901         struct net2280_dma      *end;
902         dma_addr_t              tmp;
903
904         /* swap new dummy for old, link; fill and maybe activate */
905         end = ep->dummy;
906         ep->dummy = req->td;
907         req->td = end;
908
909         tmp = ep->td_dma;
910         ep->td_dma = req->td_dma;
911         req->td_dma = tmp;
912
913         end->dmadesc = cpu_to_le32 (ep->td_dma);
914
915         fill_dma_desc(ep, req, valid);
916 }
917
/*
 * Complete one request: unlink it from the endpoint queue, record its
 * final status (unless the caller already set one), unmap the DMA
 * buffer for DMA endpoints, and invoke the gadget driver's completion
 * callback with the device lock dropped.
 * Called with dev->lock held; the lock is re-acquired before return.
 */
static void
done(struct net2280_ep *ep, struct net2280_request *req, int status)
{
	struct net2280		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init(&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

	if (status && status != -ESHUTDOWN)
		ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}
947
948 /*-------------------------------------------------------------------------*/
949
950 static int
951 net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
952 {
953         struct net2280_request  *req;
954         struct net2280_ep       *ep;
955         struct net2280          *dev;
956         unsigned long           flags;
957         int ret = 0;
958
959         /* we always require a cpu-view buffer, so that we can
960          * always use pio (as fallback or whatever).
961          */
962         ep = container_of(_ep, struct net2280_ep, ep);
963         if (!_ep || (!ep->desc && ep->num != 0)) {
964                 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
965                 return -EINVAL;
966         }
967         req = container_of(_req, struct net2280_request, req);
968         if (!_req || !_req->complete || !_req->buf ||
969                                 !list_empty(&req->queue)) {
970                 ret = -EINVAL;
971                 goto print_err;
972         }
973         if (_req->length > (~0 & DMA_BYTE_COUNT_MASK)) {
974                 ret = -EDOM;
975                 goto print_err;
976         }
977         dev = ep->dev;
978         if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
979                 ret = -ESHUTDOWN;
980                 goto print_err;
981         }
982
983         /* FIXME implement PIO fallback for ZLPs with DMA */
984         if (ep->dma && _req->length == 0) {
985                 ret = -EOPNOTSUPP;
986                 goto print_err;
987         }
988
989         /* set up dma mapping in case the caller didn't */
990         if (ep->dma) {
991                 ret = usb_gadget_map_request(&dev->gadget, _req,
992                                 ep->is_in);
993                 if (ret)
994                         goto print_err;
995         }
996
997         ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
998                         _ep->name, _req, _req->length, _req->buf);
999
1000         spin_lock_irqsave(&dev->lock, flags);
1001
1002         _req->status = -EINPROGRESS;
1003         _req->actual = 0;
1004
1005         /* kickstart this i/o queue? */
1006         if  (list_empty(&ep->queue) && !ep->stopped &&
1007                 !((dev->quirks & PLX_SUPERSPEED) && ep->dma &&
1008                   (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {
1009
1010                 /* use DMA if the endpoint supports it, else pio */
1011                 if (ep->dma)
1012                         start_dma(ep, req);
1013                 else {
1014                         /* maybe there's no control data, just status ack */
1015                         if (ep->num == 0 && _req->length == 0) {
1016                                 allow_status(ep);
1017                                 done(ep, req, 0);
1018                                 ep_vdbg(dev, "%s status ack\n", ep->ep.name);
1019                                 goto done;
1020                         }
1021
1022                         /* PIO ... stuff the fifo, or unblock it.  */
1023                         if (ep->is_in)
1024                                 write_fifo(ep, _req);
1025                         else if (list_empty(&ep->queue)) {
1026                                 u32     s;
1027
1028                                 /* OUT FIFO might have packet(s) buffered */
1029                                 s = readl(&ep->regs->ep_stat);
1030                                 if ((s & BIT(FIFO_EMPTY)) == 0) {
1031                                         /* note:  _req->short_not_ok is
1032                                          * ignored here since PIO _always_
1033                                          * stops queue advance here, and
1034                                          * _req->status doesn't change for
1035                                          * short reads (only _req->actual)
1036                                          */
1037                                         if (read_fifo(ep, req) &&
1038                                                         ep->num == 0) {
1039                                                 done(ep, req, 0);
1040                                                 allow_status(ep);
1041                                                 /* don't queue it */
1042                                                 req = NULL;
1043                                         } else if (read_fifo(ep, req) &&
1044                                                         ep->num != 0) {
1045                                                 done(ep, req, 0);
1046                                                 req = NULL;
1047                                         } else
1048                                                 s = readl(&ep->regs->ep_stat);
1049                                 }
1050
1051                                 /* don't NAK, let the fifo fill */
1052                                 if (req && (s & BIT(NAK_OUT_PACKETS)))
1053                                         writel(BIT(CLEAR_NAK_OUT_PACKETS),
1054                                                         &ep->regs->ep_rsp);
1055                         }
1056                 }
1057
1058         } else if (ep->dma) {
1059                 int     valid = 1;
1060
1061                 if (ep->is_in) {
1062                         int     expect;
1063
1064                         /* preventing magic zlps is per-engine state, not
1065                          * per-transfer; irq logic must recover hiccups.
1066                          */
1067                         expect = likely(req->req.zero ||
1068                                 (req->req.length % ep->ep.maxpacket));
1069                         if (expect != ep->in_fifo_validate)
1070                                 valid = 0;
1071                 }
1072                 queue_dma(ep, req, valid);
1073
1074         } /* else the irq handler advances the queue. */
1075
1076         ep->responded = 1;
1077         if (req)
1078                 list_add_tail(&req->queue, &ep->queue);
1079 done:
1080         spin_unlock_irqrestore(&dev->lock, flags);
1081
1082         /* pci writes may still be posted */
1083         return ret;
1084
1085 print_err:
1086         dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
1087         return ret;
1088 }
1089
/* Derive the transferred byte count from the DMA residue left in
 * @dmacount (length minus the masked remainder), then complete @req.
 */
static inline void
dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
		int status)
{
	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
	done(ep, req, status);
}
1097
1098 static void scan_dma_completions(struct net2280_ep *ep)
1099 {
1100         /* only look at descriptors that were "naturally" retired,
1101          * so fifo and list head state won't matter
1102          */
1103         while (!list_empty(&ep->queue)) {
1104                 struct net2280_request  *req;
1105                 u32                     tmp;
1106
1107                 req = list_entry(ep->queue.next,
1108                                 struct net2280_request, queue);
1109                 if (!req->valid)
1110                         break;
1111                 rmb();
1112                 tmp = le32_to_cpup(&req->td->dmacount);
1113                 if ((tmp & BIT(VALID_BIT)) != 0)
1114                         break;
1115
1116                 /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
1117                  * cases where DMA must be aborted; this code handles
1118                  * all non-abort DMA completions.
1119                  */
1120                 if (unlikely(req->td->dmadesc == 0)) {
1121                         /* paranoia */
1122                         tmp = readl(&ep->dma->dmacount);
1123                         if (tmp & DMA_BYTE_COUNT_MASK)
1124                                 break;
1125                         /* single transfer mode */
1126                         dma_done(ep, req, tmp, 0);
1127                         break;
1128                 } else if (!ep->is_in &&
1129                            (req->req.length % ep->ep.maxpacket) &&
1130                            !(ep->dev->quirks & PLX_SUPERSPEED)) {
1131
1132                         tmp = readl(&ep->regs->ep_stat);
1133                         /* AVOID TROUBLE HERE by not issuing short reads from
1134                          * your gadget driver.  That helps avoids errata 0121,
1135                          * 0122, and 0124; not all cases trigger the warning.
1136                          */
1137                         if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
1138                                 ep_warn(ep->dev, "%s lost packet sync!\n",
1139                                                 ep->ep.name);
1140                                 req->req.status = -EOVERFLOW;
1141                         } else {
1142                                 tmp = readl(&ep->regs->ep_avail);
1143                                 if (tmp) {
1144                                         /* fifo gets flushed later */
1145                                         ep->out_overflow = 1;
1146                                         ep_dbg(ep->dev,
1147                                                 "%s dma, discard %d len %d\n",
1148                                                 ep->ep.name, tmp,
1149                                                 req->req.length);
1150                                         req->req.status = -EOVERFLOW;
1151                                 }
1152                         }
1153                 }
1154                 dma_done(ep, req, tmp, 0);
1155         }
1156 }
1157
/*
 * Re-arm DMA for the request at the head of the queue, unless the
 * endpoint is stopped.
 * NOTE(review): list_entry() on an empty queue would yield a bogus
 * request -- callers are expected to check !list_empty() first
 * (as net2280_set_halt_and_wedge() does); confirm all call sites.
 */
static void restart_dma(struct net2280_ep *ep)
{
	struct net2280_request	*req;

	if (ep->stopped)
		return;
	req = list_entry(ep->queue.next, struct net2280_request, queue);

	start_dma(ep, req);
}
1168
/*
 * Abort any in-flight DMA on this endpoint's channel, then reap every
 * descriptor the engine had already retired.  With an empty queue a
 * plain stop suffices.  Called with the device lock held.
 */
static void abort_dma(struct net2280_ep *ep)
{
	/* abort the current transfer */
	if (likely(!list_empty(&ep->queue))) {
		/* FIXME work around errata 0121, 0122, 0124 */
		writel(BIT(DMA_ABORT), &ep->dma->dmastat);
		spin_stop_dma(ep->dma);
	} else
		stop_dma(ep->dma);
	scan_dma_completions(ep);
}
1180
1181 /* dequeue ALL requests */
1182 static void nuke(struct net2280_ep *ep)
1183 {
1184         struct net2280_request  *req;
1185
1186         /* called with spinlock held */
1187         ep->stopped = 1;
1188         if (ep->dma)
1189                 abort_dma(ep);
1190         while (!list_empty(&ep->queue)) {
1191                 req = list_entry(ep->queue.next,
1192                                 struct net2280_request,
1193                                 queue);
1194                 done(ep, req, -ESHUTDOWN);
1195         }
1196 }
1197
/* dequeue JUST ONE request */
/*
 * usb_ep_ops.dequeue: remove a single request from the endpoint queue,
 * completing it with -ECONNRESET.  DMA is quiesced while the queue is
 * patched; if the victim was the (possibly partially complete) queue
 * head, the transfer is aborted first.  Afterwards DMA is resumed or
 * restarted for whatever remains queued.
 */
static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;
	unsigned long		flags;
	u32			dmactl;
	int			stopped;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req) {
		pr_err("%s: Invalid ep=%p or ep->desc or req=%p\n",
						__func__, _ep, _req);
		return -EINVAL;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);
	/* remember caller-visible stopped state; restored below */
	stopped = ep->stopped;

	/* quiesce dma while we patch the queue */
	dmactl = 0;
	ep->stopped = 1;
	if (ep->dma) {
		/* save dmactl so a resumed transfer keeps its settings */
		dmactl = readl(&ep->dma->dmactl);
		/* WARNING erratum 0127 may kick in ... */
		stop_dma(ep->dma);
		scan_dma_completions(ep);
	}

	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	/* NOTE(review): if not found, req points at the list-head
	 * container here; only its address is compared, never
	 * dereferenced, so this is the usual (if ugly) kernel idiom */
	if (&req->req != _req) {
		spin_unlock_irqrestore(&ep->dev->lock, flags);
		dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
								__func__);
		return -EINVAL;
	}

	/* queue head may be partially complete. */
	if (ep->queue.next == &req->queue) {
		if (ep->dma) {
			ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name);
			_req->status = -ECONNRESET;
			abort_dma(ep);
			/* abort_dma may already have completed it */
			if (likely(ep->queue.next == &req->queue)) {
				/* NOTE: misreports single-transfer mode*/
				req->td->dmacount = 0;	/* invalidate */
				dma_done(ep, req,
					readl(&ep->dma->dmacount),
					-ECONNRESET);
			}
		} else {
			ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name);
			done(ep, req, -ECONNRESET);
		}
		req = NULL;
	}

	if (req)
		done(ep, req, -ECONNRESET);
	ep->stopped = stopped;

	if (ep->dma) {
		/* turn off dma on inactive queues */
		if (list_empty(&ep->queue))
			stop_dma(ep->dma);
		else if (!ep->stopped) {
			/* resume current request, or start new one */
			if (req)
				writel(dmactl, &ep->dma->dmactl);
			else
				start_dma(ep, list_entry(ep->queue.next,
					struct net2280_request, queue));
		}
	}

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}
1280
1281 /*-------------------------------------------------------------------------*/
1282
1283 static int net2280_fifo_status(struct usb_ep *_ep);
1284
/*
 * Common implementation behind set_halt and set_wedge.
 * @value: 1 stalls the endpoint (protocol stall on ep0), 0 clears it.
 * @wedged: when stalling, also latch the wedged state so a host
 * CLEAR_FEATURE alone won't un-stall the endpoint.
 * Fails with -EAGAIN if requests are queued or an IN FIFO still holds
 * data, -EINVAL for isochronous endpoints, -ESHUTDOWN when unbound.
 */
static int
net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2280_ep	*ep;
	unsigned long		flags;
	int			retval = 0;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0)) {
		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
		return -EINVAL;
	}
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
		retval = -ESHUTDOWN;
		goto print_err;
	}
	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
						== USB_ENDPOINT_XFER_ISOC) {
		retval = -EINVAL;
		goto print_err;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue)) {
		retval = -EAGAIN;
		goto print_unlock;
	} else if (ep->is_in && value && net2280_fifo_status(_ep) != 0) {
		retval = -EAGAIN;
		goto print_unlock;
	} else {
		ep_vdbg(ep->dev, "%s %s %s\n", _ep->name,
				value ? "set" : "clear",
				wedged ? "wedge" : "halt");
		/* set/clear, then synch memory views with the device */
		if (value) {
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else
				set_halt(ep);
			if (wedged)
				ep->wedged = 1;
		} else {
			clear_halt(ep);
			/* NOTE(review): the queue was verified empty above
			 * (else -EAGAIN), so this re-check can never be
			 * true here -- confirm whether restart_dma is
			 * reachable as intended */
			if (ep->dev->quirks & PLX_SUPERSPEED &&
				!list_empty(&ep->queue) && ep->td_dma)
					restart_dma(ep);
			ep->wedged = 0;
		}
		/* flush posted write before releasing the lock */
		(void) readl(&ep->regs->ep_rsp);
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	return retval;

print_unlock:
	spin_unlock_irqrestore(&ep->dev->lock, flags);
print_err:
	dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, retval);
	return retval;
}
1345
/* usb_ep_ops.set_halt: stall/un-stall without wedging */
static int net2280_set_halt(struct usb_ep *_ep, int value)
{
	return net2280_set_halt_and_wedge(_ep, value, 0);
}
1350
1351 static int net2280_set_wedge(struct usb_ep *_ep)
1352 {
1353         if (!_ep || _ep->name == ep0name) {
1354                 pr_err("%s: Invalid ep=%p or ep0\n", __func__, _ep);
1355                 return -EINVAL;
1356         }
1357         return net2280_set_halt_and_wedge(_ep, 1, 1);
1358 }
1359
1360 static int net2280_fifo_status(struct usb_ep *_ep)
1361 {
1362         struct net2280_ep       *ep;
1363         u32                     avail;
1364
1365         ep = container_of(_ep, struct net2280_ep, ep);
1366         if (!_ep || (!ep->desc && ep->num != 0)) {
1367                 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
1368                 return -ENODEV;
1369         }
1370         if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
1371                 dev_err(&ep->dev->pdev->dev,
1372                         "%s: Invalid driver=%p or speed=%d\n",
1373                         __func__, ep->dev->driver, ep->dev->gadget.speed);
1374                 return -ESHUTDOWN;
1375         }
1376
1377         avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
1378         if (avail > ep->fifo_size) {
1379                 dev_err(&ep->dev->pdev->dev, "%s: Fifo overflow\n", __func__);
1380                 return -EOVERFLOW;
1381         }
1382         if (ep->is_in)
1383                 avail = ep->fifo_size - avail;
1384         return avail;
1385 }
1386
/*
 * usb_ep_ops.fifo_flush: discard any data buffered in the endpoint
 * FIFO.  The trailing ep_rsp read flushes the posted PCI write.
 */
static void net2280_fifo_flush(struct usb_ep *_ep)
{
	struct net2280_ep	*ep;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0)) {
		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
		return;
	}
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
		dev_err(&ep->dev->pdev->dev,
			"%s: Invalid driver=%p or speed=%d\n",
			__func__, ep->dev->driver, ep->dev->gadget.speed);
		return;
	}

	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
	(void) readl(&ep->regs->ep_rsp);
}
1406
/* per-endpoint operations exported to the gadget core via ep->ops */
static const struct usb_ep_ops net2280_ep_ops = {
	.enable		= net2280_enable,
	.disable	= net2280_disable,

	.alloc_request	= net2280_alloc_request,
	.free_request	= net2280_free_request,

	.queue		= net2280_queue,
	.dequeue	= net2280_dequeue,

	.set_halt	= net2280_set_halt,
	.set_wedge	= net2280_set_wedge,
	.fifo_status	= net2280_fifo_status,
	.fifo_flush	= net2280_fifo_flush,
};
1422
1423 /*-------------------------------------------------------------------------*/
1424
1425 static int net2280_get_frame(struct usb_gadget *_gadget)
1426 {
1427         struct net2280          *dev;
1428         unsigned long           flags;
1429         u16                     retval;
1430
1431         if (!_gadget)
1432                 return -ENODEV;
1433         dev = container_of(_gadget, struct net2280, gadget);
1434         spin_lock_irqsave(&dev->lock, flags);
1435         retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
1436         spin_unlock_irqrestore(&dev->lock, flags);
1437         return retval;
1438 }
1439
1440 static int net2280_wakeup(struct usb_gadget *_gadget)
1441 {
1442         struct net2280          *dev;
1443         u32                     tmp;
1444         unsigned long           flags;
1445
1446         if (!_gadget)
1447                 return 0;
1448         dev = container_of(_gadget, struct net2280, gadget);
1449
1450         spin_lock_irqsave(&dev->lock, flags);
1451         tmp = readl(&dev->usb->usbctl);
1452         if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
1453                 writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
1454         spin_unlock_irqrestore(&dev->lock, flags);
1455
1456         /* pci writes may still be posted */
1457         return 0;
1458 }
1459
1460 static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
1461 {
1462         struct net2280          *dev;
1463         u32                     tmp;
1464         unsigned long           flags;
1465
1466         if (!_gadget)
1467                 return 0;
1468         dev = container_of(_gadget, struct net2280, gadget);
1469
1470         spin_lock_irqsave(&dev->lock, flags);
1471         tmp = readl(&dev->usb->usbctl);
1472         if (value) {
1473                 tmp |= BIT(SELF_POWERED_STATUS);
1474                 _gadget->is_selfpowered = 1;
1475         } else {
1476                 tmp &= ~BIT(SELF_POWERED_STATUS);
1477                 _gadget->is_selfpowered = 0;
1478         }
1479         writel(tmp, &dev->usb->usbctl);
1480         spin_unlock_irqrestore(&dev->lock, flags);
1481
1482         return 0;
1483 }
1484
1485 static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1486 {
1487         struct net2280  *dev;
1488         u32             tmp;
1489         unsigned long   flags;
1490
1491         if (!_gadget)
1492                 return -ENODEV;
1493         dev = container_of(_gadget, struct net2280, gadget);
1494
1495         spin_lock_irqsave(&dev->lock, flags);
1496         tmp = readl(&dev->usb->usbctl);
1497         dev->softconnect = (is_on != 0);
1498         if (is_on)
1499                 tmp |= BIT(USB_DETECT_ENABLE);
1500         else
1501                 tmp &= ~BIT(USB_DETECT_ENABLE);
1502         writel(tmp, &dev->usb->usbctl);
1503         spin_unlock_irqrestore(&dev->lock, flags);
1504
1505         return 0;
1506 }
1507
1508 static int net2280_start(struct usb_gadget *_gadget,
1509                 struct usb_gadget_driver *driver);
1510 static int net2280_stop(struct usb_gadget *_gadget);
1511
/* gadget-level operations exported to the UDC core */
static const struct usb_gadget_ops net2280_ops = {
	.get_frame	= net2280_get_frame,
	.wakeup		= net2280_wakeup,
	.set_selfpowered = net2280_set_selfpowered,
	.pullup		= net2280_pullup,
	.udc_start	= net2280_start,
	.udc_stop	= net2280_stop,
};
1520
1521 /*-------------------------------------------------------------------------*/
1522
1523 #ifdef  CONFIG_USB_GADGET_DEBUG_FILES
1524
1525 /* FIXME move these into procfs, and use seq_file.
1526  * Sysfs _still_ doesn't behave for arbitrarily sized files,
1527  * and also doesn't help products using this with 2.4 kernels.
1528  */
1529
1530 /* "function" sysfs attribute */
1531 static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
1532                              char *buf)
1533 {
1534         struct net2280  *dev = dev_get_drvdata(_dev);
1535
1536         if (!dev->driver || !dev->driver->function ||
1537                         strlen(dev->driver->function) > PAGE_SIZE)
1538                 return 0;
1539         return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
1540 }
1541 static DEVICE_ATTR_RO(function);
1542
1543 static ssize_t registers_show(struct device *_dev,
1544                               struct device_attribute *attr, char *buf)
1545 {
1546         struct net2280          *dev;
1547         char                    *next;
1548         unsigned                size, t;
1549         unsigned long           flags;
1550         int                     i;
1551         u32                     t1, t2;
1552         const char              *s;
1553
1554         dev = dev_get_drvdata(_dev);
1555         next = buf;
1556         size = PAGE_SIZE;
1557         spin_lock_irqsave(&dev->lock, flags);
1558
1559         if (dev->driver)
1560                 s = dev->driver->driver.name;
1561         else
1562                 s = "(none)";
1563
1564         /* Main Control Registers */
1565         t = scnprintf(next, size, "%s version " DRIVER_VERSION
1566                         ", chiprev %04x\n\n"
1567                         "devinit %03x fifoctl %08x gadget '%s'\n"
1568                         "pci irqenb0 %02x irqenb1 %08x "
1569                         "irqstat0 %04x irqstat1 %08x\n",
1570                         driver_name, dev->chiprev,
1571                         readl(&dev->regs->devinit),
1572                         readl(&dev->regs->fifoctl),
1573                         s,
1574                         readl(&dev->regs->pciirqenb0),
1575                         readl(&dev->regs->pciirqenb1),
1576                         readl(&dev->regs->irqstat0),
1577                         readl(&dev->regs->irqstat1));
1578         size -= t;
1579         next += t;
1580
1581         /* USB Control Registers */
1582         t1 = readl(&dev->usb->usbctl);
1583         t2 = readl(&dev->usb->usbstat);
1584         if (t1 & BIT(VBUS_PIN)) {
1585                 if (t2 & BIT(HIGH_SPEED))
1586                         s = "high speed";
1587                 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1588                         s = "powered";
1589                 else
1590                         s = "full speed";
1591                 /* full speed bit (6) not working?? */
1592         } else
1593                         s = "not attached";
1594         t = scnprintf(next, size,
1595                         "stdrsp %08x usbctl %08x usbstat %08x "
1596                                 "addr 0x%02x (%s)\n",
1597                         readl(&dev->usb->stdrsp), t1, t2,
1598                         readl(&dev->usb->ouraddr), s);
1599         size -= t;
1600         next += t;
1601
1602         /* PCI Master Control Registers */
1603
1604         /* DMA Control Registers */
1605
1606         /* Configurable EP Control Registers */
1607         for (i = 0; i < dev->n_ep; i++) {
1608                 struct net2280_ep       *ep;
1609
1610                 ep = &dev->ep[i];
1611                 if (i && !ep->desc)
1612                         continue;
1613
1614                 t1 = readl(&ep->cfg->ep_cfg);
1615                 t2 = readl(&ep->regs->ep_rsp) & 0xff;
1616                 t = scnprintf(next, size,
1617                                 "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
1618                                         "irqenb %02x\n",
1619                                 ep->ep.name, t1, t2,
1620                                 (t2 & BIT(CLEAR_NAK_OUT_PACKETS))
1621                                         ? "NAK " : "",
1622                                 (t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
1623                                         ? "hide " : "",
1624                                 (t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
1625                                         ? "CRC " : "",
1626                                 (t2 & BIT(CLEAR_INTERRUPT_MODE))
1627                                         ? "interrupt " : "",
1628                                 (t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
1629                                         ? "status " : "",
1630                                 (t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
1631                                         ? "NAKmode " : "",
1632                                 (t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
1633                                         ? "DATA1 " : "DATA0 ",
1634                                 (t2 & BIT(CLEAR_ENDPOINT_HALT))
1635                                         ? "HALT " : "",
1636                                 readl(&ep->regs->ep_irqenb));
1637                 size -= t;
1638                 next += t;
1639
1640                 t = scnprintf(next, size,
1641                                 "\tstat %08x avail %04x "
1642                                 "(ep%d%s-%s)%s\n",
1643                                 readl(&ep->regs->ep_stat),
1644                                 readl(&ep->regs->ep_avail),
1645                                 t1 & 0x0f, DIR_STRING(t1),
1646                                 type_string(t1 >> 8),
1647                                 ep->stopped ? "*" : "");
1648                 size -= t;
1649                 next += t;
1650
1651                 if (!ep->dma)
1652                         continue;
1653
1654                 t = scnprintf(next, size,
1655                                 "  dma\tctl %08x stat %08x count %08x\n"
1656                                 "\taddr %08x desc %08x\n",
1657                                 readl(&ep->dma->dmactl),
1658                                 readl(&ep->dma->dmastat),
1659                                 readl(&ep->dma->dmacount),
1660                                 readl(&ep->dma->dmaaddr),
1661                                 readl(&ep->dma->dmadesc));
1662                 size -= t;
1663                 next += t;
1664
1665         }
1666
1667         /* Indexed Registers (none yet) */
1668
1669         /* Statistics */
1670         t = scnprintf(next, size, "\nirqs:  ");
1671         size -= t;
1672         next += t;
1673         for (i = 0; i < dev->n_ep; i++) {
1674                 struct net2280_ep       *ep;
1675
1676                 ep = &dev->ep[i];
1677                 if (i && !ep->irqs)
1678                         continue;
1679                 t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
1680                 size -= t;
1681                 next += t;
1682
1683         }
1684         t = scnprintf(next, size, "\n");
1685         size -= t;
1686         next += t;
1687
1688         spin_unlock_irqrestore(&dev->lock, flags);
1689
1690         return PAGE_SIZE - size;
1691 }
1692 static DEVICE_ATTR_RO(registers);
1693
/* sysfs "queues" attribute: dump every configured endpoint and the
 * requests currently queued on it, one line per request (plus the
 * in-memory dma descriptor when the endpoint uses dma).  Output is
 * clipped to one page.
 */
static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
			   char *buf)
{
	struct net2280		*dev;
	char			*next;	/* write cursor into buf */
	unsigned		size;	/* bytes still free in buf */
	unsigned long		flags;
	int			i;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	/* hold the device lock so the queues can't change underneath us */
	spin_lock_irqsave(&dev->lock, flags);

	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep		*ep = &dev->ep[i];
		struct net2280_request		*req;
		int				t;

		if (i != 0) {
			const struct usb_endpoint_descriptor	*d;

			/* skip endpoints with no descriptor (not enabled) */
			d = ep->desc;
			if (!d)
				continue;
			t = d->bEndpointAddress;
			t = scnprintf(next, size,
				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
				(t & USB_DIR_IN) ? "in" : "out",
				type_string(d->bmAttributes),
				usb_endpoint_maxp(d) & 0x1fff,
				ep->dma ? "dma" : "pio", ep->fifo_size
				);
		} else /* ep0 should only have one transfer queued */
			t = scnprintf(next, size, "ep0 max 64 pio %s\n",
					ep->is_in ? "in" : "out");
		/* bail out once the page is full (or on scnprintf error) */
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty(&ep->queue)) {
			t = scnprintf(next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			/* the request whose descriptor the dma engine points
			 * at also gets the live dmacount register value
			 */
			if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
				t = scnprintf(next, size,
					"\treq %p len %d/%d "
					"buf %p (dmacount %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					readl(&ep->dma->dmacount));
			else
				t = scnprintf(next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;

			if (ep->dma) {
				struct net2280_dma	*td;

				/* dump the request's transfer descriptor */
				td = req->td;
				t = scnprintf(next, size, "\t    td %08x "
					" count %08x buf %08x desc %08x\n",
					(u32) req->td_dma,
					le32_to_cpu(td->dmacount),
					le32_to_cpu(td->dmaaddr),
					le32_to_cpu(td->dmadesc));
				if (t <= 0 || t > size)
					goto done;
				size -= t;
				next += t;
			}
		}
	}

done:
	spin_unlock_irqrestore(&dev->lock, flags);
	return PAGE_SIZE - size;
}
1784 static DEVICE_ATTR_RO(queues);
1785
1786
1787 #else
1788
1789 #define device_create_file(a, b)        (0)
1790 #define device_remove_file(a, b)        do { } while (0)
1791
1792 #endif
1793
1794 /*-------------------------------------------------------------------------*/
1795
1796 /* another driver-specific mode might be a request type doing dma
1797  * to/from another device fifo instead of to/from memory.
1798  */
1799
1800 static void set_fifo_mode(struct net2280 *dev, int mode)
1801 {
1802         /* keeping high bits preserves BAR2 */
1803         writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
1804
1805         /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
1806         INIT_LIST_HEAD(&dev->gadget.ep_list);
1807         list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1808         list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1809         switch (mode) {
1810         case 0:
1811                 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1812                 list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
1813                 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1814                 break;
1815         case 1:
1816                 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
1817                 break;
1818         case 2:
1819                 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1820                 dev->ep[1].fifo_size = 2048;
1821                 dev->ep[2].fifo_size = 1024;
1822                 break;
1823         }
1824         /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
1825         list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
1826         list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
1827 }
1828
static void defect7374_disable_data_eps(struct net2280 *dev)
{
	/*
	 * For Defect 7374, disable data EPs (and more):
	 *  - This phase undoes the earlier phase of the Defect 7374 workaround,
	 *    returning ep regs back to normal.
	 */
	struct net2280_ep *ep;
	int i;
	unsigned char ep_sel;
	u32 tmp_reg;

	/* clear the GPEP configurations the enable phase set up */
	for (i = 1; i < 5; i++) {
		ep = &dev->ep[i];
		writel(0, &ep->cfg->ep_cfg);
	}

	/* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
	for (i = 0; i < 6; i++)
		writel(0, &dev->dep[i].dep_cfg);

	/* walk all 22 endpoint-link selections via pl_ep_ctrl */
	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
		/* Select an endpoint for subsequent operations: */
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);

		/* these selections need no changes */
		if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
					ep_sel == 18 || ep_sel == 20)
			continue;

		/* Change settings on some selected endpoints */
		tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
		tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
		writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		tmp_reg |= BIT(EP_INITIALIZED);
		writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
	}
}
1868
/*
 * Defect 7374 workaround, "soft" phase: pre-configure the data endpoints
 * and endpoint-link registers so the chip can get through its first
 * SuperSpeed control read; progress is tracked by an FSM field kept in
 * the indexed SCRATCH register.  Undone by defect7374_disable_data_eps().
 */
static void defect7374_enable_data_eps_zero(struct net2280 *dev)
{
	u32 tmp = 0, tmp_reg;
	u32 scratch;
	int i;
	unsigned char ep_sel;

	scratch = get_idx_reg(dev->regs, SCRATCH);

	/* sanity check: the FSM should not already be in the
	 * SS-control-read state when this phase runs
	 */
	WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
		== DEFECT7374_FSM_SS_CONTROL_READ);

	scratch &= ~(0xf << DEFECT7374_FSM_FIELD);

	ep_warn(dev, "Operate Defect 7374 workaround soft this time");
	ep_warn(dev, "It will operate on cold-reboot and SS connect");

	/* GPEPs: one shared ep_cfg value enabling both directions
	 * (enhanced mode has separate IN/OUT enable bits)
	 */
	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
			(2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
			((dev->enhanced_mode) ?
			 BIT(OUT_ENDPOINT_ENABLE) | BIT(IN_ENDPOINT_ENABLE) :
			 BIT(ENDPOINT_ENABLE)));

	for (i = 1; i < 5; i++)
		writel(tmp, &dev->ep[i].cfg->ep_cfg);

	/* CSRIN, PCIIN, STATIN, RCIN */
	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
	writel(tmp, &dev->dep[1].dep_cfg);
	writel(tmp, &dev->dep[3].dep_cfg);
	writel(tmp, &dev->dep[4].dep_cfg);
	writel(tmp, &dev->dep[5].dep_cfg);

	/* Implemented for development and debug.
	 * Can be refined/tuned later.
	 */
	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
		/* Select an endpoint for subsequent operations: */
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		writel(((tmp_reg & ~0x1f) | ep_sel),
				&dev->plregs->pl_ep_ctrl);

		if (ep_sel == 1) {
			/* selection 1 only needs its ack error code cleared */
			tmp =
				(readl(&dev->plregs->pl_ep_ctrl) |
				 BIT(CLEAR_ACK_ERROR_CODE) | 0);
			writel(tmp, &dev->plregs->pl_ep_ctrl);
			continue;
		}

		/* these selections are left untouched */
		if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
				ep_sel == 18  || ep_sel == 20)
			continue;

		/* tolerate bad direction on non-control IN endpoints ... */
		tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
				BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
		writel(tmp, &dev->plregs->pl_ep_cfg_4);

		/* ... and mark the endpoint as not yet initialized */
		tmp = readl(&dev->plregs->pl_ep_ctrl) &
			~BIT(EP_INITIALIZED);
		writel(tmp, &dev->plregs->pl_ep_ctrl);

	}

	/* Set FSM to focus on the first Control Read:
	 * - Tip: Connection speed is known upon the first
	 * setup request.
	 */
	scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
	set_idx_reg(dev->regs, SCRATCH, scratch);

}
1940
1941 /* keeping it simple:
1942  * - one bus driver, initted first;
1943  * - one function driver, initted second
1944  *
1945  * most of the work to support multiple net2280 controllers would
1946  * be to associate this gadget driver (yes?) with all of them, or
1947  * perhaps to bind specific drivers to specific devices.
1948  */
1949
1950 static void usb_reset_228x(struct net2280 *dev)
1951 {
1952         u32     tmp;
1953
1954         dev->gadget.speed = USB_SPEED_UNKNOWN;
1955         (void) readl(&dev->usb->usbctl);
1956
1957         net2280_led_init(dev);
1958
1959         /* disable automatic responses, and irqs */
1960         writel(0, &dev->usb->stdrsp);
1961         writel(0, &dev->regs->pciirqenb0);
1962         writel(0, &dev->regs->pciirqenb1);
1963
1964         /* clear old dma and irq state */
1965         for (tmp = 0; tmp < 4; tmp++) {
1966                 struct net2280_ep       *ep = &dev->ep[tmp + 1];
1967                 if (ep->dma)
1968                         abort_dma(ep);
1969         }
1970
1971         writel(~0, &dev->regs->irqstat0),
1972         writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),
1973
1974         /* reset, and enable pci */
1975         tmp = readl(&dev->regs->devinit) |
1976                 BIT(PCI_ENABLE) |
1977                 BIT(FIFO_SOFT_RESET) |
1978                 BIT(USB_SOFT_RESET) |
1979                 BIT(M8051_RESET);
1980         writel(tmp, &dev->regs->devinit);
1981
1982         /* standard fifo and endpoint allocations */
1983         set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
1984 }
1985
1986 static void usb_reset_338x(struct net2280 *dev)
1987 {
1988         u32 tmp;
1989
1990         dev->gadget.speed = USB_SPEED_UNKNOWN;
1991         (void)readl(&dev->usb->usbctl);
1992
1993         net2280_led_init(dev);
1994
1995         if (dev->bug7734_patched) {
1996                 /* disable automatic responses, and irqs */
1997                 writel(0, &dev->usb->stdrsp);
1998                 writel(0, &dev->regs->pciirqenb0);
1999                 writel(0, &dev->regs->pciirqenb1);
2000         }
2001
2002         /* clear old dma and irq state */
2003         for (tmp = 0; tmp < 4; tmp++) {
2004                 struct net2280_ep *ep = &dev->ep[tmp + 1];
2005                 struct net2280_dma_regs __iomem *dma;
2006
2007                 if (ep->dma) {
2008                         abort_dma(ep);
2009                 } else {
2010                         dma = &dev->dma[tmp];
2011                         writel(BIT(DMA_ABORT), &dma->dmastat);
2012                         writel(0, &dma->dmactl);
2013                 }
2014         }
2015
2016         writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1);
2017
2018         if (dev->bug7734_patched) {
2019                 /* reset, and enable pci */
2020                 tmp = readl(&dev->regs->devinit) |
2021                     BIT(PCI_ENABLE) |
2022                     BIT(FIFO_SOFT_RESET) |
2023                     BIT(USB_SOFT_RESET) |
2024                     BIT(M8051_RESET);
2025
2026                 writel(tmp, &dev->regs->devinit);
2027         }
2028
2029         /* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
2030         INIT_LIST_HEAD(&dev->gadget.ep_list);
2031
2032         for (tmp = 1; tmp < dev->n_ep; tmp++)
2033                 list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);
2034
2035 }
2036
2037 static void usb_reset(struct net2280 *dev)
2038 {
2039         if (dev->quirks & PLX_LEGACY)
2040                 return usb_reset_228x(dev);
2041         return usb_reset_338x(dev);
2042 }
2043
2044 static void usb_reinit_228x(struct net2280 *dev)
2045 {
2046         u32     tmp;
2047
2048         /* basic endpoint init */
2049         for (tmp = 0; tmp < 7; tmp++) {
2050                 struct net2280_ep       *ep = &dev->ep[tmp];
2051
2052                 ep->ep.name = ep_name[tmp];
2053                 ep->dev = dev;
2054                 ep->num = tmp;
2055
2056                 if (tmp > 0 && tmp <= 4) {
2057                         ep->fifo_size = 1024;
2058                         ep->dma = &dev->dma[tmp - 1];
2059                 } else
2060                         ep->fifo_size = 64;
2061                 ep->regs = &dev->epregs[tmp];
2062                 ep->cfg = &dev->epregs[tmp];
2063                 ep_reset_228x(dev->regs, ep);
2064         }
2065         usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
2066         usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64);
2067         usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64);
2068
2069         dev->gadget.ep0 = &dev->ep[0].ep;
2070         dev->ep[0].stopped = 0;
2071         INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2072
2073         /* we want to prevent lowlevel/insecure access from the USB host,
2074          * but erratum 0119 means this enable bit is ignored
2075          */
2076         for (tmp = 0; tmp < 5; tmp++)
2077                 writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg);
2078 }
2079
/* Re-establish software and register state for the 338x endpoints after
 * a reset, then apply several documented silicon-errata workarounds in
 * the link-layer registers.
 */
static void usb_reinit_338x(struct net2280 *dev)
{
	int i;
	u32 tmp, val;
	/* ne[i] is the USB endpoint number used by dev->ep[i]; in enhanced
	 * mode IN/OUT endpoints with the same number share a cfg bank
	 */
	static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
	/* byte offset applied to &dev->epregs[ne[i]] to reach ep->regs
	 * in enhanced mode (0xC0 selects the paired direction's bank)
	 */
	static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
						0x00, 0xC0, 0x00, 0xC0 };

	/* basic endpoint init */
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep = &dev->ep[i];

		ep->ep.name = dev->enhanced_mode ? ep_name_adv[i] : ep_name[i];
		ep->dev = dev;
		ep->num = i;

		/* ep 1..4 each get a dma channel */
		if (i > 0 && i <= 4)
			ep->dma = &dev->dma[i - 1];

		if (dev->enhanced_mode) {
			ep->cfg = &dev->epregs[ne[i]];
			/*
			 * Set USB endpoint number, hardware allows same number
			 * in both directions.
			 */
			 if (i > 0 && i < 5)
				writel(ne[i], &ep->cfg->ep_cfg);
			ep->regs = (struct net2280_ep_regs __iomem *)
				(((void __iomem *)&dev->epregs[ne[i]]) +
				ep_reg_addr[i]);
		} else {
			ep->cfg = &dev->epregs[i];
			ep->regs = &dev->epregs[i];
		}

		ep->fifo_size = (i != 0) ? 2048 : 512;

		ep_reset_338x(dev->regs, ep);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;

	/* Link layer set up */
	if (dev->bug7734_patched) {
		/* start with U1/U2/LTM disabled */
		tmp = readl(&dev->usb_ext->usbctl2) &
		    ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
		writel(tmp, &dev->usb_ext->usbctl2);
	}

	/* Hardware Defect and Workaround */
	val = readl(&dev->ll_lfps_regs->ll_lfps_5);
	val &= ~(0xf << TIMER_LFPS_6US);
	val |= 0x5 << TIMER_LFPS_6US;
	writel(val, &dev->ll_lfps_regs->ll_lfps_5);

	val = readl(&dev->ll_lfps_regs->ll_lfps_6);
	val &= ~(0xffff << TIMER_LFPS_80US);
	val |= 0x0100 << TIMER_LFPS_80US;
	writel(val, &dev->ll_lfps_regs->ll_lfps_6);

	/*
	 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
	 * Hot Reset Exit Handshake may Fail in Specific Case using
	 * Default Register Settings. Workaround for Enumeration test.
	 */
	val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2);
	val &= ~(0x1f << HOT_TX_NORESET_TS2);
	val |= 0x10 << HOT_TX_NORESET_TS2;
	writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2);

	val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3);
	val &= ~(0x1f << HOT_RX_RESET_TS2);
	val |= 0x3 << HOT_RX_RESET_TS2;
	writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3);

	/*
	 * Set Recovery Idle to Recover bit:
	 * - On SS connections, setting Recovery Idle to Recover Fmw improves
	 *   link robustness with various hosts and hubs.
	 * - It is safe to set for all connection speeds; all chip revisions.
	 * - R-M-W to leave other bits undisturbed.
	 * - Reference PLX TT-7372
	 */
	val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit);
	val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
	writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit);

	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* disable dedicated endpoints */
	writel(0x0D, &dev->dep[0].dep_cfg);
	writel(0x0D, &dev->dep[1].dep_cfg);
	writel(0x0E, &dev->dep[2].dep_cfg);
	writel(0x0E, &dev->dep[3].dep_cfg);
	writel(0x0F, &dev->dep[4].dep_cfg);
	writel(0x0C, &dev->dep[5].dep_cfg);
}
2179
2180 static void usb_reinit(struct net2280 *dev)
2181 {
2182         if (dev->quirks & PLX_LEGACY)
2183                 return usb_reinit_228x(dev);
2184         return usb_reinit_338x(dev);
2185 }
2186
/* Bring up ep0 on a 228x chip: clear stale ep0 response state, let the
 * hardware auto-answer selected standard requests, reflect softconnect
 * in usbctl, and unmask the setup/ep0/bus-event interrupts.
 */
static void ep0_start_228x(struct net2280 *dev)
{
	/* start ep0 from a clean response state */
	writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
		BIT(CLEAR_NAK_OUT_PACKETS) |
		BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
		&dev->epregs[0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel(BIT(SET_TEST_MODE) |
		BIT(SET_ADDRESS) |
		BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
		BIT(GET_DEVICE_STATUS) |
		BIT(GET_INTERFACE_STATUS),
		&dev->usb->stdrsp);
	/* USB_DETECT_ENABLE tracks the current softconnect state */
	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
		BIT(SELF_POWERED_USB_DEVICE) |
		BIT(REMOTE_WAKEUP_SUPPORT) |
		(dev->softconnect << USB_DETECT_ENABLE) |
		BIT(SELF_POWERED_STATUS),
		&dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation  */
	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
		BIT(ENDPOINT_0_INTERRUPT_ENABLE),
		&dev->regs->pciirqenb0);
	writel(BIT(PCI_INTERRUPT_ENABLE) |
		BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
		BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
		BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
		BIT(VBUS_INTERRUPT_ENABLE) |
		BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
		BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
		&dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void) readl(&dev->usb->usbctl);
}
2229
/* Bring up ep0 on a 338x chip; 338x analogue of ep0_start_228x().
 * The ep0 response-state reset is deferred until the defect 7374
 * workaround has completed (bug7734_patched).
 */
static void ep0_start_338x(struct net2280 *dev)
{

	if (dev->bug7734_patched)
		writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
		       BIT(SET_EP_HIDE_STATUS_PHASE),
		       &dev->epregs[0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel(BIT(SET_ISOCHRONOUS_DELAY) |
	       BIT(SET_SEL) |
	       BIT(SET_TEST_MODE) |
	       BIT(SET_ADDRESS) |
	       BIT(GET_INTERFACE_STATUS) |
	       BIT(GET_DEVICE_STATUS),
		&dev->usb->stdrsp);
	/* remote wakeup is enabled by default on this family */
	dev->wakeup_enable = 1;
	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
	       (dev->softconnect << USB_DETECT_ENABLE) |
	       BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
	       &dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation  */
	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
	       BIT(ENDPOINT_0_INTERRUPT_ENABLE),
	       &dev->regs->pciirqenb0);
	writel(BIT(PCI_INTERRUPT_ENABLE) |
	       BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
	       BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
	       BIT(VBUS_INTERRUPT_ENABLE),
	       &dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void)readl(&dev->usb->usbctl);
}
2270
2271 static void ep0_start(struct net2280 *dev)
2272 {
2273         if (dev->quirks & PLX_LEGACY)
2274                 return ep0_start_228x(dev);
2275         return ep0_start_338x(dev);
2276 }
2277
2278 /* when a driver is successfully registered, it will receive
2279  * control requests including set_configuration(), which enables
2280  * non-control requests.  then usb traffic follows until a
2281  * disconnect is reported.  then a host may connect again, or
2282  * the driver might get unbound.
2283  */
2284 static int net2280_start(struct usb_gadget *_gadget,
2285                 struct usb_gadget_driver *driver)
2286 {
2287         struct net2280          *dev;
2288         int                     retval;
2289         unsigned                i;
2290
2291         /* insist on high speed support from the driver, since
2292          * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
2293          * "must not be used in normal operation"
2294          */
2295         if (!driver || driver->max_speed < USB_SPEED_HIGH ||
2296                         !driver->setup)
2297                 return -EINVAL;
2298
2299         dev = container_of(_gadget, struct net2280, gadget);
2300
2301         for (i = 0; i < dev->n_ep; i++)
2302                 dev->ep[i].irqs = 0;
2303
2304         /* hook up the driver ... */
2305         driver->driver.bus = NULL;
2306         dev->driver = driver;
2307
2308         retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
2309         if (retval)
2310                 goto err_unbind;
2311         retval = device_create_file(&dev->pdev->dev, &dev_attr_queues);
2312         if (retval)
2313                 goto err_func;
2314
2315         /* enable host detection and ep0; and we're ready
2316          * for set_configuration as well as eventual disconnect.
2317          */
2318         net2280_led_active(dev, 1);
2319
2320         if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
2321                 defect7374_enable_data_eps_zero(dev);
2322
2323         ep0_start(dev);
2324
2325         /* pci writes may still be posted */
2326         return 0;
2327
2328 err_func:
2329         device_remove_file(&dev->pdev->dev, &dev_attr_function);
2330 err_unbind:
2331         dev->driver = NULL;
2332         return retval;
2333 }
2334
2335 static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
2336 {
2337         int                     i;
2338
2339         /* don't disconnect if it's not connected */
2340         if (dev->gadget.speed == USB_SPEED_UNKNOWN)
2341                 driver = NULL;
2342
2343         /* stop hardware; prevent new request submissions;
2344          * and kill any outstanding requests.
2345          */
2346         usb_reset(dev);
2347         for (i = 0; i < dev->n_ep; i++)
2348                 nuke(&dev->ep[i]);
2349
2350         /* report disconnect; the driver is already quiesced */
2351         if (driver) {
2352                 spin_unlock(&dev->lock);
2353                 driver->disconnect(&dev->gadget);
2354                 spin_lock(&dev->lock);
2355         }
2356
2357         usb_reinit(dev);
2358 }
2359
2360 static int net2280_stop(struct usb_gadget *_gadget)
2361 {
2362         struct net2280  *dev;
2363         unsigned long   flags;
2364
2365         dev = container_of(_gadget, struct net2280, gadget);
2366
2367         spin_lock_irqsave(&dev->lock, flags);
2368         stop_activity(dev, NULL);
2369         spin_unlock_irqrestore(&dev->lock, flags);
2370
2371         net2280_led_active(dev, 0);
2372
2373         device_remove_file(&dev->pdev->dev, &dev_attr_function);
2374         device_remove_file(&dev->pdev->dev, &dev_attr_queues);
2375
2376         dev->driver = NULL;
2377
2378         return 0;
2379 }
2380
2381 /*-------------------------------------------------------------------------*/
2382
/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
 * also works for dma-capable endpoints, in pio mode or just
 * to manually advance the queue after short OUT transfers.
 *
 * NOTE(review): looks like this runs from irq context with dev->lock
 * held -- confirm at the call sites (not visible in this chunk).
 */
static void handle_ep_small(struct net2280_ep *ep)
{
	struct net2280_request	*req;
	u32			t;
	/* 0 error, 1 mid-data, 2 done */
	int			mode = 1;

	/* peek at the request at the head of the queue, if any */
	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
			struct net2280_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	t = readl(&ep->regs->ep_stat);
	ep->irqs++;

	ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
			ep->ep.name, t, req ? &req->req : NULL);

	/* keep NAK_OUT_PACKETS latched except on 2282 IN endpoints */
	if (!ep->is_in || (ep->dev->quirks & PLX_2280))
		writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
	else
		/* Added for 2282 */
		writel(t, &ep->regs->ep_stat);

	/* for ep0, monitor token irqs to catch data stage length errors
	 * and to synchronize on status.
	 *
	 * also, to defer reporting of protocol stalls ... here's where
	 * data or status first appears, handling stalls here should never
	 * cause trouble on the host side..
	 *
	 * control requests could be slightly faster without token synch for
	 * status, but status can jam up that way.
	 */
	if (unlikely(ep->num == 0)) {
		if (ep->is_in) {
			/* status; stop NAKing */
			if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
				}
				if (!req)
					allow_status(ep);
				mode = 2;
			/* reply to extra IN data tokens with a zlp */
			} else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
					mode = 2;
				} else if (ep->responded &&
						!req && !ep->stopped)
					write_fifo(ep, NULL);
			}
		} else {
			/* status; stop NAKing */
			if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
				}
				mode = 2;
			/* an extra OUT token is an error */
			} else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
					req &&
					req->req.actual == req->req.length) ||
					(ep->responded && !req)) {
				ep->dev->protocol_stall = 1;
				set_halt(ep);
				ep->stopped = 1;
				if (req)
					done(ep, req, -EOVERFLOW);
				req = NULL;
			}
		}
	}

	/* everything below needs a current request */
	if (unlikely(!req))
		return;

	/* manual DMA queue advance after short OUT */
	if (likely(ep->dma)) {
		if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
			u32	count;
			int	stopped = ep->stopped;

			/* TRANSFERRED works around OUT_DONE erratum 0112.
			 * we expect (N <= maxpacket) bytes; host wrote M.
			 * iff (M < N) we won't ever see a DMA interrupt.
			 */
			ep->stopped = 1;
			for (count = 0; ; t = readl(&ep->regs->ep_stat)) {

				/* any preceding dma transfers must finish.
				 * dma handles (M >= N), may empty the queue
				 */
				scan_dma_completions(ep);
				if (unlikely(list_empty(&ep->queue) ||
						ep->out_overflow)) {
					req = NULL;
					break;
				}
				req = list_entry(ep->queue.next,
					struct net2280_request, queue);

				/* here either (M < N), a "real" short rx;
				 * or (M == N) and the queue didn't empty
				 */
				if (likely(t & BIT(FIFO_EMPTY))) {
					count = readl(&ep->dma->dmacount);
					count &= DMA_BYTE_COUNT_MASK;
					if (readl(&ep->dma->dmadesc)
							!= req->td_dma)
						req = NULL;
					break;
				}
				udelay(1);
			}

			/* stop DMA, leave ep NAKing */
			writel(BIT(DMA_ABORT), &ep->dma->dmastat);
			spin_stop_dma(ep->dma);

			if (likely(req)) {
				req->td->dmacount = 0;
				t = readl(&ep->regs->ep_avail);
				/* any residue in ep_avail means overflow */
				dma_done(ep, req, count,
					(ep->out_overflow || t)
						? -EOVERFLOW : 0);
			}

			/* also flush to prevent erratum 0106 trouble */
			if (unlikely(ep->out_overflow ||
					(ep->dev->chiprev == 0x0100 &&
					ep->dev->gadget.speed
					== USB_SPEED_FULL))) {
				out_flush(ep);
				ep->out_overflow = 0;
			}

			/* (re)start dma if needed, stop NAKing */
			ep->stopped = stopped;
			if (!list_empty(&ep->queue))
				restart_dma(ep);
		} else
			ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n",
					ep->ep.name, t);
		return;

	/* data packet(s) received (in the fifo, OUT) */
	} else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
		if (read_fifo(ep, req) && ep->num != 0)
			mode = 2;

	/* data packet(s) transmitted (IN) */
	} else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
		unsigned	len;

		/* account for exactly one packet per irq (see header) */
		len = req->req.length - req->req.actual;
		if (len > ep->ep.maxpacket)
			len = ep->ep.maxpacket;
		req->req.actual += len;

		/* if we wrote it all, we're usually done */
		/* send zlps until the status stage */
		if ((req->req.actual == req->req.length) &&
			(!req->req.zero || len != ep->ep.maxpacket) && ep->num)
				mode = 2;

	/* there was nothing to do ...  */
	} else if (mode == 1)
		return;

	/* done */
	if (mode == 2) {
		/* stream endpoints often resubmit/unlink in completion */
		done(ep, req, 0);

		/* maybe advance queue to next request */
		if (ep->num == 0) {
			/* NOTE:  net2280 could let gadget driver start the
			 * status stage later. since not all controllers let
			 * them control that, the api doesn't (yet) allow it.
			 */
			if (!ep->stopped)
				allow_status(ep);
			req = NULL;
		} else {
			if (!list_empty(&ep->queue) && !ep->stopped)
				req = list_entry(ep->queue.next,
					struct net2280_request, queue);
			else
				req = NULL;
			if (req && !ep->is_in)
				stop_out_naking(ep);
		}
	}

	/* is there a buffer for the next packet?
	 * for best streaming performance, make sure there is one.
	 */
	if (req && !ep->stopped) {

		/* load IN fifo with next packet (may be zlp) */
		if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
			write_fifo(ep, &req->req);
	}
}
2598
2599 static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex)
2600 {
2601         struct net2280_ep       *ep;
2602
2603         if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2604                 return &dev->ep[0];
2605         list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
2606                 u8      bEndpointAddress;
2607
2608                 if (!ep->desc)
2609                         continue;
2610                 bEndpointAddress = ep->desc->bEndpointAddress;
2611                 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2612                         continue;
2613                 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2614                         return ep;
2615         }
2616         return NULL;
2617 }
2618
2619 static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
2620 {
2621         u32 scratch, fsmvalue;
2622         u32 ack_wait_timeout, state;
2623
2624         /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
2625         scratch = get_idx_reg(dev->regs, SCRATCH);
2626         fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
2627         scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
2628
2629         if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
2630                                 (r.bRequestType & USB_DIR_IN)))
2631                 return;
2632
2633         /* This is the first Control Read for this connection: */
2634         if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
2635                 /*
2636                  * Connection is NOT SS:
2637                  * - Connection must be FS or HS.
2638                  * - This FSM state should allow workaround software to
2639                  * run after the next USB connection.
2640                  */
2641                 scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
2642                 dev->bug7734_patched = 1;
2643                 goto restore_data_eps;
2644         }
2645
2646         /* Connection is SS: */
2647         for (ack_wait_timeout = 0;
2648                         ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
2649                         ack_wait_timeout++) {
2650
2651                 state = readl(&dev->plregs->pl_ep_status_1)
2652                         & (0xff << STATE);
2653                 if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
2654                         (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
2655                         scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
2656                         dev->bug7734_patched = 1;
2657                         break;
2658                 }
2659
2660                 /*
2661                  * We have not yet received host's Data Phase ACK
2662                  * - Wait and try again.
2663                  */
2664                 udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
2665
2666                 continue;
2667         }
2668
2669
2670         if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
2671                 ep_err(dev, "FAIL: Defect 7374 workaround waited but failed "
2672                 "to detect SS host's data phase ACK.");
2673                 ep_err(dev, "PL_EP_STATUS_1(23:16):.Expected from 0x11 to 0x16"
2674                 "got 0x%2.2x.\n", state >> STATE);
2675         } else {
2676                 ep_warn(dev, "INFO: Defect 7374 workaround waited about\n"
2677                 "%duSec for Control Read Data Phase ACK\n",
2678                         DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
2679         }
2680
2681 restore_data_eps:
2682         /*
2683          * Restore data EPs to their pre-workaround settings (disabled,
2684          * initialized, and other details).
2685          */
2686         defect7374_disable_data_eps(dev);
2687
2688         set_idx_reg(dev->regs, SCRATCH, scratch);
2689
2690         return;
2691 }
2692
2693 static void ep_clear_seqnum(struct net2280_ep *ep)
2694 {
2695         struct net2280 *dev = ep->dev;
2696         u32 val;
2697         static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };
2698
2699         val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f;
2700         val |= ep_pl[ep->num];
2701         writel(val, &dev->plregs->pl_ep_ctrl);
2702         val |= BIT(SEQUENCE_NUMBER_RESET);
2703         writel(val, &dev->plregs->pl_ep_ctrl);
2704
2705         return;
2706 }
2707
/*
 * Handle an ep0 SETUP request while the link is running at SuperSpeed.
 * Standard requests that the hardware cannot answer by itself
 * (GET_STATUS, SET/CLEAR_FEATURE for U1/U2/LTM/remote-wakeup and
 * endpoint halt) are serviced here; everything else is delegated to
 * the gadget driver's setup() callback with dev->lock dropped.
 * A negative return from the gadget driver results in a protocol stall.
 */
static void handle_stat0_irqs_superspeed(struct net2280 *dev,
		struct net2280_ep *ep, struct usb_ctrlrequest r)
{
	int tmp = 0;

#define	w_value		le16_to_cpu(r.wValue)
#define	w_index		le16_to_cpu(r.wIndex)
#define	w_length	le16_to_cpu(r.wLength)

	switch (r.bRequest) {
		struct net2280_ep *e;
		u16 status;

	case USB_REQ_SET_CONFIGURATION:
		/* remember "addressed" (unconfigured) state for U1/U2/LTM */
		dev->addressed_state = !w_value;
		goto usb3_delegate;

	case USB_REQ_GET_STATUS:
		switch (r.bRequestType) {
		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			/* bit0 self-powered, bit1 wakeup, bits2-4 U1/U2/LTM */
			status = dev->wakeup_enable ? 0x02 : 0x00;
			if (dev->gadget.is_selfpowered)
				status |= BIT(0);
			status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
							dev->ltm_enable << 4);
			/* answer directly from the ep0 fifo, no request object */
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, sizeof(status));
			writel((__force u32) status, &dev->epregs[0].ep_data);
			allow_status_338x(ep);
			break;

		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall3;
			/* report halt state of the addressed endpoint */
			status = readl(&e->regs->ep_rsp) &
						BIT(CLEAR_ENDPOINT_HALT);
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, sizeof(status));
			writel((__force u32) status, &dev->epregs[0].ep_data);
			allow_status_338x(ep);
			break;

		default:
			goto usb3_delegate;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (r.bRequestType) {
		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			/* U1/U2/LTM may only be changed when configured */
			if (!dev->addressed_state) {
				switch (w_value) {
				case USB_DEVICE_U1_ENABLE:
					dev->u1_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(U1_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_U2_ENABLE:
					dev->u2_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(U2_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_LTM_ENABLE:
					dev->ltm_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(LTM_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				default:
					break;
				}
			}
			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
				dev->wakeup_enable = 0;
				writel(readl(&dev->usb->usbctl) &
					~BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
					&dev->usb->usbctl);
				allow_status_338x(ep);
				break;
			}
			goto usb3_delegate;

		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall3;
			if (w_value != USB_ENDPOINT_HALT)
				goto do_stall3;
			ep_vdbg(dev, "%s clear halt\n", e->ep.name);
			/*
			 * Workaround for SS SeqNum not cleared via
			 * Endpoint Halt (Clear) bit. select endpoint
			 */
			ep_clear_seqnum(e);
			clear_halt(e);
			if (!list_empty(&e->queue) && e->td_dma)
				restart_dma(e);
			/* ack the status stage on ep0 */
			allow_status(ep);
			ep->stopped = 1;
			break;

		default:
			goto usb3_delegate;
		}
		break;
	case USB_REQ_SET_FEATURE:
		switch (r.bRequestType) {
		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			/* U1/U2/LTM may only be changed when configured */
			if (!dev->addressed_state) {
				switch (w_value) {
				case USB_DEVICE_U1_ENABLE:
					dev->u1_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(U1_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_U2_ENABLE:
					dev->u2_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(U2_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_LTM_ENABLE:
					dev->ltm_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(LTM_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;
				default:
					break;
				}
			}

			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
				dev->wakeup_enable = 1;
				writel(readl(&dev->usb->usbctl) |
					BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
					&dev->usb->usbctl);
				allow_status_338x(ep);
				break;
			}
			goto usb3_delegate;

		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev, w_index);
			if (!e || (w_value != USB_ENDPOINT_HALT))
				goto do_stall3;
			/*
			 * NOTE(review): the statements below act on 'ep'
			 * (the control endpoint passed in), not on the
			 * looked-up target 'e'; if callers always pass ep0
			 * the else branch is unreachable.  Confirm whether
			 * 'e' was intended here.
			 */
			ep->stopped = 1;
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else {
				if (ep->dma)
					abort_dma(ep);
				set_halt(ep);
			}
			allow_status_338x(ep);
			break;

		default:
			goto usb3_delegate;
		}

		break;
	default:

usb3_delegate:
		ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
				r.bRequestType, r.bRequest,
				w_value, w_index, w_length,
				readl(&ep->cfg->ep_cfg));

		/* hand the request up to the gadget driver, unlocked */
		ep->responded = 0;
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &r);
		spin_lock(&dev->lock);
	}
do_stall3:
	/* tmp < 0 only via the delegate path (or fall-through with 0) */
	if (tmp < 0) {
		ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
				r.bRequestType, r.bRequest, tmp);
		dev->protocol_stall = 1;
		/* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
		set_halt(ep);
	}

next_endpoints3:

#undef	w_value
#undef	w_index
#undef	w_length

	return;
}
2915
2916 static void usb338x_handle_ep_intr(struct net2280 *dev, u32 stat0)
2917 {
2918         u32 index;
2919         u32 bit;
2920
2921         for (index = 0; index < ARRAY_SIZE(ep_bit); index++) {
2922                 bit = BIT(ep_bit[index]);
2923
2924                 if (!stat0)
2925                         break;
2926
2927                 if (!(stat0 & bit))
2928                         continue;
2929
2930                 stat0 &= ~bit;
2931
2932                 handle_ep_small(&dev->ep[index]);
2933         }
2934 }
2935
2936 static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
2937 {
2938         struct net2280_ep       *ep;
2939         u32                     num, scratch;
2940
2941         /* most of these don't need individual acks */
2942         stat &= ~BIT(INTA_ASSERTED);
2943         if (!stat)
2944                 return;
2945         /* ep_dbg(dev, "irqstat0 %04x\n", stat); */
2946
2947         /* starting a control request? */
2948         if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
2949                 union {
2950                         u32                     raw[2];
2951                         struct usb_ctrlrequest  r;
2952                 } u;
2953                 int                             tmp;
2954                 struct net2280_request          *req;
2955
2956                 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
2957                         u32 val = readl(&dev->usb->usbstat);
2958                         if (val & BIT(SUPER_SPEED)) {
2959                                 dev->gadget.speed = USB_SPEED_SUPER;
2960                                 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2961                                                 EP0_SS_MAX_PACKET_SIZE);
2962                         } else if (val & BIT(HIGH_SPEED)) {
2963                                 dev->gadget.speed = USB_SPEED_HIGH;
2964                                 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2965                                                 EP0_HS_MAX_PACKET_SIZE);
2966                         } else {
2967                                 dev->gadget.speed = USB_SPEED_FULL;
2968                                 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2969                                                 EP0_HS_MAX_PACKET_SIZE);
2970                         }
2971                         net2280_led_speed(dev, dev->gadget.speed);
2972                         ep_dbg(dev, "%s\n",
2973                                         usb_speed_string(dev->gadget.speed));
2974                 }
2975
2976                 ep = &dev->ep[0];
2977                 ep->irqs++;
2978
2979                 /* make sure any leftover request state is cleared */
2980                 stat &= ~BIT(ENDPOINT_0_INTERRUPT);
2981                 while (!list_empty(&ep->queue)) {
2982                         req = list_entry(ep->queue.next,
2983                                         struct net2280_request, queue);
2984                         done(ep, req, (req->req.actual == req->req.length)
2985                                                 ? 0 : -EPROTO);
2986                 }
2987                 ep->stopped = 0;
2988                 dev->protocol_stall = 0;
2989                 if (!(dev->quirks & PLX_SUPERSPEED)) {
2990                         if (ep->dev->quirks & PLX_2280)
2991                                 tmp = BIT(FIFO_OVERFLOW) |
2992                                     BIT(FIFO_UNDERFLOW);
2993                         else
2994                                 tmp = 0;
2995
2996                         writel(tmp | BIT(TIMEOUT) |
2997                                    BIT(USB_STALL_SENT) |
2998                                    BIT(USB_IN_NAK_SENT) |
2999                                    BIT(USB_IN_ACK_RCVD) |
3000                                    BIT(USB_OUT_PING_NAK_SENT) |
3001                                    BIT(USB_OUT_ACK_SENT) |
3002                                    BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
3003                                    BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
3004                                    BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
3005                                    BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
3006                                    BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
3007                                    BIT(DATA_IN_TOKEN_INTERRUPT),
3008                                    &ep->regs->ep_stat);
3009                 }
3010                 u.raw[0] = readl(&dev->usb->setup0123);
3011                 u.raw[1] = readl(&dev->usb->setup4567);
3012
3013                 cpu_to_le32s(&u.raw[0]);
3014                 cpu_to_le32s(&u.raw[1]);
3015
3016                 if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
3017                         defect7374_workaround(dev, u.r);
3018
3019                 tmp = 0;
3020
3021 #define w_value         le16_to_cpu(u.r.wValue)
3022 #define w_index         le16_to_cpu(u.r.wIndex)
3023 #define w_length        le16_to_cpu(u.r.wLength)
3024
3025                 /* ack the irq */
3026                 writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
3027                 stat ^= BIT(SETUP_PACKET_INTERRUPT);
3028
3029                 /* watch control traffic at the token level, and force
3030                  * synchronization before letting the status stage happen.
3031                  * FIXME ignore tokens we'll NAK, until driver responds.
3032                  * that'll mean a lot less irqs for some drivers.
3033                  */
3034                 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
3035                 if (ep->is_in) {
3036                         scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
3037                                 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
3038                                 BIT(DATA_IN_TOKEN_INTERRUPT);
3039                         stop_out_naking(ep);
3040                 } else
3041                         scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
3042                                 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
3043                                 BIT(DATA_IN_TOKEN_INTERRUPT);
3044                 writel(scratch, &dev->epregs[0].ep_irqenb);
3045
3046                 /* we made the hardware handle most lowlevel requests;
3047                  * everything else goes uplevel to the gadget code.
3048                  */
3049                 ep->responded = 1;
3050
3051                 if (dev->gadget.speed == USB_SPEED_SUPER) {
3052                         handle_stat0_irqs_superspeed(dev, ep, u.r);
3053                         goto next_endpoints;
3054                 }
3055
3056                 switch (u.r.bRequest) {
3057                 case USB_REQ_GET_STATUS: {
3058                         struct net2280_ep       *e;
3059                         __le32                  status;
3060
3061                         /* hw handles device and interface status */
3062                         if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
3063                                 goto delegate;
3064                         e = get_ep_by_addr(dev, w_index);
3065                         if (!e || w_length > 2)
3066                                 goto do_stall;
3067
3068                         if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
3069                                 status = cpu_to_le32(1);
3070                         else
3071                                 status = cpu_to_le32(0);
3072
3073                         /* don't bother with a request object! */
3074                         writel(0, &dev->epregs[0].ep_irqenb);
3075                         set_fifo_bytecount(ep, w_length);
3076                         writel((__force u32)status, &dev->epregs[0].ep_data);
3077                         allow_status(ep);
3078                         ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status);
3079                         goto next_endpoints;
3080                         }
3081                         break;
3082                 case USB_REQ_CLEAR_FEATURE: {
3083                         struct net2280_ep       *e;
3084
3085                         /* hw handles device features */
3086                         if (u.r.bRequestType != USB_RECIP_ENDPOINT)
3087                                 goto delegate;
3088                         if (w_value != USB_ENDPOINT_HALT || w_length != 0)
3089                                 goto do_stall;
3090                         e = get_ep_by_addr(dev, w_index);
3091                         if (!e)
3092                                 goto do_stall;
3093                         if (e->wedged) {
3094                                 ep_vdbg(dev, "%s wedged, halt not cleared\n",
3095                                                 ep->ep.name);
3096                         } else {
3097                                 ep_vdbg(dev, "%s clear halt\n", e->ep.name);
3098                                 clear_halt(e);
3099                                 if ((ep->dev->quirks & PLX_SUPERSPEED) &&
3100                                         !list_empty(&e->queue) && e->td_dma)
3101                                                 restart_dma(e);
3102                         }
3103                         allow_status(ep);
3104                         goto next_endpoints;
3105                         }
3106                         break;
3107                 case USB_REQ_SET_FEATURE: {
3108                         struct net2280_ep       *e;
3109
3110                         /* hw handles device features */
3111                         if (u.r.bRequestType != USB_RECIP_ENDPOINT)
3112                                 goto delegate;
3113                         if (w_value != USB_ENDPOINT_HALT || w_length != 0)
3114                                 goto do_stall;
3115                         e = get_ep_by_addr(dev, w_index);
3116                         if (!e)
3117                                 goto do_stall;
3118                         if (e->ep.name == ep0name)
3119                                 goto do_stall;
3120                         set_halt(e);
3121                         if ((dev->quirks & PLX_SUPERSPEED) && e->dma)
3122                                 abort_dma(e);
3123                         allow_status(ep);
3124                         ep_vdbg(dev, "%s set halt\n", ep->ep.name);
3125                         goto next_endpoints;
3126                         }
3127                         break;
3128                 default:
3129 delegate:
3130                         ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x "
3131                                 "ep_cfg %08x\n",
3132                                 u.r.bRequestType, u.r.bRequest,
3133                                 w_value, w_index, w_length,
3134                                 readl(&ep->cfg->ep_cfg));
3135                         ep->responded = 0;
3136                         spin_unlock(&dev->lock);
3137                         tmp = dev->driver->setup(&dev->gadget, &u.r);
3138                         spin_lock(&dev->lock);
3139                 }
3140
3141                 /* stall ep0 on error */
3142                 if (tmp < 0) {
3143 do_stall:
3144                         ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
3145                                         u.r.bRequestType, u.r.bRequest, tmp);
3146                         dev->protocol_stall = 1;
3147                 }
3148
3149                 /* some in/out token irq should follow; maybe stall then.
3150                  * driver must queue a request (even zlp) or halt ep0
3151                  * before the host times out.
3152                  */
3153         }
3154
3155 #undef  w_value
3156 #undef  w_index
3157 #undef  w_length
3158
3159 next_endpoints:
3160         if ((dev->quirks & PLX_SUPERSPEED) && dev->enhanced_mode) {
3161                 u32 mask = (BIT(ENDPOINT_0_INTERRUPT) |
3162                         USB3380_IRQSTAT0_EP_INTR_MASK_IN |
3163                         USB3380_IRQSTAT0_EP_INTR_MASK_OUT);
3164
3165                 if (stat & mask) {
3166                         usb338x_handle_ep_intr(dev, stat & mask);
3167                         stat &= ~mask;
3168                 }
3169         } else {
3170                 /* endpoint data irq ? */
3171                 scratch = stat & 0x7f;
3172                 stat &= ~0x7f;
3173                 for (num = 0; scratch; num++) {
3174                         u32             t;
3175
3176                         /* do this endpoint's FIFO and queue need tending? */
3177                         t = BIT(num);
3178                         if ((scratch & t) == 0)
3179                                 continue;
3180                         scratch ^= t;
3181
3182                         ep = &dev->ep[num];
3183                         handle_ep_small(ep);
3184                 }
3185         }
3186
3187         if (stat)
3188                 ep_dbg(dev, "unhandled irqstat0 %08x\n", stat);
3189 }
3190
/* Per-channel DMA completion bits in irqstat1, one per DMA channel a-d */
#define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
                BIT(DMA_C_INTERRUPT) | \
                BIT(DMA_B_INTERRUPT) | \
                BIT(DMA_A_INTERRUPT))
/* Fatal PCI bus-error conditions reported in irqstat1 */
#define PCI_ERROR_INTERRUPTS ( \
                BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
                BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
                BIT(PCI_RETRY_ABORT_INTERRUPT))
3199
/*
 * Handle "stat1" interrupt sources: VBUS connect/disconnect, root-port
 * reset, suspend/resume changes, per-endpoint DMA completion, and PCI
 * bus errors.  Called with dev->lock held; the lock is dropped only
 * around the gadget driver's reset/disconnect callbacks.
 */
static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
__releases(dev->lock)
__acquires(dev->lock)
{
        struct net2280_ep       *ep;
        u32                     tmp, num, mask, scratch;

        /* after disconnect there's nothing else to do! */
        tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT);
        mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED);

        /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
         * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
         * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
         * only indicates a change in the reset state).
         */
        if (stat & tmp) {
                bool    reset = false;
                bool    disconnect = false;

                /*
                 * Ignore disconnects and resets if the speed hasn't been set.
                 * VBUS can bounce and there's always an initial reset.
                 */
                writel(tmp, &dev->regs->irqstat1);
                if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
                        /* disconnect: VBUS irq fired and the pin is now low */
                        if ((stat & BIT(VBUS_INTERRUPT)) &&
                                        (readl(&dev->usb->usbctl) &
                                                BIT(VBUS_PIN)) == 0) {
                                disconnect = true;
                                ep_dbg(dev, "disconnect %s\n",
                                                dev->driver->driver.name);
                        /* reset: change irq fired and no speed bit is set */
                        } else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
                                        (readl(&dev->usb->usbstat) & mask)
                                                == 0) {
                                reset = true;
                                ep_dbg(dev, "reset %s\n",
                                                dev->driver->driver.name);
                        }

                        if (disconnect || reset) {
                                stop_activity(dev, dev->driver);
                                ep0_start(dev);
                                /* gadget callbacks must run unlocked */
                                spin_unlock(&dev->lock);
                                if (reset)
                                        usb_gadget_udc_reset
                                                (&dev->gadget, dev->driver);
                                else
                                        (dev->driver->disconnect)
                                                (&dev->gadget);
                                spin_lock(&dev->lock);
                                return;
                        }
                }
                stat &= ~tmp;

                /* vBUS can bounce ... one of many reasons to ignore the
                 * notion of hotplug events on bus connect/disconnect!
                 */
                if (!stat)
                        return;
        }

        /* NOTE: chip stays in PCI D0 state for now, but it could
         * enter D1 to save more power
         */
        tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
        if (stat & tmp) {
                writel(tmp, &dev->regs->irqstat1);
                if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
                        if (dev->driver->suspend)
                                dev->driver->suspend(&dev->gadget);
                        /* keep the "suspended" state visible below only when
                         * suspend handling is enabled (module parameter)
                         */
                        if (!enable_suspend)
                                stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
                } else {
                        if (dev->driver->resume)
                                dev->driver->resume(&dev->gadget);
                        /* at high speed, note erratum 0133 */
                }
                stat &= ~tmp;
        }

        /* clear any other status/irqs */
        if (stat)
                writel(stat, &dev->regs->irqstat1);

        /* some status we can just ignore */
        if (dev->quirks & PLX_2280)
                stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
                          BIT(SUSPEND_REQUEST_INTERRUPT) |
                          BIT(RESUME_INTERRUPT) |
                          BIT(SOF_INTERRUPT));
        else
                stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
                          BIT(RESUME_INTERRUPT) |
                          BIT(SOF_DOWN_INTERRUPT) |
                          BIT(SOF_INTERRUPT));

        if (!stat)
                return;
        /* ep_dbg(dev, "irqstat1 %08x\n", stat);*/

        /* DMA status, for ep-{a,b,c,d} */
        scratch = stat & DMA_INTERRUPTS;
        stat &= ~DMA_INTERRUPTS;
        /* align the per-channel bits so bit `num` maps to dev->ep[num + 1] */
        scratch >>= 9;
        for (num = 0; scratch; num++) {
                struct net2280_dma_regs __iomem *dma;

                tmp = BIT(num);
                if ((tmp & scratch) == 0)
                        continue;
                scratch ^= tmp;

                ep = &dev->ep[num + 1];
                dma = ep->dma;

                /* no DMA channel assigned to this endpoint */
                if (!dma)
                        continue;

                /* clear ep's dma status */
                tmp = readl(&dma->dmastat);
                writel(tmp, &dma->dmastat);

                /* dma sync: on 338x parts, defer completion processing while
                 * an OUT transfer still reports a nonzero dma count
                 * (presumably bytes outstanding) despite TRANSACTION_DONE
                 */
                if (dev->quirks & PLX_SUPERSPEED) {
                        u32 r_dmacount = readl(&dma->dmacount);
                        if (!ep->is_in &&  (r_dmacount & 0x00FFFFFF) &&
                            (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
                                continue;
                }

                if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
                        ep_dbg(ep->dev, "%s no xact done? %08x\n",
                                ep->ep.name, tmp);
                        continue;
                }
                stop_dma(ep->dma);

                /* OUT transfers terminate when the data from the
                 * host is in our memory.  Process whatever's done.
                 * On this path, we know transfer's last packet wasn't
                 * less than req->length. NAK_OUT_PACKETS may be set,
                 * or the FIFO may already be holding new packets.
                 *
                 * IN transfers can linger in the FIFO for a very
                 * long time ... we ignore that for now, accounting
                 * precisely (like PIO does) needs per-packet irqs
                 */
                scan_dma_completions(ep);

                /* disable dma on inactive queues; else maybe restart */
                if (!list_empty(&ep->queue)) {
                        /* NOTE(review): dmactl is read but the value is never
                         * used; unclear whether the read itself is required
                         * by the hardware or this is leftover -- confirm
                         */
                        tmp = readl(&dma->dmactl);
                        restart_dma(ep);
                }
                ep->irqs++;
        }

        /* NOTE:  there are other PCI errors we might usefully notice.
         * if they appear very often, here's where to try recovering.
         */
        if (stat & PCI_ERROR_INTERRUPTS) {
                ep_err(dev, "pci dma error; stat %08x\n", stat);
                stat &= ~PCI_ERROR_INTERRUPTS;
                /* these are fatal errors, but "maybe" they won't
                 * happen again ...
                 */
                stop_activity(dev, dev->driver);
                ep0_start(dev);
                stat = 0;
        }

        if (stat)
                ep_dbg(dev, "unhandled irqstat1 %08x\n", stat);
}
3376
3377 static irqreturn_t net2280_irq(int irq, void *_dev)
3378 {
3379         struct net2280          *dev = _dev;
3380
3381         /* shared interrupt, not ours */
3382         if ((dev->quirks & PLX_LEGACY) &&
3383                 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
3384                 return IRQ_NONE;
3385
3386         spin_lock(&dev->lock);
3387
3388         /* handle disconnect, dma, and more */
3389         handle_stat1_irqs(dev, readl(&dev->regs->irqstat1));
3390
3391         /* control requests and PIO */
3392         handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
3393
3394         if (dev->quirks & PLX_SUPERSPEED) {
3395                 /* re-enable interrupt to trigger any possible new interrupt */
3396                 u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
3397                 writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
3398                 writel(pciirqenb1, &dev->regs->pciirqenb1);
3399         }
3400
3401         spin_unlock(&dev->lock);
3402
3403         return IRQ_HANDLED;
3404 }
3405
3406 /*-------------------------------------------------------------------------*/
3407
/*
 * Device release callback handed to usb_add_gadget_udc_release() by
 * net2280_probe(); frees the net2280 state that probe allocated.
 */
static void gadget_release(struct device *_dev)
{
        struct net2280  *dev;

        dev = dev_get_drvdata(_dev);
        kfree(dev);
}
3414
3415 /* tear down the binding between this driver and the pci device */
3416
/*
 * Undo everything net2280_probe() set up, in reverse order.  This is
 * also used as probe's error-path cleanup (via its "done" label), so
 * each step is guarded by the flag/pointer probe sets only after the
 * corresponding resource was acquired.
 */
static void net2280_remove(struct pci_dev *pdev)
{
        struct net2280          *dev = pci_get_drvdata(pdev);

        usb_del_gadget_udc(&dev->gadget);

        /* a gadget driver must already have been unbound at this point */
        BUG_ON(dev->driver);

        /* then clean up the resources we allocated during probe() */
        net2280_led_shutdown(dev);
        /* free the per-endpoint "dummy" DMA descriptors (eps 1..4) */
        if (dev->requests) {
                int             i;
                for (i = 1; i < 5; i++) {
                        if (!dev->ep[i].dummy)
                                continue;
                        pci_pool_free(dev->requests, dev->ep[i].dummy,
                                        dev->ep[i].td_dma);
                }
                pci_pool_destroy(dev->requests);
        }
        if (dev->got_irq)
                free_irq(pdev->irq, dev);
        /* MSI was only enabled on the superspeed (338x) parts */
        if (dev->quirks & PLX_SUPERSPEED)
                pci_disable_msi(pdev);
        if (dev->regs)
                iounmap(dev->regs);
        if (dev->region)
                release_mem_region(pci_resource_start(pdev, 0),
                                pci_resource_len(pdev, 0));
        if (dev->enabled)
                pci_disable_device(pdev);
        device_remove_file(&pdev->dev, &dev_attr_registers);

        ep_info(dev, "unbind\n");
}
3452
3453 /* wrap this driver around the specified device, but
3454  * don't respond over USB until a gadget driver binds to us.
3455  */
3456
/*
 * Bind this driver to one NET228x/USB338x PCI function: map the BAR 0
 * register banks, reset the chip, set up the IRQ (MSI where supported)
 * and the DMA descriptor pool, then register the UDC.  On any failure
 * the partially-initialized state is torn down by net2280_remove(),
 * guided by the enabled/region/got_irq flags set along the way.
 */
static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct net2280          *dev;
        unsigned long           resource, len;
        void                    __iomem *base = NULL;
        int                     retval, i;

        /* alloc, and start init */
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (dev == NULL) {
                retval = -ENOMEM;
                goto done;
        }

        pci_set_drvdata(pdev, dev);
        spin_lock_init(&dev->lock);
        /* quirk flags (PLX_LEGACY / PLX_2280 / PLX_SUPERSPEED) come from
         * the matched pci_ids[] entry
         */
        dev->quirks = id->driver_data;
        dev->pdev = pdev;
        dev->gadget.ops = &net2280_ops;
        dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ?
                                USB_SPEED_SUPER : USB_SPEED_HIGH;

        /* the "gadget" abstracts/virtualizes the controller */
        dev->gadget.name = driver_name;

        /* now all the pci goodies ... */
        if (pci_enable_device(pdev) < 0) {
                retval = -ENODEV;
                goto done;
        }
        dev->enabled = 1;

        /* BAR 0 holds all the registers
         * BAR 1 is 8051 memory; unused here (note erratum 0103)
         * BAR 2 is fifo memory; unused here
         */
        resource = pci_resource_start(pdev, 0);
        len = pci_resource_len(pdev, 0);
        if (!request_mem_region(resource, len, driver_name)) {
                ep_dbg(dev, "controller already in use\n");
                retval = -EBUSY;
                goto done;
        }
        dev->region = 1;

        /* FIXME provide firmware download interface to put
         * 8051 code into the chip, e.g. to turn on PCI PM.
         */

        base = ioremap_nocache(resource, len);
        if (base == NULL) {
                ep_dbg(dev, "can't map memory\n");
                retval = -EFAULT;
                goto done;
        }
        /* carve BAR 0 into the register banks at their fixed offsets */
        dev->regs = (struct net2280_regs __iomem *) base;
        dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
        dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
        dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
        dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
        dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);

        if (dev->quirks & PLX_SUPERSPEED) {
                u32 fsmvalue;
                u32 usbstat;
                /* USB338x parts add further register banks for the
                 * SuperSpeed link layer and packet logic
                 */
                dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
                                                        (base + 0x00b4);
                dev->llregs = (struct usb338x_ll_regs __iomem *)
                                                        (base + 0x0700);
                dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *)
                                                        (base + 0x0748);
                dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *)
                                                        (base + 0x077c);
                dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *)
                                                        (base + 0x079c);
                dev->plregs = (struct usb338x_pl_regs __iomem *)
                                                        (base + 0x0800);
                usbstat = readl(&dev->usb->usbstat);
                /* usbstat bit 11: chip strapped for "enhanced mode",
                 * which exposes 9 endpoints instead of 5
                 */
                dev->enhanced_mode = !!(usbstat & BIT(11));
                dev->n_ep = (dev->enhanced_mode) ? 9 : 5;
                /* put into initial config, link up all endpoints */
                fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
                                        (0xf << DEFECT7374_FSM_FIELD);
                /* See if firmware needs to set up for workaround: */
                if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
                        dev->bug7734_patched = 1;
                        writel(0, &dev->usb->usbctl);
                } else
                        dev->bug7734_patched = 0;
        } else {
                dev->enhanced_mode = 0;
                dev->n_ep = 7;
                /* put into initial config, link up all endpoints */
                writel(0, &dev->usb->usbctl);
        }

        usb_reset(dev);
        usb_reinit(dev);

        /* irq setup after old hardware is cleaned up */
        if (!pdev->irq) {
                ep_err(dev, "No IRQ.  Check PCI setup!\n");
                retval = -ENODEV;
                goto done;
        }

        /* MSI failure is not fatal; fall back to the legacy IRQ line */
        if (dev->quirks & PLX_SUPERSPEED)
                if (pci_enable_msi(pdev))
                        ep_err(dev, "Failed to enable MSI mode\n");

        if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED,
                                                        driver_name, dev)) {
                ep_err(dev, "request interrupt %d failed\n", pdev->irq);
                retval = -EBUSY;
                goto done;
        }
        dev->got_irq = 1;

        /* DMA setup */
        /* NOTE:  we know only the 32 LSBs of dma addresses may be nonzero */
        dev->requests = pci_pool_create("requests", pdev,
                sizeof(struct net2280_dma),
                0 /* no alignment requirements */,
                0 /* or page-crossing issues */);
        if (!dev->requests) {
                ep_dbg(dev, "can't get request pool\n");
                retval = -ENOMEM;
                goto done;
        }
        /* allocate one "dummy" descriptor per DMA-capable endpoint;
         * each points at itself and is marked not VALID
         */
        for (i = 1; i < 5; i++) {
                struct net2280_dma      *td;

                td = pci_pool_alloc(dev->requests, GFP_KERNEL,
                                &dev->ep[i].td_dma);
                if (!td) {
                        ep_dbg(dev, "can't get dummy %d\n", i);
                        retval = -ENOMEM;
                        goto done;
                }
                td->dmacount = 0;       /* not VALID */
                td->dmadesc = td->dmaaddr;
                dev->ep[i].dummy = td;
        }

        /* enable lower-overhead pci memory bursts during DMA */
        if (dev->quirks & PLX_LEGACY)
                writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
                        /*
                         * 256 write retries may not be enough...
                           BIT(PCI_RETRY_ABORT_ENABLE) |
                        */
                        BIT(DMA_READ_MULTIPLE_ENABLE) |
                        BIT(DMA_READ_LINE_ENABLE),
                        &dev->pci->pcimstctl);
        /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);

        /* ... also flushes any posted pci writes */
        dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;

        /* done */
        ep_info(dev, "%s\n", driver_desc);
        ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n",
                        pdev->irq, base, dev->chiprev);
        ep_info(dev, "version: " DRIVER_VERSION "; %s\n",
                dev->enhanced_mode ? "enhanced mode" : "legacy mode");
        retval = device_create_file(&pdev->dev, &dev_attr_registers);
        if (retval)
                goto done;

        retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
                        gadget_release);
        if (retval)
                goto done;
        return 0;

done:
        /* net2280_remove() skips whatever was never set up, based on
         * the flags assigned above
         */
        if (dev)
                net2280_remove(pdev);
        return retval;
}
3639
3640 /* make sure the board is quiescent; otherwise it will continue
3641  * generating IRQs across the upcoming reboot.
3642  */
3643
static void net2280_shutdown(struct pci_dev *pdev)
{
        struct net2280          *dev = pci_get_drvdata(pdev);

        /* disable IRQs: mask both interrupt-enable registers so the
         * chip stays silent across the reboot
         */
        writel(0, &dev->regs->pciirqenb0);
        writel(0, &dev->regs->pciirqenb1);

        /* disable the pullup so the host will think we're gone */
        writel(0, &dev->usb->usbctl);

}
3656
3657
3658 /*-------------------------------------------------------------------------*/
3659
/* Devices handled by this driver.  driver_data carries the quirk flags
 * (PLX_LEGACY, PLX_2280, PLX_SUPERSPEED) tested throughout the driver.
 */
static const struct pci_device_id pci_ids[] = { {
        /* NET2280: original chip, needs the 2280-specific workarounds */
        .class =        ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
        .class_mask =   ~0,
        .vendor =       PCI_VENDOR_ID_PLX_LEGACY,
        .device =       0x2280,
        .subvendor =    PCI_ANY_ID,
        .subdevice =    PCI_ANY_ID,
        .driver_data =  PLX_LEGACY | PLX_2280,
        }, {
        /* NET2282: 2280-compatible, without the extra 2280 quirks */
        .class =        ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
        .class_mask =   ~0,
        .vendor =       PCI_VENDOR_ID_PLX_LEGACY,
        .device =       0x2282,
        .subvendor =    PCI_ANY_ID,
        .subdevice =    PCI_ANY_ID,
        .driver_data =  PLX_LEGACY,
        },
        {
        /* USB3380: superspeed-capable part */
        .class =        ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
        .class_mask =   ~0,
        .vendor =       PCI_VENDOR_ID_PLX,
        .device =       0x3380,
        .subvendor =    PCI_ANY_ID,
        .subdevice =    PCI_ANY_ID,
        .driver_data =  PLX_SUPERSPEED,
         },
        {
        /* USB3382: superspeed-capable part */
        .class =        ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
        .class_mask =   ~0,
        .vendor =       PCI_VENDOR_ID_PLX,
        .device =       0x3382,
        .subvendor =    PCI_ANY_ID,
        .subdevice =    PCI_ANY_ID,
        .driver_data =  PLX_SUPERSPEED,
         },
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
3698
3699 /* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver net2280_pci_driver = {
        /* NOTE(review): cast drops const; presumably because older
         * struct pci_driver declared .name as plain char * -- confirm
         */
        .name =         (char *) driver_name,
        .id_table =     pci_ids,

        .probe =        net2280_probe,
        .remove =       net2280_remove,
        .shutdown =     net2280_shutdown,

        /* FIXME add power management support */
};
3710
/* registers net2280_pci_driver at module init, unregisters at exit */
module_pci_driver(net2280_pci_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");