6c167370cd5a6ba939265cd763c03c066ce86b2b
[firefly-linux-kernel-4.4.55.git] / drivers / usb / gadget / udc / amd5536udc.c
1 /*
2  * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
3  *
4  * Copyright (C) 2005-2007 AMD (http://www.amd.com)
5  * Author: Thomas Dahlmann
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  */
12
13 /*
14  * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
15  * It is a USB Highspeed DMA capable USB device controller. Beside ep0 it
16  * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
17  *
18  * Make sure that UDC is assigned to port 4 by BIOS settings (port can also
19  * be used as host port) and UOC bits PAD_EN and APU are set (should be done
20  * by BIOS init).
21  *
22  * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
23  * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0")
24  * can be used with gadget ether.
25  */
26
27 /* debug control */
28 /* #define UDC_VERBOSE */
29
30 /* Driver strings */
31 #define UDC_MOD_DESCRIPTION             "AMD 5536 UDC - USB Device Controller"
32 #define UDC_DRIVER_VERSION_STRING       "01.00.0206"
33
34 /* system */
35 #include <linux/module.h>
36 #include <linux/pci.h>
37 #include <linux/kernel.h>
38 #include <linux/delay.h>
39 #include <linux/ioport.h>
40 #include <linux/sched.h>
41 #include <linux/slab.h>
42 #include <linux/errno.h>
43 #include <linux/timer.h>
44 #include <linux/list.h>
45 #include <linux/interrupt.h>
46 #include <linux/ioctl.h>
47 #include <linux/fs.h>
48 #include <linux/dmapool.h>
49 #include <linux/moduleparam.h>
50 #include <linux/device.h>
51 #include <linux/io.h>
52 #include <linux/irq.h>
53 #include <linux/prefetch.h>
54
55 #include <asm/byteorder.h>
56 #include <asm/unaligned.h>
57
58 /* gadget stack */
59 #include <linux/usb/ch9.h>
60 #include <linux/usb/gadget.h>
61
62 /* udc specific */
63 #include "amd5536udc.h"
64
65
66 static void udc_tasklet_disconnect(unsigned long);
67 static void empty_req_queue(struct udc_ep *);
68 static void udc_basic_init(struct udc *dev);
69 static void udc_setup_endpoints(struct udc *dev);
70 static void udc_soft_reset(struct udc *dev);
71 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
72 static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
73 static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
74 static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
75                                 unsigned long buf_len, gfp_t gfp_flags);
76 static int udc_remote_wakeup(struct udc *dev);
77 static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
78 static void udc_pci_remove(struct pci_dev *pdev);
79
80 /* description */
81 static const char mod_desc[] = UDC_MOD_DESCRIPTION;
82 static const char name[] = "amd5536udc";
83
84 /* structure to hold endpoint function pointers */
85 static const struct usb_ep_ops udc_ep_ops;
86
87 /* received setup data */
88 static union udc_setup_data setup_data;
89
90 /* pointer to device object */
91 static struct udc *udc;
92
93 /* irq spin lock for soft reset */
94 static DEFINE_SPINLOCK(udc_irq_spinlock);
95 /* stall spin lock */
96 static DEFINE_SPINLOCK(udc_stall_spinlock);
97
98 /*
99 * slave mode: pending bytes in rx fifo after nyet,
100 * used if EPIN irq came but no req was available
101 */
102 static unsigned int udc_rxfifo_pending;
103
104 /* count soft resets after suspend to avoid loop */
105 static int soft_reset_occured;
106 static int soft_reset_after_usbreset_occured;
107
108 /* timer */
109 static struct timer_list udc_timer;
110 static int stop_timer;
111
112 /* set_rde -- Is used to control enabling of RX DMA. Problem is
113  * that UDC has only one bit (RDE) to enable/disable RX DMA for
114  * all OUT endpoints. So we have to handle race conditions like
115  * when OUT data reaches the fifo but no request was queued yet.
116  * This cannot be solved by letting the RX DMA disabled until a
117  * request gets queued because there may be other OUT packets
118  * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
123  * set_rde  1 == timer function will look whether FIFO has data
124  * set_rde  2 == set by timer function to enable RX DMA on next call
125  */
126 static int set_rde = -1;
127
128 static DECLARE_COMPLETION(on_exit);
129 static struct timer_list udc_pollstall_timer;
130 static int stop_pollstall_timer;
131 static DECLARE_COMPLETION(on_pollstall_exit);
132
133 /* tasklet for usb disconnect */
134 static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
135                 (unsigned long) &udc);
136
137
/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
/*
 * Per-endpoint name/capability table: 16 IN entries (ep0in first)
 * followed by 16 OUT entries (ep0out first).
 *
 * NOTE(review): "ep1in-int" is declared with USB_EP_CAPS_TYPE_BULK even
 * though its name suggests an interrupt endpoint -- confirm against the
 * controller's supported transfer types before relying on the caps flags.
 */
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(ep0_string,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep1in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep3in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep5in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep6in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep7in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep8in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep9in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep10in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep11in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep12in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep13in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep14in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep15in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep0out",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep1out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep4out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep5out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep6out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep7out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep8out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep9out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep10out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep11out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep12out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep13out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep14out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep15out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};
217
218 /* DMA usage flag */
219 static bool use_dma = 1;
220 /* packet per buffer dma */
221 static bool use_dma_ppb = 1;
222 /* with per descr. update */
223 static bool use_dma_ppb_du;
224 /* buffer fill mode */
225 static int use_dma_bufferfill_mode;
226 /* full speed only mode */
227 static bool use_fullspeed;
228 /* tx buffer size for high speed */
229 static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
230
231 /* module parameters */
232 module_param(use_dma, bool, S_IRUGO);
233 MODULE_PARM_DESC(use_dma, "true for DMA");
234 module_param(use_dma_ppb, bool, S_IRUGO);
235 MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
236 module_param(use_dma_ppb_du, bool, S_IRUGO);
237 MODULE_PARM_DESC(use_dma_ppb_du,
238         "true for DMA in packet per buffer mode with descriptor update");
239 module_param(use_fullspeed, bool, S_IRUGO);
240 MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
241
242 /*---------------------------------------------------------------------------*/
243 /* Prints UDC device registers and endpoint irq registers */
244 static void print_regs(struct udc *dev)
245 {
246         DBG(dev, "------- Device registers -------\n");
247         DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
248         DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
249         DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
250         DBG(dev, "\n");
251         DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
252         DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
253         DBG(dev, "\n");
254         DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
255         DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
256         DBG(dev, "\n");
257         DBG(dev, "USE DMA        = %d\n", use_dma);
258         if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
259                 DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
260                         "WITHOUT desc. update)\n");
261                 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
262         } else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
263                 DBG(dev, "DMA mode       = PPBDU (packet per buffer "
264                         "WITH desc. update)\n");
265                 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
266         }
267         if (use_dma && use_dma_bufferfill_mode) {
268                 DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
269                 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
270         }
271         if (!use_dma)
272                 dev_info(&dev->pdev->dev, "FIFO mode\n");
273         DBG(dev, "-------------------------------------------------------\n");
274 }
275
276 /* Masks unused interrupts */
277 static int udc_mask_unused_interrupts(struct udc *dev)
278 {
279         u32 tmp;
280
281         /* mask all dev interrupts */
282         tmp =   AMD_BIT(UDC_DEVINT_SVC) |
283                 AMD_BIT(UDC_DEVINT_ENUM) |
284                 AMD_BIT(UDC_DEVINT_US) |
285                 AMD_BIT(UDC_DEVINT_UR) |
286                 AMD_BIT(UDC_DEVINT_ES) |
287                 AMD_BIT(UDC_DEVINT_SI) |
288                 AMD_BIT(UDC_DEVINT_SOF)|
289                 AMD_BIT(UDC_DEVINT_SC);
290         writel(tmp, &dev->regs->irqmsk);
291
292         /* mask all ep interrupts */
293         writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);
294
295         return 0;
296 }
297
298 /* Enables endpoint 0 interrupts */
299 static int udc_enable_ep0_interrupts(struct udc *dev)
300 {
301         u32 tmp;
302
303         DBG(dev, "udc_enable_ep0_interrupts()\n");
304
305         /* read irq mask */
306         tmp = readl(&dev->regs->ep_irqmsk);
307         /* enable ep0 irq's */
308         tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
309                 & AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
310         writel(tmp, &dev->regs->ep_irqmsk);
311
312         return 0;
313 }
314
315 /* Enables device interrupts for SET_INTF and SET_CONFIG */
316 static int udc_enable_dev_setup_interrupts(struct udc *dev)
317 {
318         u32 tmp;
319
320         DBG(dev, "enable device interrupts for setup data\n");
321
322         /* read irq mask */
323         tmp = readl(&dev->regs->irqmsk);
324
325         /* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
326         tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
327                 & AMD_UNMASK_BIT(UDC_DEVINT_SC)
328                 & AMD_UNMASK_BIT(UDC_DEVINT_UR)
329                 & AMD_UNMASK_BIT(UDC_DEVINT_SVC)
330                 & AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
331         writel(tmp, &dev->regs->irqmsk);
332
333         return 0;
334 }
335
336 /* Calculates fifo start of endpoint based on preceding endpoints */
337 static int udc_set_txfifo_addr(struct udc_ep *ep)
338 {
339         struct udc      *dev;
340         u32 tmp;
341         int i;
342
343         if (!ep || !(ep->in))
344                 return -EINVAL;
345
346         dev = ep->dev;
347         ep->txfifo = dev->txfifo;
348
349         /* traverse ep's */
350         for (i = 0; i < ep->num; i++) {
351                 if (dev->ep[i].regs) {
352                         /* read fifo size */
353                         tmp = readl(&dev->ep[i].regs->bufin_framenum);
354                         tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
355                         ep->txfifo += tmp;
356                 }
357         }
358         return 0;
359 }
360
361 /* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
362 static u32 cnak_pending;
363
364 static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
365 {
366         if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
367                 DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
368                 cnak_pending |= 1 << (num);
369                 ep->naking = 1;
370         } else
371                 cnak_pending = cnak_pending & (~(1 << (num)));
372 }
373
374
/*
 * Enables endpoint, is called by gadget driver.
 *
 * Programs traffic type, max packet size, the UDC CSR "ne" entry and the
 * per-ep irq mask under dev->lock.  For IN endpoints the tx fifo base is
 * recomputed and the fifo flushed; for OUT endpoints (DMA mode) a BNA dummy
 * descriptor is allocated.  NAK is only cleared here for IN endpoints or
 * non-DMA mode -- for OUT DMA it must wait until a descriptor is written.
 *
 * Returns 0 on success, -EINVAL for bad arguments or ep0, -ESHUTDOWN when
 * no gadget driver is bound or the bus speed is unknown.
 */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
	struct udc_ep		*ep;
	struct udc		*dev;
	u32			tmp;
	unsigned long		iflags;
	u8 udc_csr_epix;
	unsigned		maxpacket;

	/* ep0 is managed by the driver itself, not through this API */
	if (!usbep
			|| usbep->name == ep0_string
			|| !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	dev = ep->dev;

	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, iflags);
	ep->ep.desc = desc;

	ep->halted = 0;

	/* set traffic type */
	tmp = readl(&dev->ep[ep->num].regs->ctl);
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
	writel(tmp, &dev->ep[ep->num].regs->ctl);

	/* set max packet size */
	maxpacket = usb_endpoint_maxp(desc);
	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
	ep->ep.maxpacket = maxpacket;
	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

	/* IN ep */
	if (ep->in) {

		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num;

		/* set buffer size (tx fifo entries) */
		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
		/* double buffering: fifo size = 2 x max packet size */
		tmp = AMD_ADDBITS(
				tmp,
				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
					  / UDC_DWORD_BYTES,
				UDC_EPIN_BUFF_SIZE);
		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

		/* calc. tx fifo base addr */
		udc_set_txfifo_addr(ep);

		/* flush fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	/* OUT ep */
	} else {
		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

		/* set max packet size UDC CSR	*/
		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
		tmp = AMD_ADDBITS(tmp, maxpacket,
					UDC_CSR_NE_MAX_PKT);
		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

		if (use_dma && !ep->in) {
			/* alloc and init BNA dummy request */
			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
			ep->bna_occurred = 0;
		}

		if (ep->num != UDC_EP0OUT_IX)
			dev->data_ep_enabled = 1;
	}

	/* set ep values */
	tmp = readl(&dev->csr->ne[udc_csr_epix]);
	/* max packet */
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
	/* ep number */
	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
	/* ep direction */
	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
	/* ep type */
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
	/* ep config */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
	/* ep interface */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
	/* ep alt */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
	/* write reg */
	writel(tmp, &dev->csr->ne[udc_csr_epix]);

	/* enable ep irq */
	tmp = readl(&dev->regs->ep_irqmsk);
	tmp &= AMD_UNMASK_BIT(ep->num);
	writel(tmp, &dev->regs->ep_irqmsk);

	/*
	 * clear NAK by writing CNAK
	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
	 */
	if (!use_dma || ep->in) {
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(tmp, &ep->regs->ctl);
		ep->naking = 0;
		UDC_QUEUE_CNAK(ep, ep->num);
	}
	/* NOTE(review): dead store -- tmp is never read after this line */
	tmp = desc->bEndpointAddress;
	DBG(dev, "%s enabled\n", usbep->name);

	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}
503
504 /* Resets endpoint */
505 static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
506 {
507         u32             tmp;
508
509         VDBG(ep->dev, "ep-%d reset\n", ep->num);
510         ep->ep.desc = NULL;
511         ep->ep.ops = &udc_ep_ops;
512         INIT_LIST_HEAD(&ep->queue);
513
514         usb_ep_set_maxpacket_limit(&ep->ep,(u16) ~0);
515         /* set NAK */
516         tmp = readl(&ep->regs->ctl);
517         tmp |= AMD_BIT(UDC_EPCTL_SNAK);
518         writel(tmp, &ep->regs->ctl);
519         ep->naking = 1;
520
521         /* disable interrupt */
522         tmp = readl(&regs->ep_irqmsk);
523         tmp |= AMD_BIT(ep->num);
524         writel(tmp, &regs->ep_irqmsk);
525
526         if (ep->in) {
527                 /* unset P and IN bit of potential former DMA */
528                 tmp = readl(&ep->regs->ctl);
529                 tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
530                 writel(tmp, &ep->regs->ctl);
531
532                 tmp = readl(&ep->regs->sts);
533                 tmp |= AMD_BIT(UDC_EPSTS_IN);
534                 writel(tmp, &ep->regs->sts);
535
536                 /* flush the fifo */
537                 tmp = readl(&ep->regs->ctl);
538                 tmp |= AMD_BIT(UDC_EPCTL_F);
539                 writel(tmp, &ep->regs->ctl);
540
541         }
542         /* reset desc pointer */
543         writel(0, &ep->regs->desptr);
544 }
545
546 /* Disables endpoint, is called by gadget driver */
547 static int udc_ep_disable(struct usb_ep *usbep)
548 {
549         struct udc_ep   *ep = NULL;
550         unsigned long   iflags;
551
552         if (!usbep)
553                 return -EINVAL;
554
555         ep = container_of(usbep, struct udc_ep, ep);
556         if (usbep->name == ep0_string || !ep->ep.desc)
557                 return -EINVAL;
558
559         DBG(ep->dev, "Disable ep-%d\n", ep->num);
560
561         spin_lock_irqsave(&ep->dev->lock, iflags);
562         udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
563         empty_req_queue(ep);
564         ep_init(ep->dev->regs, ep);
565         spin_unlock_irqrestore(&ep->dev->lock, iflags);
566
567         return 0;
568 }
569
570 /* Allocates request packet, called by gadget driver */
571 static struct usb_request *
572 udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
573 {
574         struct udc_request      *req;
575         struct udc_data_dma     *dma_desc;
576         struct udc_ep   *ep;
577
578         if (!usbep)
579                 return NULL;
580
581         ep = container_of(usbep, struct udc_ep, ep);
582
583         VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
584         req = kzalloc(sizeof(struct udc_request), gfp);
585         if (!req)
586                 return NULL;
587
588         req->req.dma = DMA_DONT_USE;
589         INIT_LIST_HEAD(&req->queue);
590
591         if (ep->dma) {
592                 /* ep0 in requests are allocated from data pool here */
593                 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
594                                                 &req->td_phys);
595                 if (!dma_desc) {
596                         kfree(req);
597                         return NULL;
598                 }
599
600                 VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
601                                 "td_phys = %lx\n",
602                                 req, dma_desc,
603                                 (unsigned long)req->td_phys);
604                 /* prevent from using desc. - set HOST BUSY */
605                 dma_desc->status = AMD_ADDBITS(dma_desc->status,
606                                                 UDC_DMA_STP_STS_BS_HOST_BUSY,
607                                                 UDC_DMA_STP_STS_BS);
608                 dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
609                 req->td_data = dma_desc;
610                 req->td_data_last = NULL;
611                 req->chain_len = 1;
612         }
613
614         return &req->req;
615 }
616
617 /* Frees request packet, called by gadget driver */
618 static void
619 udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
620 {
621         struct udc_ep   *ep;
622         struct udc_request      *req;
623
624         if (!usbep || !usbreq)
625                 return;
626
627         ep = container_of(usbep, struct udc_ep, ep);
628         req = container_of(usbreq, struct udc_request, req);
629         VDBG(ep->dev, "free_req req=%p\n", req);
630         BUG_ON(!list_empty(&req->queue));
631         if (req->td_data) {
632                 VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
633
634                 /* free dma chain if created */
635                 if (req->chain_len > 1)
636                         udc_free_dma_chain(ep->dev, req);
637
638                 pci_pool_free(ep->dev->data_requests, req->td_data,
639                                                         req->td_phys);
640         }
641         kfree(req);
642 }
643
/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* set next pointer to itself, so the chain terminates here */
		req->td_data->next = req->td_phys;
		/*
		 * Mark buffer status as DMA_DONE.  NOTE(review): the comment
		 * originally said "set HOST BUSY", but the value written is
		 * UDC_DMA_STP_STS_BS_DMA_DONE -- confirm which state the
		 * hardware expects for the BNA dummy descriptor.
		 */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
#endif
	}
}
663
664 /* Allocate BNA dummy descriptor */
665 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
666 {
667         struct udc_request *req = NULL;
668         struct usb_request *_req = NULL;
669
670         /* alloc the dummy request */
671         _req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
672         if (_req) {
673                 req = container_of(_req, struct udc_request, req);
674                 ep->bna_dummy_req = req;
675                 udc_init_bna_dummy(req);
676         }
677         return req;
678 }
679
680 /* Write data to TX fifo for IN packets */
681 static void
682 udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
683 {
684         u8                      *req_buf;
685         u32                     *buf;
686         int                     i, j;
687         unsigned                bytes = 0;
688         unsigned                remaining = 0;
689
690         if (!req || !ep)
691                 return;
692
693         req_buf = req->buf + req->actual;
694         prefetch(req_buf);
695         remaining = req->length - req->actual;
696
697         buf = (u32 *) req_buf;
698
699         bytes = ep->ep.maxpacket;
700         if (bytes > remaining)
701                 bytes = remaining;
702
703         /* dwords first */
704         for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
705                 writel(*(buf + i), ep->txfifo);
706
707         /* remaining bytes must be written by byte access */
708         for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
709                 writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
710                                                         ep->txfifo);
711         }
712
713         /* dummy write confirm */
714         writel(0, &ep->regs->confirm);
715 }
716
717 /* Read dwords from RX fifo for OUT transfers */
718 static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
719 {
720         int i;
721
722         VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);
723
724         for (i = 0; i < dwords; i++)
725                 *(buf + i) = readl(dev->rxfifo);
726         return 0;
727 }
728
729 /* Read bytes from RX fifo for OUT transfers */
730 static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
731 {
732         int i, j;
733         u32 tmp;
734
735         VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);
736
737         /* dwords first */
738         for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
739                 *((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
740
741         /* remaining bytes must be read by byte access */
742         if (bytes % UDC_DWORD_BYTES) {
743                 tmp = readl(dev->rxfifo);
744                 for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
745                         *(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
746                         tmp = tmp >> UDC_BITS_PER_BYTE;
747                 }
748         }
749
750         return 0;
751 }
752
753 /* Read data from RX fifo for OUT transfers */
754 static int
755 udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
756 {
757         u8 *buf;
758         unsigned buf_space;
759         unsigned bytes = 0;
760         unsigned finished = 0;
761
762         /* received number bytes */
763         bytes = readl(&ep->regs->sts);
764         bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);
765
766         buf_space = req->req.length - req->req.actual;
767         buf = req->req.buf + req->req.actual;
768         if (bytes > buf_space) {
769                 if ((buf_space % ep->ep.maxpacket) != 0) {
770                         DBG(ep->dev,
771                                 "%s: rx %d bytes, rx-buf space = %d bytesn\n",
772                                 ep->ep.name, bytes, buf_space);
773                         req->req.status = -EOVERFLOW;
774                 }
775                 bytes = buf_space;
776         }
777         req->req.actual += bytes;
778
779         /* last packet ? */
780         if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
781                 || ((req->req.actual == req->req.length) && !req->req.zero))
782                 finished = 1;
783
784         /* read rx fifo bytes */
785         VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
786         udc_rxfifo_read_bytes(ep->dev, buf, bytes);
787
788         return finished;
789 }
790
/*
 * create/re-init a DMA descriptor or a DMA descriptor chain
 *
 * Points the first descriptor of @req at the already-mapped request
 * buffer and marks it as the last descriptor.  In packet-per-buffer
 * mode (use_dma_ppb) a chain of maxpacket-sized descriptors is
 * (re)built via udc_create_dma_chain().  For IN endpoints the TXBYTES
 * field is programmed and the first descriptor is left HOST BUSY (it
 * is handed to the hardware later); for OUT endpoints the descriptor
 * is marked HOST READY and a pending NAK is cleared so the host may
 * start sending.
 *
 * Returns 0 on success or the udc_create_dma_chain() error
 * (e.g. -ENOMEM when the descriptor pool is exhausted).
 */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int	retval = 0;
	u32	tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* exactly one full packet: write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);

			}
		}

	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY: descriptor not yet handed to hardware */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY: hardware may start receiving now */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);


			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

	}

	return retval;
}
879
/*
 * Completes request packet ... caller MUST hold lock
 *
 * Unmaps the DMA buffer (if the endpoint uses DMA), records the final
 * status, unlinks the request from the endpoint queue and invokes the
 * gadget driver's completion callback.  The device lock is dropped
 * around the callback (see the __releases/__acquires annotations)
 * because the callback may requeue requests; ep->halted is forced to 1
 * for the duration of the callback and restored afterwards.
 */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc		*dev;
	unsigned		halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);

	halted = ep->halted;
	/* block transfer starts while the completion callback runs */
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	/* callback may requeue: must not hold the lock across it */
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}
914
915 /* frees pci pool descriptors of a DMA chain */
916 static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
917 {
918
919         int ret_val = 0;
920         struct udc_data_dma     *td;
921         struct udc_data_dma     *td_last = NULL;
922         unsigned int i;
923
924         DBG(dev, "free chain req = %p\n", req);
925
926         /* do not free first desc., will be done by free for request */
927         td_last = req->td_data;
928         td = phys_to_virt(td_last->next);
929
930         for (i = 1; i < req->chain_len; i++) {
931
932                 pci_pool_free(dev->data_requests, td,
933                                 (dma_addr_t) td_last->next);
934                 td_last = td;
935                 td = phys_to_virt(td_last->next);
936         }
937
938         return ret_val;
939 }
940
941 /* Iterates to the end of a DMA chain and returns last descriptor */
942 static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
943 {
944         struct udc_data_dma     *td;
945
946         td = req->td_data;
947         while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
948                 td = phys_to_virt(td->next);
949
950         return td;
951
952 }
953
954 /* Iterates to the end of a DMA chain and counts bytes received */
955 static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
956 {
957         struct udc_data_dma     *td;
958         u32 count;
959
960         td = req->td_data;
961         /* received number bytes */
962         count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);
963
964         while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
965                 td = phys_to_virt(td->next);
966                 /* received number bytes */
967                 if (td) {
968                         count += AMD_GETBITS(td->status,
969                                 UDC_DMA_OUT_STS_RXBYTES);
970                 }
971         }
972
973         return count;
974
975 }
976
/*
 * Creates or re-inits a DMA chain
 *
 * Splits the request buffer into buf_len sized pieces with one DMA
 * descriptor per piece.  New descriptors are allocated from the pci
 * pool only when the required chain is longer than the one already
 * attached to the request (create_new_chain); otherwise the existing
 * descriptors are re-initialized and reused.  For IN endpoints the
 * per-descriptor TXBYTES field is programmed; the final piece may be
 * a short packet.  The L (last) bit is set on the final descriptor.
 *
 * Returns 0 on success, -ENOMEM when the descriptor pool is empty.
 *
 * NOTE(review): on -ENOMEM the descriptors allocated so far are not
 * freed here and req->chain_len has already been raised to the new
 * length, so a later udc_free_dma_chain() may walk a partially linked
 * chain — verify the error path before relying on it.
 */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma	*td = NULL;
	struct udc_data_dma	*last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
			bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in)
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket)
		len++;

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {

			td = pci_pool_alloc(ep->dev->data_requests,
					gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td of an already allocated chain */
			td = (struct udc_data_dma *) phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			/* follow the existing link from the previous td */
			td = (struct udc_data_dma *) phys_to_virt(last->next);
			td->status = 0;
		}


		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain)
				req->td_data->next = dma_addr;
			/*
			else
				req->td_data->next = virt_to_phys(td);
			*/
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
							ep->ep.maxpacket,
							UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain)
				last->next = dma_addr;
			/*
			else
				last->next = virt_to_phys(td);
			*/
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}
1095
1096 /* Enabling RX DMA */
1097 static void udc_set_rde(struct udc *dev)
1098 {
1099         u32 tmp;
1100
1101         VDBG(dev, "udc_set_rde()\n");
1102         /* stop RDE timer */
1103         if (timer_pending(&udc_timer)) {
1104                 set_rde = 0;
1105                 mod_timer(&udc_timer, jiffies - 1);
1106         }
1107         /* set RDE */
1108         tmp = readl(&dev->regs->ctl);
1109         tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1110         writel(tmp, &dev->regs->ctl);
1111 }
1112
1113 /* Queues a request packet, called by gadget driver */
1114 static int
1115 udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
1116 {
1117         int                     retval = 0;
1118         u8                      open_rxfifo = 0;
1119         unsigned long           iflags;
1120         struct udc_ep           *ep;
1121         struct udc_request      *req;
1122         struct udc              *dev;
1123         u32                     tmp;
1124
1125         /* check the inputs */
1126         req = container_of(usbreq, struct udc_request, req);
1127
1128         if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
1129                         || !list_empty(&req->queue))
1130                 return -EINVAL;
1131
1132         ep = container_of(usbep, struct udc_ep, ep);
1133         if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
1134                 return -EINVAL;
1135
1136         VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
1137         dev = ep->dev;
1138
1139         if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
1140                 return -ESHUTDOWN;
1141
1142         /* map dma (usually done before) */
1143         if (ep->dma) {
1144                 VDBG(dev, "DMA map req %p\n", req);
1145                 retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
1146                 if (retval)
1147                         return retval;
1148         }
1149
1150         VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
1151                         usbep->name, usbreq, usbreq->length,
1152                         req->td_data, usbreq->buf);
1153
1154         spin_lock_irqsave(&dev->lock, iflags);
1155         usbreq->actual = 0;
1156         usbreq->status = -EINPROGRESS;
1157         req->dma_done = 0;
1158
1159         /* on empty queue just do first transfer */
1160         if (list_empty(&ep->queue)) {
1161                 /* zlp */
1162                 if (usbreq->length == 0) {
1163                         /* IN zlp's are handled by hardware */
1164                         complete_req(ep, req, 0);
1165                         VDBG(dev, "%s: zlp\n", ep->ep.name);
1166                         /*
1167                          * if set_config or set_intf is waiting for ack by zlp
1168                          * then set CSR_DONE
1169                          */
1170                         if (dev->set_cfg_not_acked) {
1171                                 tmp = readl(&dev->regs->ctl);
1172                                 tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
1173                                 writel(tmp, &dev->regs->ctl);
1174                                 dev->set_cfg_not_acked = 0;
1175                         }
1176                         /* setup command is ACK'ed now by zlp */
1177                         if (dev->waiting_zlp_ack_ep0in) {
1178                                 /* clear NAK by writing CNAK in EP0_IN */
1179                                 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1180                                 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1181                                 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1182                                 dev->ep[UDC_EP0IN_IX].naking = 0;
1183                                 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
1184                                                         UDC_EP0IN_IX);
1185                                 dev->waiting_zlp_ack_ep0in = 0;
1186                         }
1187                         goto finished;
1188                 }
1189                 if (ep->dma) {
1190                         retval = prep_dma(ep, req, GFP_ATOMIC);
1191                         if (retval != 0)
1192                                 goto finished;
1193                         /* write desc pointer to enable DMA */
1194                         if (ep->in) {
1195                                 /* set HOST READY */
1196                                 req->td_data->status =
1197                                         AMD_ADDBITS(req->td_data->status,
1198                                                 UDC_DMA_IN_STS_BS_HOST_READY,
1199                                                 UDC_DMA_IN_STS_BS);
1200                         }
1201
1202                         /* disabled rx dma while descriptor update */
1203                         if (!ep->in) {
1204                                 /* stop RDE timer */
1205                                 if (timer_pending(&udc_timer)) {
1206                                         set_rde = 0;
1207                                         mod_timer(&udc_timer, jiffies - 1);
1208                                 }
1209                                 /* clear RDE */
1210                                 tmp = readl(&dev->regs->ctl);
1211                                 tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
1212                                 writel(tmp, &dev->regs->ctl);
1213                                 open_rxfifo = 1;
1214
1215                                 /*
1216                                  * if BNA occurred then let BNA dummy desc.
1217                                  * point to current desc.
1218                                  */
1219                                 if (ep->bna_occurred) {
1220                                         VDBG(dev, "copy to BNA dummy desc.\n");
1221                                         memcpy(ep->bna_dummy_req->td_data,
1222                                                 req->td_data,
1223                                                 sizeof(struct udc_data_dma));
1224                                 }
1225                         }
1226                         /* write desc pointer */
1227                         writel(req->td_phys, &ep->regs->desptr);
1228
1229                         /* clear NAK by writing CNAK */
1230                         if (ep->naking) {
1231                                 tmp = readl(&ep->regs->ctl);
1232                                 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1233                                 writel(tmp, &ep->regs->ctl);
1234                                 ep->naking = 0;
1235                                 UDC_QUEUE_CNAK(ep, ep->num);
1236                         }
1237
1238                         if (ep->in) {
1239                                 /* enable ep irq */
1240                                 tmp = readl(&dev->regs->ep_irqmsk);
1241                                 tmp &= AMD_UNMASK_BIT(ep->num);
1242                                 writel(tmp, &dev->regs->ep_irqmsk);
1243                         }
1244                 } else if (ep->in) {
1245                                 /* enable ep irq */
1246                                 tmp = readl(&dev->regs->ep_irqmsk);
1247                                 tmp &= AMD_UNMASK_BIT(ep->num);
1248                                 writel(tmp, &dev->regs->ep_irqmsk);
1249                         }
1250
1251         } else if (ep->dma) {
1252
1253                 /*
1254                  * prep_dma not used for OUT ep's, this is not possible
1255                  * for PPB modes, because of chain creation reasons
1256                  */
1257                 if (ep->in) {
1258                         retval = prep_dma(ep, req, GFP_ATOMIC);
1259                         if (retval != 0)
1260                                 goto finished;
1261                 }
1262         }
1263         VDBG(dev, "list_add\n");
1264         /* add request to ep queue */
1265         if (req) {
1266
1267                 list_add_tail(&req->queue, &ep->queue);
1268
1269                 /* open rxfifo if out data queued */
1270                 if (open_rxfifo) {
1271                         /* enable DMA */
1272                         req->dma_going = 1;
1273                         udc_set_rde(dev);
1274                         if (ep->num != UDC_EP0OUT_IX)
1275                                 dev->data_ep_queued = 1;
1276                 }
1277                 /* stop OUT naking */
1278                 if (!ep->in) {
1279                         if (!use_dma && udc_rxfifo_pending) {
1280                                 DBG(dev, "udc_queue(): pending bytes in "
1281                                         "rxfifo after nyet\n");
1282                                 /*
1283                                  * read pending bytes afer nyet:
1284                                  * referring to isr
1285                                  */
1286                                 if (udc_rxfifo_read(ep, req)) {
1287                                         /* finish */
1288                                         complete_req(ep, req, 0);
1289                                 }
1290                                 udc_rxfifo_pending = 0;
1291
1292                         }
1293                 }
1294         }
1295
1296 finished:
1297         spin_unlock_irqrestore(&dev->lock, iflags);
1298         return retval;
1299 }
1300
1301 /* Empty request queue of an endpoint; caller holds spinlock */
1302 static void empty_req_queue(struct udc_ep *ep)
1303 {
1304         struct udc_request      *req;
1305
1306         ep->halted = 1;
1307         while (!list_empty(&ep->queue)) {
1308                 req = list_entry(ep->queue.next,
1309                         struct udc_request,
1310                         queue);
1311                 complete_req(ep, req, -ESHUTDOWN);
1312         }
1313 }
1314
/*
 * Dequeues a request packet, called by gadget driver
 *
 * If the request is at the head of the queue and its DMA is running,
 * the transfer is cancelled: for IN endpoints the cancel is deferred
 * to the ISR; for OUT endpoints receive DMA is paused (RDE cleared)
 * and, if the hardware already touched the descriptor, the cancel is
 * likewise deferred, otherwise the endpoint is re-pointed at the BNA
 * dummy descriptor.  The request is then completed with -ECONNRESET.
 *
 * Returns 0 on success, -EINVAL on bad endpoint/request.
 */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned		halted;
	unsigned long		iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				/* restore previous RDE state */
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}
1368
/*
 * Halt or clear halt of endpoint
 *
 * halt != 0: stalls the endpoint.  For ep0 only a flag is set (the
 * stall is issued elsewhere); for other endpoints the STALL bit is
 * written and the poll-stall timer is started, which clears the bit
 * again once the host acknowledged the stall.
 * halt == 0: clears a previously set stall and un-NAKs the endpoint.
 *
 * Returns 0 on success, -EINVAL on bad endpoint, -ESHUTDOWN when no
 * gadget driver is bound or the link is down.
 */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep	*ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}
1431
1432 /* gadget interface */
1433 static const struct usb_ep_ops udc_ep_ops = {
1434         .enable         = udc_ep_enable,
1435         .disable        = udc_ep_disable,
1436
1437         .alloc_request  = udc_alloc_request,
1438         .free_request   = udc_free_request,
1439
1440         .queue          = udc_queue,
1441         .dequeue        = udc_dequeue,
1442
1443         .set_halt       = udc_set_halt,
1444         /* fifo ops not implemented */
1445 };
1446
1447 /*-------------------------------------------------------------------------*/
1448
/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	/* frame counting is not supported by this driver */
	return -EOPNOTSUPP;
}
1454
1455 /* Remote wakeup gadget interface */
1456 static int udc_wakeup(struct usb_gadget *gadget)
1457 {
1458         struct udc              *dev;
1459
1460         if (!gadget)
1461                 return -EINVAL;
1462         dev = container_of(gadget, struct udc, gadget);
1463         udc_remote_wakeup(dev);
1464
1465         return 0;
1466 }
1467
1468 static int amd5536_udc_start(struct usb_gadget *g,
1469                 struct usb_gadget_driver *driver);
1470 static int amd5536_udc_stop(struct usb_gadget *g);
1471
1472 static const struct usb_gadget_ops udc_ops = {
1473         .wakeup         = udc_wakeup,
1474         .get_frame      = udc_get_frame,
1475         .udc_start      = amd5536_udc_start,
1476         .udc_stop       = amd5536_udc_stop,
1477 };
1478
1479 /* Setups endpoint parameters, adds endpoints to linked list */
1480 static void make_ep_lists(struct udc *dev)
1481 {
1482         /* make gadget ep lists */
1483         INIT_LIST_HEAD(&dev->gadget.ep_list);
1484         list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
1485                                                 &dev->gadget.ep_list);
1486         list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
1487                                                 &dev->gadget.ep_list);
1488         list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
1489                                                 &dev->gadget.ep_list);
1490
1491         /* fifo config */
1492         dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
1493         if (dev->gadget.speed == USB_SPEED_FULL)
1494                 dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
1495         else if (dev->gadget.speed == USB_SPEED_HIGH)
1496                 dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
1497         dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
1498 }
1499
1500 /* init registers at driver load time */
1501 static int startup_registers(struct udc *dev)
1502 {
1503         u32 tmp;
1504
1505         /* init controller by soft reset */
1506         udc_soft_reset(dev);
1507
1508         /* mask not needed interrupts */
1509         udc_mask_unused_interrupts(dev);
1510
1511         /* put into initial config */
1512         udc_basic_init(dev);
1513         /* link up all endpoints */
1514         udc_setup_endpoints(dev);
1515
1516         /* program speed */
1517         tmp = readl(&dev->regs->cfg);
1518         if (use_fullspeed)
1519                 tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1520         else
1521                 tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
1522         writel(tmp, &dev->regs->cfg);
1523
1524         return 0;
1525 }
1526
/*
 * Inits UDC context
 *
 * Stops the RDE and poll-stall timers, disables both DMA engines,
 * enables dynamic CSR programming, marks the device self powered and
 * remote-wakeup capable, then (re)builds the gadget endpoint lists.
 * Resets the enumerated speed to unknown.
 */
static void udc_basic_init(struct udc *dev)
{
	u32	tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		/* jiffies - 1 makes the timer expire immediately */
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer))
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}
1564
/*
 * Sets initial endpoint parameters.
 *
 * Reads the enumerated speed out of the device status register, then
 * (re)initializes the per-endpoint software state: name/caps from the
 * static ep_info table, fifo depth and direction from the index (IN
 * endpoints occupy indices below UDC_EPIN_NUM), and the register base.
 * Finally programs ep0's max packet limits according to the enumerated
 * speed and resets the current config/interface/alt-setting bookkeeping.
 *
 * NOTE(review): no locking is taken here -- presumably callers (reset /
 * ENUM interrupt paths) already hold dev->lock; confirm against callers
 * outside this view.
 */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep	*ep;
	u32	tmp;
	u32	reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
		dev->gadget.speed = USB_SPEED_HIGH;
	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
		dev->gadget.speed = USB_SPEED_FULL;
	/* any other speed code leaves dev->gadget.speed untouched */

	/* set basic ep parameters; tmp is reused as the ep index here */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_info[tmp].name;
		ep->ep.caps = ep_info[tmp].caps;
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		/* fifo size */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;

		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->ep.desc)
			ep_init(dev->regs, ep);

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
						&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;

			}
		}
	}
	/* EP0 max packet: speed-dependent limits for both ep0 directions */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_FS_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_FS_EP0OUT_MAX_PKT_SIZE);
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_EP0OUT_MAX_PKT_SIZE);
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}
1656
/*
 * Bringup after Connect event, initial bringup to be ready for ep0 events.
 *
 * Marks the device connected, puts the controller back into its initial
 * configuration and unmasks the device-level setup interrupts so SETUP
 * packets on ep0 can be handled.
 *
 * NOTE(review): no explicit locking here -- presumably serialized by the
 * caller (ISR or bind path); confirm against callers outside this view.
 */
static void usb_connect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}
1671
/*
 * Calls gadget with disconnect event and resets the UDC and makes
 * initial bringup to be ready for ep0 events.
 *
 * Marks the device disconnected and masks all interrupts that are not
 * needed; the actual gadget-driver notification and controller reset are
 * deferred to udc_tasklet_disconnect() via the disconnect tasklet.
 */
static void usb_disconnect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	/* REVISIT there doesn't seem to be a point to having this
	 * talk to a tasklet ... do it directly, we already hold
	 * the spinlock needed to process the disconnect.
	 */

	tasklet_schedule(&disconnect_tasklet);
}
1693
/*
 * Tasklet for disconnect to be outside of interrupt context.
 *
 * @par: pointer to a (struct udc *) cell holding the device (the tasklet
 *       data is the address of the global udc pointer, double-dereferenced
 *       here).
 *
 * Notifies the bound gadget driver of the disconnect (dropping the lock
 * around the callback, as gadget callbacks may sleep or re-enter the
 * driver), flushes all request queues, disables ep0, soft-resets the
 * controller once, re-enables setup interrupts and optionally forces the
 * controller back to full speed.
 *
 * NOTE(review): dev->driver->disconnect is called without a NULL check on
 * the ->disconnect member -- presumably every bound driver provides one;
 * confirm.  Also note the lock is dropped with plain spin_unlock() while
 * it was taken with spin_lock_irq(); irqs stay disabled across the
 * callback, which appears intentional here.
 */
static void udc_tasklet_disconnect(unsigned long par)
{
	struct udc *dev = (struct udc *)(*((struct udc **) par));
	u32 tmp;

	DBG(dev, "Tasklet disconnect\n");
	spin_lock_irq(&dev->lock);

	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
			empty_req_queue(&dev->ep[tmp]);

	}

	/* disable ep0 */
	ep_init(dev->regs,
			&dev->ep[UDC_EP0IN_IX]);


	if (!soft_reset_occured) {
		/* init controller by soft reset */
		udc_soft_reset(dev);
		soft_reset_occured++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}

	spin_unlock_irq(&dev->lock);
}
1736
/*
 * Reset the UDC core.
 *
 * Clears any pending endpoint and device interrupt status first (status
 * is lost across a soft reset anyway), then pulses the SOFTRESET bit in
 * the config register under udc_irq_spinlock.  The readl() after the
 * writel() flushes the posted write so the reset is actually issued
 * before the lock is released.
 */
static void udc_soft_reset(struct udc *dev)
{
	unsigned long	flags;

	DBG(dev, "Soft reset\n");
	/*
	 * reset possible waiting interrupts, because int.
	 * status is lost after soft reset,
	 * ep int. status reset
	 */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
	/* device int. status reset */
	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);

	spin_lock_irqsave(&udc_irq_spinlock, flags);
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	readl(&dev->regs->cfg);	/* flush posted write */
	spin_unlock_irqrestore(&udc_irq_spinlock, flags);

}
1758
/*
 * RDE timer callback to set RDE bit.
 *
 * @v: unused (legacy timer-callback argument); operates on the global
 *     udc device and the global set_rde / stop_timer state.
 *
 * State machine on the global set_rde flag:
 *   set_rde > 1  : open the RX fifo now (set DEVCTL.RDE), mark done (-1)
 *   set_rde == 1 : fifo empty -> poll again shortly; fifo has data ->
 *                  bump set_rde and re-arm with a long timeout so RDE is
 *                  set later (or forced early by udc_queue())
 *   set_rde <= 0 : RDE was already set by udc_queue(); just reset to -1.
 * Signals on_exit when stop_timer is set so module teardown can wait for
 * the timer to finish.
 */
static void udc_timer_function(unsigned long v)
{
	u32 tmp;

	spin_lock_irq(&udc_irq_spinlock);

	if (set_rde > 0) {
		/*
		 * open the fifo if fifo was filled on last timer call
		 * conditionally
		 */
		if (set_rde > 1) {
			/* set RDE to receive setup data */
			tmp = readl(&udc->regs->ctl);
			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
			writel(tmp, &udc->regs->ctl);
			set_rde = -1;
		} else if (readl(&udc->regs->sts)
				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			/*
			 * if fifo empty setup polling, do not just
			 * open the fifo
			 */
			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
			if (!stop_timer)
				add_timer(&udc_timer);
		} else {
			/*
			 * fifo contains data now, setup timer for opening
			 * the fifo when timer expires to be able to receive
			 * setup packets, when data packets gets queued by
			 * gadget layer then timer will forced to expire with
			 * set_rde=0 (RDE is set in udc_queue())
			 */
			set_rde++;
			/* debug: lhadmot_timer_start = 221070 */
			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
			if (!stop_timer)
				add_timer(&udc_timer);
		}

	} else
		set_rde = -1; /* RDE was set by udc_queue() */
	spin_unlock_irq(&udc_irq_spinlock);
	if (stop_timer)
		complete(&on_exit);

}
1808
1809 /* Handle halt state, used in stall poll timer */
1810 static void udc_handle_halt_state(struct udc_ep *ep)
1811 {
1812         u32 tmp;
1813         /* set stall as long not halted */
1814         if (ep->halted == 1) {
1815                 tmp = readl(&ep->regs->ctl);
1816                 /* STALL cleared ? */
1817                 if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
1818                         /*
1819                          * FIXME: MSC spec requires that stall remains
1820                          * even on receivng of CLEAR_FEATURE HALT. So
1821                          * we would set STALL again here to be compliant.
1822                          * But with current mass storage drivers this does
1823                          * not work (would produce endless host retries).
1824                          * So we clear halt on CLEAR_FEATURE.
1825                          *
1826                         DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
1827                         tmp |= AMD_BIT(UDC_EPCTL_S);
1828                         writel(tmp, &ep->regs->ctl);*/
1829
1830                         /* clear NAK by writing CNAK */
1831                         tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1832                         writel(tmp, &ep->regs->ctl);
1833                         ep->halted = 0;
1834                         UDC_QUEUE_CNAK(ep, ep->num);
1835                 }
1836         }
1837 }
1838
/*
 * Stall timer callback to poll S bit and set it again after.
 *
 * @v: unused (legacy timer-callback argument); operates on the global udc.
 *
 * Only one IN endpoint (UDC_EPIN_IX) and one OUT endpoint (UDC_EPOUT_IX)
 * are polled.  As long as either endpoint remains halted the timer is
 * re-armed with a UDC_POLLSTALL_TIMER_USECONDS period; once stopping is
 * requested, on_pollstall_exit is completed so teardown can wait for the
 * timer to finish.
 */
static void udc_pollstall_timer_function(unsigned long v)
{
	struct udc_ep *ep;
	int halted = 0;

	spin_lock_irq(&udc_stall_spinlock);
	/*
	 * only one IN and OUT endpoints are handled
	 * IN poll stall
	 */
	ep = &udc->ep[UDC_EPIN_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;
	/* OUT poll stall */
	ep = &udc->ep[UDC_EPOUT_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;

	/* setup timer again when still halted */
	if (!stop_pollstall_timer && halted) {
		udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
		add_timer(&udc_pollstall_timer);
	}
	spin_unlock_irq(&udc_stall_spinlock);

	if (stop_pollstall_timer)
		complete(&on_pollstall_exit);
}
1872
/*
 * Inits endpoint 0 so that SETUP packets are processed.
 *
 * Flushes the ep0-IN fifo, programs the speed-dependent ep0 buffer and
 * max-packet sizes (both in the endpoint registers and in UDC CSR ne[0]),
 * and, in DMA mode, installs the ep0-OUT setup/data descriptors, cancels
 * any pending RDE/pollstall timers and enables DMA in the device control
 * register.  Finally clears NAK (CNAK) on both ep0 directions so control
 * traffic can flow.
 *
 * NOTE(review): if the enumerated speed is neither FULL nor HIGH the
 * speed-dependent register values are left unmodified -- presumably the
 * hardware defaults apply in that case; confirm.
 */
static void activate_control_endpoints(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "activate_control_endpoints\n");

	/* flush fifo */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_F);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);

	/* set ep0 directions */
	dev->ep[UDC_EP0IN_IX].in = 1;
	dev->ep[UDC_EP0OUT_IX].in = 0;

	/* set buffer size (tx fifo entries) of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);

	/* set max packet size of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
				UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0_OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0 in UDC CSR */
	tmp = readl(&dev->csr->ne[0]);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	writel(tmp, &dev->csr->ne[0]);

	if (use_dma) {
		/* mark ep0-OUT data descriptor as last */
		dev->ep[UDC_EP0OUT_IX].td->status |=
			AMD_BIT(UDC_DMA_OUT_STS_L);
		/* write dma desc address */
		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
		/* stop RDE timer */
		if (timer_pending(&udc_timer)) {
			set_rde = 0;
			mod_timer(&udc_timer, jiffies - 1);
		}
		/* stop pollstall timer */
		if (timer_pending(&udc_pollstall_timer))
			mod_timer(&udc_pollstall_timer, jiffies - 1);
		/* enable DMA */
		tmp = readl(&dev->regs->ctl);
		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
				| AMD_BIT(UDC_DEVCTL_RDE)
				| AMD_BIT(UDC_DEVCTL_TDE);
		if (use_dma_bufferfill_mode)
			tmp |= AMD_BIT(UDC_DEVCTL_BF);
		else if (use_dma_ppb_du)
			tmp |= AMD_BIT(UDC_DEVCTL_DU);
		writel(tmp, &dev->regs->ctl);
	}

	/* clear NAK by writing CNAK for EP0IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
	dev->ep[UDC_EP0IN_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

	/* clear NAK by writing CNAK for EP0OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
	dev->ep[UDC_EP0OUT_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
}
1971
/*
 * Make endpoint 0 ready for control traffic.
 *
 * Activates both ep0 directions and unmasks the ep0 and device-level
 * setup interrupts.
 *
 * Returns: always 0 (kept for call-site uniformity).
 */
static int setup_ep0(struct udc *dev)
{
	activate_control_endpoints(dev);
	/* enable ep0 interrupts */
	udc_enable_ep0_interrupts(dev);
	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);

	return 0;
}
1983
/*
 * Called by gadget driver to register itself (usb_gadget_ops.udc_start).
 *
 * @g:      the gadget exposed by this UDC
 * @driver: gadget function driver being bound
 *
 * Stores the driver, prepares ep0 for control traffic, clears the
 * soft-disconnect (SD) bit so the host sees the device, and runs the
 * connect bringup.
 *
 * Returns: always 0.
 */
static int amd5536_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct udc *dev = to_amd5536_udc(g);
	u32 tmp;

	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Some gadget drivers use both ep0 directions.
	 * NOTE: to gadget driver, ep0 is just one endpoint...
	 */
	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
		dev->ep[UDC_EP0IN_IX].ep.driver_data;

	/* get ready for ep0 traffic */
	setup_ep0(dev);

	/* clear SD (soft disconnect) so the pullup becomes visible */
	tmp = readl(&dev->regs->ctl);
	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	usb_connect(dev);

	return 0;
}
2012
/*
 * shutdown requests and disconnect from gadget.
 *
 * @dev:    the device being shut down
 * @driver: unused here (kept for the historical call signature; callers
 *          pass NULL)
 *
 * Re-initializes the hardware, completes/flushes every endpoint's request
 * queue and restores initial endpoint parameters.  Must be called with
 * dev->lock held; the sparse annotations document that empty_req_queue()
 * may temporarily release and reacquire it.
 */
static void
shutdown(struct udc *dev, struct usb_gadget_driver *driver)
__releases(dev->lock)
__acquires(dev->lock)
{
	int tmp;

	/* empty queues and init hardware */
	udc_basic_init(dev);

	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
		empty_req_queue(&dev->ep[tmp]);

	udc_setup_endpoints(dev);
}
2029
/*
 * Called by gadget driver to unregister itself (usb_gadget_ops.udc_stop).
 *
 * Masks unused interrupts and shuts down all endpoint activity under the
 * device lock, drops the driver reference, then sets the soft-disconnect
 * (SD) bit so the host sees the device go away.
 *
 * Returns: always 0.
 *
 * NOTE(review): dev->driver is cleared after the lock is released --
 * presumably no concurrent path dereferences it at this point; confirm.
 */
static int amd5536_udc_stop(struct usb_gadget *g)
{
	struct udc *dev = to_amd5536_udc(g);
	unsigned long flags;
	u32 tmp;

	spin_lock_irqsave(&dev->lock, flags);
	udc_mask_unused_interrupts(dev);
	shutdown(dev, NULL);
	spin_unlock_irqrestore(&dev->lock, flags);

	dev->driver = NULL;

	/* set SD (soft disconnect) */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	return 0;
}
2051
2052 /* Clear pending NAK bits */
2053 static void udc_process_cnak_queue(struct udc *dev)
2054 {
2055         u32 tmp;
2056         u32 reg;
2057
2058         /* check epin's */
2059         DBG(dev, "CNAK pending queue processing\n");
2060         for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
2061                 if (cnak_pending & (1 << tmp)) {
2062                         DBG(dev, "CNAK pending for ep%d\n", tmp);
2063                         /* clear NAK by writing CNAK */
2064                         reg = readl(&dev->ep[tmp].regs->ctl);
2065                         reg |= AMD_BIT(UDC_EPCTL_CNAK);
2066                         writel(reg, &dev->ep[tmp].regs->ctl);
2067                         dev->ep[tmp].naking = 0;
2068                         UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
2069                 }
2070         }
2071         /* ...  and ep0out */
2072         if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
2073                 DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
2074                 /* clear NAK by writing CNAK */
2075                 reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2076                 reg |= AMD_BIT(UDC_EPCTL_CNAK);
2077                 writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2078                 dev->ep[UDC_EP0OUT_IX].naking = 0;
2079                 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
2080                                 dev->ep[UDC_EP0OUT_IX].num);
2081         }
2082 }
2083
2084 /* Enabling RX DMA after setup packet */
2085 static void udc_ep0_set_rde(struct udc *dev)
2086 {
2087         if (use_dma) {
2088                 /*
2089                  * only enable RXDMA when no data endpoint enabled
2090                  * or data is queued
2091                  */
2092                 if (!dev->data_ep_enabled || dev->data_ep_queued) {
2093                         udc_set_rde(dev);
2094                 } else {
2095                         /*
2096                          * setup timer for enabling RDE (to not enable
2097                          * RXFIFO DMA for data endpoints to early)
2098                          */
2099                         if (set_rde != 0 && !timer_pending(&udc_timer)) {
2100                                 udc_timer.expires =
2101                                         jiffies + HZ/UDC_RDE_TIMER_DIV;
2102                                 set_rde = 1;
2103                                 if (!stop_timer)
2104                                         add_timer(&udc_timer);
2105                         }
2106                 }
2107         }
2108 }
2109
2110
/*
 * Interrupt handler for data OUT traffic.
 *
 * @dev:   the controller
 * @ep_ix: index of the OUT endpoint that raised the interrupt
 *
 * Handles, in order: BNA (buffer-not-available, DMA mode only) and HE
 * (host error) conditions; then request completion either by PIO fifo
 * read (!use_dma) or by inspecting the DMA descriptor status (buffer-fill
 * vs. packet-per-buffer modes).  On completion the next queued request's
 * DMA is started, or a BNA dummy descriptor is implanted and the RDE
 * timer armed so ep0 packets can still pass.  Pending CNAKs are processed
 * when the RX fifo is empty, and the OUT status bits are cleared last.
 *
 * Returns: IRQ_HANDLED if any condition was consumed, IRQ_NONE otherwise.
 *
 * NOTE(review): runs in hard-irq context; presumably dev->lock is held by
 * the top-level irq handler -- confirm against the caller outside this
 * view.
 */
static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t		ret_val = IRQ_NONE;
	u32			tmp;
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned int		count;
	struct udc_data_dma	*td = NULL;
	unsigned		dma_done;

	VDBG(dev, "ep%d irq\n", ep_ix);
	ep = &dev->ep[ep_ix];

	tmp = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA event ? */
		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
					ep->num, readl(&ep->regs->desptr));
			/* clear BNA */
			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
			/* a cancelled transfer expects a BNA; don't latch it */
			if (!ep->cancel_transfer)
				ep->bna_occurred = 1;
			else
				ep->cancel_transfer = 0;
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);

		/* clear HE */
		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	if (!list_empty(&ep->queue)) {

		/* next request */
		req = list_entry(ep->queue.next,
			struct udc_request, queue);
	} else {
		/* no request queued: remember that rx data is pending */
		req = NULL;
		udc_rxfifo_pending = 1;
	}
	VDBG(dev, "req = %p\n", req);
	/* fifo mode */
	if (!use_dma) {

		/* read fifo */
		if (req && udc_rxfifo_read(ep, req)) {
			ret_val = IRQ_HANDLED;

			/* finish */
			complete_req(ep, req, 0);
			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request, queue);
			} else
				req = NULL;
		}

	/* DMA */
	} else if (!ep->cancel_transfer && req != NULL) {
		ret_val = IRQ_HANDLED;

		/* check for DMA done */
		if (!use_dma_ppb) {
			dma_done = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_BS);
		/* packet per buffer mode - rx bytes */
		} else {
			/*
			 * if BNA occurred then recover desc. from
			 * BNA dummy desc.
			 */
			if (ep->bna_occurred) {
				VDBG(dev, "Recover desc. from BNA dummy\n");
				memcpy(req->td_data, ep->bna_dummy_req->td_data,
						sizeof(struct udc_data_dma));
				ep->bna_occurred = 0;
				udc_init_bna_dummy(ep->req);
			}
			td = udc_get_last_dma_desc(req);
			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
		}
		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
			/* buffer fill mode - rx bytes */
			if (!use_dma_ppb) {
				/* received number bytes */
				count = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_RXBYTES);
				VDBG(dev, "rx bytes=%u\n", count);
			/* packet per buffer mode - rx bytes */
			} else {
				VDBG(dev, "req->td_data=%p\n", req->td_data);
				VDBG(dev, "last desc = %p\n", td);
				/* received number bytes */
				if (use_dma_ppb_du) {
					/* every desc. counts bytes */
					count = udc_get_ppbdu_rxbytes(req);
				} else {
					/* last desc. counts bytes */
					count = AMD_GETBITS(td->status,
						UDC_DMA_OUT_STS_RXBYTES);
					if (!count && req->req.length
						== UDC_DMA_MAXPACKET) {
						/*
						 * on 64k packets the RXBYTES
						 * field is zero
						 */
						count = UDC_DMA_MAXPACKET;
					}
				}
				VDBG(dev, "last desc rx bytes=%u\n", count);
			}

			/* clamp to remaining buffer space; short-packet-
			 * misaligned overflow is reported to the gadget */
			tmp = req->req.length - req->req.actual;
			if (count > tmp) {
				if ((tmp % ep->ep.maxpacket) != 0) {
					DBG(dev, "%s: rx %db, space=%db\n",
						ep->ep.name, count, tmp);
					req->req.status = -EOVERFLOW;
				}
				count = tmp;
			}
			req->req.actual += count;
			req->dma_going = 0;
			/* complete request */
			complete_req(ep, req, 0);

			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request,
					queue);
				/*
				 * DMA may be already started by udc_queue()
				 * called by gadget drivers completion
				 * routine. This happens when queue
				 * holds one request only.
				 */
				if (req->dma_going == 0) {
					/* next dma */
					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
						goto finished;
					/* write desc pointer */
					writel(req->td_phys,
						&ep->regs->desptr);
					req->dma_going = 1;
					/* enable DMA */
					udc_set_rde(dev);
				}
			} else {
				/*
				 * implant BNA dummy descriptor to allow
				 * RXFIFO opening by RDE
				 */
				if (ep->bna_dummy_req) {
					/* write desc pointer */
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
					ep->bna_occurred = 0;
				}

				/*
				 * schedule timer for setting RDE if queue
				 * remains empty to allow ep0 packets pass
				 * through
				 */
				if (set_rde != 0
						&& !timer_pending(&udc_timer)) {
					udc_timer.expires =
						jiffies
						+ HZ*UDC_RDE_TIMER_SECONDS;
					set_rde = 1;
					if (!stop_timer)
						add_timer(&udc_timer);
				}
				if (ep->num != UDC_EP0OUT_IX)
					dev->data_ep_queued = 0;
			}

		} else {
			/*
			* RX DMA must be reenabled for each desc in PPBDU mode
			* and must be enabled for PPBNDU mode in case of BNA
			*/
			udc_set_rde(dev);
		}

	} else if (ep->cancel_transfer) {
		ret_val = IRQ_HANDLED;
		ep->cancel_transfer = 0;
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAk processing when rxfifo empty only */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
			udc_process_cnak_queue(dev);
	}

	/* clear OUT bits in ep status */
	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
finished:
	return ret_val;
}
2324
/*
 * Interrupt handler for data IN traffic (non-control IN endpoints).
 *
 * Processes the endpoint status register bits in this order:
 *  - BNA (buffer not available, DMA mode only): logged and cleared.
 *  - HE (host error): logged and cleared.
 *  - TDC (DMA transfer complete): completes the head request and masks
 *    the endpoint irq once the queue runs empty.
 *  - IN token (only when TDC is not also set; see comment below about
 *    the UDC defect): PIO-fills the TX FIFO, or arms the next DMA
 *    descriptor chain and sets the poll demand bit.
 *
 * @dev:   device controller state
 * @ep_ix: index of the IN endpoint in dev->ep[]
 *
 * Returns IRQ_HANDLED if any condition was processed, else IRQ_NONE.
 * NOTE(review): presumably called with dev->lock held by udc_irq(), as
 * the ep0 ISRs are - confirm against the caller.
 */
static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 epsts;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc_data_dma *td;
	unsigned dma_done;
	unsigned len;

	ep = &dev->ep[ep_ix];

	/* snapshot endpoint status once; individual bits handled below */
	epsts = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA (buffer not available) ? */
		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
			dev_err(&dev->pdev->dev,
				"BNA ep%din occurred - DESPTR = %08lx\n",
				ep->num,
				(unsigned long) readl(&ep->regs->desptr));

			/* clear BNA */
			writel(epsts, &ep->regs->sts);
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE (host error) event ? */
	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev,
			"HE ep%dn occurred - DESPTR = %08lx\n",
			ep->num, (unsigned long) readl(&ep->regs->desptr));

		/* clear HE */
		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* DMA completion */
	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "TDC set- completion\n");
		ret_val = IRQ_HANDLED;
		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/*
			 * length bytes transferred
			 * check dma done of last desc. in PPBDU mode
			 */
			if (use_dma_ppb_du) {
				td = udc_get_last_dma_desc(req);
				if (td) {
					/*
					 * buffer status of last desc. is
					 * read but deliberately ignored
					 */
					dma_done =
						AMD_GETBITS(td->status,
						UDC_DMA_IN_STS_BS);
					/* don't care DMA done */
					req->req.actual = req->req.length;
				}
			} else {
				/* assume all bytes transferred */
				req->req.actual = req->req.length;
			}

			if (req->req.actual == req->req.length) {
				/* complete req */
				complete_req(ep, req, 0);
				req->dma_going = 0;
				/* further request available ? */
				if (list_empty(&ep->queue)) {
					/* queue empty: disable ep interrupt */
					tmp = readl(&dev->regs->ep_irqmsk);
					tmp |= AMD_BIT(ep->num);
					writel(tmp, &dev->regs->ep_irqmsk);
				}
			}
		}
		ep->cancel_transfer = 0;

	}
	/*
	 * status reg has IN bit set and TDC not set (if TDC was handled,
	 * IN must not be handled (UDC defect) ?
	 */
	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
		ret_val = IRQ_HANDLED;
		if (!list_empty(&ep->queue)) {
			/* next request */
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/* FIFO mode */
			if (!use_dma) {
				/* write fifo */
				udc_txfifo_write(ep, &req->req);
				len = req->req.length - req->req.actual;
				if (len > ep->ep.maxpacket)
					len = ep->ep.maxpacket;
				req->req.actual += len;
				if (req->req.actual == req->req.length
					|| (len != ep->ep.maxpacket)) {
					/*
					 * complete req: all bytes written
					 * or short packet sent
					 */
					complete_req(ep, req, 0);
				}
			/* DMA */
			} else if (req && !req->dma_going) {
				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
					req, req->td_data);
				if (req->td_data) {

					req->dma_going = 1;

					/*
					 * unset L bit of first desc.
					 * for chain
					 */
					if (use_dma_ppb && req->req.length >
							ep->ep.maxpacket) {
						req->td_data->status &=
							AMD_CLEAR_BIT(
							UDC_DMA_IN_STS_L);
					}

					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);

					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);

					/* set poll demand bit to start DMA */
					tmp = readl(&ep->regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp, &ep->regs->ctl);
				}
			}

		} else if (!use_dma && ep->in) {
			/* nothing queued: disable ep interrupt */
			tmp = readl(
				&dev->regs->ep_irqmsk);
			tmp |= AMD_BIT(ep->num);
			writel(tmp,
				&dev->regs->ep_irqmsk);
		}
	}
	/* clear status bits */
	writel(epsts, &ep->regs->sts);

finished:
	return ret_val;

}
2483
/*
 * Interrupt handler for Control OUT traffic (ep0 OUT).
 *
 * Two packet types are reported in the ep status register:
 *  - SETUP: fetch the 8-byte setup packet (from the setup DMA descriptor
 *    or the RX FIFO), NAK ep0 IN, select the ep0 direction for the data
 *    stage, handle the mass-storage reset special case, then hand the
 *    request to the gadget driver's setup() callback (dev->lock is
 *    dropped around the callback, see __releases/__acquires).
 *  - DATA (0 bytes): handle zero-length packets / control-write data and
 *    re-arm RX DMA.
 *
 * Returns IRQ_HANDLED when a packet was processed, else IRQ_NONE.
 */
static irqreturn_t udc_control_out_isr(struct udc *dev)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	int setup_supported;
	u32 count;
	int set = 0;	/* 1 = OUT data stage expected: ep0 OUT stays NAKed */
	struct udc_ep	*ep;
	struct udc_ep	*ep_tmp;

	ep = &dev->ep[UDC_EP0OUT_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
	/* check BNA and clear if set */
	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
		VDBG(dev, "ep0: BNA set\n");
		writel(AMD_BIT(UDC_EPSTS_BNA),
			&dev->ep[UDC_EP0OUT_IX].regs->sts);
		ep->bna_occurred = 1;
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* type of data: SETUP or DATA 0 bytes */
	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
	VDBG(dev, "data_typ = %x\n", tmp);

	/* setup data */
	if (tmp == UDC_EPSTS_OUT_SETUP) {
		ret_val = IRQ_HANDLED;

		ep->dev->stall_ep0in = 0;
		dev->waiting_zlp_ack_ep0in = 0;

		/* set NAK for EP0_IN until the data stage direction is known */
		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		dev->ep[UDC_EP0IN_IX].naking = 1;
		/* get setup data */
		if (use_dma) {

			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);

			/* 8 setup bytes come from the setup descriptor */
			setup_data.data[0] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
			setup_data.data[1] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
			/* set HOST READY to hand desc. back to the UDC */
			dev->ep[UDC_EP0OUT_IX].td_stp->status =
					UDC_DMA_STP_STS_BS_HOST_READY;
		} else {
			/* read fifo */
			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
		}

		/* determine direction of control data */
		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
			/* enable RDE */
			udc_ep0_set_rde(dev);
			set = 0;
		} else {
			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
			/*
			 * implant BNA dummy descriptor to allow RXFIFO opening
			 * by RDE
			 */
			if (ep->bna_dummy_req) {
				/* write desc pointer */
				writel(ep->bna_dummy_req->td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				ep->bna_occurred = 0;
			}

			set = 1;
			dev->ep[UDC_EP0OUT_IX].naking = 1;
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data too early)
			 */
			set_rde = 1;
			if (!timer_pending(&udc_timer)) {
				udc_timer.expires = jiffies +
							HZ/UDC_RDE_TIMER_DIV;
				if (!stop_timer)
					add_timer(&udc_timer);
			}
		}

		/*
		 * mass storage reset must be processed here because
		 * next packet may be a CLEAR_FEATURE HALT which would not
		 * clear the stall bit when no STALL handshake was received
		 * before (autostall can cause this)
		 */
		if (setup_data.data[0] == UDC_MSCRES_DWORD0
				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
			DBG(dev, "MSC Reset\n");
			/*
			 * clear stall bits
			 * only one IN and OUT endpoints are handled
			 */
			ep_tmp = &udc->ep[UDC_EPIN_IX];
			udc_set_halt(&ep_tmp->ep, 0);
			ep_tmp = &udc->ep[UDC_EPOUT_IX];
			udc_set_halt(&ep_tmp->ep, 0);
		}

		/* call gadget with setup data received; drop lock for it */
		spin_unlock(&dev->lock);
		setup_supported = dev->driver->setup(&dev->gadget,
						&setup_data.request);
		spin_lock(&dev->lock);

		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		/* ep0 in returns data (not zlp) on IN phase */
		if (setup_supported >= 0 && setup_supported <
				UDC_EP0IN_MAXPACKET) {
			/* clear NAK by writing CNAK in EP0_IN */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
			dev->ep[UDC_EP0IN_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

		/* if unsupported request then stall */
		} else if (setup_supported < 0) {
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		} else
			dev->waiting_zlp_ack_ep0in = 1;


		/* clear NAK by writing CNAK in EP0_OUT (no OUT data stage) */
		if (!set) {
			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
			dev->ep[UDC_EP0OUT_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
		}

		if (!use_dma) {
			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);
		}

	/* data packet 0 bytes */
	} else if (tmp == UDC_EPSTS_OUT_DATA) {
		/* clear OUT bits in ep status */
		writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);

		/* get setup data: only 0 packet */
		if (use_dma) {
			/* no req if 0 packet, just reactivate */
			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
				VDBG(dev, "ZLP\n");

				/* set HOST READY to re-arm the descriptor */
				dev->ep[UDC_EP0OUT_IX].td->status =
					AMD_ADDBITS(
					dev->ep[UDC_EP0OUT_IX].td->status,
					UDC_DMA_OUT_STS_BS_HOST_READY,
					UDC_DMA_OUT_STS_BS);
				/* enable RDE */
				udc_ep0_set_rde(dev);
				ret_val = IRQ_HANDLED;

			} else {
				/* control write */
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
				/* re-program desc. pointer for possible ZLPs */
				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				/* enable RDE */
				udc_ep0_set_rde(dev);
			}
		} else {

			/* received number bytes */
			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
			/*
			 * out data for fifo mode not working:
			 * deliberately force the ZLP path below
			 */
			count = 0;

			/* 0 packet or real data ? (always 0 here, see above) */
			if (count != 0) {
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
			} else {
				/* dummy read confirm */
				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
				ret_val = IRQ_HANDLED;
			}
		}
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAK processing when rxfifo empty only */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
			udc_process_cnak_queue(dev);
	}

finished:
	return ret_val;
}
2699
/*
 * Interrupt handler for Control IN traffic (ep0 IN).
 *
 * Handles two events on ep0 IN:
 *  - TDC (DMA transfer complete): just acknowledged by clearing the bit.
 *  - IN token: stalls ep0 IN when dev->stall_ep0in is set; otherwise
 *    starts the next queued request, either by arming its DMA descriptor
 *    and setting the poll demand bit (DMA mode) or by writing the data
 *    to the TX FIFO (PIO mode).
 *
 * Returns IRQ_HANDLED when an event was processed, else IRQ_NONE.
 */
static irqreturn_t udc_control_in_isr(struct udc *dev)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned len;

	ep = &dev->ep[UDC_EP0IN_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
	/* DMA completion */
	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "isr: TDC clear\n");
		ret_val = IRQ_HANDLED;

		/* clear TDC bit */
		writel(AMD_BIT(UDC_EPSTS_TDC),
				&dev->ep[UDC_EP0IN_IX].regs->sts);

	/* status reg has IN bit set ? */
	} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
		ret_val = IRQ_HANDLED;

		if (ep->dma) {
			/* clear IN bit before starting the next transfer */
			writel(AMD_BIT(UDC_EPSTS_IN),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
		}
		if (dev->stall_ep0in) {
			DBG(dev, "stall ep0in\n");
			/* halt ep0in */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		} else {
			if (!list_empty(&ep->queue)) {
				/* next request */
				req = list_entry(ep->queue.next,
						struct udc_request, queue);

				if (ep->dma) {
					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);
					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_STP_STS_BS_HOST_READY,
						UDC_DMA_STP_STS_BS);

					/* set poll demand bit to start DMA */
					tmp =
					readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp,
					&dev->ep[UDC_EP0IN_IX].regs->ctl);

					/* all bytes will be transferred */
					req->req.actual = req->req.length;

					/* complete req */
					complete_req(ep, req, 0);

				} else {
					/* write fifo */
					udc_txfifo_write(ep, &req->req);

					/* length bytes transferred */
					len = req->req.length - req->req.actual;
					if (len > ep->ep.maxpacket)
						len = ep->ep.maxpacket;

					req->req.actual += len;
					if (req->req.actual == req->req.length
						|| (len != ep->ep.maxpacket)) {
						/*
						 * complete req: all bytes sent
						 * or short packet written
						 */
						complete_req(ep, req, 0);
					}
				}

			}
		}
		ep->halted = 0;
		dev->stall_ep0in = 0;
		if (!ep->dma) {
			/* PIO mode: clear IN bit only after FIFO was filled */
			writel(AMD_BIT(UDC_EPSTS_IN),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
		}
	}

	return ret_val;
}
2798
2799
/*
 * Interrupt handler for global device events.
 *
 * Processes the device interrupt status word @dev_irq bit by bit:
 *  - SC (SET_CONFIG): records the new configuration, programs the CSR NE
 *    registers of all endpoints, un-stalls them, and synthesizes a
 *    SET_CONFIGURATION request for the gadget driver.
 *  - SI (SET_INTERFACE): same pattern for interface/alt-setting values.
 *  - UR (USB reset): masks interrupts, notifies the gadget core, empties
 *    the ep0 queue, soft-resets if the RX FIFO is not empty, resets the
 *    DMA machine and re-initializes the basic config.
 *  - US (USB suspend): calls the gadget driver's suspend() callback.
 *  - ENUM (speed enumeration done): relinks endpoints for the negotiated
 *    speed and re-activates ep0.
 *  - SVC (session valid change): detects disconnect when session valid
 *    goes low and cleans up.
 *
 * dev->lock is held on entry; it is dropped around gadget driver
 * callbacks (setup/resume/suspend), see __releases/__acquires.
 *
 * Returns IRQ_HANDLED if any bit was processed, else IRQ_NONE.
 */
static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 cfg;
	struct udc_ep *ep;
	u16 i;
	u8 udc_csr_epix;

	/* SET_CONFIG irq ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
		ret_val = IRQ_HANDLED;

		/* read config value */
		tmp = readl(&dev->regs->sts);
		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
		dev->cur_config = cfg;
		dev->set_cfg_not_acked = 1;

		/* make usb request for gadget driver */
		memset(&setup_data, 0 , sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
		setup_data.request.wValue = cpu_to_le16(dev->cur_config);

		/* program the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {

				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;


			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep cfg */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
						UDC_CSR_NE_CFG);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}
		/* call gadget zero with setup data received */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	} /* SET_INTERFACE ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
		ret_val = IRQ_HANDLED;

		dev->set_cfg_not_acked = 1;
		/* read interface and alt setting values */
		tmp = readl(&dev->regs->sts);
		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);

		/* make usb request for gadget driver */
		memset(&setup_data, 0 , sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);

		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
				dev->cur_alt, dev->cur_intf);

		/* program the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {

				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;


			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			/* UDC CSR reg */
			/* set ep values */
			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep interface */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
						UDC_CSR_NE_INTF);
			/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
			/* ep alt */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
						UDC_CSR_NE_ALT);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}

		/* call gadget zero with setup data received */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	} /* USB reset */
	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
		DBG(dev, "USB Reset interrupt\n");
		ret_val = IRQ_HANDLED;

		/* allow soft reset when suspend occurs */
		soft_reset_occured = 0;

		dev->waiting_zlp_ack_ep0in = 0;
		dev->set_cfg_not_acked = 0;

		/* mask not needed interrupts */
		udc_mask_unused_interrupts(dev);

		/* call gadget to resume and reset configs etc. */
		spin_unlock(&dev->lock);
		if (dev->sys_suspended && dev->driver->resume) {
			dev->driver->resume(&dev->gadget);
			dev->sys_suspended = 0;
		}
		usb_gadget_udc_reset(&dev->gadget, dev->driver);
		spin_lock(&dev->lock);

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* soft reset when rxfifo not empty (only once per reset) */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
				&& !soft_reset_after_usbreset_occured) {
			udc_soft_reset(dev);
			soft_reset_after_usbreset_occured++;
		}

		/*
		 * DMA reset to kill potential old DMA hw hang,
		 * POLL bit is already reset by ep_init() through
		 * disconnect()
		 */
		DBG(dev, "DMA machine reset\n");
		tmp = readl(&dev->regs->cfg);
		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
		writel(tmp, &dev->regs->cfg);

		/* put into initial config */
		udc_basic_init(dev);

		/* enable device setup interrupts */
		udc_enable_dev_setup_interrupts(dev);

		/* enable suspend interrupt */
		tmp = readl(&dev->regs->irqmsk);
		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
		writel(tmp, &dev->regs->irqmsk);

	} /* USB suspend */
	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
		DBG(dev, "USB Suspend interrupt\n");
		ret_val = IRQ_HANDLED;
		if (dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->sys_suspended = 1;
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}
	} /* new speed ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
		DBG(dev, "ENUM interrupt\n");
		ret_val = IRQ_HANDLED;
		soft_reset_after_usbreset_occured = 0;

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* link up all endpoints for the enumerated speed */
		udc_setup_endpoints(dev);
		dev_info(&dev->pdev->dev, "Connect: %s\n",
			 usb_speed_string(dev->gadget.speed));

		/* init ep 0 */
		activate_control_endpoints(dev);

		/* enable ep0 interrupts */
		udc_enable_ep0_interrupts(dev);
	}
	/* session valid change interrupt */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
		DBG(dev, "USB SVC interrupt\n");
		ret_val = IRQ_HANDLED;

		/* check that session is not valid to detect disconnect */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
			/* disable suspend interrupt */
			tmp = readl(&dev->regs->irqmsk);
			tmp |= AMD_BIT(UDC_DEVINT_US);
			writel(tmp, &dev->regs->irqmsk);
			DBG(dev, "USB Disconnect (session valid low)\n");
			/* cleanup on disconnect */
			usb_disconnect(udc);
		}

	}

	return ret_val;
}
3029
3030 /* Interrupt Service Routine, see Linux Kernel Doc for parameters */
3031 static irqreturn_t udc_irq(int irq, void *pdev)
3032 {
3033         struct udc *dev = pdev;
3034         u32 reg;
3035         u16 i;
3036         u32 ep_irq;
3037         irqreturn_t ret_val = IRQ_NONE;
3038
3039         spin_lock(&dev->lock);
3040
3041         /* check for ep irq */
3042         reg = readl(&dev->regs->ep_irqsts);
3043         if (reg) {
3044                 if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
3045                         ret_val |= udc_control_out_isr(dev);
3046                 if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
3047                         ret_val |= udc_control_in_isr(dev);
3048
3049                 /*
3050                  * data endpoint
3051                  * iterate ep's
3052                  */
3053                 for (i = 1; i < UDC_EP_NUM; i++) {
3054                         ep_irq = 1 << i;
3055                         if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
3056                                 continue;
3057
3058                         /* clear irq status */
3059                         writel(ep_irq, &dev->regs->ep_irqsts);
3060
3061                         /* irq for out ep ? */
3062                         if (i > UDC_EPIN_NUM)
3063                                 ret_val |= udc_data_out_isr(dev, i);
3064                         else
3065                                 ret_val |= udc_data_in_isr(dev, i);
3066                 }
3067
3068         }
3069
3070
3071         /* check for dev irq */
3072         reg = readl(&dev->regs->irqsts);
3073         if (reg) {
3074                 /* clear irq */
3075                 writel(reg, &dev->regs->irqsts);
3076                 ret_val |= udc_dev_isr(dev, reg);
3077         }
3078
3079
3080         spin_unlock(&dev->lock);
3081         return ret_val;
3082 }
3083
/* Tears down device */
static void gadget_release(struct device *pdev)
{
	/* presumably the gadget dev's drvdata is the udc context - free it */
	kfree(dev_get_drvdata(pdev));
}
3090
/*
 * Cleanup on device remove: stops the two global driver timers and
 * clears the global 'udc' pointer so a later probe can run again.
 * The 'dev' argument is not used here; teardown acts on module globals.
 */
static void udc_remove(struct udc *dev)
{
	/* remove timer */
	stop_timer++;
	/* if a run is in flight, wait for it to signal on_exit
	 * (presumably done by the timer function once it sees stop_timer) */
	if (timer_pending(&udc_timer))
		wait_for_completion(&on_exit);
	/* .data != 0 marks the timer as initialized (set in udc_probe()) */
	if (udc_timer.data)
		del_timer_sync(&udc_timer);
	/* remove pollstall timer */
	stop_pollstall_timer++;
	if (timer_pending(&udc_pollstall_timer))
		wait_for_completion(&on_pollstall_exit);
	if (udc_pollstall_timer.data)
		del_timer_sync(&udc_pollstall_timer);
	/* driver supports a single controller; allow a subsequent probe */
	udc = NULL;
}
3108
3109 /* free all the dma pools */
3110 static void free_dma_pools(struct udc *dev)
3111 {
3112         dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td,
3113                       dev->ep[UDC_EP0OUT_IX].td_phys);
3114         dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3115                       dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3116         dma_pool_destroy(dev->stp_requests);
3117         dma_pool_destroy(dev->data_requests);
3118 }
3119
3120 /* Reset all pci context */
3121 static void udc_pci_remove(struct pci_dev *pdev)
3122 {
3123         struct udc              *dev;
3124
3125         dev = pci_get_drvdata(pdev);
3126
3127         usb_del_gadget_udc(&udc->gadget);
3128         /* gadget driver must not be registered */
3129         if (WARN_ON(dev->driver))
3130                 return;
3131
3132         /* dma pool cleanup */
3133         free_dma_pools(dev);
3134
3135         /* reset controller */
3136         writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
3137         free_irq(pdev->irq, dev);
3138         iounmap(dev->virt_addr);
3139         release_mem_region(pci_resource_start(pdev, 0),
3140                            pci_resource_len(pdev, 0));
3141         pci_disable_device(pdev);
3142
3143         udc_remove(dev);
3144 }
3145
/*
 * create dma pools on init
 *
 * Reconciles the module's DMA mode parameters, creates the data and
 * setup descriptor pools and pre-allocates the ep0-out setup and data
 * descriptors. Returns 0 on success or -ENOMEM, undoing any partial
 * allocation via the goto chain.
 */
static int init_dma_pools(struct udc *dev)
{
	struct udc_stp_dma	*td_stp;
	struct udc_data_dma	*td_data;
	int retval;

	/* consistent DMA mode setting ? */
	/* packet-per-buffer mode and buffer-fill mode are mutually exclusive */
	if (use_dma_ppb) {
		use_dma_bufferfill_mode = 0;
	} else {
		use_dma_ppb_du = 0;
		use_dma_bufferfill_mode = 1;
	}

	/* DMA setup */
	/*
	 * NOTE(review): pools are created with a NULL struct device; at the
	 * time udc_pci_probe() calls this, dev->pdev has not been assigned
	 * yet, so &dev->pdev->dev could not be used here without reordering
	 * the probe - confirm before changing.
	 */
	dev->data_requests = dma_pool_create("data_requests", NULL,
		sizeof(struct udc_data_dma), 0, 0);
	if (!dev->data_requests) {
		DBG(dev, "can't get request data pool\n");
		return -ENOMEM;
	}

	/* EP0 in dma regs = dev control regs */
	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;

	/* dma desc for setup data */
	dev->stp_requests = dma_pool_create("setup requests", NULL,
		sizeof(struct udc_stp_dma), 0, 0);
	if (!dev->stp_requests) {
		DBG(dev, "can't get stp request pool\n");
		retval = -ENOMEM;
		goto err_create_dma_pool;
	}
	/* setup */
	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
	if (td_stp == NULL) {
		retval = -ENOMEM;
		goto err_alloc_dma;
	}
	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;

	/* data: 0 packets !? */
	/* note: the ep0-out data descriptor also comes from the setup pool */
	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_phys);
	if (td_data == NULL) {
		retval = -ENOMEM;
		goto err_alloc_phys;
	}
	dev->ep[UDC_EP0OUT_IX].td = td_data;
	return 0;

	/* unwind in reverse allocation order */
err_alloc_phys:
	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
		      dev->ep[UDC_EP0OUT_IX].td_stp_dma);
err_alloc_dma:
	dma_pool_destroy(dev->stp_requests);
	dev->stp_requests = NULL;
err_create_dma_pool:
	dma_pool_destroy(dev->data_requests);
	dev->data_requests = NULL;
	return retval;
}
3210
3211 /* general probe */
3212 static int udc_probe(struct udc *dev)
3213 {
3214         char            tmp[128];
3215         u32             reg;
3216         int             retval;
3217
3218         /* mark timer as not initialized */
3219         udc_timer.data = 0;
3220         udc_pollstall_timer.data = 0;
3221
3222         /* device struct setup */
3223         dev->gadget.ops = &udc_ops;
3224
3225         dev_set_name(&dev->gadget.dev, "gadget");
3226         dev->gadget.name = name;
3227         dev->gadget.max_speed = USB_SPEED_HIGH;
3228
3229         /* init registers, interrupts, ... */
3230         startup_registers(dev);
3231
3232         dev_info(&dev->pdev->dev, "%s\n", mod_desc);
3233
3234         snprintf(tmp, sizeof(tmp), "%d", dev->irq);
3235         dev_info(&dev->pdev->dev,
3236                  "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
3237                  tmp, dev->phys_addr, dev->chiprev,
3238                  (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
3239         strcpy(tmp, UDC_DRIVER_VERSION_STRING);
3240         if (dev->chiprev == UDC_HSA0_REV) {
3241                 dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
3242                 retval = -ENODEV;
3243                 goto finished;
3244         }
3245         dev_info(&dev->pdev->dev,
3246                  "driver version: %s(for Geode5536 B1)\n", tmp);
3247         udc = dev;
3248
3249         retval = usb_add_gadget_udc_release(&udc->pdev->dev, &dev->gadget,
3250                                             gadget_release);
3251         if (retval)
3252                 goto finished;
3253
3254         /* timer init */
3255         init_timer(&udc_timer);
3256         udc_timer.function = udc_timer_function;
3257         udc_timer.data = 1;
3258         /* timer pollstall init */
3259         init_timer(&udc_pollstall_timer);
3260         udc_pollstall_timer.function = udc_pollstall_timer_function;
3261         udc_pollstall_timer.data = 1;
3262
3263         /* set SD */
3264         reg = readl(&dev->regs->ctl);
3265         reg |= AMD_BIT(UDC_DEVCTL_SD);
3266         writel(reg, &dev->regs->ctl);
3267
3268         /* print dev register info */
3269         print_regs(dev);
3270
3271         return 0;
3272
3273 finished:
3274         return retval;
3275 }
3276
3277 /* Called by pci bus driver to init pci context */
3278 static int udc_pci_probe(
3279         struct pci_dev *pdev,
3280         const struct pci_device_id *id
3281 )
3282 {
3283         struct udc              *dev;
3284         unsigned long           resource;
3285         unsigned long           len;
3286         int                     retval = 0;
3287
3288         /* one udc only */
3289         if (udc) {
3290                 dev_dbg(&pdev->dev, "already probed\n");
3291                 return -EBUSY;
3292         }
3293
3294         /* init */
3295         dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
3296         if (!dev)
3297                 return -ENOMEM;
3298
3299         /* pci setup */
3300         if (pci_enable_device(pdev) < 0) {
3301                 retval = -ENODEV;
3302                 goto err_pcidev;
3303         }
3304
3305         /* PCI resource allocation */
3306         resource = pci_resource_start(pdev, 0);
3307         len = pci_resource_len(pdev, 0);
3308
3309         if (!request_mem_region(resource, len, name)) {
3310                 dev_dbg(&pdev->dev, "pci device used already\n");
3311                 retval = -EBUSY;
3312                 goto err_memreg;
3313         }
3314
3315         dev->virt_addr = ioremap_nocache(resource, len);
3316         if (dev->virt_addr == NULL) {
3317                 dev_dbg(&pdev->dev, "start address cannot be mapped\n");
3318                 retval = -EFAULT;
3319                 goto err_ioremap;
3320         }
3321
3322         if (!pdev->irq) {
3323                 dev_err(&pdev->dev, "irq not set\n");
3324                 retval = -ENODEV;
3325                 goto err_irq;
3326         }
3327
3328         spin_lock_init(&dev->lock);
3329         /* udc csr registers base */
3330         dev->csr = dev->virt_addr + UDC_CSR_ADDR;
3331         /* dev registers base */
3332         dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
3333         /* ep registers base */
3334         dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
3335         /* fifo's base */
3336         dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
3337         dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
3338
3339         if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
3340                 dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
3341                 retval = -EBUSY;
3342                 goto err_irq;
3343         }
3344
3345         pci_set_drvdata(pdev, dev);
3346
3347         /* chip revision for Hs AMD5536 */
3348         dev->chiprev = pdev->revision;
3349
3350         pci_set_master(pdev);
3351         pci_try_set_mwi(pdev);
3352
3353         /* init dma pools */
3354         if (use_dma) {
3355                 retval = init_dma_pools(dev);
3356                 if (retval != 0)
3357                         goto err_dma;
3358         }
3359
3360         dev->phys_addr = resource;
3361         dev->irq = pdev->irq;
3362         dev->pdev = pdev;
3363
3364         /* general probing */
3365         if (udc_probe(dev)) {
3366                 retval = -ENODEV;
3367                 goto err_probe;
3368         }
3369         return 0;
3370
3371 err_probe:
3372         if (use_dma)
3373                 free_dma_pools(dev);
3374 err_dma:
3375         free_irq(pdev->irq, dev);
3376 err_irq:
3377         iounmap(dev->virt_addr);
3378 err_ioremap:
3379         release_mem_region(resource, len);
3380 err_memreg:
3381         pci_disable_device(pdev);
3382 err_pcidev:
3383         kfree(dev);
3384         return retval;
3385 }
3386
3387 /* Initiates a remote wakeup */
3388 static int udc_remote_wakeup(struct udc *dev)
3389 {
3390         unsigned long flags;
3391         u32 tmp;
3392
3393         DBG(dev, "UDC initiates remote wakeup\n");
3394
3395         spin_lock_irqsave(&dev->lock, flags);
3396
3397         tmp = readl(&dev->regs->ctl);
3398         tmp |= AMD_BIT(UDC_DEVCTL_RES);
3399         writel(tmp, &dev->regs->ctl);
3400         tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
3401         writel(tmp, &dev->regs->ctl);
3402
3403         spin_unlock_irqrestore(&dev->lock, flags);
3404         return 0;
3405 }
3406
/* PCI device parameters */
/*
 * Match the AMD CS5536 UDC function: vendor AMD, device 0x2096, with an
 * exact class match (serial-bus/USB, programming interface 0xfe).
 */
static const struct pci_device_id pci_id[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
		.class =	(PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask =	0xffffffff,
	},
	{},
};
MODULE_DEVICE_TABLE(pci, pci_id);
3417
/* PCI functions */
static struct pci_driver udc_pci_driver = {
	.name =		(char *) name,
	.id_table =	pci_id,
	.probe =	udc_pci_probe,
	.remove =	udc_pci_remove,
};

/* generates module init/exit that (un)register the PCI driver */
module_pci_driver(udc_pci_driver);

MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
MODULE_AUTHOR("Thomas Dahlmann");
MODULE_LICENSE("GPL");
3431