1 /*
2  * udc.c - ChipIdea UDC driver
3  *
4  * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
5  *
6  * Author: David Lopo
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/dmapool.h>
16 #include <linux/err.h>
17 #include <linux/irqreturn.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/usb/ch9.h>
22 #include <linux/usb/gadget.h>
23 #include <linux/usb/chipidea.h>
24
25 #include "ci.h"
26 #include "udc.h"
27 #include "bits.h"
28 #include "debug.h"
29 #include "otg.h"
30
31 /* control endpoint description */
32 static const struct usb_endpoint_descriptor
33 ctrl_endpt_out_desc = {
34         .bLength         = USB_DT_ENDPOINT_SIZE,
35         .bDescriptorType = USB_DT_ENDPOINT,
36
37         .bEndpointAddress = USB_DIR_OUT,
38         .bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
39         .wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
40 };
41
42 static const struct usb_endpoint_descriptor
43 ctrl_endpt_in_desc = {
44         .bLength         = USB_DT_ENDPOINT_SIZE,
45         .bDescriptorType = USB_DT_ENDPOINT,
46
47         .bEndpointAddress = USB_DIR_IN,
48         .bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
49         .wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
50 };
51
52 /**
53  * hw_ep_bit: calculates the bit number
54  * @num: endpoint number
55  * @dir: endpoint direction
56  *
57  * This function returns the bit number
58  */
59 static inline int hw_ep_bit(int num, int dir)
60 {
61         return num + (dir ? 16 : 0);
62 }
63
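/**
 * ep_to_bit: translate a logical endpoint index into its ENDPT* register bit
 * @ci: the controller
 * @n:  logical endpoint index (RX half first, then TX half)
 *
 * RX endpoints use bits 0..15 and TX endpoints bits 16..31; controllers with
 * fewer than 16 endpoints per direction leave a gap that this helper skips.
 */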
64 static inline int ep_to_bit(struct ci_hdrc *ci, int n)
65 {
66         int fill = 16 - ci->hw_ep_max / 2;
67
68         if (n >= ci->hw_ep_max / 2)
69                 n += fill;
70
71         return n;
72 }
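/*
 * Illustrative sketch (not part of the driver): the resulting bit layout,
 * assuming a controller with hw_ep_max == 16 (8 endpoints per direction),
 * so that ep_to_bit()'s fill is 16 - 8 = 8:
 *
 *	hw_ep_bit(1, RX) == 1		hw_ep_bit(1, TX) == 17
 *	ep_to_bit(ci, 1) == 1		ep_to_bit(ci, 9) == 17
 *
 * With 16 endpoints per direction (hw_ep_max == 32) the fill is 0 and
 * ep_to_bit() is the identity.
 */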
73
74 /**
75  * hw_device_state: enables/disables interrupts (execute without interruption)
76  * @dma: 0 => disable, !0 => enable and set dma engine
77  *
78  * This function returns an error code
79  */
80 static int hw_device_state(struct ci_hdrc *ci, u32 dma)
81 {
82         if (dma) {
83                 hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
84                 /* interrupt, error, port change, reset, sleep/suspend */
85                 hw_write(ci, OP_USBINTR, ~0,
86                              USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
87                 hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
88         } else {
89                 hw_write(ci, OP_USBINTR, ~0, 0);
90                 hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
91         }
92         return 0;
93 }
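/*
 * Usage sketch (taken from ci_udc_start()/ci_udc_vbus_session() below):
 *
 *	hw_device_state(ci, ci->ep0out->qh.dma);  start: point ENDPTLISTADDR
 *						  at ep0's QH, unmask irqs, run
 *	hw_device_state(ci, 0);			  stop: mask irqs, clear RS
 */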
94
95 /**
96  * hw_ep_flush: flush endpoint fifo (execute without interruption)
97  * @num: endpoint number
98  * @dir: endpoint direction
99  *
100  * This function returns an error code
101  */
102 static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
103 {
104         int n = hw_ep_bit(num, dir);
105
106         do {
107                 /* flush any pending transfer */
108                 hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n));
109                 while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
110                         cpu_relax();
111         } while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
112
113         return 0;
114 }
115
116 /**
117  * hw_ep_disable: disables endpoint (execute without interruption)
118  * @num: endpoint number
119  * @dir: endpoint direction
120  *
121  * This function returns an error code
122  */
123 static int hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
124 {
125         hw_ep_flush(ci, num, dir);
126         hw_write(ci, OP_ENDPTCTRL + num,
127                  dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
128         return 0;
129 }
130
131 /**
132  * hw_ep_enable: enables endpoint (execute without interruption)
133  * @num:  endpoint number
134  * @dir:  endpoint direction
135  * @type: endpoint type
136  *
137  * This function returns an error code
138  */
139 static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
140 {
141         u32 mask, data;
142
143         if (dir) {
144                 mask  = ENDPTCTRL_TXT;  /* type    */
145                 data  = type << __ffs(mask);
146
147                 mask |= ENDPTCTRL_TXS;  /* unstall */
148                 mask |= ENDPTCTRL_TXR;  /* reset data toggle */
149                 data |= ENDPTCTRL_TXR;
150                 mask |= ENDPTCTRL_TXE;  /* enable  */
151                 data |= ENDPTCTRL_TXE;
152         } else {
153                 mask  = ENDPTCTRL_RXT;  /* type    */
154                 data  = type << __ffs(mask);
155
156                 mask |= ENDPTCTRL_RXS;  /* unstall */
157                 mask |= ENDPTCTRL_RXR;  /* reset data toggle */
158                 data |= ENDPTCTRL_RXR;
159                 mask |= ENDPTCTRL_RXE;  /* enable  */
160                 data |= ENDPTCTRL_RXE;
161         }
162         hw_write(ci, OP_ENDPTCTRL + num, mask, data);
163         return 0;
164 }
165
166 /**
167  * hw_ep_get_halt: return endpoint halt status
168  * @num: endpoint number
169  * @dir: endpoint direction
170  *
171  * This function returns 1 if endpoint halted
172  */
173 static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
174 {
175         u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
176
177         return hw_read(ci, OP_ENDPTCTRL + num, mask) ? 1 : 0;
178 }
179
180 /**
181  * hw_test_and_clear_setup_status: test & clear setup status (execute without
182  *                                 interruption)
183  * @n: endpoint number
184  *
185  * This function returns setup status
186  */
187 static int hw_test_and_clear_setup_status(struct ci_hdrc *ci, int n)
188 {
189         n = ep_to_bit(ci, n);
190         return hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(n));
191 }
192
193 /**
194  * hw_ep_prime: primes endpoint (execute without interruption)
195  * @num:     endpoint number
196  * @dir:     endpoint direction
197  * @is_ctrl: true if control endpoint
198  *
199  * This function returns an error code
200  */
201 static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
202 {
203         int n = hw_ep_bit(num, dir);
204
205         if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
206                 return -EAGAIN;
207
208         hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n));
209
210         while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
211                 cpu_relax();
212         if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
213                 return -EAGAIN;
214
215                 /* status should be tested according to the manual, but it doesn't work */
216         return 0;
217 }
218
219 /**
220  * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
221  *                 without interruption)
222  * @num:   endpoint number
223  * @dir:   endpoint direction
224  * @value: true => stall, false => unstall
225  *
226  * This function returns an error code
227  */
228 static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
229 {
230         if (value != 0 && value != 1)
231                 return -EINVAL;
232
233         do {
234                 enum ci_hw_regs reg = OP_ENDPTCTRL + num;
235                 u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
236                 u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
237
238                 /* data toggle - reserved for EP0 but it's in ESS */
239                 hw_write(ci, reg, mask_xs|mask_xr,
240                           value ? mask_xs : mask_xr);
241         } while (value != hw_ep_get_halt(ci, num, dir));
242
243         return 0;
244 }
245
246 /**
247  * hw_port_is_high_speed: test if port is high speed
248  *
249  * This function returns true if high speed port
250  */
251 static int hw_port_is_high_speed(struct ci_hdrc *ci)
252 {
253         return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
254                 hw_read(ci, OP_PORTSC, PORTSC_HSP);
255 }
256
257 /**
258  * hw_read_intr_enable: returns interrupt enable register
259  *
260  * This function returns register data
261  */
262 static u32 hw_read_intr_enable(struct ci_hdrc *ci)
263 {
264         return hw_read(ci, OP_USBINTR, ~0);
265 }
266
267 /**
268  * hw_read_intr_status: returns interrupt status register
269  *
270  * This function returns register data
271  */
272 static u32 hw_read_intr_status(struct ci_hdrc *ci)
273 {
274         return hw_read(ci, OP_USBSTS, ~0);
275 }
276
277 /**
278  * hw_test_and_clear_complete: test & clear complete status (execute without
279  *                             interruption)
280  * @n: endpoint number
281  *
282  * This function returns complete status
283  */
284 static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n)
285 {
286         n = ep_to_bit(ci, n);
287         return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
288 }
289
290 /**
291  * hw_test_and_clear_intr_active: test & clear active interrupts (execute
292  *                                without interruption)
293  *
294  * This function returns active interrupts
295  */
296 static u32 hw_test_and_clear_intr_active(struct ci_hdrc *ci)
297 {
298         u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);
299
300         hw_write(ci, OP_USBSTS, ~0, reg);
301         return reg;
302 }
303
304 /**
305  * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
306  *                                interruption)
307  *
308  * This function returns guard value
309  */
310 static int hw_test_and_clear_setup_guard(struct ci_hdrc *ci)
311 {
312         return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
313 }
314
315 /**
316  * hw_test_and_set_setup_guard: test & set setup guard (execute without
317  *                              interruption)
318  *
319  * This function returns guard value
320  */
321 static int hw_test_and_set_setup_guard(struct ci_hdrc *ci)
322 {
323         return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
324 }
325
326 /**
327  * hw_usb_set_address: configures USB address (execute without interruption)
328  * @value: new USB address
329  *
330  * This function explicitly sets the address, without the "USBADRA" (advance)
331  * feature, which is not supported by older versions of the controller.
332  */
333 static void hw_usb_set_address(struct ci_hdrc *ci, u8 value)
334 {
335         hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
336                  value << __ffs(DEVICEADDR_USBADR));
337 }
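/*
 * Illustrative sketch (not part of the driver): hw_write() places the value
 * into the field selected by the mask.  Assuming DEVICEADDR_USBADR is the
 * 7-bit field in bits 31:25 (see bits.h), setting address 5 writes
 *
 *	5 << __ffs(DEVICEADDR_USBADR) == 5 << 25 == 0x0a000000
 */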
338
339 /**
340  * hw_usb_reset: restart device after a bus reset (execute without
341  *               interruption)
342  *
343  * This function returns an error code
344  */
345 static int hw_usb_reset(struct ci_hdrc *ci)
346 {
347         hw_usb_set_address(ci, 0);
348
349         /* ESS flushes only at end?!? */
350         hw_write(ci, OP_ENDPTFLUSH,    ~0, ~0);
351
352         /* clear setup token semaphores */
353         hw_write(ci, OP_ENDPTSETUPSTAT, 0,  0);
354
355         /* clear complete status */
356         hw_write(ci, OP_ENDPTCOMPLETE,  0,  0);
357
358         /* wait until all bits cleared */
359         while (hw_read(ci, OP_ENDPTPRIME, ~0))
360                 udelay(10);             /* not RTOS friendly */
361
362         /* reset all endpoints ? */
363
364         /* reset internal status and wait for further instructions
365            no need to verify the port reset status (ESS does it) */
366
367         return 0;
368 }
369
370 /******************************************************************************
371  * UTIL block
372  *****************************************************************************/
373
374 static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
375                           unsigned length)
376 {
377         int i;
378         u32 temp;
379         struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
380                                                   GFP_ATOMIC);
381
382         if (node == NULL)
383                 return -ENOMEM;
384
385         node->ptr = dma_pool_alloc(hwep->td_pool, GFP_ATOMIC,
386                                    &node->dma);
387         if (node->ptr == NULL) {
388                 kfree(node);
389                 return -ENOMEM;
390         }
391
392         memset(node->ptr, 0, sizeof(struct ci_hw_td));
393         node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
394         node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
395         node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
396
397         temp = (u32) (hwreq->req.dma + hwreq->req.actual);
398         if (length) {
399                 node->ptr->page[0] = cpu_to_le32(temp);
400                 for (i = 1; i < TD_PAGE_COUNT; i++) {
401                         u32 page = temp + i * CI_HDRC_PAGE_SIZE;
402                         page &= ~TD_RESERVED_MASK;
403                         node->ptr->page[i] = cpu_to_le32(page);
404                 }
405         }
406
407         hwreq->req.actual += length;
408
409         if (!list_empty(&hwreq->tds)) {
410                 /* get the last entry */
411                 lastnode = list_entry(hwreq->tds.prev,
412                                 struct td_node, td);
413                 lastnode->ptr->next = cpu_to_le32(node->dma);
414         }
415
416         INIT_LIST_HEAD(&node->td);
417         list_add_tail(&node->td, &hwreq->tds);
418
419         return 0;
420 }
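/*
 * Illustrative sketch (not part of the driver): TD page pointers for a
 * buffer that is not page aligned, assuming CI_HDRC_PAGE_SIZE == 4096 and
 * TD_RESERVED_MASK covering the low 12 bits.  For req.dma == 0x10001200:
 *
 *	page[0] = 0x10001200	(keeps the start offset)
 *	page[1] = 0x10002000	(0x10002200 rounded down)
 *	page[2] = 0x10003000
 *	page[3] = 0x10004000
 *	page[4] = 0x10005000
 */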
421
422 /**
423  * _usb_addr: calculates endpoint address from direction & number
424  * @ep:  endpoint
425  */
426 static inline u8 _usb_addr(struct ci_hw_ep *ep)
427 {
428         return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
429 }
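/*
 * Example: _usb_addr() yields the standard endpoint address, e.g. 0x81 for
 * ep1in and 0x02 for ep2out (USB_ENDPOINT_DIR_MASK is 0x80).
 */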
430
431 /**
432  * _hardware_enqueue: configures a request at hardware level
433  * @hwep:   endpoint
434  * @hwreq:  request
435  *
436  * This function returns an error code
437  */
438 static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
439 {
440         struct ci_hdrc *ci = hwep->ci;
441         int ret = 0;
442         unsigned rest = hwreq->req.length;
443         int pages = TD_PAGE_COUNT;
444         struct td_node *firstnode, *lastnode;
445
446         /* don't queue twice */
447         if (hwreq->req.status == -EALREADY)
448                 return -EALREADY;
449
450         hwreq->req.status = -EALREADY;
451
452         ret = usb_gadget_map_request(&ci->gadget, &hwreq->req, hwep->dir);
453         if (ret)
454                 return ret;
455
456         /*
457          * The first buffer might not be page aligned;
458          * in that case we have to span into one extra td.
459          */
460         if (hwreq->req.dma % PAGE_SIZE)
461                 pages--;
462
463         if (rest == 0)
464                 add_td_to_list(hwep, hwreq, 0);
465
466         while (rest > 0) {
467                 unsigned count = min(hwreq->req.length - hwreq->req.actual,
468                                         (unsigned)(pages * CI_HDRC_PAGE_SIZE));
469                 add_td_to_list(hwep, hwreq, count);
470                 rest -= count;
471         }
472
473         if (hwreq->req.zero && hwreq->req.length
474             && (hwreq->req.length % hwep->ep.maxpacket == 0))
475                 add_td_to_list(hwep, hwreq, 0);
476
477         firstnode = list_first_entry(&hwreq->tds, struct td_node, td);
478
479         lastnode = list_entry(hwreq->tds.prev,
480                 struct td_node, td);
481
482         lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
483         if (!hwreq->req.no_interrupt)
484                 lastnode->ptr->token |= cpu_to_le32(TD_IOC);
485         wmb();
486
487         hwreq->req.actual = 0;
488         if (!list_empty(&hwep->qh.queue)) {
489                 struct ci_hw_req *hwreqprev;
490                 int n = hw_ep_bit(hwep->num, hwep->dir);
491                 int tmp_stat;
492                 struct td_node *prevlastnode;
493                 u32 next = firstnode->dma & TD_ADDR_MASK;
494
495                 hwreqprev = list_entry(hwep->qh.queue.prev,
496                                 struct ci_hw_req, queue);
497                 prevlastnode = list_entry(hwreqprev->tds.prev,
498                                 struct td_node, td);
499
500                 prevlastnode->ptr->next = cpu_to_le32(next);
501                 wmb();
502                 if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
503                         goto done;
504                 do {
505                         hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
506                         tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
507                 } while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
508                 hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
509                 if (tmp_stat)
510                         goto done;
511         }
512
513         /*  QH configuration */
514         hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
515         hwep->qh.ptr->td.token &=
516                 cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
517
518         if (hwep->type == USB_ENDPOINT_XFER_ISOC) {
519                 u32 mul = hwreq->req.length / hwep->ep.maxpacket;
520
521                 if (hwreq->req.length % hwep->ep.maxpacket)
522                         mul++;
523                 hwep->qh.ptr->cap |= mul << __ffs(QH_MULT);
524         }
525
526         wmb();   /* synchronize before ep prime */
527
528         ret = hw_ep_prime(ci, hwep->num, hwep->dir,
529                            hwep->type == USB_ENDPOINT_XFER_CONTROL);
530 done:
531         return ret;
532 }
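/*
 * Illustrative sketch (not part of the driver): how the loop above splits a
 * request into TDs, assuming TD_PAGE_COUNT == 5 and CI_HDRC_PAGE_SIZE ==
 * 4096.  A page-aligned 24 KiB request becomes two TDs (20 KiB + 4 KiB);
 * if req.dma is not page aligned, "pages" drops to 4 and the same request
 * becomes 16 KiB + 8 KiB.  A trailing zero-length TD is appended when
 * req.zero is set and the length is a multiple of maxpacket.
 */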
533
534 /*
535  * free_pending_td: remove a pending request for the endpoint
536  * @hwep: endpoint
537  */
538 static void free_pending_td(struct ci_hw_ep *hwep)
539 {
540         struct td_node *pending = hwep->pending_td;
541
542         dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
543         hwep->pending_td = NULL;
544         kfree(pending);
545 }
546
547 /**
548  * _hardware_dequeue: handles a request at hardware level
549  * @hwep:   endpoint
550  * @hwreq:  request
551  *
552  * This function returns an error code
553  */
554 static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
555 {
556         u32 tmptoken;
557         struct td_node *node, *tmpnode;
558         unsigned remaining_length;
559         unsigned actual = hwreq->req.length;
560
561         if (hwreq->req.status != -EALREADY)
562                 return -EINVAL;
563
564         hwreq->req.status = 0;
565
566         list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
567                 tmptoken = le32_to_cpu(node->ptr->token);
568                 if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
569                         hwreq->req.status = -EALREADY;
570                         return -EBUSY;
571                 }
572
573                 remaining_length = (tmptoken & TD_TOTAL_BYTES);
574                 remaining_length >>= __ffs(TD_TOTAL_BYTES);
575                 actual -= remaining_length;
576
577                 hwreq->req.status = tmptoken & TD_STATUS;
578                 if ((TD_STATUS_HALTED & hwreq->req.status)) {
579                         hwreq->req.status = -EPIPE;
580                         break;
581                 } else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
582                         hwreq->req.status = -EPROTO;
583                         break;
584                 } else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
585                         hwreq->req.status = -EILSEQ;
586                         break;
587                 }
588
589                 if (remaining_length) {
590                         if (hwep->dir) {
591                                 hwreq->req.status = -EPROTO;
592                                 break;
593                         }
594                 }
595                 /*
596                  * As the hardware could still address the freed td,
597                  * which would render the udc unusable, the cleanup of
598                  * the td has to be delayed by one.
599                  */
600                 if (hwep->pending_td)
601                         free_pending_td(hwep);
602
603                 hwep->pending_td = node;
604                 list_del_init(&node->td);
605         }
606
607         usb_gadget_unmap_request(&hwep->ci->gadget, &hwreq->req, hwep->dir);
608
609         hwreq->req.actual += actual;
610
611         if (hwreq->req.status)
612                 return hwreq->req.status;
613
614         return hwreq->req.actual;
615 }
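/*
 * Illustrative sketch (not part of the driver): decoding a completed TD
 * token as done above.  Assuming TD_TOTAL_BYTES is the field in bits 30:16,
 * a 512-byte TD whose token reads back with 32 in that field transferred
 * 512 - 32 = 480 bytes; the low status bits are then checked for
 * HALTED/DT_ERR/TR_ERR and mapped to -EPIPE/-EPROTO/-EILSEQ.
 */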
616
617 /**
618  * _ep_nuke: dequeues all endpoint requests
619  * @hwep: endpoint
620  *
621  * This function returns an error code
622  * Caller must hold lock
623  */
624 static int _ep_nuke(struct ci_hw_ep *hwep)
625 __releases(hwep->lock)
626 __acquires(hwep->lock)
627 {
628         struct td_node *node, *tmpnode;
629         if (hwep == NULL)
630                 return -EINVAL;
631
632         hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
633
634         while (!list_empty(&hwep->qh.queue)) {
635
636                 /* pop oldest request */
637                 struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
638                                                      struct ci_hw_req, queue);
639
640                 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
641                         dma_pool_free(hwep->td_pool, node->ptr, node->dma);
642                         list_del_init(&node->td);
643                         node->ptr = NULL;
644                         kfree(node);
645                 }
646
647                 list_del_init(&hwreq->queue);
648                 hwreq->req.status = -ESHUTDOWN;
649
650                 if (hwreq->req.complete != NULL) {
651                         spin_unlock(hwep->lock);
652                         hwreq->req.complete(&hwep->ep, &hwreq->req);
653                         spin_lock(hwep->lock);
654                 }
655         }
656
657         if (hwep->pending_td)
658                 free_pending_td(hwep);
659
660         return 0;
661 }
662
663 /**
664  * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
665  * @gadget: gadget
666  *
667  * This function returns an error code
668  */
669 static int _gadget_stop_activity(struct usb_gadget *gadget)
670 {
671         struct usb_ep *ep;
672         struct ci_hdrc    *ci = container_of(gadget, struct ci_hdrc, gadget);
673         unsigned long flags;
674
675         spin_lock_irqsave(&ci->lock, flags);
676         ci->gadget.speed = USB_SPEED_UNKNOWN;
677         ci->remote_wakeup = 0;
678         ci->suspended = 0;
679         spin_unlock_irqrestore(&ci->lock, flags);
680
681         /* flush all endpoints */
682         gadget_for_each_ep(ep, gadget) {
683                 usb_ep_fifo_flush(ep);
684         }
685         usb_ep_fifo_flush(&ci->ep0out->ep);
686         usb_ep_fifo_flush(&ci->ep0in->ep);
687
688         /* make sure to disable all endpoints */
689         gadget_for_each_ep(ep, gadget) {
690                 usb_ep_disable(ep);
691         }
692
693         if (ci->status != NULL) {
694                 usb_ep_free_request(&ci->ep0in->ep, ci->status);
695                 ci->status = NULL;
696         }
697
698         return 0;
699 }
700
701 /******************************************************************************
702  * ISR block
703  *****************************************************************************/
704 /**
705  * isr_reset_handler: USB reset interrupt handler
706  * @ci: UDC device
707  *
708  * This function resets the USB engine after a bus reset has occurred
709  */
710 static void isr_reset_handler(struct ci_hdrc *ci)
711 __releases(ci->lock)
712 __acquires(ci->lock)
713 {
714         int retval;
715
716         spin_unlock(&ci->lock);
717         if (ci->gadget.speed != USB_SPEED_UNKNOWN) {
718                 if (ci->driver)
719                         ci->driver->disconnect(&ci->gadget);
720         }
721
722         retval = _gadget_stop_activity(&ci->gadget);
723         if (retval)
724                 goto done;
725
726         retval = hw_usb_reset(ci);
727         if (retval)
728                 goto done;
729
730         ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
731         if (ci->status == NULL)
732                 retval = -ENOMEM;
733
734 done:
735         spin_lock(&ci->lock);
736
737         if (retval)
738                 dev_err(ci->dev, "error: %i\n", retval);
739 }
740
741 /**
742  * isr_get_status_complete: get_status request complete function
743  * @ep:  endpoint
744  * @req: request handled
745  *
746  * Caller must release lock
747  */
748 static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
749 {
750         if (ep == NULL || req == NULL)
751                 return;
752
753         kfree(req->buf);
754         usb_ep_free_request(ep, req);
755 }
756
757 /**
758  * _ep_queue: queues (submits) an I/O request to an endpoint
759  *
760  * Caller must hold lock
761  */
762 static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
763                     gfp_t __maybe_unused gfp_flags)
764 {
765         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
766         struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
767         struct ci_hdrc *ci = hwep->ci;
768         int retval = 0;
769
770         if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
771                 return -EINVAL;
772
773         if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
774                 if (req->length)
775                         hwep = (ci->ep0_dir == RX) ?
776                                ci->ep0out : ci->ep0in;
777                 if (!list_empty(&hwep->qh.queue)) {
778                         _ep_nuke(hwep);
779                         retval = -EOVERFLOW;
780                         dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
781                                  _usb_addr(hwep));
782                 }
783         }
784
785         if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
786             hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) {
787                 dev_err(hwep->ci->dev, "request length too big for isochronous\n");
788                 return -EMSGSIZE;
789         }
790
791         /* first nuke then test link, e.g. a previous status stage has not been sent */
792         if (!list_empty(&hwreq->queue)) {
793                 dev_err(hwep->ci->dev, "request already in queue\n");
794                 return -EBUSY;
795         }
796
797         /* push request */
798         hwreq->req.status = -EINPROGRESS;
799         hwreq->req.actual = 0;
800
801         retval = _hardware_enqueue(hwep, hwreq);
802
803         if (retval == -EALREADY)
804                 retval = 0;
805         if (!retval)
806                 list_add_tail(&hwreq->queue, &hwep->qh.queue);
807
808         return retval;
809 }
810
811 /**
812  * isr_get_status_response: get_status request response
813  * @ci: ci struct
814  * @setup: setup request packet
815  *
816  * This function returns an error code
817  */
818 static int isr_get_status_response(struct ci_hdrc *ci,
819                                    struct usb_ctrlrequest *setup)
820 __releases(hwep->lock)
821 __acquires(hwep->lock)
822 {
823         struct ci_hw_ep *hwep = ci->ep0in;
824         struct usb_request *req = NULL;
825         gfp_t gfp_flags = GFP_ATOMIC;
826         int dir, num, retval;
827
828         if (hwep == NULL || setup == NULL)
829                 return -EINVAL;
830
831         spin_unlock(hwep->lock);
832         req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
833         spin_lock(hwep->lock);
834         if (req == NULL)
835                 return -ENOMEM;
836
837         req->complete = isr_get_status_complete;
838         req->length   = 2;
839         req->buf      = kzalloc(req->length, gfp_flags);
840         if (req->buf == NULL) {
841                 retval = -ENOMEM;
842                 goto err_free_req;
843         }
844
845         if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
846                 /* Assume that device is bus powered for now. */
847                 *(u16 *)req->buf = ci->remote_wakeup << 1;
848                 retval = 0;
849         } else if ((setup->bRequestType & USB_RECIP_MASK) \
850                    == USB_RECIP_ENDPOINT) {
851                 dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
852                         TX : RX;
853                 num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
854                 *(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
855         }
856         /* else do nothing; reserved for future use */
857
858         retval = _ep_queue(&hwep->ep, req, gfp_flags);
859         if (retval)
860                 goto err_free_buf;
861
862         return 0;
863
864  err_free_buf:
865         kfree(req->buf);
866  err_free_req:
867         spin_unlock(hwep->lock);
868         usb_ep_free_request(&hwep->ep, req);
869         spin_lock(hwep->lock);
870         return retval;
871 }
872
873 /**
874  * isr_setup_status_complete: setup_status request complete function
875  * @ep:  endpoint
876  * @req: request handled
877  *
878  * Caller must release lock. Put the port in test mode if test mode
879  * feature is selected.
880  */
881 static void
882 isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
883 {
884         struct ci_hdrc *ci = req->context;
885         unsigned long flags;
886
887         if (ci->setaddr) {
888                 hw_usb_set_address(ci, ci->address);
889                 ci->setaddr = false;
890         }
891
892         spin_lock_irqsave(&ci->lock, flags);
893         if (ci->test_mode)
894                 hw_port_test_set(ci, ci->test_mode);
895         spin_unlock_irqrestore(&ci->lock, flags);
896 }
897
898 /**
899  * isr_setup_status_phase: queues the status phase of a setup transaction
900  * @ci: ci struct
901  *
902  * This function returns an error code
903  */
904 static int isr_setup_status_phase(struct ci_hdrc *ci)
905 {
906         int retval;
907         struct ci_hw_ep *hwep;
908
909         hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
910         ci->status->context = ci;
911         ci->status->complete = isr_setup_status_complete;
912
913         retval = _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
914
915         return retval;
916 }
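/*
 * Note: the status stage runs opposite to the data stage, which is why the
 * endpoint above is ep0out when the data stage was IN (ci->ep0_dir == TX)
 * and ep0in otherwise.
 */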
917
918 /**
919  * isr_tr_complete_low: transaction complete low level handler
920  * @hwep: endpoint
921  *
922  * This function returns an error code
923  * Caller must hold lock
924  */
925 static int isr_tr_complete_low(struct ci_hw_ep *hwep)
926 __releases(hwep->lock)
927 __acquires(hwep->lock)
928 {
929         struct ci_hw_req *hwreq, *hwreqtemp;
930         struct ci_hw_ep *hweptemp = hwep;
931         int retval = 0;
932
933         list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
934                         queue) {
935                 retval = _hardware_dequeue(hwep, hwreq);
936                 if (retval < 0)
937                         break;
938                 list_del_init(&hwreq->queue);
939                 if (hwreq->req.complete != NULL) {
940                         spin_unlock(hwep->lock);
941                         if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
942                                         hwreq->req.length)
943                                 hweptemp = hwep->ci->ep0in;
944                         hwreq->req.complete(&hweptemp->ep, &hwreq->req);
945                         spin_lock(hwep->lock);
946                 }
947         }
948
949         if (retval == -EBUSY)
950                 retval = 0;
951
952         return retval;
953 }
954
955 /**
956  * isr_tr_complete_handler: transaction complete interrupt handler
957  * @ci: UDC descriptor
958  *
959  * This function handles traffic events
960  */
961 static void isr_tr_complete_handler(struct ci_hdrc *ci)
962 __releases(ci->lock)
963 __acquires(ci->lock)
964 {
965         unsigned i;
966         u8 tmode = 0;
967
968         for (i = 0; i < ci->hw_ep_max; i++) {
969                 struct ci_hw_ep *hwep  = &ci->ci_hw_ep[i];
970                 int type, num, dir, err = -EINVAL;
971                 struct usb_ctrlrequest req;
972
973                 if (hwep->ep.desc == NULL)
974                         continue;   /* not configured */
975
976                 if (hw_test_and_clear_complete(ci, i)) {
977                         err = isr_tr_complete_low(hwep);
978                         if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
979                                 if (err > 0)   /* needs status phase */
980                                         err = isr_setup_status_phase(ci);
981                                 if (err < 0) {
982                                         spin_unlock(&ci->lock);
983                                         if (usb_ep_set_halt(&hwep->ep))
984                                                 dev_err(ci->dev,
985                                                         "error: ep_set_halt\n");
986                                         spin_lock(&ci->lock);
987                                 }
988                         }
989                 }
990
991                 if (hwep->type != USB_ENDPOINT_XFER_CONTROL ||
992                     !hw_test_and_clear_setup_status(ci, i))
993                         continue;
994
995                 if (i != 0) {
996                         dev_warn(ci->dev, "ctrl traffic at endpoint %d\n", i);
997                         continue;
998                 }
999
1000                 /*
1001                  * Flush data and handshake transactions of previous
1002                  * setup packet.
1003                  */
1004                 _ep_nuke(ci->ep0out);
1005                 _ep_nuke(ci->ep0in);
1006
1007                 /* read_setup_packet */
1008                 do {
1009                         hw_test_and_set_setup_guard(ci);
1010                         memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
1011                 } while (!hw_test_and_clear_setup_guard(ci));
1012
1013                 type = req.bRequestType;
1014
1015                 ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
1016
1017                 switch (req.bRequest) {
1018                 case USB_REQ_CLEAR_FEATURE:
1019                         if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1020                                         le16_to_cpu(req.wValue) ==
1021                                         USB_ENDPOINT_HALT) {
1022                                 if (req.wLength != 0)
1023                                         break;
1024                                 num  = le16_to_cpu(req.wIndex);
1025                                 dir = num & USB_ENDPOINT_DIR_MASK;
1026                                 num &= USB_ENDPOINT_NUMBER_MASK;
1027                                 if (dir) /* TX */
1028                                         num += ci->hw_ep_max/2;
1029                                 if (!ci->ci_hw_ep[num].wedge) {
1030                                         spin_unlock(&ci->lock);
1031                                         err = usb_ep_clear_halt(
1032                                                 &ci->ci_hw_ep[num].ep);
1033                                         spin_lock(&ci->lock);
1034                                         if (err)
1035                                                 break;
1036                                 }
1037                                 err = isr_setup_status_phase(ci);
1038                         } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
1039                                         le16_to_cpu(req.wValue) ==
1040                                         USB_DEVICE_REMOTE_WAKEUP) {
1041                                 if (req.wLength != 0)
1042                                         break;
1043                                 ci->remote_wakeup = 0;
1044                                 err = isr_setup_status_phase(ci);
1045                         } else {
1046                                 goto delegate;
1047                         }
1048                         break;
1049                 case USB_REQ_GET_STATUS:
1050                         if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
1051                             type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
1052                             type != (USB_DIR_IN|USB_RECIP_INTERFACE))
1053                                 goto delegate;
1054                         if (le16_to_cpu(req.wLength) != 2 ||
1055                             le16_to_cpu(req.wValue)  != 0)
1056                                 break;
1057                         err = isr_get_status_response(ci, &req);
1058                         break;
1059                 case USB_REQ_SET_ADDRESS:
1060                         if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
1061                                 goto delegate;
1062                         if (le16_to_cpu(req.wLength) != 0 ||
1063                             le16_to_cpu(req.wIndex)  != 0)
1064                                 break;
1065                         ci->address = (u8)le16_to_cpu(req.wValue);
1066                         ci->setaddr = true;
1067                         err = isr_setup_status_phase(ci);
1068                         break;
1069                 case USB_REQ_SET_FEATURE:
1070                         if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1071                                         le16_to_cpu(req.wValue) ==
1072                                         USB_ENDPOINT_HALT) {
1073                                 if (req.wLength != 0)
1074                                         break;
1075                                 num  = le16_to_cpu(req.wIndex);
1076                                 dir = num & USB_ENDPOINT_DIR_MASK;
1077                                 num &= USB_ENDPOINT_NUMBER_MASK;
1078                                 if (dir) /* TX */
1079                                         num += ci->hw_ep_max/2;
1080
1081                                 spin_unlock(&ci->lock);
1082                                 err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
1083                                 spin_lock(&ci->lock);
1084                                 if (!err)
1085                                         isr_setup_status_phase(ci);
1086                         } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
1087                                 if (req.wLength != 0)
1088                                         break;
1089                                 switch (le16_to_cpu(req.wValue)) {
1090                                 case USB_DEVICE_REMOTE_WAKEUP:
1091                                         ci->remote_wakeup = 1;
1092                                         err = isr_setup_status_phase(ci);
1093                                         break;
1094                                 case USB_DEVICE_TEST_MODE:
1095                                         tmode = le16_to_cpu(req.wIndex) >> 8;
1096                                         switch (tmode) {
1097                                         case TEST_J:
1098                                         case TEST_K:
1099                                         case TEST_SE0_NAK:
1100                                         case TEST_PACKET:
1101                                         case TEST_FORCE_EN:
1102                                                 ci->test_mode = tmode;
1103                                                 err = isr_setup_status_phase(
1104                                                                 ci);
1105                                                 break;
1106                                         default:
1107                                                 break;
1108                                         }
1109                                 default:
1110                                         goto delegate;
1111                                 }
1112                         } else {
1113                                 goto delegate;
1114                         }
1115                         break;
1116                 default:
1117 delegate:
1118                         if (req.wLength == 0)   /* no data phase */
1119                                 ci->ep0_dir = TX;
1120
1121                         spin_unlock(&ci->lock);
1122                         err = ci->driver->setup(&ci->gadget, &req);
1123                         spin_lock(&ci->lock);
1124                         break;
1125                 }
1126
1127                 if (err < 0) {
1128                         spin_unlock(&ci->lock);
1129                         if (usb_ep_set_halt(&hwep->ep))
1130                                 dev_err(ci->dev, "error: ep_set_halt\n");
1131                         spin_lock(&ci->lock);
1132                 }
1133         }
1134 }
1135
1136 /******************************************************************************
1137  * ENDPT block
1138  *****************************************************************************/
1139 /**
1140  * ep_enable: configure endpoint, making it usable
1141  *
1142  * Check usb_ep_enable() at "usb_gadget.h" for details
1143  */
1144 static int ep_enable(struct usb_ep *ep,
1145                      const struct usb_endpoint_descriptor *desc)
1146 {
1147         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1148         int retval = 0;
1149         unsigned long flags;
1150         u32 cap = 0;
1151
1152         if (ep == NULL || desc == NULL)
1153                 return -EINVAL;
1154
1155         spin_lock_irqsave(hwep->lock, flags);
1156
1157         /* only internal SW should enable ctrl endpts */
1158
1159         hwep->ep.desc = desc;
1160
1161         if (!list_empty(&hwep->qh.queue))
1162                 dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
1163
1164         hwep->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
1165         hwep->num  = usb_endpoint_num(desc);
1166         hwep->type = usb_endpoint_type(desc);
1167
1168         hwep->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff;
1169         hwep->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc));
1170
1171         if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1172                 cap |= QH_IOS;
1173         if (hwep->num)
1174                 cap |= QH_ZLT;
1175         cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1176
1177         hwep->qh.ptr->cap = cpu_to_le32(cap);
1178
1179         hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);   /* needed? */
1180
1181         /*
1182          * Enable endpoints in the HW other than ep0 as ep0
1183          * is always enabled
1184          */
1185         if (hwep->num)
1186                 retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
1187                                        hwep->type);
1188
1189         spin_unlock_irqrestore(hwep->lock, flags);
1190         return retval;
1191 }
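/*
 * Illustrative sketch (not part of the driver): the QH capabilities word
 * built above.  A bulk IN endpoint with a 512-byte maxpacket gets
 * QH_ZLT | (512 << __ffs(QH_MAX_PKT)); ep0 gets QH_IOS (interrupt on setup)
 * and no QH_ZLT.  ZLT here disables the controller's automatic zero-length
 * termination; ZLPs are instead appended in software by _hardware_enqueue().
 */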
1192
1193 /**
1194  * ep_disable: endpoint is no longer usable
1195  *
1196  * Check usb_ep_disable() at "usb_gadget.h" for details
1197  */
1198 static int ep_disable(struct usb_ep *ep)
1199 {
1200         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1201         int direction, retval = 0;
1202         unsigned long flags;
1203
1204         if (ep == NULL)
1205                 return -EINVAL;
1206         else if (hwep->ep.desc == NULL)
1207                 return -EBUSY;
1208
1209         spin_lock_irqsave(hwep->lock, flags);
1210
1211         /* only internal SW should disable ctrl endpts */
1212
1213         direction = hwep->dir;
1214         do {
1215                 retval |= _ep_nuke(hwep);
1216                 retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);
1217
1218                 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1219                         hwep->dir = (hwep->dir == TX) ? RX : TX;
1220
1221         } while (hwep->dir != direction);
1222
1223         hwep->ep.desc = NULL;
1224
1225         spin_unlock_irqrestore(hwep->lock, flags);
1226         return retval;
1227 }
1228
1229 /**
1230  * ep_alloc_request: allocate a request object to use with this endpoint
1231  *
1232  * Check usb_ep_alloc_request() at "usb_gadget.h" for details
1233  */
1234 static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1235 {
1236         struct ci_hw_req *hwreq = NULL;
1237
1238         if (ep == NULL)
1239                 return NULL;
1240
1241         hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags);
1242         if (hwreq != NULL) {
1243                 INIT_LIST_HEAD(&hwreq->queue);
1244                 INIT_LIST_HEAD(&hwreq->tds);
1245         }
1246
1247         return (hwreq == NULL) ? NULL : &hwreq->req;
1248 }
1249
1250 /**
1251  * ep_free_request: frees a request object
1252  *
1253  * Check usb_ep_free_request() at "usb_gadget.h" for details
1254  */
1255 static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
1256 {
1257         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1258         struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1259         struct td_node *node, *tmpnode;
1260         unsigned long flags;
1261
1262         if (ep == NULL || req == NULL) {
1263                 return;
1264         } else if (!list_empty(&hwreq->queue)) {
1265                 dev_err(hwep->ci->dev, "freeing queued request\n");
1266                 return;
1267         }
1268
1269         spin_lock_irqsave(hwep->lock, flags);
1270
1271         list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1272                 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1273                 list_del_init(&node->td);
1274                 node->ptr = NULL;
1275                 kfree(node);
1276         }
1277
1278         kfree(hwreq);
1279
1280         spin_unlock_irqrestore(hwep->lock, flags);
1281 }
1282
1283 /**
1284  * ep_queue: queues (submits) an I/O request to an endpoint
1285  *
1286  * Check usb_ep_queue() at "usb_gadget.h" for details
1287  */
1288 static int ep_queue(struct usb_ep *ep, struct usb_request *req,
1289                     gfp_t __maybe_unused gfp_flags)
1290 {
1291         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1292         int retval = 0;
1293         unsigned long flags;
1294
1295         if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
1296                 return -EINVAL;
1297
1298         spin_lock_irqsave(hwep->lock, flags);
1299         retval = _ep_queue(ep, req, gfp_flags);
1300         spin_unlock_irqrestore(hwep->lock, flags);
1301         return retval;
1302 }
1303
1304 /**
1305  * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
1306  *
1307  * Check usb_ep_dequeue() at "usb_gadget.h" for details
1308  */
1309 static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1310 {
1311         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1312         struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1313         unsigned long flags;
1314
1315         if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
1316                 hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
1317                 list_empty(&hwep->qh.queue))
1318                 return -EINVAL;
1319
1320         spin_lock_irqsave(hwep->lock, flags);
1321
1322         hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1323
1324         /* pop request */
1325         list_del_init(&hwreq->queue);
1326
1327         usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
1328
1329         req->status = -ECONNRESET;
1330
1331         if (hwreq->req.complete != NULL) {
1332                 spin_unlock(hwep->lock);
1333                 hwreq->req.complete(&hwep->ep, &hwreq->req);
1334                 spin_lock(hwep->lock);
1335         }
1336
1337         spin_unlock_irqrestore(hwep->lock, flags);
1338         return 0;
1339 }
1340
1341 /**
1342  * ep_set_halt: sets the endpoint halt feature
1343  *
1344  * Check usb_ep_set_halt() at "usb_gadget.h" for details
1345  */
1346 static int ep_set_halt(struct usb_ep *ep, int value)
1347 {
1348         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1349         int direction, retval = 0;
1350         unsigned long flags;
1351
1352         if (ep == NULL || hwep->ep.desc == NULL)
1353                 return -EINVAL;
1354
1355         if (usb_endpoint_xfer_isoc(hwep->ep.desc))
1356                 return -EOPNOTSUPP;
1357
1358         spin_lock_irqsave(hwep->lock, flags);
1359
1360 #ifndef STALL_IN
1361         /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
1362         if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
1363             !list_empty(&hwep->qh.queue)) {
1364                 spin_unlock_irqrestore(hwep->lock, flags);
1365                 return -EAGAIN;
1366         }
1367 #endif
1368
1369         direction = hwep->dir;
1370         do {
1371                 retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
1372
1373                 if (!value)
1374                         hwep->wedge = 0;
1375
1376                 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1377                         hwep->dir = (hwep->dir == TX) ? RX : TX;
1378
1379         } while (hwep->dir != direction);
1380
1381         spin_unlock_irqrestore(hwep->lock, flags);
1382         return retval;
1383 }
1384
1385 /**
1386  * ep_set_wedge: sets the halt feature and ignores clear requests
1387  *
1388  * Check usb_ep_set_wedge() at "usb_gadget.h" for details
1389  */
1390 static int ep_set_wedge(struct usb_ep *ep)
1391 {
1392         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1393         unsigned long flags;
1394
1395         if (ep == NULL || hwep->ep.desc == NULL)
1396                 return -EINVAL;
1397
1398         spin_lock_irqsave(hwep->lock, flags);
1399         hwep->wedge = 1;
1400         spin_unlock_irqrestore(hwep->lock, flags);
1401
1402         return usb_ep_set_halt(ep);
1403 }
1404
1405 /**
1406  * ep_fifo_flush: flushes contents of a fifo
1407  *
1408  * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
1409  */
1410 static void ep_fifo_flush(struct usb_ep *ep)
1411 {
1412         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1413         unsigned long flags;
1414
1415         if (ep == NULL) {
1416                 dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
1417                 return;
1418         }
1419
1420         spin_lock_irqsave(hwep->lock, flags);
1421
1422         hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1423
1424         spin_unlock_irqrestore(hwep->lock, flags);
1425 }
1426
1427 /**
1428  * Endpoint-specific part of the API to the USB controller hardware
1429  * Check "usb_gadget.h" for details
1430  */
1431 static const struct usb_ep_ops usb_ep_ops = {
1432         .enable        = ep_enable,
1433         .disable       = ep_disable,
1434         .alloc_request = ep_alloc_request,
1435         .free_request  = ep_free_request,
1436         .queue         = ep_queue,
1437         .dequeue       = ep_dequeue,
1438         .set_halt      = ep_set_halt,
1439         .set_wedge     = ep_set_wedge,
1440         .fifo_flush    = ep_fifo_flush,
1441 };
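/*
 * Usage sketch (not part of the driver): these ops are reached through the
 * usb_ep API from a gadget function driver; names below are hypothetical.
 *
 *	struct usb_request *req;
 *
 *	ep->desc = my_desc;
 *	usb_ep_enable(ep);				-> ep_enable()
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);	-> ep_alloc_request()
 *	req->buf = my_buf;
 *	req->length = my_len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);		-> ep_queue()
 */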
1442
1443 /******************************************************************************
1444  * GADGET block
1445  *****************************************************************************/
1446 static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1447 {
1448         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1449         unsigned long flags;
1450         int gadget_ready = 0;
1451
1452         spin_lock_irqsave(&ci->lock, flags);
1453         ci->vbus_active = is_active;
1454         if (ci->driver)
1455                 gadget_ready = 1;
1456         spin_unlock_irqrestore(&ci->lock, flags);
1457
1458         if (gadget_ready) {
1459                 if (is_active) {
1460                         pm_runtime_get_sync(&_gadget->dev);
1461                         hw_device_reset(ci, USBMODE_CM_DC);
1462                         hw_device_state(ci, ci->ep0out->qh.dma);
1463                         dev_dbg(ci->dev, "Connected to host\n");
1464                 } else {
1465                         if (ci->driver)
1466                                 ci->driver->disconnect(&ci->gadget);
1467                         hw_device_state(ci, 0);
1468                         if (ci->platdata->notify_event)
1469                                 ci->platdata->notify_event(ci,
1470                                 CI_HDRC_CONTROLLER_STOPPED_EVENT);
1471                         _gadget_stop_activity(&ci->gadget);
1472                         pm_runtime_put_sync(&_gadget->dev);
1473                         dev_dbg(ci->dev, "Disconnected from host\n");
1474                 }
1475         }
1476
1477         return 0;
1478 }
1479
1480 static int ci_udc_wakeup(struct usb_gadget *_gadget)
1481 {
1482         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1483         unsigned long flags;
1484         int ret = 0;
1485
1486         spin_lock_irqsave(&ci->lock, flags);
1487         if (!ci->remote_wakeup) {
1488                 ret = -EOPNOTSUPP;
1489                 goto out;
1490         }
1491         if (!hw_read(ci, OP_PORTSC, PORTSC_SUSP)) {
1492                 ret = -EINVAL;
1493                 goto out;
1494         }
1495         hw_write(ci, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
1496 out:
1497         spin_unlock_irqrestore(&ci->lock, flags);
1498         return ret;
1499 }
1500
1501 static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
1502 {
1503         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1504
1505         if (ci->transceiver)
1506                 return usb_phy_set_power(ci->transceiver, ma);
1507         return -ENOTSUPP;
1508 }
1509
1510 /* Change Data+ pullup status;
1511  * this function is used by usb_gadget_connect/disconnect
1512  */
1513 static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
1514 {
1515         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1516
1517         if (!ci->vbus_active)
1518                 return -EOPNOTSUPP;
1519
1520         if (is_on)
1521                 hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
1522         else
1523                 hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
1524
1525         return 0;
1526 }
1527
1528 static int ci_udc_start(struct usb_gadget *gadget,
1529                          struct usb_gadget_driver *driver);
1530 static int ci_udc_stop(struct usb_gadget *gadget,
1531                         struct usb_gadget_driver *driver);
1532 /**
1533  * Device operations part of the API to the USB controller hardware,
1534  * which don't involve endpoints (or i/o)
1535  * Check  "usb_gadget.h" for details
1536  */
1537 static const struct usb_gadget_ops usb_gadget_ops = {
1538         .vbus_session   = ci_udc_vbus_session,
1539         .wakeup         = ci_udc_wakeup,
1540         .pullup         = ci_udc_pullup,
1541         .vbus_draw      = ci_udc_vbus_draw,
1542         .udc_start      = ci_udc_start,
1543         .udc_stop       = ci_udc_stop,
1544 };
1545
1546 static int init_eps(struct ci_hdrc *ci)
1547 {
1548         int retval = 0, i, j;
1549
1550         for (i = 0; i < ci->hw_ep_max/2; i++)
1551                 for (j = RX; j <= TX; j++) {
1552                         int k = i + j * ci->hw_ep_max/2;
1553                         struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];
1554
1555                         scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
1556                                         (j == TX)  ? "in" : "out");
1557
1558                         hwep->ci          = ci;
1559                         hwep->lock         = &ci->lock;
1560                         hwep->td_pool      = ci->td_pool;
1561
1562                         hwep->ep.name      = hwep->name;
1563                         hwep->ep.ops       = &usb_ep_ops;
1564                         /*
1565                          * for ep0: maxP defined in desc; for other
1566                          * eps, maxP is set by usb_ep_autoconfig() called
1567                          * by the gadget layer
1568                          */
1569                         hwep->ep.maxpacket = (unsigned short)~0;
1570
1571                         INIT_LIST_HEAD(&hwep->qh.queue);
1572                         hwep->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
1573                                                      &hwep->qh.dma);
1574                         if (hwep->qh.ptr == NULL)
1575                                 retval = -ENOMEM;
1576                         else
1577                                 memset(hwep->qh.ptr, 0, sizeof(*hwep->qh.ptr));
1578
1579                         /*
1580                          * set up shorthands for ep0 out and in endpoints,
1581                          * don't add to gadget's ep_list
1582                          */
1583                         if (i == 0) {
1584                                 if (j == RX)
1585                                         ci->ep0out = hwep;
1586                                 else
1587                                         ci->ep0in = hwep;
1588
1589                                 hwep->ep.maxpacket = CTRL_PAYLOAD_MAX;
1590                                 continue;
1591                         }
1592
1593                         list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
1594                 }
1595
1596         return retval;
1597 }
1598
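/*
 * destroy_eps: undo init_eps
 *
 * Releases any transfer descriptor still parked on an endpoint and returns
 * every queue head to the QH DMA pool.
 */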
1599 static void destroy_eps(struct ci_hdrc *ci)
1600 {
1601         int i;
1602
1603         for (i = 0; i < ci->hw_ep_max; i++) {
1604                 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
1605
1606                 if (hwep->pending_td)
1607                         free_pending_td(hwep);
1608                 dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
1609         }
1610 }
1611
1612 /**
1613  * ci_udc_start: register a gadget driver
1614  * @gadget: our gadget
1615  * @driver: the driver being registered
1616  *
1617  * Interrupts are enabled here.
1618  */
1619 static int ci_udc_start(struct usb_gadget *gadget,
1620                          struct usb_gadget_driver *driver)
1621 {
1622         struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1623         unsigned long flags;
1624         int retval = -ENOMEM;
1625
1626         if (driver->disconnect == NULL)
1627                 return -EINVAL;
1628
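        /*
         * ep0 is never put on the gadget's ep_list (see init_eps()), so the
         * gadget core will not enable it; enable both directions here with
         * the driver's own control endpoint descriptors.
         */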
1630         ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
1631         retval = usb_ep_enable(&ci->ep0out->ep);
1632         if (retval)
1633                 return retval;
1634
1635         ci->ep0in->ep.desc = &ctrl_endpt_in_desc;
1636         retval = usb_ep_enable(&ci->ep0in->ep);
1637         if (retval)
1638                 return retval;
1639
1640         ci->driver = driver;
1641         pm_runtime_get_sync(&ci->gadget.dev);
1642         if (ci->vbus_active) {
1643                 spin_lock_irqsave(&ci->lock, flags);
1644                 hw_device_reset(ci, USBMODE_CM_DC);
1645         } else {
1646                 pm_runtime_put_sync(&ci->gadget.dev);
1647                 return retval;
1648         }
1649
1650         retval = hw_device_state(ci, ci->ep0out->qh.dma);
1651         spin_unlock_irqrestore(&ci->lock, flags);
1652         if (retval)
1653                 pm_runtime_put_sync(&ci->gadget.dev);
1654
1655         return retval;
1656 }
1657
1658 /**
1659  * ci_udc_stop: unregister a gadget driver
1660  */
1661 static int ci_udc_stop(struct usb_gadget *gadget,
1662                         struct usb_gadget_driver *driver)
1663 {
1664         struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1665         unsigned long flags;
1666
1667         spin_lock_irqsave(&ci->lock, flags);
1668
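        /*
         * If a session is still active, stop the controller and notify the
         * platform glue first.  ci->lock is dropped around
         * _gadget_stop_activity() because the endpoint and gadget-driver
         * operations it performs take the lock themselves.
         */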
1669         if (ci->vbus_active) {
1670                 hw_device_state(ci, 0);
1671                 if (ci->platdata->notify_event)
1672                         ci->platdata->notify_event(ci,
1673                         CI_HDRC_CONTROLLER_STOPPED_EVENT);
1674                 spin_unlock_irqrestore(&ci->lock, flags);
1675                 _gadget_stop_activity(&ci->gadget);
1676                 spin_lock_irqsave(&ci->lock, flags);
1677                 pm_runtime_put(&ci->gadget.dev);
1678         }
1679
1680         ci->driver = NULL;
1681         spin_unlock_irqrestore(&ci->lock, flags);
1682
1683         return 0;
1684 }
1685
1686 /******************************************************************************
1687  * BUS block
1688  *****************************************************************************/
1689 /**
1690  * udc_irq: ci interrupt handler
1691  *
1692  * This function returns IRQ_HANDLED if the IRQ has been handled.
1693  * It locks access to the registers.
1694  */
1695 static irqreturn_t udc_irq(struct ci_hdrc *ci)
1696 {
1697         irqreturn_t retval;
1698         u32 intr;
1699
1700         if (ci == NULL)
1701                 return IRQ_HANDLED;
1702
1703         spin_lock(&ci->lock);
1704
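        /*
         * With shared registers the IRQ line may also be shared with the
         * host role; only claim the interrupt when the controller is
         * currently configured as a device controller.
         */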
1705         if (ci->platdata->flags & CI_HDRC_REGS_SHARED) {
1706                 if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
1707                                 USBMODE_CM_DC) {
1708                         spin_unlock(&ci->lock);
1709                         return IRQ_NONE;
1710                 }
1711         }
1712         intr = hw_test_and_clear_intr_active(ci);
1713
1714         if (intr) {
1715                 /* order defines priority - do NOT change it */
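                /*
                 * A bus reset supersedes the other conditions, which is why
                 * it is serviced before the port-change, transfer-complete
                 * and suspend events.
                 */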
1716                 if (USBi_URI & intr)
1717                         isr_reset_handler(ci);
1718
1719                 if (USBi_PCI & intr) {
1720                         ci->gadget.speed = hw_port_is_high_speed(ci) ?
1721                                 USB_SPEED_HIGH : USB_SPEED_FULL;
1722                         if (ci->suspended && ci->driver->resume) {
1723                                 spin_unlock(&ci->lock);
1724                                 ci->driver->resume(&ci->gadget);
1725                                 spin_lock(&ci->lock);
1726                                 ci->suspended = 0;
1727                         }
1728                 }
1729
1730                 if (USBi_UI  & intr)
1731                         isr_tr_complete_handler(ci);
1732
1733                 if (USBi_SLI & intr) {
1734                         if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
1735                             ci->driver->suspend) {
1736                                 ci->suspended = 1;
1737                                 spin_unlock(&ci->lock);
1738                                 ci->driver->suspend(&ci->gadget);
1739                                 spin_lock(&ci->lock);
1740                         }
1741                 }
1742                 retval = IRQ_HANDLED;
1743         } else {
1744                 retval = IRQ_NONE;
1745         }
1746         spin_unlock(&ci->lock);
1747
1748         return retval;
1749 }
1750
1751 /**
1752  * udc_start: initialize gadget role
1753  * @ci: chipidea controller
1754  */
1755 static int udc_start(struct ci_hdrc *ci)
1756 {
1757         struct device *dev = ci->dev;
1758         int retval = 0;
1759
1760         spin_lock_init(&ci->lock);
1761
1762         ci->gadget.ops          = &usb_gadget_ops;
1763         ci->gadget.speed        = USB_SPEED_UNKNOWN;
1764         ci->gadget.max_speed    = USB_SPEED_HIGH;
1765         ci->gadget.is_otg       = 0;
1766         ci->gadget.name         = ci->platdata->name;
1767
1768         INIT_LIST_HEAD(&ci->gadget.ep_list);
1769
1770         /* alloc resources */
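        /*
         * Queue heads and transfer descriptors are read by the controller's
         * DMA engine, so both pools use 64-byte alignment and keep each
         * allocation within a CI_HDRC_PAGE_SIZE boundary.
         */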
1771         ci->qh_pool = dma_pool_create("ci_hw_qh", dev,
1772                                        sizeof(struct ci_hw_qh),
1773                                        64, CI_HDRC_PAGE_SIZE);
1774         if (ci->qh_pool == NULL)
1775                 return -ENOMEM;
1776
1777         ci->td_pool = dma_pool_create("ci_hw_td", dev,
1778                                        sizeof(struct ci_hw_td),
1779                                        64, CI_HDRC_PAGE_SIZE);
1780         if (ci->td_pool == NULL) {
1781                 retval = -ENOMEM;
1782                 goto free_qh_pool;
1783         }
1784
1785         retval = init_eps(ci);
1786         if (retval)
1787                 goto free_pools;
1788
1789         ci->gadget.ep0 = &ci->ep0in->ep;
1790
1791         retval = usb_add_gadget_udc(dev, &ci->gadget);
1792         if (retval)
1793                 goto destroy_eps;
1794
1795         pm_runtime_no_callbacks(&ci->gadget.dev);
1796         pm_runtime_enable(&ci->gadget.dev);
1797
1798         return retval;
1799
1800 destroy_eps:
1801         destroy_eps(ci);
1802 free_pools:
1803         dma_pool_destroy(ci->td_pool);
1804 free_qh_pool:
1805         dma_pool_destroy(ci->qh_pool);
1806         return retval;
1807 }
1808
1809 /**
1810  * ci_hdrc_gadget_destroy: parent remove must call this to remove the UDC
1811  *
1812  * No interrupts are active; the IRQ has been released.
1813  */
1814 void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
1815 {
1816         if (!ci->roles[CI_ROLE_GADGET])
1817                 return;
1818
1819         usb_del_gadget_udc(&ci->gadget);
1820
1821         destroy_eps(ci);
1822
1823         dma_pool_destroy(ci->td_pool);
1824         dma_pool_destroy(ci->qh_pool);
1825
1826         if (ci->transceiver) {
1827                 otg_set_peripheral(ci->transceiver->otg, NULL);
1828                 if (ci->global_phy)
1829                         usb_put_phy(ci->transceiver);
1830         }
1831 }
1832
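/*
 * Role-switch hooks: when the OTG core hands the controller to the device
 * role, (re)arm the B-session-valid interrupt so VBUS changes keep being
 * reported; when switching back to the host role it is disabled again (see
 * udc_id_switch_for_host() below).
 */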
1833 static int udc_id_switch_for_device(struct ci_hdrc *ci)
1834 {
1835         if (ci->is_otg) {
1836                 ci_clear_otg_interrupt(ci, OTGSC_BSVIS);
1837                 ci_enable_otg_interrupt(ci, OTGSC_BSVIE);
1838         }
1839
1840         return 0;
1841 }
1842
1843 static void udc_id_switch_for_host(struct ci_hdrc *ci)
1844 {
1845         if (ci->is_otg) {
1846                 /* the host doesn't care about the B_SESSION_VALID event */
1847                 ci_clear_otg_interrupt(ci, OTGSC_BSVIS);
1848                 ci_disable_otg_interrupt(ci, OTGSC_BSVIE);
1849         }
1850 }
1851
1852 /**
1853  * ci_hdrc_gadget_init - initialize device-related bits
1854  * @ci: the controller
1855  *
1856  * This function initializes the gadget, if the device is "device capable".
1857  */
1858 int ci_hdrc_gadget_init(struct ci_hdrc *ci)
1859 {
1860         struct ci_role_driver *rdrv;
1861
1862         if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
1863                 return -ENXIO;
1864
1865         rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
1866         if (!rdrv)
1867                 return -ENOMEM;
1868
1869         rdrv->start     = udc_id_switch_for_device;
1870         rdrv->stop      = udc_id_switch_for_host;
1871         rdrv->irq       = udc_irq;
1872         rdrv->name      = "gadget";
1873         ci->roles[CI_ROLE_GADGET] = rdrv;
1874
1875         return udc_start(ci);
1876 }
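/*
 * A minimal usage sketch (for illustration only; the actual call sites live
 * in the chipidea core glue, and the surrounding probe/remove code shown
 * here is assumed, not taken from this driver):
 *
 *	ret = ci_hdrc_gadget_init(ci);
 *	if (ret == -ENXIO)
 *		dev_info(ci->dev, "controller is not device capable\n");
 *	else if (ret)
 *		return ret;
 *	...
 *	ci_hdrc_gadget_destroy(ci);	(on the remove path)
 */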