usb: chipidea: udc: Consolidate the call of disconnect
[firefly-linux-kernel-4.4.55.git] / drivers/usb/chipidea/udc.c
1 /*
2  * udc.c - ChipIdea UDC driver
3  *
4  * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
5  *
6  * Author: David Lopo
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/dmapool.h>
16 #include <linux/err.h>
17 #include <linux/irqreturn.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/usb/ch9.h>
22 #include <linux/usb/gadget.h>
23 #include <linux/usb/otg.h>
24 #include <linux/usb/chipidea.h>
25
26 #include "ci.h"
27 #include "udc.h"
28 #include "bits.h"
29 #include "debug.h"
30 #include "otg.h"
31
32 /* control endpoint description */
33 static const struct usb_endpoint_descriptor
34 ctrl_endpt_out_desc = {
35         .bLength         = USB_DT_ENDPOINT_SIZE,
36         .bDescriptorType = USB_DT_ENDPOINT,
37
38         .bEndpointAddress = USB_DIR_OUT,
39         .bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
40         .wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
41 };
42
43 static const struct usb_endpoint_descriptor
44 ctrl_endpt_in_desc = {
45         .bLength         = USB_DT_ENDPOINT_SIZE,
46         .bDescriptorType = USB_DT_ENDPOINT,
47
48         .bEndpointAddress = USB_DIR_IN,
49         .bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
50         .wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
51 };
52
53 /**
54  * hw_ep_bit: calculates the bit number
55  * @num: endpoint number
56  * @dir: endpoint direction
57  *
58  * This function returns the bit number
59  */
60 static inline int hw_ep_bit(int num, int dir)
61 {
62         return num + (dir ? 16 : 0);
63 }
64
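/*
 * ep_to_bit: translates an index into ci_hw_ep[] to the endpoint's bit in
 * the ENDPT* registers; OUT endpoints sit in the lower half of the register
 * and IN endpoints (index >= hw_ep_max/2) are shifted into bits 16..31.
 */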
65 static inline int ep_to_bit(struct ci_hdrc *ci, int n)
66 {
67         int fill = 16 - ci->hw_ep_max / 2;
68
69         if (n >= ci->hw_ep_max / 2)
70                 n += fill;
71
72         return n;
73 }
74
75 /**
76  * hw_device_state: enables/disables interrupts and the run bit (execute without interruption)
77  * @dma: 0 => disable, !0 => enable and set the endpoint list address
78  *
79  * This function returns an error code
80  */
81 static int hw_device_state(struct ci_hdrc *ci, u32 dma)
82 {
83         if (dma) {
84                 hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
85                 /* interrupt, error, port change, reset, sleep/suspend */
86                 hw_write(ci, OP_USBINTR, ~0,
87                              USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
88                 hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
89         } else {
90                 hw_write(ci, OP_USBINTR, ~0, 0);
91                 hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
92         }
93         return 0;
94 }
95
96 /**
97  * hw_ep_flush: flush endpoint fifo (execute without interruption)
98  * @num: endpoint number
99  * @dir: endpoint direction
100  *
101  * This function returns an error code
102  */
103 static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
104 {
105         int n = hw_ep_bit(num, dir);
106
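        /*
         * Write the flush bit, wait for the controller to clear it, and
         * repeat while ENDPTSTAT still reports a primed buffer, since a
         * single flush may not take effect.
         */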
107         do {
108                 /* flush any pending transfer */
109                 hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n));
110                 while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
111                         cpu_relax();
112         } while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
113
114         return 0;
115 }
116
117 /**
118  * hw_ep_disable: disables endpoint (execute without interruption)
119  * @num: endpoint number
120  * @dir: endpoint direction
121  *
122  * This function returns an error code
123  */
124 static int hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
125 {
126         hw_ep_flush(ci, num, dir);
127         hw_write(ci, OP_ENDPTCTRL + num,
128                  dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
129         return 0;
130 }
131
132 /**
133  * hw_ep_enable: enables endpoint (execute without interruption)
134  * @num:  endpoint number
135  * @dir:  endpoint direction
136  * @type: endpoint type
137  *
138  * This function returns an error code
139  */
140 static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
141 {
142         u32 mask, data;
143
144         if (dir) {
145                 mask  = ENDPTCTRL_TXT;  /* type    */
146                 data  = type << __ffs(mask);
147
148                 mask |= ENDPTCTRL_TXS;  /* unstall */
149                 mask |= ENDPTCTRL_TXR;  /* reset data toggle */
150                 data |= ENDPTCTRL_TXR;
151                 mask |= ENDPTCTRL_TXE;  /* enable  */
152                 data |= ENDPTCTRL_TXE;
153         } else {
154                 mask  = ENDPTCTRL_RXT;  /* type    */
155                 data  = type << __ffs(mask);
156
157                 mask |= ENDPTCTRL_RXS;  /* unstall */
158                 mask |= ENDPTCTRL_RXR;  /* reset data toggle */
159                 data |= ENDPTCTRL_RXR;
160                 mask |= ENDPTCTRL_RXE;  /* enable  */
161                 data |= ENDPTCTRL_RXE;
162         }
163         hw_write(ci, OP_ENDPTCTRL + num, mask, data);
164         return 0;
165 }
166
167 /**
168  * hw_ep_get_halt: return endpoint halt status
169  * @num: endpoint number
170  * @dir: endpoint direction
171  *
172  * This function returns 1 if the endpoint is halted
173  */
174 static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
175 {
176         u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
177
178         return hw_read(ci, OP_ENDPTCTRL + num, mask) ? 1 : 0;
179 }
180
181 /**
182  * hw_test_and_clear_setup_status: test & clear setup status (execute without
183  *                                 interruption)
184  * @n: endpoint number
185  *
186  * This function returns setup status
187  */
188 static int hw_test_and_clear_setup_status(struct ci_hdrc *ci, int n)
189 {
190         n = ep_to_bit(ci, n);
191         return hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(n));
192 }
193
194 /**
195  * hw_ep_prime: primes endpoint (execute without interruption)
196  * @num:     endpoint number
197  * @dir:     endpoint direction
198  * @is_ctrl: true if control endpoint
199  *
200  * This function returns an error code
201  */
202 static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
203 {
204         int n = hw_ep_bit(num, dir);
205
206         if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
207                 return -EAGAIN;
208
209         hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n));
210
211         while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
212                 cpu_relax();
213         if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
214                 return -EAGAIN;
215
216                 /* status should be tested according to the manual but it doesn't work */
217         return 0;
218 }
219
220 /**
221  * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
222  *                 without interruption)
223  * @num:   endpoint number
224  * @dir:   endpoint direction
225  * @value: true => stall, false => unstall
226  *
227  * This function returns an error code
228  */
229 static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
230 {
231         if (value != 0 && value != 1)
232                 return -EINVAL;
233
234         do {
235                 enum ci_hw_regs reg = OP_ENDPTCTRL + num;
236                 u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
237                 u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
238
239                 /* data toggle - reserved for EP0 but it's in ESS */
240                 hw_write(ci, reg, mask_xs|mask_xr,
241                           value ? mask_xs : mask_xr);
242         } while (value != hw_ep_get_halt(ci, num, dir));
243
244         return 0;
245 }
246
247 /**
248  * hw_port_is_high_speed: test if port is high speed
249  *
250  * This function returns true if high speed port
251  */
252 static int hw_port_is_high_speed(struct ci_hdrc *ci)
253 {
254         return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
255                 hw_read(ci, OP_PORTSC, PORTSC_HSP);
256 }
257
258 /**
259  * hw_read_intr_enable: returns interrupt enable register
260  *
261  * This function returns register data
262  */
263 static u32 hw_read_intr_enable(struct ci_hdrc *ci)
264 {
265         return hw_read(ci, OP_USBINTR, ~0);
266 }
267
268 /**
269  * hw_read_intr_status: returns interrupt status register
270  *
271  * This function returns register data
272  */
273 static u32 hw_read_intr_status(struct ci_hdrc *ci)
274 {
275         return hw_read(ci, OP_USBSTS, ~0);
276 }
277
278 /**
279  * hw_test_and_clear_complete: test & clear complete status (execute without
280  *                             interruption)
281  * @n: endpoint number
282  *
283  * This function returns complete status
284  */
285 static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n)
286 {
287         n = ep_to_bit(ci, n);
288         return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
289 }
290
291 /**
292  * hw_test_and_clear_intr_active: test & clear active interrupts (execute
293  *                                without interruption)
294  *
295  * This function returns the active interrupts
296  */
297 static u32 hw_test_and_clear_intr_active(struct ci_hdrc *ci)
298 {
299         u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);
300
301         hw_write(ci, OP_USBSTS, ~0, reg);
302         return reg;
303 }
304
305 /**
306  * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
307  *                                interruption)
308  *
309  * This function returns guard value
310  */
311 static int hw_test_and_clear_setup_guard(struct ci_hdrc *ci)
312 {
313         return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
314 }
315
316 /**
317  * hw_test_and_set_setup_guard: test & set setup guard (execute without
318  *                              interruption)
319  *
320  * This function returns guard value
321  */
322 static int hw_test_and_set_setup_guard(struct ci_hdrc *ci)
323 {
324         return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
325 }
326
327 /**
328  * hw_usb_set_address: configures USB address (execute without interruption)
329  * @value: new USB address
330  *
331  * This function explicitly sets the address, without the "USBADRA" (advance)
332  * feature, which is not supported by older versions of the controller.
333  */
334 static void hw_usb_set_address(struct ci_hdrc *ci, u8 value)
335 {
336         hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
337                  value << __ffs(DEVICEADDR_USBADR));
338 }
339
340 /**
341  * hw_usb_reset: restart device after a bus reset (execute without
342  *               interruption)
343  *
344  * This function returns an error code
345  */
346 static int hw_usb_reset(struct ci_hdrc *ci)
347 {
348         hw_usb_set_address(ci, 0);
349
350         /* ESS flushes only at end?!? */
351         hw_write(ci, OP_ENDPTFLUSH,    ~0, ~0);
352
353         /* clear setup token semaphores */
354         hw_write(ci, OP_ENDPTSETUPSTAT, 0,  0);
355
356         /* clear complete status */
357         hw_write(ci, OP_ENDPTCOMPLETE,  0,  0);
358
359         /* wait until all bits cleared */
360         while (hw_read(ci, OP_ENDPTPRIME, ~0))
361                 udelay(10);             /* not RTOS friendly */
362
363         /* reset all endpoints ? */
364
365         /* reset internal status and wait for further instructions
366            no need to verify the port reset status (ESS does it) */
367
368         return 0;
369 }
370
371 /******************************************************************************
372  * UTIL block
373  *****************************************************************************/
374
375 static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
376                           unsigned length)
377 {
378         int i;
379         u32 temp;
380         struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
381                                                   GFP_ATOMIC);
382
383         if (node == NULL)
384                 return -ENOMEM;
385
386         node->ptr = dma_pool_alloc(hwep->td_pool, GFP_ATOMIC,
387                                    &node->dma);
388         if (node->ptr == NULL) {
389                 kfree(node);
390                 return -ENOMEM;
391         }
392
393         memset(node->ptr, 0, sizeof(struct ci_hw_td));
394         node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
395         node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
396         node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
397
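        /*
         * Fill in the TD buffer pointers: page[0] takes the exact start
         * address, the remaining entries step through the following
         * CI_HDRC_PAGE_SIZE-sized pages with the low reserved bits cleared.
         */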
398         temp = (u32) (hwreq->req.dma + hwreq->req.actual);
399         if (length) {
400                 node->ptr->page[0] = cpu_to_le32(temp);
401                 for (i = 1; i < TD_PAGE_COUNT; i++) {
402                         u32 page = temp + i * CI_HDRC_PAGE_SIZE;
403                         page &= ~TD_RESERVED_MASK;
404                         node->ptr->page[i] = cpu_to_le32(page);
405                 }
406         }
407
408         hwreq->req.actual += length;
409
410         if (!list_empty(&hwreq->tds)) {
411                 /* get the last entry */
412                 lastnode = list_entry(hwreq->tds.prev,
413                                 struct td_node, td);
414                 lastnode->ptr->next = cpu_to_le32(node->dma);
415         }
416
417         INIT_LIST_HEAD(&node->td);
418         list_add_tail(&node->td, &hwreq->tds);
419
420         return 0;
421 }
422
423 /**
424  * _usb_addr: calculates endpoint address from direction & number
425  * @ep:  endpoint
426  */
427 static inline u8 _usb_addr(struct ci_hw_ep *ep)
428 {
429         return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
430 }
431
432 /**
433  * _hardware_enqueue: configures a request at hardware level
434  * @hwep:   endpoint
435  * @hwreq:  request
436  *
437  * This function returns an error code
438  */
439 static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
440 {
441         struct ci_hdrc *ci = hwep->ci;
442         int ret = 0;
443         unsigned rest = hwreq->req.length;
444         int pages = TD_PAGE_COUNT;
445         struct td_node *firstnode, *lastnode;
446
447         /* don't queue twice */
448         if (hwreq->req.status == -EALREADY)
449                 return -EALREADY;
450
451         hwreq->req.status = -EALREADY;
452
453         ret = usb_gadget_map_request(&ci->gadget, &hwreq->req, hwep->dir);
454         if (ret)
455                 return ret;
456
457         /*
458          * The first buffer might not be page aligned.
459          * In that case the transfer has to span one extra td.
460          */
461         if (hwreq->req.dma % PAGE_SIZE)
462                 pages--;
463
464         if (rest == 0)
465                 add_td_to_list(hwep, hwreq, 0);
466
467         while (rest > 0) {
468                 unsigned count = min(hwreq->req.length - hwreq->req.actual,
469                                         (unsigned)(pages * CI_HDRC_PAGE_SIZE));
470                 add_td_to_list(hwep, hwreq, count);
471                 rest -= count;
472         }
473
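        /*
         * If the gadget requested a zero-length termination and the length
         * is an exact multiple of maxpacket, append an extra zero-length TD
         * so the host sees the transfer complete.
         */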
474         if (hwreq->req.zero && hwreq->req.length
475             && (hwreq->req.length % hwep->ep.maxpacket == 0))
476                 add_td_to_list(hwep, hwreq, 0);
477
478         firstnode = list_first_entry(&hwreq->tds, struct td_node, td);
479
480         lastnode = list_entry(hwreq->tds.prev,
481                 struct td_node, td);
482
483         lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
484         if (!hwreq->req.no_interrupt)
485                 lastnode->ptr->token |= cpu_to_le32(TD_IOC);
486         wmb();
487
488         hwreq->req.actual = 0;
489         if (!list_empty(&hwep->qh.queue)) {
490                 struct ci_hw_req *hwreqprev;
491                 int n = hw_ep_bit(hwep->num, hwep->dir);
492                 int tmp_stat;
493                 struct td_node *prevlastnode;
494                 u32 next = firstnode->dma & TD_ADDR_MASK;
495
496                 hwreqprev = list_entry(hwep->qh.queue.prev,
497                                 struct ci_hw_req, queue);
498                 prevlastnode = list_entry(hwreqprev->tds.prev,
499                                 struct td_node, td);
500
501                 prevlastnode->ptr->next = cpu_to_le32(next);
502                 wmb();
503                 if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
504                         goto done;
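                /*
                 * The controller may already be walking the old list, so use
                 * the add-dTD tripwire: set ATDTW, sample ENDPTSTAT and retry
                 * until ATDTW is still set, which means the status read was
                 * not raced by the hardware.
                 */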
505                 do {
506                         hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
507                         tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
508                 } while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
509                 hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
510                 if (tmp_stat)
511                         goto done;
512         }
513
514         /*  QH configuration */
515         hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
516         hwep->qh.ptr->td.token &=
517                 cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
518
519         if (hwep->type == USB_ENDPOINT_XFER_ISOC) {
520                 u32 mul = hwreq->req.length / hwep->ep.maxpacket;
521
522                 if (hwreq->req.length % hwep->ep.maxpacket)
523                         mul++;
524                 hwep->qh.ptr->cap |= mul << __ffs(QH_MULT);
525         }
526
527         wmb();   /* synchronize before ep prime */
528
529         ret = hw_ep_prime(ci, hwep->num, hwep->dir,
530                            hwep->type == USB_ENDPOINT_XFER_CONTROL);
531 done:
532         return ret;
533 }
534
535 /*
536  * free_pending_td: frees the endpoint's pending td
537  * @hwep: endpoint
538  */
539 static void free_pending_td(struct ci_hw_ep *hwep)
540 {
541         struct td_node *pending = hwep->pending_td;
542
543         dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
544         hwep->pending_td = NULL;
545         kfree(pending);
546 }
547
548 /**
549  * _hardware_dequeue: handles a request at hardware level
550  * @hwep:   endpoint
551  * @hwreq:  request
552  *
553  * This function returns an error code
554  */
555 static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
556 {
557         u32 tmptoken;
558         struct td_node *node, *tmpnode;
559         unsigned remaining_length;
560         unsigned actual = hwreq->req.length;
561
562         if (hwreq->req.status != -EALREADY)
563                 return -EINVAL;
564
565         hwreq->req.status = 0;
566
567         list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
568                 tmptoken = le32_to_cpu(node->ptr->token);
569                 if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
570                         hwreq->req.status = -EALREADY;
571                         return -EBUSY;
572                 }
573
574                 remaining_length = (tmptoken & TD_TOTAL_BYTES);
575                 remaining_length >>= __ffs(TD_TOTAL_BYTES);
576                 actual -= remaining_length;
577
578                 hwreq->req.status = tmptoken & TD_STATUS;
579                 if ((TD_STATUS_HALTED & hwreq->req.status)) {
580                         hwreq->req.status = -EPIPE;
581                         break;
582                 } else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
583                         hwreq->req.status = -EPROTO;
584                         break;
585                 } else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
586                         hwreq->req.status = -EILSEQ;
587                         break;
588                 }
589
590                 if (remaining_length) {
591                         if (hwep->dir) {
592                                 hwreq->req.status = -EPROTO;
593                                 break;
594                         }
595                 }
596                 /*
597                  * As the hardware could still address the freed td,
598                  * which would render the udc unusable, the cleanup of the
599                  * td has to be delayed by one.
600                  */
601                 if (hwep->pending_td)
602                         free_pending_td(hwep);
603
604                 hwep->pending_td = node;
605                 list_del_init(&node->td);
606         }
607
608         usb_gadget_unmap_request(&hwep->ci->gadget, &hwreq->req, hwep->dir);
609
610         hwreq->req.actual += actual;
611
612         if (hwreq->req.status)
613                 return hwreq->req.status;
614
615         return hwreq->req.actual;
616 }
617
618 /**
619  * _ep_nuke: dequeues all endpoint requests
620  * @hwep: endpoint
621  *
622  * This function returns an error code
623  * Caller must hold lock
624  */
625 static int _ep_nuke(struct ci_hw_ep *hwep)
626 __releases(hwep->lock)
627 __acquires(hwep->lock)
628 {
629         struct td_node *node, *tmpnode;
630         if (hwep == NULL)
631                 return -EINVAL;
632
633         hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
634
635         while (!list_empty(&hwep->qh.queue)) {
636
637                 /* pop oldest request */
638                 struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
639                                                      struct ci_hw_req, queue);
640
641                 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
642                         dma_pool_free(hwep->td_pool, node->ptr, node->dma);
643                         list_del_init(&node->td);
644                         node->ptr = NULL;
645                         kfree(node);
646                 }
647
648                 list_del_init(&hwreq->queue);
649                 hwreq->req.status = -ESHUTDOWN;
650
651                 if (hwreq->req.complete != NULL) {
652                         spin_unlock(hwep->lock);
653                         hwreq->req.complete(&hwep->ep, &hwreq->req);
654                         spin_lock(hwep->lock);
655                 }
656         }
657
658         if (hwep->pending_td)
659                 free_pending_td(hwep);
660
661         return 0;
662 }
663
664 /**
665  * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
666  * @gadget: gadget
667  *
668  * This function returns an error code
669  */
670 static int _gadget_stop_activity(struct usb_gadget *gadget)
671 {
672         struct usb_ep *ep;
673         struct ci_hdrc    *ci = container_of(gadget, struct ci_hdrc, gadget);
674         unsigned long flags;
675
676         spin_lock_irqsave(&ci->lock, flags);
677         ci->gadget.speed = USB_SPEED_UNKNOWN;
678         ci->remote_wakeup = 0;
679         ci->suspended = 0;
680         spin_unlock_irqrestore(&ci->lock, flags);
681
682         /* flush all endpoints */
683         gadget_for_each_ep(ep, gadget) {
684                 usb_ep_fifo_flush(ep);
685         }
686         usb_ep_fifo_flush(&ci->ep0out->ep);
687         usb_ep_fifo_flush(&ci->ep0in->ep);
688
689         /* make sure to disable all endpoints */
690         gadget_for_each_ep(ep, gadget) {
691                 usb_ep_disable(ep);
692         }
693
694         if (ci->status != NULL) {
695                 usb_ep_free_request(&ci->ep0in->ep, ci->status);
696                 ci->status = NULL;
697         }
698
699         return 0;
700 }
701
702 /******************************************************************************
703  * ISR block
704  *****************************************************************************/
705 /**
706  * isr_reset_handler: USB reset interrupt handler
707  * @ci: UDC device
708  *
709  * This function resets USB engine after a bus reset occurred
710  */
711 static void isr_reset_handler(struct ci_hdrc *ci)
712 __releases(ci->lock)
713 __acquires(ci->lock)
714 {
715         int retval;
716
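        /*
         * A bus reset on an enumerated device acts as a disconnect: give the
         * gadget driver a chance to clean up before activity is stopped and
         * the controller is reprogrammed.
         */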
717         if (ci->gadget.speed != USB_SPEED_UNKNOWN) {
718                 if (ci->driver)
719                         ci->driver->disconnect(&ci->gadget);
720         }
721
722         spin_unlock(&ci->lock);
723         retval = _gadget_stop_activity(&ci->gadget);
724         if (retval)
725                 goto done;
726
727         retval = hw_usb_reset(ci);
728         if (retval)
729                 goto done;
730
731         ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
732         if (ci->status == NULL)
733                 retval = -ENOMEM;
734
735 done:
736         spin_lock(&ci->lock);
737
738         if (retval)
739                 dev_err(ci->dev, "error: %i\n", retval);
740 }
741
742 /**
743  * isr_get_status_complete: get_status request complete function
744  * @ep:  endpoint
745  * @req: request handled
746  *
747  * Caller must release lock
748  */
749 static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
750 {
751         if (ep == NULL || req == NULL)
752                 return;
753
754         kfree(req->buf);
755         usb_ep_free_request(ep, req);
756 }
757
758 /**
759  * _ep_queue: queues (submits) an I/O request to an endpoint
760  *
761  * Caller must hold lock
762  */
763 static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
764                     gfp_t __maybe_unused gfp_flags)
765 {
766         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
767         struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
768         struct ci_hdrc *ci = hwep->ci;
769         int retval = 0;
770
771         if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
772                 return -EINVAL;
773
774         if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
775                 if (req->length)
776                         hwep = (ci->ep0_dir == RX) ?
777                                ci->ep0out : ci->ep0in;
778                 if (!list_empty(&hwep->qh.queue)) {
779                         _ep_nuke(hwep);
780                         retval = -EOVERFLOW;
781                         dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
782                                  _usb_addr(hwep));
783                 }
784         }
785
786         if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
787             hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) {
788                 dev_err(hwep->ci->dev, "request length too big for isochronous\n");
789                 return -EMSGSIZE;
790         }
791
792         /* first nuke then test link, e.g. previous status has not been sent */
793         if (!list_empty(&hwreq->queue)) {
794                 dev_err(hwep->ci->dev, "request already in queue\n");
795                 return -EBUSY;
796         }
797
798         /* push request */
799         hwreq->req.status = -EINPROGRESS;
800         hwreq->req.actual = 0;
801
802         retval = _hardware_enqueue(hwep, hwreq);
803
804         if (retval == -EALREADY)
805                 retval = 0;
806         if (!retval)
807                 list_add_tail(&hwreq->queue, &hwep->qh.queue);
808
809         return retval;
810 }
811
812 /**
813  * isr_get_status_response: get_status request response
814  * @ci: ci struct
815  * @setup: setup request packet
816  *
817  * This function returns an error code
818  */
819 static int isr_get_status_response(struct ci_hdrc *ci,
820                                    struct usb_ctrlrequest *setup)
821 __releases(hwep->lock)
822 __acquires(hwep->lock)
823 {
824         struct ci_hw_ep *hwep = ci->ep0in;
825         struct usb_request *req = NULL;
826         gfp_t gfp_flags = GFP_ATOMIC;
827         int dir, num, retval;
828
829         if (hwep == NULL || setup == NULL)
830                 return -EINVAL;
831
832         spin_unlock(hwep->lock);
833         req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
834         spin_lock(hwep->lock);
835         if (req == NULL)
836                 return -ENOMEM;
837
838         req->complete = isr_get_status_complete;
839         req->length   = 2;
840         req->buf      = kzalloc(req->length, gfp_flags);
841         if (req->buf == NULL) {
842                 retval = -ENOMEM;
843                 goto err_free_req;
844         }
845
846         if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
847                 /* Assume that device is bus powered for now. */
848                 *(u16 *)req->buf = ci->remote_wakeup << 1;
849                 retval = 0;
850         } else if ((setup->bRequestType & USB_RECIP_MASK) \
851                    == USB_RECIP_ENDPOINT) {
852                 dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
853                         TX : RX;
854                 num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
855                 *(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
856         }
857         /* else do nothing; reserved for future use */
858
859         retval = _ep_queue(&hwep->ep, req, gfp_flags);
860         if (retval)
861                 goto err_free_buf;
862
863         return 0;
864
865  err_free_buf:
866         kfree(req->buf);
867  err_free_req:
868         spin_unlock(hwep->lock);
869         usb_ep_free_request(&hwep->ep, req);
870         spin_lock(hwep->lock);
871         return retval;
872 }
873
874 /**
875  * isr_setup_status_complete: setup_status request complete function
876  * @ep:  endpoint
877  * @req: request handled
878  *
879  * Caller must release lock. Put the port in test mode if test mode
880  * feature is selected.
881  */
882 static void
883 isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
884 {
885         struct ci_hdrc *ci = req->context;
886         unsigned long flags;
887
888         if (ci->setaddr) {
889                 hw_usb_set_address(ci, ci->address);
890                 ci->setaddr = false;
891         }
892
893         spin_lock_irqsave(&ci->lock, flags);
894         if (ci->test_mode)
895                 hw_port_test_set(ci, ci->test_mode);
896         spin_unlock_irqrestore(&ci->lock, flags);
897 }
898
899 /**
900  * isr_setup_status_phase: queues the status phase of a setup transaction
901  * @ci: ci struct
902  *
903  * This function returns an error code
904  */
905 static int isr_setup_status_phase(struct ci_hdrc *ci)
906 {
907         int retval;
908         struct ci_hw_ep *hwep;
909
910         hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
911         ci->status->context = ci;
912         ci->status->complete = isr_setup_status_complete;
913
914         retval = _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
915
916         return retval;
917 }
918
919 /**
920  * isr_tr_complete_low: transaction complete low level handler
921  * @hwep: endpoint
922  *
923  * This function returns an error code
924  * Caller must hold lock
925  */
926 static int isr_tr_complete_low(struct ci_hw_ep *hwep)
927 __releases(hwep->lock)
928 __acquires(hwep->lock)
929 {
930         struct ci_hw_req *hwreq, *hwreqtemp;
931         struct ci_hw_ep *hweptemp = hwep;
932         int retval = 0;
933
934         list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
935                         queue) {
936                 retval = _hardware_dequeue(hwep, hwreq);
937                 if (retval < 0)
938                         break;
939                 list_del_init(&hwreq->queue);
940                 if (hwreq->req.complete != NULL) {
941                         spin_unlock(hwep->lock);
942                         if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
943                                         hwreq->req.length)
944                                 hweptemp = hwep->ci->ep0in;
945                         hwreq->req.complete(&hweptemp->ep, &hwreq->req);
946                         spin_lock(hwep->lock);
947                 }
948         }
949
950         if (retval == -EBUSY)
951                 retval = 0;
952
953         return retval;
954 }
955
956 /**
957  * isr_tr_complete_handler: transaction complete interrupt handler
958  * @ci: UDC descriptor
959  *
960  * This function handles traffic events
961  */
962 static void isr_tr_complete_handler(struct ci_hdrc *ci)
963 __releases(ci->lock)
964 __acquires(ci->lock)
965 {
966         unsigned i;
967         u8 tmode = 0;
968
969         for (i = 0; i < ci->hw_ep_max; i++) {
970                 struct ci_hw_ep *hwep  = &ci->ci_hw_ep[i];
971                 int type, num, dir, err = -EINVAL;
972                 struct usb_ctrlrequest req;
973
974                 if (hwep->ep.desc == NULL)
975                         continue;   /* not configured */
976
977                 if (hw_test_and_clear_complete(ci, i)) {
978                         err = isr_tr_complete_low(hwep);
979                         if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
980                                 if (err > 0)   /* needs status phase */
981                                         err = isr_setup_status_phase(ci);
982                                 if (err < 0) {
983                                         spin_unlock(&ci->lock);
984                                         if (usb_ep_set_halt(&hwep->ep))
985                                                 dev_err(ci->dev,
986                                                         "error: ep_set_halt\n");
987                                         spin_lock(&ci->lock);
988                                 }
989                         }
990                 }
991
992                 if (hwep->type != USB_ENDPOINT_XFER_CONTROL ||
993                     !hw_test_and_clear_setup_status(ci, i))
994                         continue;
995
996                 if (i != 0) {
997                         dev_warn(ci->dev, "ctrl traffic at endpoint %d\n", i);
998                         continue;
999                 }
1000
1001                 /*
1002                  * Flush data and handshake transactions of previous
1003                  * setup packet.
1004                  */
1005                 _ep_nuke(ci->ep0out);
1006                 _ep_nuke(ci->ep0in);
1007
1008                 /* read_setup_packet */
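                /*
                 * The setup tripwire (SUTW) guards the 8-byte copy: if a new
                 * setup packet arrives while it is being read, the hardware
                 * clears the guard and the copy is retried.
                 */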
1009                 do {
1010                         hw_test_and_set_setup_guard(ci);
1011                         memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
1012                 } while (!hw_test_and_clear_setup_guard(ci));
1013
1014                 type = req.bRequestType;
1015
1016                 ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
1017
1018                 switch (req.bRequest) {
1019                 case USB_REQ_CLEAR_FEATURE:
1020                         if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1021                                         le16_to_cpu(req.wValue) ==
1022                                         USB_ENDPOINT_HALT) {
1023                                 if (req.wLength != 0)
1024                                         break;
1025                                 num  = le16_to_cpu(req.wIndex);
1026                                 dir = num & USB_ENDPOINT_DIR_MASK;
1027                                 num &= USB_ENDPOINT_NUMBER_MASK;
1028                                 if (dir) /* TX */
1029                                         num += ci->hw_ep_max/2;
1030                                 if (!ci->ci_hw_ep[num].wedge) {
1031                                         spin_unlock(&ci->lock);
1032                                         err = usb_ep_clear_halt(
1033                                                 &ci->ci_hw_ep[num].ep);
1034                                         spin_lock(&ci->lock);
1035                                         if (err)
1036                                                 break;
1037                                 }
1038                                 err = isr_setup_status_phase(ci);
1039                         } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
1040                                         le16_to_cpu(req.wValue) ==
1041                                         USB_DEVICE_REMOTE_WAKEUP) {
1042                                 if (req.wLength != 0)
1043                                         break;
1044                                 ci->remote_wakeup = 0;
1045                                 err = isr_setup_status_phase(ci);
1046                         } else {
1047                                 goto delegate;
1048                         }
1049                         break;
1050                 case USB_REQ_GET_STATUS:
1051                         if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
1052                             type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
1053                             type != (USB_DIR_IN|USB_RECIP_INTERFACE))
1054                                 goto delegate;
1055                         if (le16_to_cpu(req.wLength) != 2 ||
1056                             le16_to_cpu(req.wValue)  != 0)
1057                                 break;
1058                         err = isr_get_status_response(ci, &req);
1059                         break;
1060                 case USB_REQ_SET_ADDRESS:
1061                         if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
1062                                 goto delegate;
1063                         if (le16_to_cpu(req.wLength) != 0 ||
1064                             le16_to_cpu(req.wIndex)  != 0)
1065                                 break;
1066                         ci->address = (u8)le16_to_cpu(req.wValue);
1067                         ci->setaddr = true;
1068                         err = isr_setup_status_phase(ci);
1069                         break;
1070                 case USB_REQ_SET_FEATURE:
1071                         if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1072                                         le16_to_cpu(req.wValue) ==
1073                                         USB_ENDPOINT_HALT) {
1074                                 if (req.wLength != 0)
1075                                         break;
1076                                 num  = le16_to_cpu(req.wIndex);
1077                                 dir = num & USB_ENDPOINT_DIR_MASK;
1078                                 num &= USB_ENDPOINT_NUMBER_MASK;
1079                                 if (dir) /* TX */
1080                                         num += ci->hw_ep_max/2;
1081
1082                                 spin_unlock(&ci->lock);
1083                                 err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
1084                                 spin_lock(&ci->lock);
1085                                 if (!err)
1086                                         isr_setup_status_phase(ci);
1087                         } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
1088                                 if (req.wLength != 0)
1089                                         break;
1090                                 switch (le16_to_cpu(req.wValue)) {
1091                                 case USB_DEVICE_REMOTE_WAKEUP:
1092                                         ci->remote_wakeup = 1;
1093                                         err = isr_setup_status_phase(ci);
1094                                         break;
1095                                 case USB_DEVICE_TEST_MODE:
1096                                         tmode = le16_to_cpu(req.wIndex) >> 8;
1097                                         switch (tmode) {
1098                                         case TEST_J:
1099                                         case TEST_K:
1100                                         case TEST_SE0_NAK:
1101                                         case TEST_PACKET:
1102                                         case TEST_FORCE_EN:
1103                                                 ci->test_mode = tmode;
1104                                                 err = isr_setup_status_phase(
1105                                                                 ci);
1106                                                 break;
1107                                         default:
1108                                                 break;
1109                                         }
1110                                 default:
1111                                         goto delegate;
1112                                 }
1113                         } else {
1114                                 goto delegate;
1115                         }
1116                         break;
1117                 default:
1118 delegate:
1119                         if (req.wLength == 0)   /* no data phase */
1120                                 ci->ep0_dir = TX;
1121
1122                         spin_unlock(&ci->lock);
1123                         err = ci->driver->setup(&ci->gadget, &req);
1124                         spin_lock(&ci->lock);
1125                         break;
1126                 }
1127
1128                 if (err < 0) {
1129                         spin_unlock(&ci->lock);
1130                         if (usb_ep_set_halt(&hwep->ep))
1131                                 dev_err(ci->dev, "error: ep_set_halt\n");
1132                         spin_lock(&ci->lock);
1133                 }
1134         }
1135 }
1136
1137 /******************************************************************************
1138  * ENDPT block
1139  *****************************************************************************/
1140 /**
1141  * ep_enable: configure endpoint, making it usable
1142  *
1143  * Check usb_ep_enable() at "usb_gadget.h" for details
1144  */
1145 static int ep_enable(struct usb_ep *ep,
1146                      const struct usb_endpoint_descriptor *desc)
1147 {
1148         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1149         int retval = 0;
1150         unsigned long flags;
1151         u32 cap = 0;
1152
1153         if (ep == NULL || desc == NULL)
1154                 return -EINVAL;
1155
1156         spin_lock_irqsave(hwep->lock, flags);
1157
1158         /* only internal SW should enable ctrl endpts */
1159
1160         hwep->ep.desc = desc;
1161
1162         if (!list_empty(&hwep->qh.queue))
1163                 dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
1164
1165         hwep->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
1166         hwep->num  = usb_endpoint_num(desc);
1167         hwep->type = usb_endpoint_type(desc);
1168
1169         hwep->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff;
1170         hwep->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc));
1171
1172         if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1173                 cap |= QH_IOS;
1174         if (hwep->num)
1175                 cap |= QH_ZLT;
1176         cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1177
1178         hwep->qh.ptr->cap = cpu_to_le32(cap);
1179
1180         hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);   /* needed? */
1181
1182         /*
1183          * Enable endpoints in the HW other than ep0 as ep0
1184          * is always enabled
1185          */
1186         if (hwep->num)
1187                 retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
1188                                        hwep->type);
1189
1190         spin_unlock_irqrestore(hwep->lock, flags);
1191         return retval;
1192 }
1193
1194 /**
1195  * ep_disable: endpoint is no longer usable
1196  *
1197  * Check usb_ep_disable() at "usb_gadget.h" for details
1198  */
1199 static int ep_disable(struct usb_ep *ep)
1200 {
1201         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1202         int direction, retval = 0;
1203         unsigned long flags;
1204
1205         if (ep == NULL)
1206                 return -EINVAL;
1207         else if (hwep->ep.desc == NULL)
1208                 return -EBUSY;
1209
1210         spin_lock_irqsave(hwep->lock, flags);
1211
1212         /* only internal SW should disable ctrl endpts */
1213
1214         direction = hwep->dir;
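        /*
         * A control endpoint serves both directions, so nuke and disable
         * each of them; other endpoint types run the loop only once.
         */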
1215         do {
1216                 retval |= _ep_nuke(hwep);
1217                 retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);
1218
1219                 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1220                         hwep->dir = (hwep->dir == TX) ? RX : TX;
1221
1222         } while (hwep->dir != direction);
1223
1224         hwep->ep.desc = NULL;
1225
1226         spin_unlock_irqrestore(hwep->lock, flags);
1227         return retval;
1228 }
1229
1230 /**
1231  * ep_alloc_request: allocate a request object to use with this endpoint
1232  *
1233  * Check usb_ep_alloc_request() at "usb_gadget.h" for details
1234  */
1235 static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1236 {
1237         struct ci_hw_req *hwreq = NULL;
1238
1239         if (ep == NULL)
1240                 return NULL;
1241
1242         hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags);
1243         if (hwreq != NULL) {
1244                 INIT_LIST_HEAD(&hwreq->queue);
1245                 INIT_LIST_HEAD(&hwreq->tds);
1246         }
1247
1248         return (hwreq == NULL) ? NULL : &hwreq->req;
1249 }
1250
1251 /**
1252  * ep_free_request: frees a request object
1253  *
1254  * Check usb_ep_free_request() at "usb_gadget.h" for details
1255  */
1256 static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
1257 {
1258         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1259         struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1260         struct td_node *node, *tmpnode;
1261         unsigned long flags;
1262
1263         if (ep == NULL || req == NULL) {
1264                 return;
1265         } else if (!list_empty(&hwreq->queue)) {
1266                 dev_err(hwep->ci->dev, "freeing queued request\n");
1267                 return;
1268         }
1269
1270         spin_lock_irqsave(hwep->lock, flags);
1271
1272         list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1273                 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1274                 list_del_init(&node->td);
1275                 node->ptr = NULL;
1276                 kfree(node);
1277         }
1278
1279         kfree(hwreq);
1280
1281         spin_unlock_irqrestore(hwep->lock, flags);
1282 }
1283
1284 /**
1285  * ep_queue: queues (submits) an I/O request to an endpoint
1286  *
1287  * Check usb_ep_queue() at "usb_gadget.h" for details
1288  */
1289 static int ep_queue(struct usb_ep *ep, struct usb_request *req,
1290                     gfp_t __maybe_unused gfp_flags)
1291 {
1292         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1293         int retval = 0;
1294         unsigned long flags;
1295
1296         if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
1297                 return -EINVAL;
1298
1299         spin_lock_irqsave(hwep->lock, flags);
1300         retval = _ep_queue(ep, req, gfp_flags);
1301         spin_unlock_irqrestore(hwep->lock, flags);
1302         return retval;
1303 }
1304
1305 /**
1306  * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
1307  *
1308  * Check usb_ep_dequeue() at "usb_gadget.h" for details
1309  */
1310 static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1311 {
1312         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1313         struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1314         unsigned long flags;
1315
1316         if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
1317                 hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
1318                 list_empty(&hwep->qh.queue))
1319                 return -EINVAL;
1320
1321         spin_lock_irqsave(hwep->lock, flags);
1322
1323         hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1324
1325         /* pop request */
1326         list_del_init(&hwreq->queue);
1327
1328         usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
1329
1330         req->status = -ECONNRESET;
1331
1332         if (hwreq->req.complete != NULL) {
1333                 spin_unlock(hwep->lock);
1334                 hwreq->req.complete(&hwep->ep, &hwreq->req);
1335                 spin_lock(hwep->lock);
1336         }
1337
1338         spin_unlock_irqrestore(hwep->lock, flags);
1339         return 0;
1340 }
1341
1342 /**
1343  * ep_set_halt: sets the endpoint halt feature
1344  *
1345  * Check usb_ep_set_halt() at "usb_gadget.h" for details
1346  */
1347 static int ep_set_halt(struct usb_ep *ep, int value)
1348 {
1349         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1350         int direction, retval = 0;
1351         unsigned long flags;
1352
1353         if (ep == NULL || hwep->ep.desc == NULL)
1354                 return -EINVAL;
1355
1356         if (usb_endpoint_xfer_isoc(hwep->ep.desc))
1357                 return -EOPNOTSUPP;
1358
1359         spin_lock_irqsave(hwep->lock, flags);
1360
1361 #ifndef STALL_IN
1362         /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
1363         if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
1364             !list_empty(&hwep->qh.queue)) {
1365                 spin_unlock_irqrestore(hwep->lock, flags);
1366                 return -EAGAIN;
1367         }
1368 #endif
1369
1370         direction = hwep->dir;
1371         do {
1372                 retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
1373
1374                 if (!value)
1375                         hwep->wedge = 0;
1376
1377                 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1378                         hwep->dir = (hwep->dir == TX) ? RX : TX;
1379
1380         } while (hwep->dir != direction);
1381
1382         spin_unlock_irqrestore(hwep->lock, flags);
1383         return retval;
1384 }
1385
1386 /**
1387  * ep_set_wedge: sets the halt feature and ignores clear requests
1388  *
1389  * Check usb_ep_set_wedge() at "usb_gadget.h" for details
1390  */
1391 static int ep_set_wedge(struct usb_ep *ep)
1392 {
1393         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1394         unsigned long flags;
1395
1396         if (ep == NULL || hwep->ep.desc == NULL)
1397                 return -EINVAL;
1398
1399         spin_lock_irqsave(hwep->lock, flags);
1400         hwep->wedge = 1;
1401         spin_unlock_irqrestore(hwep->lock, flags);
1402
1403         return usb_ep_set_halt(ep);
1404 }
1405
1406 /**
1407  * ep_fifo_flush: flushes contents of a fifo
1408  *
1409  * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
1410  */
1411 static void ep_fifo_flush(struct usb_ep *ep)
1412 {
1413         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1414         unsigned long flags;
1415
1416         if (ep == NULL) {
1417                 dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
1418                 return;
1419         }
1420
1421         spin_lock_irqsave(hwep->lock, flags);
1422
1423         hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1424
1425         spin_unlock_irqrestore(hwep->lock, flags);
1426 }
1427
1428 /**
1429  * Endpoint-specific part of the API to the USB controller hardware
1430  * Check "usb_gadget.h" for details
1431  */
1432 static const struct usb_ep_ops usb_ep_ops = {
1433         .enable        = ep_enable,
1434         .disable       = ep_disable,
1435         .alloc_request = ep_alloc_request,
1436         .free_request  = ep_free_request,
1437         .queue         = ep_queue,
1438         .dequeue       = ep_dequeue,
1439         .set_halt      = ep_set_halt,
1440         .set_wedge     = ep_set_wedge,
1441         .fifo_flush    = ep_fifo_flush,
1442 };
1443
1444 /******************************************************************************
1445  * GADGET block
1446  *****************************************************************************/
1447 static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1448 {
1449         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1450         unsigned long flags;
1451         int gadget_ready = 0;
1452
1453         spin_lock_irqsave(&ci->lock, flags);
1454         ci->vbus_active = is_active;
1455         if (ci->driver)
1456                 gadget_ready = 1;
1457         spin_unlock_irqrestore(&ci->lock, flags);
1458
1459         if (gadget_ready) {
1460                 if (is_active) {
1461                         pm_runtime_get_sync(&_gadget->dev);
1462                         hw_device_reset(ci, USBMODE_CM_DC);
1463                         hw_device_state(ci, ci->ep0out->qh.dma);
1464                         dev_dbg(ci->dev, "Connected to host\n");
1465                 } else {
1466                         if (ci->driver)
1467                                 ci->driver->disconnect(&ci->gadget);
1468                         hw_device_state(ci, 0);
1469                         if (ci->platdata->notify_event)
1470                                 ci->platdata->notify_event(ci,
1471                                 CI_HDRC_CONTROLLER_STOPPED_EVENT);
1472                         _gadget_stop_activity(&ci->gadget);
1473                         pm_runtime_put_sync(&_gadget->dev);
1474                         dev_dbg(ci->dev, "Disconnected from host\n");
1475                 }
1476         }
1477
1478         return 0;
1479 }
1480
1481 static int ci_udc_wakeup(struct usb_gadget *_gadget)
1482 {
1483         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1484         unsigned long flags;
1485         int ret = 0;
1486
1487         spin_lock_irqsave(&ci->lock, flags);
1488         if (!ci->remote_wakeup) {
1489                 ret = -EOPNOTSUPP;
1490                 goto out;
1491         }
1492         if (!hw_read(ci, OP_PORTSC, PORTSC_SUSP)) {
1493                 ret = -EINVAL;
1494                 goto out;
1495         }
1496         hw_write(ci, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
1497 out:
1498         spin_unlock_irqrestore(&ci->lock, flags);
1499         return ret;
1500 }
1501
1502 static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
1503 {
1504         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1505
1506         if (ci->transceiver)
1507                 return usb_phy_set_power(ci->transceiver, ma);
1508         return -ENOTSUPP;
1509 }
1510
1511 /* Change Data+ pullup status
1512  * This function is used by usb_gadget_connect/disconnect
1513  */
1514 static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
1515 {
1516         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1517
1518         if (!ci->vbus_active)
1519                 return -EOPNOTSUPP;
1520
1521         if (is_on)
1522                 hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
1523         else
1524                 hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
1525
1526         return 0;
1527 }
1528
1529 static int ci_udc_start(struct usb_gadget *gadget,
1530                          struct usb_gadget_driver *driver);
1531 static int ci_udc_stop(struct usb_gadget *gadget,
1532                         struct usb_gadget_driver *driver);
1533 /**
1534  * Device operations part of the API to the USB controller hardware,
1535  * which don't involve endpoints (or i/o)
1536  * Check  "usb_gadget.h" for details
1537  */
1538 static const struct usb_gadget_ops usb_gadget_ops = {
1539         .vbus_session   = ci_udc_vbus_session,
1540         .wakeup         = ci_udc_wakeup,
1541         .pullup         = ci_udc_pullup,
1542         .vbus_draw      = ci_udc_vbus_draw,
1543         .udc_start      = ci_udc_start,
1544         .udc_stop       = ci_udc_stop,
1545 };
1546
1547 static int init_eps(struct ci_hdrc *ci)
1548 {
1549         int retval = 0, i, j;
1550
1551         for (i = 0; i < ci->hw_ep_max/2; i++)
1552                 for (j = RX; j <= TX; j++) {
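                        /*
                         * ci_hw_ep[] keeps all OUT endpoints in the first
                         * half and all IN endpoints in the second half, so
                         * the IN side of endpoint i lives at i + hw_ep_max/2.
                         */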
1553                         int k = i + j * ci->hw_ep_max/2;
1554                         struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];
1555
1556                         scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
1557                                         (j == TX)  ? "in" : "out");
1558
1559                         hwep->ci          = ci;
1560                         hwep->lock         = &ci->lock;
1561                         hwep->td_pool      = ci->td_pool;
1562
1563                         hwep->ep.name      = hwep->name;
1564                         hwep->ep.ops       = &usb_ep_ops;
1565                         /*
1566                          * for ep0: maxP defined in desc, for other
1567                          * eps, maxP is set by epautoconfig() called
1568                          * by gadget layer
1569                          */
1570                         hwep->ep.maxpacket = (unsigned short)~0;
1571
1572                         INIT_LIST_HEAD(&hwep->qh.queue);
1573                         hwep->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
1574                                                      &hwep->qh.dma);
1575                         if (hwep->qh.ptr == NULL)
1576                                 retval = -ENOMEM;
1577                         else
1578                                 memset(hwep->qh.ptr, 0, sizeof(*hwep->qh.ptr));
1579
1580                         /*
1581                          * set up shorthands for ep0 out and in endpoints,
1582                          * don't add to gadget's ep_list
1583                          */
1584                         if (i == 0) {
1585                                 if (j == RX)
1586                                         ci->ep0out = hwep;
1587                                 else
1588                                         ci->ep0in = hwep;
1589
1590                                 hwep->ep.maxpacket = CTRL_PAYLOAD_MAX;
1591                                 continue;
1592                         }
1593
1594                         list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
1595                 }
1596
1597         return retval;
1598 }
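
/*
 * Illustration only: the array layout created by init_eps(). OUT (RX)
 * endpoints occupy slots [0, hw_ep_max/2), IN (TX) endpoints the upper
 * half, so endpoint "num" in direction "dir" (0 = OUT, 1 = IN) lives at
 * index num + dir * hw_ep_max/2. The helper name is hypothetical.
 */
static __maybe_unused struct ci_hw_ep *example_ep_slot(struct ci_hdrc *ci,
                                                       int num, int dir)
{
        return &ci->ci_hw_ep[num + dir * (ci->hw_ep_max / 2)];
}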
1599
1600 static void destroy_eps(struct ci_hdrc *ci)
1601 {
1602         int i;
1603
1604         for (i = 0; i < ci->hw_ep_max; i++) {
1605                 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
1606
1607                 if (hwep->pending_td)
1608                         free_pending_td(hwep);
1609                 dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
1610         }
1611 }
1612
1613 /**
1614  * ci_udc_start: register a gadget driver
1615  * @gadget: our gadget
1616  * @driver: the driver being registered
1617  *
1618  * Interrupts are enabled here.
1619  */
1620 static int ci_udc_start(struct usb_gadget *gadget,
1621                          struct usb_gadget_driver *driver)
1622 {
1623         struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1624         unsigned long flags;
1625         int retval = -ENOMEM;
1626
1627         if (driver->disconnect == NULL)
1628                 return -EINVAL;
1629
1631         ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
1632         retval = usb_ep_enable(&ci->ep0out->ep);
1633         if (retval)
1634                 return retval;
1635
1636         ci->ep0in->ep.desc = &ctrl_endpt_in_desc;
1637         retval = usb_ep_enable(&ci->ep0in->ep);
1638         if (retval)
1639                 return retval;
1640         spin_lock_irqsave(&ci->lock, flags);
1641
1642         ci->driver = driver;
1643         pm_runtime_get_sync(&ci->gadget.dev);
1644         if (ci->vbus_active) {
1645                 hw_device_reset(ci, USBMODE_CM_DC);
1646         } else {
1647                 pm_runtime_put_sync(&ci->gadget.dev);
1648                 goto done;
1649         }
1650
1651         retval = hw_device_state(ci, ci->ep0out->qh.dma);
1652         if (retval)
1653                 pm_runtime_put_sync(&ci->gadget.dev);
1654
1655  done:
1656         spin_unlock_irqrestore(&ci->lock, flags);
1657         return retval;
1658 }
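
/*
 * Illustration only: ci_udc_start() is not called directly; a gadget driver
 * registers itself with the UDC core, which then invokes the .udc_start
 * operation above. Sketch with a hypothetical wrapper name.
 */
static int __maybe_unused example_register_gadget(struct usb_gadget_driver *drv)
{
        /* the UDC core eventually calls ci_udc_start() via .udc_start */
        return usb_gadget_probe_driver(drv);
}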
1659
1660 /**
1661  * ci_udc_stop: unregister a gadget driver
1662  */
1663 static int ci_udc_stop(struct usb_gadget *gadget,
1664                         struct usb_gadget_driver *driver)
1665 {
1666         struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1667         unsigned long flags;
1668
1669         spin_lock_irqsave(&ci->lock, flags);
1670
1671         if (ci->vbus_active) {
1672                 hw_device_state(ci, 0);
1673                 if (ci->platdata->notify_event)
1674                         ci->platdata->notify_event(ci,
1675                         CI_HDRC_CONTROLLER_STOPPED_EVENT);
1676                 ci->driver = NULL;
1677                 spin_unlock_irqrestore(&ci->lock, flags);
1678                 _gadget_stop_activity(&ci->gadget);
1679                 spin_lock_irqsave(&ci->lock, flags);
1680                 pm_runtime_put(&ci->gadget.dev);
1681         }
1682
1683         spin_unlock_irqrestore(&ci->lock, flags);
1684
1685         return 0;
1686 }
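
/*
 * Illustration only: the mirror image of the above - unregistering the
 * gadget driver from the UDC core ends up in ci_udc_stop() via .udc_stop.
 * The wrapper name is hypothetical.
 */
static int __maybe_unused example_unregister_gadget(struct usb_gadget_driver *drv)
{
        return usb_gadget_unregister_driver(drv);
}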
1687
1688 /******************************************************************************
1689  * BUS block
1690  *****************************************************************************/
1691 /**
1692  * udc_irq: ci interrupt handler
1693  *
1694  * This function returns IRQ_HANDLED if the IRQ has been handled
1695  * and IRQ_NONE otherwise; it locks access to the registers.
1696  */
1697 static irqreturn_t udc_irq(struct ci_hdrc *ci)
1698 {
1699         irqreturn_t retval;
1700         u32 intr;
1701
1702         if (ci == NULL)
1703                 return IRQ_HANDLED;
1704
1705         spin_lock(&ci->lock);
1706
1707         if (ci->platdata->flags & CI_HDRC_REGS_SHARED) {
1708                 if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
1709                                 USBMODE_CM_DC) {
1710                         spin_unlock(&ci->lock);
1711                         return IRQ_NONE;
1712                 }
1713         }
1714         intr = hw_test_and_clear_intr_active(ci);
1715
1716         if (intr) {
1717                 /* order defines priority - do NOT change it */
1718                 if (USBi_URI & intr)
1719                         isr_reset_handler(ci);
1720
1721                 if (USBi_PCI & intr) {
1722                         ci->gadget.speed = hw_port_is_high_speed(ci) ?
1723                                 USB_SPEED_HIGH : USB_SPEED_FULL;
1724                         if (ci->suspended && ci->driver->resume) {
1725                                 spin_unlock(&ci->lock);
1726                                 ci->driver->resume(&ci->gadget);
1727                                 spin_lock(&ci->lock);
1728                                 ci->suspended = 0;
1729                         }
1730                 }
1731
1732                 if (USBi_UI  & intr)
1733                         isr_tr_complete_handler(ci);
1734
1735                 if (USBi_SLI & intr) {
1736                         if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
1737                             ci->driver->suspend) {
1738                                 ci->suspended = 1;
1739                                 spin_unlock(&ci->lock);
1740                                 ci->driver->suspend(&ci->gadget);
1741                                 spin_lock(&ci->lock);
1742                         }
1743                 }
1744                 retval = IRQ_HANDLED;
1745         } else {
1746                 retval = IRQ_NONE;
1747         }
1748         spin_unlock(&ci->lock);
1749
1750         return retval;
1751 }
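
/*
 * Illustration only: udc_irq() is not wired to the interrupt line itself;
 * the chipidea core's shared handler forwards the IRQ to whichever role is
 * active, roughly as sketched below (this assumes the ci_role() helper
 * from ci.h). The example_* name is hypothetical.
 */
static irqreturn_t __maybe_unused example_forward_irq(struct ci_hdrc *ci)
{
        return ci_role(ci)->irq(ci);
}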
1752
1753 /**
1754  * udc_start: initialize gadget role
1755  * @ci: chipidea controller
1756  */
1757 static int udc_start(struct ci_hdrc *ci)
1758 {
1759         struct device *dev = ci->dev;
1760         int retval = 0;
1761
1762         spin_lock_init(&ci->lock);
1763
1764         ci->gadget.ops          = &usb_gadget_ops;
1765         ci->gadget.speed        = USB_SPEED_UNKNOWN;
1766         ci->gadget.max_speed    = USB_SPEED_HIGH;
1767         ci->gadget.is_otg       = 0;
1768         ci->gadget.name         = ci->platdata->name;
1769
1770         INIT_LIST_HEAD(&ci->gadget.ep_list);
1771
1772         /* alloc resources */
1773         ci->qh_pool = dma_pool_create("ci_hw_qh", dev,
1774                                        sizeof(struct ci_hw_qh),
1775                                        64, CI_HDRC_PAGE_SIZE);
1776         if (ci->qh_pool == NULL)
1777                 return -ENOMEM;
1778
1779         ci->td_pool = dma_pool_create("ci_hw_td", dev,
1780                                        sizeof(struct ci_hw_td),
1781                                        64, CI_HDRC_PAGE_SIZE);
1782         if (ci->td_pool == NULL) {
1783                 retval = -ENOMEM;
1784                 goto free_qh_pool;
1785         }
1786
1787         retval = init_eps(ci);
1788         if (retval)
1789                 goto free_pools;
1790
1791         ci->gadget.ep0 = &ci->ep0in->ep;
1792
1793         if (ci->global_phy) {
1794                 ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
1795                 if (IS_ERR(ci->transceiver))
1796                         ci->transceiver = NULL;
1797         }
1798
1799         if (ci->platdata->flags & CI_HDRC_REQUIRE_TRANSCEIVER) {
1800                 if (ci->transceiver == NULL) {
1801                         retval = -ENODEV;
1802                         goto destroy_eps;
1803                 }
1804         }
1805
1806         if (ci->transceiver) {
1807                 retval = otg_set_peripheral(ci->transceiver->otg,
1808                                                 &ci->gadget);
1809                 /*
1810                  * If all USB functions are implemented by chipidea drivers,
1811                  * the call above is not needed; likewise, if only the gadget
1812                  * function is used, the call has no effect (-ENOTSUPP is ok).
1813                  */
1814                 if (retval && retval != -ENOTSUPP)
1815                         goto put_transceiver;
1816         }
1817
1818         retval = usb_add_gadget_udc(dev, &ci->gadget);
1819         if (retval)
1820                 goto remove_trans;
1821
1822         pm_runtime_no_callbacks(&ci->gadget.dev);
1823         pm_runtime_enable(&ci->gadget.dev);
1824
1825         /* Update ci->vbus_active */
1826         ci_handle_vbus_change(ci);
1827
1828         return retval;
1829
1830 remove_trans:
1831         if (ci->transceiver) {
1832                 otg_set_peripheral(ci->transceiver->otg, NULL);
1833                 /* the PHY reference, if taken, is dropped below at
1834                  * the put_transceiver label; don't put it twice */
1835         }
1836
1837         dev_err(dev, "error = %i\n", retval);
1838 put_transceiver:
1839         if (ci->transceiver && ci->global_phy)
1840                 usb_put_phy(ci->transceiver);
1841 destroy_eps:
1842         destroy_eps(ci);
1843 free_pools:
1844         dma_pool_destroy(ci->td_pool);
1845 free_qh_pool:
1846         dma_pool_destroy(ci->qh_pool);
1847         return retval;
1848 }
1849
1850 /**
1851  * ci_hdrc_gadget_destroy: the parent's remove must call this to remove the UDC
1852  *
1853  * No interrupts are active and the IRQ has already been released.
1854  */
1855 void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
1856 {
1857         if (!ci->roles[CI_ROLE_GADGET])
1858                 return;
1859
1860         usb_del_gadget_udc(&ci->gadget);
1861
1862         destroy_eps(ci);
1863
1864         dma_pool_destroy(ci->td_pool);
1865         dma_pool_destroy(ci->qh_pool);
1866
1867         if (ci->transceiver) {
1868                 otg_set_peripheral(ci->transceiver->otg, NULL);
1869                 if (ci->global_phy)
1870                         usb_put_phy(ci->transceiver);
1871         }
1872 }
1873
1874 static int udc_id_switch_for_device(struct ci_hdrc *ci)
1875 {
1876         if (ci->is_otg) {
1877                 ci_clear_otg_interrupt(ci, OTGSC_BSVIS);
1878                 ci_enable_otg_interrupt(ci, OTGSC_BSVIE);
1879         }
1880
1881         return 0;
1882 }
1883
1884 static void udc_id_switch_for_host(struct ci_hdrc *ci)
1885 {
1886         if (ci->is_otg) {
1887                 /* the host doesn't care about the B_SESSION_VALID event */
1888                 ci_clear_otg_interrupt(ci, OTGSC_BSVIS);
1889                 ci_disable_otg_interrupt(ci, OTGSC_BSVIE);
1890         }
1891 }
1892
1893 /**
1894  * ci_hdrc_gadget_init - initialize device related bits
1895  * @ci: the controller
1896  *
1897  * This function initializes the gadget role, if the controller is device capable.
1898  */
1899 int ci_hdrc_gadget_init(struct ci_hdrc *ci)
1900 {
1901         struct ci_role_driver *rdrv;
1902
1903         if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
1904                 return -ENXIO;
1905
1906         rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
1907         if (!rdrv)
1908                 return -ENOMEM;
1909
1910         rdrv->start     = udc_id_switch_for_device;
1911         rdrv->stop      = udc_id_switch_for_host;
1912         rdrv->irq       = udc_irq;
1913         rdrv->name      = "gadget";
1914         ci->roles[CI_ROLE_GADGET] = rdrv;
1915
1916         return udc_start(ci);
1917 }
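
/*
 * Illustration only: once ci_hdrc_gadget_init() has filled in
 * ci->roles[CI_ROLE_GADGET], the core switches to device mode by starting
 * that role, which calls rdrv->start, i.e. udc_id_switch_for_device().
 * Sketch assuming the ci_role_start() helper from ci.h; the example_*
 * name is hypothetical.
 */
static int __maybe_unused example_start_gadget_role(struct ci_hdrc *ci)
{
        return ci_role_start(ci, CI_ROLE_GADGET);
}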