/*
 * udc.c - ChipIdea UDC driver
 *
 * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/dmapool.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/init.h>
18 #include <linux/platform_device.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
22 #include <linux/irq.h>
23 #include <linux/kernel.h>
24 #include <linux/slab.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/usb/ch9.h>
27 #include <linux/usb/gadget.h>
28 #include <linux/usb/otg.h>
29 #include <linux/usb/chipidea.h>
36 /* control endpoint description */
37 static const struct usb_endpoint_descriptor
38 ctrl_endpt_out_desc = {
39 .bLength = USB_DT_ENDPOINT_SIZE,
40 .bDescriptorType = USB_DT_ENDPOINT,
42 .bEndpointAddress = USB_DIR_OUT,
43 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
44 .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
47 static const struct usb_endpoint_descriptor
48 ctrl_endpt_in_desc = {
49 .bLength = USB_DT_ENDPOINT_SIZE,
50 .bDescriptorType = USB_DT_ENDPOINT,
52 .bEndpointAddress = USB_DIR_IN,
53 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
54 .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
/**
 * hw_ep_bit: calculates the bit number for an endpoint
 * @num: endpoint number
 * @dir: endpoint direction (0 = RX, !0 = TX)
 *
 * RX endpoints use bits 0..15, TX endpoints use bits 16..31.
 *
 * This function returns the bit number
 */
static inline int hw_ep_bit(int num, int dir)
{
	return dir ? num + 16 : num;
}
69 static inline int ep_to_bit(struct ci13xxx *udc, int n)
71 int fill = 16 - udc->hw_ep_max / 2;
73 if (n >= udc->hw_ep_max / 2)
80 * hw_device_state: enables/disables interrupts & starts/stops device (execute
81 * without interruption)
82 * @dma: 0 => disable, !0 => enable and set dma engine
84 * This function returns an error code
86 static int hw_device_state(struct ci13xxx *udc, u32 dma)
89 hw_write(udc, OP_ENDPTLISTADDR, ~0, dma);
90 /* interrupt, error, port change, reset, sleep/suspend */
91 hw_write(udc, OP_USBINTR, ~0,
92 USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
93 hw_write(udc, OP_USBCMD, USBCMD_RS, USBCMD_RS);
95 hw_write(udc, OP_USBCMD, USBCMD_RS, 0);
96 hw_write(udc, OP_USBINTR, ~0, 0);
102 * hw_ep_flush: flush endpoint fifo (execute without interruption)
103 * @num: endpoint number
104 * @dir: endpoint direction
106 * This function returns an error code
108 static int hw_ep_flush(struct ci13xxx *udc, int num, int dir)
110 int n = hw_ep_bit(num, dir);
113 /* flush any pending transfer */
114 hw_write(udc, OP_ENDPTFLUSH, BIT(n), BIT(n));
115 while (hw_read(udc, OP_ENDPTFLUSH, BIT(n)))
117 } while (hw_read(udc, OP_ENDPTSTAT, BIT(n)));
123 * hw_ep_disable: disables endpoint (execute without interruption)
124 * @num: endpoint number
125 * @dir: endpoint direction
127 * This function returns an error code
129 static int hw_ep_disable(struct ci13xxx *udc, int num, int dir)
131 hw_ep_flush(udc, num, dir);
132 hw_write(udc, OP_ENDPTCTRL + num,
133 dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
138 * hw_ep_enable: enables endpoint (execute without interruption)
139 * @num: endpoint number
140 * @dir: endpoint direction
141 * @type: endpoint type
143 * This function returns an error code
145 static int hw_ep_enable(struct ci13xxx *udc, int num, int dir, int type)
150 mask = ENDPTCTRL_TXT; /* type */
151 data = type << ffs_nr(mask);
153 mask |= ENDPTCTRL_TXS; /* unstall */
154 mask |= ENDPTCTRL_TXR; /* reset data toggle */
155 data |= ENDPTCTRL_TXR;
156 mask |= ENDPTCTRL_TXE; /* enable */
157 data |= ENDPTCTRL_TXE;
159 mask = ENDPTCTRL_RXT; /* type */
160 data = type << ffs_nr(mask);
162 mask |= ENDPTCTRL_RXS; /* unstall */
163 mask |= ENDPTCTRL_RXR; /* reset data toggle */
164 data |= ENDPTCTRL_RXR;
165 mask |= ENDPTCTRL_RXE; /* enable */
166 data |= ENDPTCTRL_RXE;
168 hw_write(udc, OP_ENDPTCTRL + num, mask, data);
173 * hw_ep_get_halt: return endpoint halt status
174 * @num: endpoint number
175 * @dir: endpoint direction
177 * This function returns 1 if endpoint halted
179 static int hw_ep_get_halt(struct ci13xxx *udc, int num, int dir)
181 u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
183 return hw_read(udc, OP_ENDPTCTRL + num, mask) ? 1 : 0;
187 * hw_test_and_clear_setup_status: test & clear setup status (execute without
189 * @n: endpoint number
191 * This function returns setup status
193 static int hw_test_and_clear_setup_status(struct ci13xxx *udc, int n)
195 n = ep_to_bit(udc, n);
196 return hw_test_and_clear(udc, OP_ENDPTSETUPSTAT, BIT(n));
200 * hw_ep_prime: primes endpoint (execute without interruption)
201 * @num: endpoint number
202 * @dir: endpoint direction
203 * @is_ctrl: true if control endpoint
205 * This function returns an error code
207 static int hw_ep_prime(struct ci13xxx *udc, int num, int dir, int is_ctrl)
209 int n = hw_ep_bit(num, dir);
211 if (is_ctrl && dir == RX && hw_read(udc, OP_ENDPTSETUPSTAT, BIT(num)))
214 hw_write(udc, OP_ENDPTPRIME, BIT(n), BIT(n));
216 while (hw_read(udc, OP_ENDPTPRIME, BIT(n)))
218 if (is_ctrl && dir == RX && hw_read(udc, OP_ENDPTSETUPSTAT, BIT(num)))
221 /* status shoult be tested according with manual but it doesn't work */
226 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
227 * without interruption)
228 * @num: endpoint number
229 * @dir: endpoint direction
230 * @value: true => stall, false => unstall
232 * This function returns an error code
234 static int hw_ep_set_halt(struct ci13xxx *udc, int num, int dir, int value)
236 if (value != 0 && value != 1)
240 enum ci13xxx_regs reg = OP_ENDPTCTRL + num;
241 u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
242 u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
244 /* data toggle - reserved for EP0 but it's in ESS */
245 hw_write(udc, reg, mask_xs|mask_xr,
246 value ? mask_xs : mask_xr);
247 } while (value != hw_ep_get_halt(udc, num, dir));
253 * hw_is_port_high_speed: test if port is high speed
255 * This function returns true if high speed port
257 static int hw_port_is_high_speed(struct ci13xxx *udc)
259 return udc->hw_bank.lpm ? hw_read(udc, OP_DEVLC, DEVLC_PSPD) :
260 hw_read(udc, OP_PORTSC, PORTSC_HSP);
264 * hw_read_intr_enable: returns interrupt enable register
266 * This function returns register data
268 static u32 hw_read_intr_enable(struct ci13xxx *udc)
270 return hw_read(udc, OP_USBINTR, ~0);
274 * hw_read_intr_status: returns interrupt status register
276 * This function returns register data
278 static u32 hw_read_intr_status(struct ci13xxx *udc)
280 return hw_read(udc, OP_USBSTS, ~0);
284 * hw_test_and_clear_complete: test & clear complete status (execute without
286 * @n: endpoint number
288 * This function returns complete status
290 static int hw_test_and_clear_complete(struct ci13xxx *udc, int n)
292 n = ep_to_bit(udc, n);
293 return hw_test_and_clear(udc, OP_ENDPTCOMPLETE, BIT(n));
297 * hw_test_and_clear_intr_active: test & clear active interrupts (execute
298 * without interruption)
300 * This function returns active interrutps
302 static u32 hw_test_and_clear_intr_active(struct ci13xxx *udc)
304 u32 reg = hw_read_intr_status(udc) & hw_read_intr_enable(udc);
306 hw_write(udc, OP_USBSTS, ~0, reg);
311 * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
314 * This function returns guard value
316 static int hw_test_and_clear_setup_guard(struct ci13xxx *udc)
318 return hw_test_and_write(udc, OP_USBCMD, USBCMD_SUTW, 0);
322 * hw_test_and_set_setup_guard: test & set setup guard (execute without
325 * This function returns guard value
327 static int hw_test_and_set_setup_guard(struct ci13xxx *udc)
329 return hw_test_and_write(udc, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
333 * hw_usb_set_address: configures USB address (execute without interruption)
334 * @value: new USB address
336 * This function explicitly sets the address, without the "USBADRA" (advance)
337 * feature, which is not supported by older versions of the controller.
339 static void hw_usb_set_address(struct ci13xxx *udc, u8 value)
341 hw_write(udc, OP_DEVICEADDR, DEVICEADDR_USBADR,
342 value << ffs_nr(DEVICEADDR_USBADR));
346 * hw_usb_reset: restart device after a bus reset (execute without
349 * This function returns an error code
351 static int hw_usb_reset(struct ci13xxx *udc)
353 hw_usb_set_address(udc, 0);
355 /* ESS flushes only at end?!? */
356 hw_write(udc, OP_ENDPTFLUSH, ~0, ~0);
358 /* clear setup token semaphores */
359 hw_write(udc, OP_ENDPTSETUPSTAT, 0, 0);
361 /* clear complete status */
362 hw_write(udc, OP_ENDPTCOMPLETE, 0, 0);
364 /* wait until all bits cleared */
365 while (hw_read(udc, OP_ENDPTPRIME, ~0))
366 udelay(10); /* not RTOS friendly */
368 /* reset all endpoints ? */
370 /* reset internal status and wait for further instructions
371 no need to verify the port reset status (ESS does it) */
376 /******************************************************************************
378 *****************************************************************************/
380 * _usb_addr: calculates endpoint address from direction & number
383 static inline u8 _usb_addr(struct ci13xxx_ep *ep)
385 return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
389 * _hardware_queue: configures a request at hardware level
393 * This function returns an error code
395 static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
397 struct ci13xxx *udc = mEp->udc;
400 unsigned length = mReq->req.length;
402 /* don't queue twice */
403 if (mReq->req.status == -EALREADY)
406 mReq->req.status = -EALREADY;
408 if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
409 mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
411 if (mReq->zptr == NULL)
414 memset(mReq->zptr, 0, sizeof(*mReq->zptr));
415 mReq->zptr->next = TD_TERMINATE;
416 mReq->zptr->token = TD_STATUS_ACTIVE;
417 if (!mReq->req.no_interrupt)
418 mReq->zptr->token |= TD_IOC;
420 ret = usb_gadget_map_request(&udc->gadget, &mReq->req, mEp->dir);
426 * TODO - handle requests which spawns into several TDs
428 memset(mReq->ptr, 0, sizeof(*mReq->ptr));
429 mReq->ptr->token = length << ffs_nr(TD_TOTAL_BYTES);
430 mReq->ptr->token &= TD_TOTAL_BYTES;
431 mReq->ptr->token |= TD_STATUS_ACTIVE;
433 mReq->ptr->next = mReq->zdma;
435 mReq->ptr->next = TD_TERMINATE;
436 if (!mReq->req.no_interrupt)
437 mReq->ptr->token |= TD_IOC;
439 mReq->ptr->page[0] = mReq->req.dma;
440 for (i = 1; i < 5; i++)
442 (mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
444 if (!list_empty(&mEp->qh.queue)) {
445 struct ci13xxx_req *mReqPrev;
446 int n = hw_ep_bit(mEp->num, mEp->dir);
449 mReqPrev = list_entry(mEp->qh.queue.prev,
450 struct ci13xxx_req, queue);
452 mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
454 mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
456 if (hw_read(udc, OP_ENDPTPRIME, BIT(n)))
459 hw_write(udc, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
460 tmp_stat = hw_read(udc, OP_ENDPTSTAT, BIT(n));
461 } while (!hw_read(udc, OP_USBCMD, USBCMD_ATDTW));
462 hw_write(udc, OP_USBCMD, USBCMD_ATDTW, 0);
467 /* QH configuration */
468 mEp->qh.ptr->td.next = mReq->dma; /* TERMINATE = 0 */
469 mEp->qh.ptr->td.token &= ~TD_STATUS; /* clear status */
470 mEp->qh.ptr->cap |= QH_ZLT;
472 wmb(); /* synchronize before ep prime */
474 ret = hw_ep_prime(udc, mEp->num, mEp->dir,
475 mEp->type == USB_ENDPOINT_XFER_CONTROL);
481 * _hardware_dequeue: handles a request at hardware level
485 * This function returns an error code
487 static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
489 if (mReq->req.status != -EALREADY)
492 if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
496 if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
498 dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
502 mReq->req.status = 0;
504 usb_gadget_unmap_request(&mEp->udc->gadget, &mReq->req, mEp->dir);
506 mReq->req.status = mReq->ptr->token & TD_STATUS;
507 if ((TD_STATUS_HALTED & mReq->req.status) != 0)
508 mReq->req.status = -1;
509 else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
510 mReq->req.status = -1;
511 else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
512 mReq->req.status = -1;
514 mReq->req.actual = mReq->ptr->token & TD_TOTAL_BYTES;
515 mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
516 mReq->req.actual = mReq->req.length - mReq->req.actual;
517 mReq->req.actual = mReq->req.status ? 0 : mReq->req.actual;
519 return mReq->req.actual;
523 * _ep_nuke: dequeues all endpoint requests
526 * This function returns an error code
527 * Caller must hold lock
529 static int _ep_nuke(struct ci13xxx_ep *mEp)
530 __releases(mEp->lock)
531 __acquires(mEp->lock)
536 hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
538 while (!list_empty(&mEp->qh.queue)) {
540 /* pop oldest request */
541 struct ci13xxx_req *mReq = \
542 list_entry(mEp->qh.queue.next,
543 struct ci13xxx_req, queue);
544 list_del_init(&mReq->queue);
545 mReq->req.status = -ESHUTDOWN;
547 if (mReq->req.complete != NULL) {
548 spin_unlock(mEp->lock);
549 mReq->req.complete(&mEp->ep, &mReq->req);
550 spin_lock(mEp->lock);
557 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
560 * This function returns an error code
562 static int _gadget_stop_activity(struct usb_gadget *gadget)
565 struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
568 spin_lock_irqsave(&udc->lock, flags);
569 udc->gadget.speed = USB_SPEED_UNKNOWN;
570 udc->remote_wakeup = 0;
572 spin_unlock_irqrestore(&udc->lock, flags);
574 /* flush all endpoints */
575 gadget_for_each_ep(ep, gadget) {
576 usb_ep_fifo_flush(ep);
578 usb_ep_fifo_flush(&udc->ep0out->ep);
579 usb_ep_fifo_flush(&udc->ep0in->ep);
582 udc->driver->disconnect(gadget);
584 /* make sure to disable all endpoints */
585 gadget_for_each_ep(ep, gadget) {
589 if (udc->status != NULL) {
590 usb_ep_free_request(&udc->ep0in->ep, udc->status);
597 /******************************************************************************
599 *****************************************************************************/
601 * isr_reset_handler: USB reset interrupt handler
604 * This function resets USB engine after a bus reset occurred
606 static void isr_reset_handler(struct ci13xxx *udc)
607 __releases(udc->lock)
608 __acquires(udc->lock)
612 dbg_event(0xFF, "BUS RST", 0);
614 spin_unlock(&udc->lock);
615 retval = _gadget_stop_activity(&udc->gadget);
619 retval = hw_usb_reset(udc);
623 udc->status = usb_ep_alloc_request(&udc->ep0in->ep, GFP_ATOMIC);
624 if (udc->status == NULL)
628 spin_lock(&udc->lock);
631 dev_err(udc->dev, "error: %i\n", retval);
635 * isr_get_status_complete: get_status request complete function
637 * @req: request handled
639 * Caller must release lock
641 static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
643 if (ep == NULL || req == NULL)
647 usb_ep_free_request(ep, req);
651 * isr_get_status_response: get_status request response
653 * @setup: setup request packet
655 * This function returns an error code
657 static int isr_get_status_response(struct ci13xxx *udc,
658 struct usb_ctrlrequest *setup)
659 __releases(mEp->lock)
660 __acquires(mEp->lock)
662 struct ci13xxx_ep *mEp = udc->ep0in;
663 struct usb_request *req = NULL;
664 gfp_t gfp_flags = GFP_ATOMIC;
665 int dir, num, retval;
667 if (mEp == NULL || setup == NULL)
670 spin_unlock(mEp->lock);
671 req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
672 spin_lock(mEp->lock);
676 req->complete = isr_get_status_complete;
678 req->buf = kzalloc(req->length, gfp_flags);
679 if (req->buf == NULL) {
684 if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
685 /* Assume that device is bus powered for now. */
686 *(u16 *)req->buf = udc->remote_wakeup << 1;
688 } else if ((setup->bRequestType & USB_RECIP_MASK) \
689 == USB_RECIP_ENDPOINT) {
690 dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
692 num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
693 *(u16 *)req->buf = hw_ep_get_halt(udc, num, dir);
695 /* else do nothing; reserved for future use */
697 spin_unlock(mEp->lock);
698 retval = usb_ep_queue(&mEp->ep, req, gfp_flags);
699 spin_lock(mEp->lock);
708 spin_unlock(mEp->lock);
709 usb_ep_free_request(&mEp->ep, req);
710 spin_lock(mEp->lock);
715 * isr_setup_status_complete: setup_status request complete function
717 * @req: request handled
719 * Caller must release lock. Put the port in test mode if test mode
720 * feature is selected.
723 isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
725 struct ci13xxx *udc = req->context;
729 hw_usb_set_address(udc, udc->address);
730 udc->setaddr = false;
733 spin_lock_irqsave(&udc->lock, flags);
735 hw_port_test_set(udc, udc->test_mode);
736 spin_unlock_irqrestore(&udc->lock, flags);
740 * isr_setup_status_phase: queues the status phase of a setup transation
743 * This function returns an error code
745 static int isr_setup_status_phase(struct ci13xxx *udc)
746 __releases(mEp->lock)
747 __acquires(mEp->lock)
750 struct ci13xxx_ep *mEp;
752 mEp = (udc->ep0_dir == TX) ? udc->ep0out : udc->ep0in;
753 udc->status->context = udc;
754 udc->status->complete = isr_setup_status_complete;
756 spin_unlock(mEp->lock);
757 retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
758 spin_lock(mEp->lock);
764 * isr_tr_complete_low: transaction complete low level handler
767 * This function returns an error code
768 * Caller must hold lock
770 static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
771 __releases(mEp->lock)
772 __acquires(mEp->lock)
774 struct ci13xxx_req *mReq, *mReqTemp;
775 struct ci13xxx_ep *mEpTemp = mEp;
776 int uninitialized_var(retval);
778 if (list_empty(&mEp->qh.queue))
781 list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
783 retval = _hardware_dequeue(mEp, mReq);
786 list_del_init(&mReq->queue);
787 dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
788 if (mReq->req.complete != NULL) {
789 spin_unlock(mEp->lock);
790 if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
792 mEpTemp = mEp->udc->ep0in;
793 mReq->req.complete(&mEpTemp->ep, &mReq->req);
794 spin_lock(mEp->lock);
798 if (retval == -EBUSY)
801 dbg_event(_usb_addr(mEp), "DONE", retval);
807 * isr_tr_complete_handler: transaction complete interrupt handler
808 * @udc: UDC descriptor
810 * This function handles traffic events
812 static void isr_tr_complete_handler(struct ci13xxx *udc)
813 __releases(udc->lock)
814 __acquires(udc->lock)
819 for (i = 0; i < udc->hw_ep_max; i++) {
820 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
821 int type, num, dir, err = -EINVAL;
822 struct usb_ctrlrequest req;
824 if (mEp->ep.desc == NULL)
825 continue; /* not configured */
827 if (hw_test_and_clear_complete(udc, i)) {
828 err = isr_tr_complete_low(mEp);
829 if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
830 if (err > 0) /* needs status phase */
831 err = isr_setup_status_phase(udc);
833 dbg_event(_usb_addr(mEp),
835 spin_unlock(&udc->lock);
836 if (usb_ep_set_halt(&mEp->ep))
838 "error: ep_set_halt\n");
839 spin_lock(&udc->lock);
844 if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
845 !hw_test_and_clear_setup_status(udc, i))
849 dev_warn(udc->dev, "ctrl traffic at endpoint %d\n", i);
854 * Flush data and handshake transactions of previous
857 _ep_nuke(udc->ep0out);
858 _ep_nuke(udc->ep0in);
860 /* read_setup_packet */
862 hw_test_and_set_setup_guard(udc);
863 memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
864 } while (!hw_test_and_clear_setup_guard(udc));
866 type = req.bRequestType;
868 udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
870 dbg_setup(_usb_addr(mEp), &req);
872 switch (req.bRequest) {
873 case USB_REQ_CLEAR_FEATURE:
874 if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
875 le16_to_cpu(req.wValue) ==
877 if (req.wLength != 0)
879 num = le16_to_cpu(req.wIndex);
880 dir = num & USB_ENDPOINT_DIR_MASK;
881 num &= USB_ENDPOINT_NUMBER_MASK;
883 num += udc->hw_ep_max/2;
884 if (!udc->ci13xxx_ep[num].wedge) {
885 spin_unlock(&udc->lock);
886 err = usb_ep_clear_halt(
887 &udc->ci13xxx_ep[num].ep);
888 spin_lock(&udc->lock);
892 err = isr_setup_status_phase(udc);
893 } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
894 le16_to_cpu(req.wValue) ==
895 USB_DEVICE_REMOTE_WAKEUP) {
896 if (req.wLength != 0)
898 udc->remote_wakeup = 0;
899 err = isr_setup_status_phase(udc);
904 case USB_REQ_GET_STATUS:
905 if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
906 type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
907 type != (USB_DIR_IN|USB_RECIP_INTERFACE))
909 if (le16_to_cpu(req.wLength) != 2 ||
910 le16_to_cpu(req.wValue) != 0)
912 err = isr_get_status_response(udc, &req);
914 case USB_REQ_SET_ADDRESS:
915 if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
917 if (le16_to_cpu(req.wLength) != 0 ||
918 le16_to_cpu(req.wIndex) != 0)
920 udc->address = (u8)le16_to_cpu(req.wValue);
922 err = isr_setup_status_phase(udc);
924 case USB_REQ_SET_FEATURE:
925 if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
926 le16_to_cpu(req.wValue) ==
928 if (req.wLength != 0)
930 num = le16_to_cpu(req.wIndex);
931 dir = num & USB_ENDPOINT_DIR_MASK;
932 num &= USB_ENDPOINT_NUMBER_MASK;
934 num += udc->hw_ep_max/2;
936 spin_unlock(&udc->lock);
937 err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
938 spin_lock(&udc->lock);
940 isr_setup_status_phase(udc);
941 } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
942 if (req.wLength != 0)
944 switch (le16_to_cpu(req.wValue)) {
945 case USB_DEVICE_REMOTE_WAKEUP:
946 udc->remote_wakeup = 1;
947 err = isr_setup_status_phase(udc);
949 case USB_DEVICE_TEST_MODE:
950 tmode = le16_to_cpu(req.wIndex) >> 8;
957 udc->test_mode = tmode;
958 err = isr_setup_status_phase(
973 if (req.wLength == 0) /* no data phase */
976 spin_unlock(&udc->lock);
977 err = udc->driver->setup(&udc->gadget, &req);
978 spin_lock(&udc->lock);
983 dbg_event(_usb_addr(mEp), "ERROR", err);
985 spin_unlock(&udc->lock);
986 if (usb_ep_set_halt(&mEp->ep))
987 dev_err(udc->dev, "error: ep_set_halt\n");
988 spin_lock(&udc->lock);
993 /******************************************************************************
995 *****************************************************************************/
997 * ep_enable: configure endpoint, making it usable
999 * Check usb_ep_enable() at "usb_gadget.h" for details
1001 static int ep_enable(struct usb_ep *ep,
1002 const struct usb_endpoint_descriptor *desc)
1004 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1006 unsigned long flags;
1008 if (ep == NULL || desc == NULL)
1011 spin_lock_irqsave(mEp->lock, flags);
1013 /* only internal SW should enable ctrl endpts */
1015 mEp->ep.desc = desc;
1017 if (!list_empty(&mEp->qh.queue))
1018 dev_warn(mEp->udc->dev, "enabling a non-empty endpoint!\n");
1020 mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX;
1021 mEp->num = usb_endpoint_num(desc);
1022 mEp->type = usb_endpoint_type(desc);
1024 mEp->ep.maxpacket = usb_endpoint_maxp(desc);
1026 dbg_event(_usb_addr(mEp), "ENABLE", 0);
1028 mEp->qh.ptr->cap = 0;
1030 if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
1031 mEp->qh.ptr->cap |= QH_IOS;
1032 else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
1033 mEp->qh.ptr->cap &= ~QH_MULT;
1035 mEp->qh.ptr->cap &= ~QH_ZLT;
1038 (mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
1039 mEp->qh.ptr->td.next |= TD_TERMINATE; /* needed? */
1042 * Enable endpoints in the HW other than ep0 as ep0
1046 retval |= hw_ep_enable(mEp->udc, mEp->num, mEp->dir, mEp->type);
1048 spin_unlock_irqrestore(mEp->lock, flags);
1053 * ep_disable: endpoint is no longer usable
1055 * Check usb_ep_disable() at "usb_gadget.h" for details
1057 static int ep_disable(struct usb_ep *ep)
1059 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1060 int direction, retval = 0;
1061 unsigned long flags;
1065 else if (mEp->ep.desc == NULL)
1068 spin_lock_irqsave(mEp->lock, flags);
1070 /* only internal SW should disable ctrl endpts */
1072 direction = mEp->dir;
1074 dbg_event(_usb_addr(mEp), "DISABLE", 0);
1076 retval |= _ep_nuke(mEp);
1077 retval |= hw_ep_disable(mEp->udc, mEp->num, mEp->dir);
1079 if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
1080 mEp->dir = (mEp->dir == TX) ? RX : TX;
1082 } while (mEp->dir != direction);
1084 mEp->ep.desc = NULL;
1086 spin_unlock_irqrestore(mEp->lock, flags);
1091 * ep_alloc_request: allocate a request object to use with this endpoint
1093 * Check usb_ep_alloc_request() at "usb_gadget.h" for details
1095 static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1097 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1098 struct ci13xxx_req *mReq = NULL;
1103 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
1105 INIT_LIST_HEAD(&mReq->queue);
1107 mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
1109 if (mReq->ptr == NULL) {
1115 dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
1117 return (mReq == NULL) ? NULL : &mReq->req;
1121 * ep_free_request: frees a request object
1123 * Check usb_ep_free_request() at "usb_gadget.h" for details
1125 static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
1127 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1128 struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
1129 unsigned long flags;
1131 if (ep == NULL || req == NULL) {
1133 } else if (!list_empty(&mReq->queue)) {
1134 dev_err(mEp->udc->dev, "freeing queued request\n");
1138 spin_lock_irqsave(mEp->lock, flags);
1141 dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
1144 dbg_event(_usb_addr(mEp), "FREE", 0);
1146 spin_unlock_irqrestore(mEp->lock, flags);
1150 * ep_queue: queues (submits) an I/O request to an endpoint
1152 * Check usb_ep_queue()* at usb_gadget.h" for details
1154 static int ep_queue(struct usb_ep *ep, struct usb_request *req,
1155 gfp_t __maybe_unused gfp_flags)
1157 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1158 struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
1159 struct ci13xxx *udc = mEp->udc;
1161 unsigned long flags;
1163 if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
1166 spin_lock_irqsave(mEp->lock, flags);
1168 if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
1170 mEp = (udc->ep0_dir == RX) ?
1171 udc->ep0out : udc->ep0in;
1172 if (!list_empty(&mEp->qh.queue)) {
1174 retval = -EOVERFLOW;
1175 dev_warn(mEp->udc->dev, "endpoint ctrl %X nuked\n",
1180 /* first nuke then test link, e.g. previous status has not sent */
1181 if (!list_empty(&mReq->queue)) {
1183 dev_err(mEp->udc->dev, "request already in queue\n");
1187 if (req->length > 4 * CI13XXX_PAGE_SIZE) {
1188 req->length = 4 * CI13XXX_PAGE_SIZE;
1190 dev_warn(mEp->udc->dev, "request length truncated\n");
1193 dbg_queue(_usb_addr(mEp), req, retval);
1196 mReq->req.status = -EINPROGRESS;
1197 mReq->req.actual = 0;
1199 retval = _hardware_enqueue(mEp, mReq);
1201 if (retval == -EALREADY) {
1202 dbg_event(_usb_addr(mEp), "QUEUE", retval);
1206 list_add_tail(&mReq->queue, &mEp->qh.queue);
1209 spin_unlock_irqrestore(mEp->lock, flags);
1214 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
1216 * Check usb_ep_dequeue() at "usb_gadget.h" for details
1218 static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1220 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1221 struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
1222 unsigned long flags;
1224 if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
1225 mEp->ep.desc == NULL || list_empty(&mReq->queue) ||
1226 list_empty(&mEp->qh.queue))
1229 spin_lock_irqsave(mEp->lock, flags);
1231 dbg_event(_usb_addr(mEp), "DEQUEUE", 0);
1233 hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
1236 list_del_init(&mReq->queue);
1238 usb_gadget_unmap_request(&mEp->udc->gadget, req, mEp->dir);
1240 req->status = -ECONNRESET;
1242 if (mReq->req.complete != NULL) {
1243 spin_unlock(mEp->lock);
1244 mReq->req.complete(&mEp->ep, &mReq->req);
1245 spin_lock(mEp->lock);
1248 spin_unlock_irqrestore(mEp->lock, flags);
1253 * ep_set_halt: sets the endpoint halt feature
1255 * Check usb_ep_set_halt() at "usb_gadget.h" for details
1257 static int ep_set_halt(struct usb_ep *ep, int value)
1259 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1260 int direction, retval = 0;
1261 unsigned long flags;
1263 if (ep == NULL || mEp->ep.desc == NULL)
1266 spin_lock_irqsave(mEp->lock, flags);
1269 /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
1270 if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
1271 !list_empty(&mEp->qh.queue)) {
1272 spin_unlock_irqrestore(mEp->lock, flags);
1277 direction = mEp->dir;
1279 dbg_event(_usb_addr(mEp), "HALT", value);
1280 retval |= hw_ep_set_halt(mEp->udc, mEp->num, mEp->dir, value);
1285 if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
1286 mEp->dir = (mEp->dir == TX) ? RX : TX;
1288 } while (mEp->dir != direction);
1290 spin_unlock_irqrestore(mEp->lock, flags);
1295 * ep_set_wedge: sets the halt feature and ignores clear requests
1297 * Check usb_ep_set_wedge() at "usb_gadget.h" for details
1299 static int ep_set_wedge(struct usb_ep *ep)
1301 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1302 unsigned long flags;
1304 if (ep == NULL || mEp->ep.desc == NULL)
1307 spin_lock_irqsave(mEp->lock, flags);
1309 dbg_event(_usb_addr(mEp), "WEDGE", 0);
1312 spin_unlock_irqrestore(mEp->lock, flags);
1314 return usb_ep_set_halt(ep);
1318 * ep_fifo_flush: flushes contents of a fifo
1320 * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
1322 static void ep_fifo_flush(struct usb_ep *ep)
1324 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1325 unsigned long flags;
1328 dev_err(mEp->udc->dev, "%02X: -EINVAL\n", _usb_addr(mEp));
1332 spin_lock_irqsave(mEp->lock, flags);
1334 dbg_event(_usb_addr(mEp), "FFLUSH", 0);
1335 hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
1337 spin_unlock_irqrestore(mEp->lock, flags);
1341 * Endpoint-specific part of the API to the USB controller hardware
1342 * Check "usb_gadget.h" for details
1344 static const struct usb_ep_ops usb_ep_ops = {
1345 .enable = ep_enable,
1346 .disable = ep_disable,
1347 .alloc_request = ep_alloc_request,
1348 .free_request = ep_free_request,
1350 .dequeue = ep_dequeue,
1351 .set_halt = ep_set_halt,
1352 .set_wedge = ep_set_wedge,
1353 .fifo_flush = ep_fifo_flush,
1356 /******************************************************************************
1358 *****************************************************************************/
1359 static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
1361 struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
1362 unsigned long flags;
1363 int gadget_ready = 0;
1365 if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
1368 spin_lock_irqsave(&udc->lock, flags);
1369 udc->vbus_active = is_active;
1372 spin_unlock_irqrestore(&udc->lock, flags);
1376 pm_runtime_get_sync(&_gadget->dev);
1377 hw_device_reset(udc, USBMODE_CM_DC);
1378 hw_device_state(udc, udc->ep0out->qh.dma);
1380 hw_device_state(udc, 0);
1381 if (udc->udc_driver->notify_event)
1382 udc->udc_driver->notify_event(udc,
1383 CI13XXX_CONTROLLER_STOPPED_EVENT);
1384 _gadget_stop_activity(&udc->gadget);
1385 pm_runtime_put_sync(&_gadget->dev);
1392 static int ci13xxx_wakeup(struct usb_gadget *_gadget)
1394 struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
1395 unsigned long flags;
1398 spin_lock_irqsave(&udc->lock, flags);
1399 if (!udc->remote_wakeup) {
1403 if (!hw_read(udc, OP_PORTSC, PORTSC_SUSP)) {
1407 hw_write(udc, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
1409 spin_unlock_irqrestore(&udc->lock, flags);
1413 static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1415 struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
1417 if (udc->transceiver)
1418 return usb_phy_set_power(udc->transceiver, mA);
/* Forward declarations: ->udc_start/->udc_stop are defined later in
 * this file but must be referenced by the ops table below. */
1422 static int ci13xxx_start(struct usb_gadget *gadget,
1423 struct usb_gadget_driver *driver);
1424 static int ci13xxx_stop(struct usb_gadget *gadget,
1425 struct usb_gadget_driver *driver);
1427 * Device operations part of the API to the USB controller hardware,
1428 * which don't involve endpoints (or i/o)
1429 * Check "usb_gadget.h" for details
/* NOTE(review): the closing "};" of this initializer is missing from
 * this excerpt. */
1431 static const struct usb_gadget_ops usb_gadget_ops = {
1432 .vbus_session = ci13xxx_vbus_session,
1433 .wakeup = ci13xxx_wakeup,
1434 .vbus_draw = ci13xxx_vbus_draw,
1435 .udc_start = ci13xxx_start,
1436 .udc_stop = ci13xxx_stop,
/**
 * init_eps: initialize the driver's endpoint bookkeeping
 * @udc: the controller
 *
 * Walks all hardware endpoints -- RX (out) half first, then TX (in)
 * half, as laid out in udc->ci13xxx_ep[] -- naming each one "ep<N>in"
 * or "ep<N>out", wiring up the shared lock, device and td pool,
 * installing usb_ep_ops, and allocating + zeroing one queue head per
 * endpoint from the qh dma pool.  Non-ep0 endpoints are appended to the
 * gadget's ep_list (the ep0 shorthand setup around original lines
 * 1471-1480 is not visible in this excerpt).
 * Returns 0 on success; the error path for a failed qh allocation is
 * also missing from this excerpt -- presumably it unwinds via retval.
 */
1439 static int init_eps(struct ci13xxx *udc)
1441 int retval = 0, i, j;
1443 for (i = 0; i < udc->hw_ep_max/2; i++)
1444 for (j = RX; j <= TX; j++) {
/* Linear index: RX endpoints occupy the first half of the array,
 * TX endpoints the second half. */
1445 int k = i + j * udc->hw_ep_max/2;
1446 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
1448 scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
1449 (j == TX) ? "in" : "out");
1452 mEp->lock = &udc->lock;
1453 mEp->device = &udc->gadget.dev;
1454 mEp->td_pool = udc->td_pool;
1456 mEp->ep.name = mEp->name;
1457 mEp->ep.ops = &usb_ep_ops;
/* Conservative default; ep_enable() will set the real max packet. */
1458 mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
1460 INIT_LIST_HEAD(&mEp->qh.queue);
1461 mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
1463 if (mEp->qh.ptr == NULL)
1466 memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
1469 * set up shorthands for ep0 out and in endpoints,
1470 * don't add to gadget's ep_list
1481 list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
1488 * ci13xxx_start: register a gadget driver
1489 * @gadget: our gadget
1490 * @driver: the driver being registered
1492 * Interrupts are enabled here.
/**
 * ci13xxx_start: register a gadget driver (usb_gadget_ops ->udc_start)
 * @gadget: our gadget
 * @driver: the function driver being bound
 *
 * Enables both ep0 halves with the static control-endpoint descriptors,
 * records the driver, and -- unless the controller pulls up only on
 * VBUS and VBUS is absent -- starts the hardware with ep0-out's queue
 * head.  NOTE(review): several lines (early-error returns, the final
 * return, closing braces) are missing from this excerpt.
 */
1494 static int ci13xxx_start(struct usb_gadget *gadget,
1495 struct usb_gadget_driver *driver)
1497 struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
1498 unsigned long flags;
1499 int retval = -ENOMEM;
/* A disconnect callback is mandatory for this UDC. */
1501 if (driver->disconnect == NULL)
/* Enable ep0-out then ep0-in with the fixed control descriptors. */
1505 udc->ep0out->ep.desc = &ctrl_endpt_out_desc;
1506 retval = usb_ep_enable(&udc->ep0out->ep);
1510 udc->ep0in->ep.desc = &ctrl_endpt_in_desc;
1511 retval = usb_ep_enable(&udc->ep0in->ep);
1514 spin_lock_irqsave(&udc->lock, flags);
1516 udc->driver = driver;
1517 pm_runtime_get_sync(&udc->gadget.dev);
1518 if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
1519 if (udc->vbus_active) {
/* With shared registers the controller must be reset into
 * device mode before it can be started. */
1520 if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
1521 hw_device_reset(udc, USBMODE_CM_DC);
/* No VBUS yet: drop the PM reference and wait for
 * vbus_session() to start the hardware later. */
1523 pm_runtime_put_sync(&udc->gadget.dev);
/* Start the controller; the DMA address of ep0-out's qh seeds the
 * hardware endpoint list. */
1528 retval = hw_device_state(udc, udc->ep0out->qh.dma);
1530 pm_runtime_put_sync(&udc->gadget.dev);
1533 spin_unlock_irqrestore(&udc->lock, flags);
1538 * ci13xxx_stop: unregister a gadget driver
/**
 * ci13xxx_stop: unregister a gadget driver (usb_gadget_ops ->udc_stop)
 * @gadget: our gadget
 * @driver: the function driver being unbound
 *
 * Stops the hardware (unless pull-up is VBUS-gated and the condition on
 * the missing line 1549 applies), notifies the platform glue, flushes
 * pending activity with the lock dropped, and releases the PM
 * reference.  NOTE(review): the second operand of the condition at
 * original line 1548 and the final return are missing from this
 * excerpt.
 */
1540 static int ci13xxx_stop(struct usb_gadget *gadget,
1541 struct usb_gadget_driver *driver)
1543 struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
1544 unsigned long flags;
1546 spin_lock_irqsave(&udc->lock, flags);
1548 if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
1550 hw_device_state(udc, 0);
1551 if (udc->udc_driver->notify_event)
1552 udc->udc_driver->notify_event(udc,
1553 CI13XXX_CONTROLLER_STOPPED_EVENT);
/* _gadget_stop_activity() may call into the gadget driver, so the
 * spinlock must be released around it. */
1555 spin_unlock_irqrestore(&udc->lock, flags);
1556 _gadget_stop_activity(&udc->gadget);
1557 spin_lock_irqsave(&udc->lock, flags);
1558 pm_runtime_put(&udc->gadget.dev);
1561 spin_unlock_irqrestore(&udc->lock, flags);
1566 /******************************************************************************
1568 *****************************************************************************/
1570 * udc_irq: udc interrupt handler
1572 * This function returns IRQ_HANDLED if the IRQ has been handled
1573 * It locks access to registers
/**
 * udc_irq: device-role interrupt handler
 * @udc: the controller
 *
 * Runs with the udc lock held; temporarily drops it around calls into
 * the gadget driver's resume/suspend callbacks.  Reads and clears the
 * active interrupt bits, then dispatches in fixed priority order:
 * bus reset (URI), port change (PCI), transfer completion, suspend
 * (SLI).  Returns IRQ_HANDLED when an interrupt was serviced.
 * NOTE(review): variable declarations, several closing braces and the
 * IRQ_NONE paths are missing from this excerpt.
 */
1575 static irqreturn_t udc_irq(struct ci13xxx *udc)
1583 spin_lock(&udc->lock);
/* With shared registers, bail out unless the controller is currently
 * in device mode -- the IRQ belongs to the other role otherwise. */
1585 if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
1586 if (hw_read(udc, OP_USBMODE, USBMODE_CM) !=
1588 spin_unlock(&udc->lock);
/* Read-and-clear the pending interrupt bits. */
1592 intr = hw_test_and_clear_intr_active(udc);
1593 dbg_interrupt(intr);
1596 /* order defines priority - do NOT change it */
1597 if (USBi_URI & intr)
1598 isr_reset_handler(udc);
/* Port change: latch the negotiated speed and, if we were suspended,
 * deliver ->resume with the lock dropped. */
1600 if (USBi_PCI & intr) {
1601 udc->gadget.speed = hw_port_is_high_speed(udc) ?
1602 USB_SPEED_HIGH : USB_SPEED_FULL;
1603 if (udc->suspended && udc->driver->resume) {
1604 spin_unlock(&udc->lock);
1605 udc->driver->resume(&udc->gadget);
1606 spin_lock(&udc->lock);
1612 isr_tr_complete_handler(udc);
/* Suspend: deliver ->suspend only after enumeration started. */
1614 if (USBi_SLI & intr) {
1615 if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
1616 udc->driver->suspend) {
1618 spin_unlock(&udc->lock);
1619 udc->driver->suspend(&udc->gadget);
1620 spin_lock(&udc->lock);
1623 retval = IRQ_HANDLED;
1627 spin_unlock(&udc->lock);
/* Empty ->release hook for the gadget's struct device: the device is
 * embedded in struct ci13xxx, so there is nothing to free here, but the
 * device core requires a release callback. */
1633 * udc_release: driver release function
1636 * Currently does nothing
1638 static void udc_release(struct device *dev)
1643 * udc_start: initialize gadget role
1644 * @udc: chipidea controller
/**
 * udc_start: initialize the gadget role of the controller
 * @udc: the controller
 *
 * Sets up the struct usb_gadget, allocates the qh/td DMA pools,
 * initializes all endpoints, optionally acquires a transceiver,
 * resets the hardware (when registers are not shared), registers the
 * gadget device, its debug files and the UDC, and enables runtime PM.
 * On failure, unwinds through the goto labels visible near the end.
 * NOTE(review): many lines (error-branch conditionals, goto labels,
 * returns, closing braces) are missing from this excerpt; the comments
 * below describe only the visible statements.
 */
1646 static int udc_start(struct ci13xxx *udc)
1648 struct device *dev = udc->dev;
1654 spin_lock_init(&udc->lock);
/* Populate the gadget the UDC core will see. */
1656 udc->gadget.ops = &usb_gadget_ops;
1657 udc->gadget.speed = USB_SPEED_UNKNOWN;
1658 udc->gadget.max_speed = USB_SPEED_HIGH;
1659 udc->gadget.is_otg = 0;
1660 udc->gadget.name = udc->udc_driver->name;
1662 INIT_LIST_HEAD(&udc->gadget.ep_list);
/* The gadget's device inherits DMA masks from the parent device. */
1664 dev_set_name(&udc->gadget.dev, "gadget");
1665 udc->gadget.dev.dma_mask = dev->dma_mask;
1666 udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
1667 udc->gadget.dev.parent = dev;
1668 udc->gadget.dev.release = udc_release;
1670 /* alloc resources */
1671 udc->qh_pool = dma_pool_create("ci13xxx_qh", dev,
1672 sizeof(struct ci13xxx_qh),
1673 64, CI13XXX_PAGE_SIZE);
1674 if (udc->qh_pool == NULL)
1677 udc->td_pool = dma_pool_create("ci13xxx_td", dev,
1678 sizeof(struct ci13xxx_td),
1679 64, CI13XXX_PAGE_SIZE);
1680 if (udc->td_pool == NULL) {
1685 retval = init_eps(udc);
/* Control requests are answered on ep0-in. */
1689 udc->gadget.ep0 = &udc->ep0in->ep;
1691 udc->transceiver = usb_get_transceiver();
/* Some platforms cannot work without an external transceiver. */
1693 if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
1694 if (udc->transceiver == NULL) {
/* With exclusive register access we can reset into device mode now;
 * shared-register platforms defer this to vbus_session()/start(). */
1700 if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
1701 retval = hw_device_reset(udc, USBMODE_CM_DC);
1703 goto put_transceiver;
1706 retval = device_register(&udc->gadget.dev);
1708 put_device(&udc->gadget.dev);
1709 goto put_transceiver;
1712 retval = dbg_create_files(&udc->gadget.dev);
/* Hand ourselves to the OTG transceiver as the peripheral. */
1716 if (udc->transceiver) {
1717 retval = otg_set_peripheral(udc->transceiver->otg,
1723 retval = usb_add_gadget_udc(dev, &udc->gadget);
1727 pm_runtime_no_callbacks(&udc->gadget.dev);
1728 pm_runtime_enable(&udc->gadget.dev);
/* ---- error unwind (labels themselves are missing from this excerpt);
 * each step undoes one of the acquisitions above, in reverse order. */
1733 if (udc->transceiver) {
1734 otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
1735 usb_put_transceiver(udc->transceiver);
1738 dev_err(dev, "error = %i\n", retval);
1740 dbg_remove_files(&udc->gadget.dev);
1742 device_unregister(&udc->gadget.dev);
1744 if (udc->transceiver)
1745 usb_put_transceiver(udc->transceiver);
1747 dma_pool_destroy(udc->td_pool);
1749 dma_pool_destroy(udc->qh_pool);
1754 * udc_remove: parent remove must call this to remove UDC
1756 * No interrupts active, the IRQ has been released
/**
 * udc_stop: tear down the gadget role
 * @udc: the controller
 *
 * Called by the parent's remove path with no interrupts active and the
 * IRQ already released.  Unregisters the UDC, frees every endpoint's
 * queue head back to the qh pool, destroys both DMA pools, detaches
 * from the transceiver, removes debug files and unregisters the gadget
 * device.  NOTE(review): the early-return guard and some closing braces
 * are missing from this excerpt.
 */
1758 static void udc_stop(struct ci13xxx *udc)
1765 usb_del_gadget_udc(&udc->gadget);
/* Return every endpoint's queue head to the pool before destroying it. */
1767 for (i = 0; i < udc->hw_ep_max; i++) {
1768 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
1770 dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
1773 dma_pool_destroy(udc->td_pool);
1774 dma_pool_destroy(udc->qh_pool);
1776 if (udc->transceiver) {
1777 otg_set_peripheral(udc->transceiver->otg, NULL);
1778 usb_put_transceiver(udc->transceiver);
1780 dbg_remove_files(&udc->gadget.dev);
1781 device_unregister(&udc->gadget.dev);
1782 /* my kobject is dynamic, I swear! */
1783 memset(&udc->gadget, 0, sizeof(udc->gadget));
1787 * ci_hdrc_gadget_init - initialize device related bits
1788 * ci: the controller
1790 * This function enables the gadget role, if the device is "device capable".
1792 int ci_hdrc_gadget_init(struct ci13xxx *ci)
1794 struct ci_role_driver *rdrv;
1796 if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
1799 rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
1803 rdrv->start = udc_start;
1804 rdrv->stop = udc_stop;
1805 rdrv->irq = udc_irq;
1806 rdrv->name = "gadget";
1807 ci->roles[CI_ROLE_GADGET] = rdrv;