1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 * ========================================================================== */
36 * This file implements PCD Core. All code in this file is portable and doesn't
37 * use any OS specific functions.
38 * PCD Core provides Interface, defined in <code><dwc_otg_pcd_if.h></code>
39 * header file, which can be used to implement OS specific PCD interface.
41 * An important function of the PCD is managing interrupts generated
42 * by the DWC_otg controller. The implementation of the DWC_otg device
43 * mode interrupt service routines is in dwc_otg_pcd_intr.c.
45 * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
46 * @todo Does it work when the request size is greater than DEPTSIZ
51 #include "dwc_otg_pcd.h"
54 #include "dwc_otg_cfi.h"
56 extern int init_cfi(cfiobject_t *cfiobj);
60 * Choose endpoint from ep arrays using usb_ep structure.
/* Maps a gadget-layer handle (the usb_ep's private pointer) back to the PCD's
 * endpoint bookkeeping: EP0 is tested first, then the IN and OUT endpoint
 * arrays are scanned for a matching ->priv. The elided tail presumably
 * returns NULL when nothing matches — TODO confirm against upstream.
 * NOTE(review): interior lines (opening brace, loop index declaration,
 * final return) are missing from this extraction. */
62 static dwc_otg_pcd_ep_t *get_ep_from_handle(dwc_otg_pcd_t *pcd, void *handle)
65 if (pcd->ep0.priv == handle) {
/* EP index 0 of in_ep[]/out_ep[] corresponds to hardware EP1, hence the
 * MAX_EPS_CHANNELS - 1 bound. */
68 for (i = 0; i < MAX_EPS_CHANNELS - 1; i++) {
69 if (pcd->in_ep[i].priv == handle)
70 return &pcd->in_ep[i];
71 if (pcd->out_ep[i].priv == handle)
72 return &pcd->out_ep[i];
79 * This function completes a request. It calls the request callback.
/* Removes the request from the EP queue and hands the result to the gadget
 * via fops->complete(). ep->stopped is saved before the callback and restored
 * after it so the completion handler cannot permanently flip the EP's stopped
 * state. request_pending is decremented only when positive (defensive). */
81 void dwc_otg_request_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req,
84 unsigned stopped = ep->stopped;
86 DWC_DEBUGPL(DBG_PCDV, "%s(ep %p req %p)\n", __func__, ep, req);
87 DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
89 /* don't modify queue heads during completion callback */
91 /* spin_unlock/spin_lock now done in fops->complete() */
92 ep->pcd->fops->complete(ep->pcd, ep->priv, req->priv, status,
95 if (ep->pcd->request_pending > 0) {
96 --ep->pcd->request_pending;
/* Restore the EP stopped flag saved before the completion callback ran. */
99 ep->stopped = stopped;
104 * This function terminates all the requests in the EP request queue.
/* Drains the EP queue, completing every outstanding request with
 * -DWC_E_SHUTDOWN. dwc_otg_request_done() removes each request from the
 * queue, so the while loop terminates when the queue is empty. */
106 void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *ep)
108 dwc_otg_pcd_request_t *req;
112 /* called with irqs blocked?? */
113 while (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
114 req = DWC_CIRCLEQ_FIRST(&ep->queue);
115 dwc_otg_request_done(ep, req, -DWC_E_SHUTDOWN);
/* Binds the gadget function-ops table to the PCD. Body elided in this
 * extraction; presumably stores fops into pcd — TODO confirm upstream. */
119 void dwc_otg_pcd_start(dwc_otg_pcd_t *pcd,
120 const struct dwc_otg_pcd_function_ops *fops)
126 * PCD Callback function for initializing the PCD when switching to
129 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
/* CIL "start" callback, invoked on switch to device mode. Re-points the
 * core_if lock at the PCD lock so CIL and PCD serialize on the same lock.
 * The core-device init call is deliberately commented out here. */
131 static int32_t dwc_otg_pcd_start_cb(void *p)
133 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
134 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
137 * Initialized the Core for Device mode.
139 if (dwc_otg_is_device_mode(core_if)) {
140 /* dwc_otg_core_dev_init(core_if); */
141 /* Set core_if's lock pointer to the pcd->lock */
142 core_if->lock = pcd->lock;
147 /** CFI-specific buffer allocation function for EP */
/* Wrapper that resolves the gadget ep handle and delegates buffer allocation
 * to the CFI object's ep_alloc_buf op.
 * NOTE(review): the function returns uint8_t * but the error path returns
 * the negative integer -DWC_E_INVALID cast to a pointer — callers cannot
 * distinguish this from a valid address without a cast check. Should
 * probably return NULL on failure; confirm against callers. */
149 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t *pcd, void *pep, dwc_dma_t *addr,
150 size_t buflen, int flags)
152 dwc_otg_pcd_ep_t *ep;
153 ep = get_ep_from_handle(pcd, pep);
155 DWC_WARN("bad ep\n");
156 return -DWC_E_INVALID;
159 return pcd->cfi->ops.ep_alloc_buf(pcd->cfi, pcd, ep, addr, buflen,
/* Prototype-only variant — presumably the #else branch when CFI support is
 * compiled out (preprocessor guards elided in this extraction). */
163 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t *pcd, void *pep, dwc_dma_t *addr,
164 size_t buflen, int flags);
168 * PCD Callback function for notifying the PCD when resuming from
171 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
/* CIL "resume_wakeup" callback: forwards resume to the gadget (if it
 * registered a handler) and cancels any running SRP timeout timer, except
 * in the FS-PHY + I2C configuration where the timer is left alone. */
173 static int32_t dwc_otg_pcd_resume_cb(void *p)
175 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
177 if (pcd->fops->resume) {
178 pcd->fops->resume(pcd);
181 /* Stop the SRP timeout timer. */
182 if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS)
183 || (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
184 if (GET_CORE_IF(pcd)->srp_timer_started) {
185 GET_CORE_IF(pcd)->srp_timer_started = 0;
186 DWC_TIMER_CANCEL(GET_CORE_IF(pcd)->srp_timer);
193 * PCD Callback function for notifying the PCD device is suspended.
195 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
/* CIL "suspend" callback. The PCD lock is dropped around the gadget's
 * suspend handler so the handler may take its own locks / queue requests
 * without deadlocking against the PCD lock held by the caller. */
197 static int32_t dwc_otg_pcd_suspend_cb(void *p)
199 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
201 if (pcd->fops->suspend) {
202 DWC_SPINUNLOCK(pcd->lock);
203 pcd->fops->suspend(pcd);
204 DWC_SPINLOCK(pcd->lock);
211 * PCD Callback function for stopping the PCD when switching to Host
214 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
/* CIL "stop" callback: delegates to dwc_otg_pcd_stop(). The extern
 * declaration is function-local here (unusual style; a header declaration
 * would be cleaner, but behavior is identical). */
216 static int32_t dwc_otg_pcd_stop_cb(void *p)
218 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
219 extern void dwc_otg_pcd_stop(dwc_otg_pcd_t *_pcd);
221 dwc_otg_pcd_stop(pcd);
226 * PCD Callback structure for handling mode switching.
/* Registered with the CIL via dwc_otg_cil_register_pcd_callbacks() in
 * dwc_otg_pcd_init(); .p receives the pcd pointer at registration time. */
228 static dwc_otg_cil_callbacks_t pcd_callbacks = {
229 .start = dwc_otg_pcd_start_cb,
230 .stop = dwc_otg_pcd_stop_cb,
231 .suspend = dwc_otg_pcd_suspend_cb,
232 .resume_wakeup = dwc_otg_pcd_resume_cb,
233 .p = 0, /* Set at registration */
237 * This function allocates a DMA Descriptor chain for the Endpoint
238 * buffer to be used for a transfer to/from the specified endpoint.
/* Atomic (non-sleeping) DMA-coherent allocation of `count` device DMA
 * descriptors; the bus address is returned through dma_desc_addr. Returns
 * NULL on allocation failure (per DWC_DMA_ALLOC_ATOMIC semantics —
 * TODO confirm in dwc_os.h). */
240 dwc_otg_dev_dma_desc_t *dwc_otg_ep_alloc_desc_chain(dwc_dma_t *dma_desc_addr,
243 return DWC_DMA_ALLOC_ATOMIC(count * sizeof(dwc_otg_dev_dma_desc_t),
248 * This function frees a DMA Descriptor chain that was allocated by ep_alloc_desc.
/* NOTE(review): dma_desc_addr is declared uint32_t while the allocator uses
 * dwc_dma_t — on platforms where dwc_dma_t is 64-bit this truncates the bus
 * address passed to DWC_DMA_FREE. Confirm dwc_dma_t width for this target. */
250 void dwc_otg_ep_free_desc_chain(dwc_otg_dev_dma_desc_t *desc_addr,
251 uint32_t dma_desc_addr, uint32_t count)
253 DWC_DMA_FREE(count * sizeof(dwc_otg_dev_dma_desc_t), desc_addr,
260 * This function initializes a descriptor chain for Isochronous transfer
262 * @param core_if Programming view of DWC_otg controller.
263 * @param dwc_ep The EP to start the transfer on.
/* Builds a double-buffered descriptor-DMA chain for an isochronous EP
 * (desc_cnt descriptors per buffer, 2 buffers), programs DOEPDMA/DIEPDMA
 * with the chain's bus address, and enables the endpoint. The OUT and IN
 * paths are handled by the two big branches below. */
266 void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t *core_if,
270 dsts_data_t dsts = {.d32 = 0 };
271 depctl_data_t depctl = {.d32 = 0 };
272 volatile uint32_t *addr;
/* One descriptor per bInterval across the buffer processing interval. */
277 dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval;
280 dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
283 /** Allocate descriptors for double buffering */
284 dwc_ep->iso_desc_addr =
285 dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,
286 dwc_ep->desc_cnt * 2);
/* NOTE(review): this allocation-failure check appears wrong on two counts —
 * it tests dwc_ep->desc_addr (the non-ISO chain) rather than the
 * iso_desc_addr just assigned, and the polarity looks inverted (a failure
 * check should be `if (!dwc_ep->iso_desc_addr)`). As written the warning
 * fires whenever the unrelated desc_addr is non-NULL. TODO confirm against
 * the upstream dwc_otg driver. */
287 if (dwc_ep->desc_addr) {
288 DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__);
292 dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
/* ---- OUT endpoint: build RX descriptors for both buffers ---- */
295 if (dwc_ep->is_in == 0) {
296 dev_dma_desc_sts_t sts = {.d32 = 0 };
297 dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
299 uint32_t data_per_desc;
300 dwc_otg_dev_out_ep_regs_t *out_regs =
301 core_if->dev_if->out_ep_regs[dwc_ep->num];
304 addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
305 dma_ad = (dma_addr_t) DWC_READ_REG32(&(out_regs->doepdma));
307 /** Buffer 0 descriptors setup */
308 dma_ad = dwc_ep->dma_addr0;
/* Common status template: host-ready, no short packet, no IOC yet. */
310 sts.b_iso_out.bs = BS_HOST_READY;
311 sts.b_iso_out.rxsts = 0;
313 sts.b_iso_out.sp = 0;
314 sts.b_iso_out.ioc = 0;
315 sts.b_iso_out.pid = 0;
316 sts.b_iso_out.framenum = 0;
/* All frames except the last: pkt_per_frm descriptors per frame. */
319 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
320 i += dwc_ep->pkt_per_frm) {
322 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
323 uint32_t len = (j + 1)*dwc_ep->maxpacket;
324 if (len > dwc_ep->data_per_frame)
326 dwc_ep->data_per_frame -
/* Round each descriptor's byte count up to a 4-byte (DWORD) boundary. */
329 data_per_desc = dwc_ep->maxpacket;
330 len = data_per_desc % 4;
332 data_per_desc += 4 - len;
334 sts.b_iso_out.rxbytes = data_per_desc;
335 dma_desc->buf = dma_ad;
336 dma_desc->status.d32 = sts.d32;
338 offset += data_per_desc;
340 dma_ad += data_per_desc;
/* Last frame of buffer 0: all but the final packet... */
344 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
345 uint32_t len = (j + 1)*dwc_ep->maxpacket;
346 if (len > dwc_ep->data_per_frame)
348 dwc_ep->data_per_frame -
351 data_per_desc = dwc_ep->maxpacket;
352 len = data_per_desc % 4;
354 data_per_desc += 4 - len;
355 sts.b_iso_out.rxbytes = data_per_desc;
356 dma_desc->buf = dma_ad;
357 dma_desc->status.d32 = sts.d32;
359 offset += data_per_desc;
361 dma_ad += data_per_desc;
/* ...and the final packet of buffer 0 raises IOC so the core interrupts
 * when the buffer completes. */
364 sts.b_iso_out.ioc = 1;
365 len = (j + 1)*dwc_ep->maxpacket;
366 if (len > dwc_ep->data_per_frame)
368 dwc_ep->data_per_frame - j*dwc_ep->maxpacket;
370 data_per_desc = dwc_ep->maxpacket;
371 len = data_per_desc % 4;
373 data_per_desc += 4 - len;
374 sts.b_iso_out.rxbytes = data_per_desc;
376 dma_desc->buf = dma_ad;
377 dma_desc->status.d32 = sts.d32;
380 /** Buffer 1 descriptors setup */
381 sts.b_iso_out.ioc = 0;
382 dma_ad = dwc_ep->dma_addr1;
/* Same layout as buffer 0, targeting the second DMA buffer. */
385 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
386 i += dwc_ep->pkt_per_frm) {
387 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
388 uint32_t len = (j + 1)*dwc_ep->maxpacket;
389 if (len > dwc_ep->data_per_frame)
391 dwc_ep->data_per_frame -
394 data_per_desc = dwc_ep->maxpacket;
395 len = data_per_desc % 4;
397 data_per_desc += 4 - len;
400 sts.b_iso_out.rxbytes = data_per_desc;
401 dma_desc->buf = dma_ad;
402 dma_desc->status.d32 = sts.d32;
404 offset += data_per_desc;
406 dma_ad += data_per_desc;
409 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
411 ((j + 1)*dwc_ep->maxpacket >
412 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
413 j*dwc_ep->maxpacket : dwc_ep->maxpacket;
415 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
416 sts.b_iso_out.rxbytes = data_per_desc;
417 dma_desc->buf = dma_ad;
418 dma_desc->status.d32 = sts.d32;
420 offset += data_per_desc;
422 dma_ad += data_per_desc;
/* Final descriptor of buffer 1: IOC marks end of the second buffer. */
425 sts.b_iso_out.ioc = 1;
428 ((j + 1)*dwc_ep->maxpacket >
429 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
430 j*dwc_ep->maxpacket : dwc_ep->maxpacket;
432 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
433 sts.b_iso_out.rxbytes = data_per_desc;
435 dma_desc->buf = dma_ad;
436 dma_desc->status.d32 = sts.d32;
438 dwc_ep->next_frame = 0;
440 /** Write dma_ad into DOEPDMA register */
441 DWC_WRITE_REG32(&(out_regs->doepdma),
442 (uint32_t) dwc_ep->iso_dma_desc_addr);
/* ---- IN endpoint: one TX descriptor per frame, framenum advancing by
 * bInterval ---- */
447 dev_dma_desc_sts_t sts = {.d32 = 0 };
448 dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
450 dwc_otg_dev_in_ep_regs_t *in_regs =
451 core_if->dev_if->in_ep_regs[dwc_ep->num];
452 unsigned int frmnumber;
453 fifosize_data_t txfifosize, rxfifosize;
456 DWC_READ_REG32(&core_if->dev_if->
457 in_ep_regs[dwc_ep->num]->dtxfsts);
459 DWC_READ_REG32(&core_if->core_global_regs->grxfsiz);
461 addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
463 dma_ad = dwc_ep->dma_addr0;
466 DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
/* Template status word: short-packet flag set when the frame payload is
 * not a multiple of maxpacket; PID carries packets-per-frame. */
468 sts.b_iso_in.bs = BS_HOST_READY;
469 sts.b_iso_in.txsts = 0;
471 (dwc_ep->data_per_frame % dwc_ep->maxpacket) ? 1 : 0;
472 sts.b_iso_in.ioc = 0;
473 sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
475 frmnumber = dwc_ep->next_frame;
477 sts.b_iso_in.framenum = frmnumber;
478 sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
481 /** Buffer 0 descriptors setup */
482 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
483 dma_desc->buf = dma_ad;
484 dma_desc->status.d32 = sts.d32;
487 dma_ad += dwc_ep->data_per_frame;
488 sts.b_iso_in.framenum += dwc_ep->bInterval;
/* Last descriptor of buffer 0 interrupts on completion. */
491 sts.b_iso_in.ioc = 1;
492 dma_desc->buf = dma_ad;
493 dma_desc->status.d32 = sts.d32;
496 /** Buffer 1 descriptors setup */
497 sts.b_iso_in.ioc = 0;
498 dma_ad = dwc_ep->dma_addr1;
500 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
501 i += dwc_ep->pkt_per_frm) {
502 dma_desc->buf = dma_ad;
503 dma_desc->status.d32 = sts.d32;
506 dma_ad += dwc_ep->data_per_frame;
507 sts.b_iso_in.framenum += dwc_ep->bInterval;
509 sts.b_iso_in.ioc = 0;
511 sts.b_iso_in.ioc = 1;
514 dma_desc->buf = dma_ad;
515 dma_desc->status.d32 = sts.d32;
/* Remember where the next transfer's frame numbering should resume. */
517 dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval;
519 /** Write dma_ad into diepdma register */
520 DWC_WRITE_REG32(&(in_regs->diepdma),
521 (uint32_t) dwc_ep->iso_dma_desc_addr);
523 /** Enable endpoint, clear nak */
526 depctl.b.usbactep = 1;
529 DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
530 depctl.d32 = DWC_READ_REG32(addr);
534 * This function initializes a descriptor chain for Isochronous transfer
536 * @param core_if Programming view of DWC_otg controller.
537 * @param ep The EP to start the transfer on.
/* Buffer-DMA (non-descriptor) isochronous start: programs DIEPTSIZ/DOEPTSIZ
 * with xfersize/pktcnt/mc, writes the active buffer's DMA address for the
 * current proc_buf_num, and enables the endpoint. Only runs in plain
 * buffer-DMA mode (the guard below rejects slave mode and descriptor-DMA). */
540 void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if,
543 depctl_data_t depctl = {.d32 = 0 };
544 volatile uint32_t *addr;
547 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
549 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
/* Guard: this path is only valid for buffer DMA without descriptor DMA. */
552 if (core_if->dma_enable == 0 || core_if->dma_desc_enable != 0) {
555 deptsiz_data_t deptsiz = {.d32 = 0 };
/* Total transfer covers the whole buffer processing interval. */
558 ep->data_per_frame*ep->buf_proc_intrvl / ep->bInterval;
560 (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
/* proc_buf_num selects which half of the double buffer is active. */
563 (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
565 (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
568 /* Program the transfer size and packet count
569 * as follows: xfersize = N * maxpacket +
570 * short_packet pktcnt = N + (short_packet
573 deptsiz.b.mc = ep->pkt_per_frm;
574 deptsiz.b.xfersize = ep->xfer_len;
576 (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
577 DWC_WRITE_REG32(&core_if->dev_if->
578 in_ep_regs[ep->num]->dieptsiz,
581 /* Write the DMA register */
584 in_ep_regs[ep->num]->diepdma),
585 (uint32_t) ep->dma_addr);
/* OUT direction: pktcnt rounded up, xfersize padded to whole packets. */
589 (ep->xfer_len + (ep->maxpacket - 1)) /
591 deptsiz.b.xfersize = deptsiz.b.pktcnt*ep->maxpacket;
593 DWC_WRITE_REG32(&core_if->dev_if->
594 out_ep_regs[ep->num]->doeptsiz,
597 /* Write the DMA register */
600 out_ep_regs[ep->num]->doepdma),
601 (uint32_t) ep->dma_addr);
604 /** Enable endpoint, clear nak */
609 DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
614 * This function does the setup for a data transfer for an EP and
615 * starts the transfer. For an IN transfer, the packets will be
616 * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
617 * the packets are unloaded from the Rx FIFO in the ISR.
619 * @param core_if Programming view of DWC_otg controller.
620 * @param ep The EP to start the transfer on.
/* Dispatcher: picks the ISO start strategy by core capability —
 * descriptor-DMA -> ddma_transfer; buffer-DMA with PTI enhancement ->
 * buf_transfer; otherwise (buffer-DMA without PTI, or slave mode) ->
 * frame-at-a-time transfer with the current double-buffer selected by
 * proc_buf_num. */
623 static void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t *core_if,
626 if (core_if->dma_enable) {
627 if (core_if->dma_desc_enable) {
629 ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm;
631 ep->desc_cnt = ep->pkt_cnt;
633 dwc_otg_iso_ep_start_ddma_transfer(core_if, ep);
635 if (core_if->pti_enh_enable) {
636 dwc_otg_iso_ep_start_buf_transfer(core_if, ep);
639 (ep->proc_buf_num) ? ep->
640 xfer_buff1 : ep->xfer_buff0;
641 ep->cur_pkt_dma_addr =
642 (ep->proc_buf_num) ? ep->
643 dma_addr1 : ep->dma_addr0;
644 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
/* Slave-mode fallback: same buffer selection, frame-by-frame engine. */
649 (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
650 ep->cur_pkt_dma_addr =
651 (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
652 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
657 * This function stops transfer for an EP and
658 * resets the ep's variables.
660 * @param core_if Programming view of DWC_otg controller.
661 * @param ep The EP to start the transfer on.
/* Disables the endpoint in DIEPCTL/DOEPCTL, frees the ISO descriptor chain
 * (descriptor-DMA mode only), and zeroes the EP's ISO bookkeeping fields. */
664 void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
666 depctl_data_t depctl = {.d32 = 0 };
667 volatile uint32_t *addr;
669 if (ep->is_in == 1) {
670 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
672 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
/* Read-modify-write of DEPCTL to disable the EP (modified bits elided). */
676 depctl.d32 = DWC_READ_REG32(addr);
681 DWC_WRITE_REG32(addr, depctl.d32);
683 if (core_if->dma_desc_enable &&
684 ep->iso_desc_addr && ep->iso_dma_desc_addr) {
685 dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,
686 ep->iso_dma_desc_addr,
690 /* reset variables */
695 ep->data_per_frame = 0;
696 ep->data_pattern_frame = 0;
698 ep->buf_proc_intrvl = 0;
700 ep->proc_buf_num = 0;
704 ep->iso_desc_addr = 0;
705 ep->iso_dma_desc_addr = 0;
/* Public entry point: configures and starts a double-buffered isochronous
 * transfer on an EP. Validates the handle (EP0 is rejected), fills in the
 * dwc_ep ISO fields from the caller's parameters, computes pkt_per_frm and
 * the start frame, allocates per-packet status storage, then kicks off the
 * transfer. Returns 0 on success, -DWC_E_INVALID for a bad EP and
 * -DWC_E_NO_MEMORY when pkt_info allocation fails. */
708 int dwc_otg_pcd_iso_ep_start(dwc_otg_pcd_t *pcd, void *ep_handle,
709 uint8_t *buf0, uint8_t *buf1, dwc_dma_t dma0,
710 dwc_dma_t dma1, int sync_frame, int dp_frame,
711 int data_per_frame, int start_frame,
712 int buf_proc_intrvl, void *req_handle,
715 dwc_otg_pcd_ep_t *ep;
716 dwc_irqflags_t flags = 0;
720 dwc_otg_core_if_t *core_if;
722 ep = get_ep_from_handle(pcd, ep_handle);
724 if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
725 DWC_WARN("bad ep\n");
726 return -DWC_E_INVALID;
729 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
730 core_if = GET_CORE_IF(pcd);
731 dwc_ep = &ep->dwc_ep;
/* Only one ISO request may be outstanding per EP. */
733 if (ep->iso_req_handle) {
734 DWC_WARN("ISO request in progress\n");
737 dwc_ep->dma_addr0 = dma0;
738 dwc_ep->dma_addr1 = dma1;
740 dwc_ep->xfer_buff0 = buf0;
741 dwc_ep->xfer_buff1 = buf1;
743 dwc_ep->data_per_frame = data_per_frame;
745 /** @todo - pattern data support is to be implemented in the future */
746 dwc_ep->data_pattern_frame = dp_frame;
747 dwc_ep->sync_frame = sync_frame;
749 dwc_ep->buf_proc_intrvl = buf_proc_intrvl;
/* USB encoding: actual interval = 2^(bInterval-1) (micro)frames. */
751 dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1);
753 dwc_ep->proc_buf_num = 0;
/* pkt_per_frm = ceil(data_per_frame / maxpacket), computed by counting. */
755 dwc_ep->pkt_per_frm = 0;
756 frm_data = ep->dwc_ep.data_per_frame;
757 while (frm_data > 0) {
758 dwc_ep->pkt_per_frm++;
759 frm_data -= ep->dwc_ep.maxpacket;
762 dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
/* start_frame == -1 means "start as soon as possible", aligned to the EP's
 * interval relative to the current SOF number. */
764 if (start_frame == -1) {
765 dwc_ep->next_frame = dsts.b.soffn + 1;
766 if (dwc_ep->bInterval != 1) {
768 dwc_ep->next_frame + (dwc_ep->bInterval - 1 -
773 dwc_ep->next_frame = start_frame;
/* Packet-count formulas differ with/without PTI enhancement. */
776 if (!core_if->pti_enh_enable) {
778 dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
782 (dwc_ep->data_per_frame *
783 (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval)
784 - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket;
787 if (core_if->dma_desc_enable) {
789 dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
/* Per-packet status array; atomic variant presumably chosen when called
 * from atomic context (the selecting #if/#else lines are elided here). */
795 DWC_ALLOC_ATOMIC(sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
798 DWC_ALLOC(sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
800 if (!dwc_ep->pkt_info) {
801 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
802 return -DWC_E_NO_MEMORY;
804 if (core_if->pti_enh_enable) {
805 dwc_memset(dwc_ep->pkt_info, 0,
806 sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
810 ep->iso_req_handle = req_handle;
/* Lock dropped before starting the transfer (register programming). */
812 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
813 dwc_otg_iso_ep_start_transfer(core_if, dwc_ep);
/* Stops an in-progress isochronous transfer and releases its per-packet
 * status storage. Returns 0 on success, -DWC_E_INVALID for a bad EP or a
 * request-handle mismatch.
 * NOTE(review): pkt_info is freed and the hardware transfer stopped BEFORE
 * the req_handle is validated under the lock — on a mismatch the function
 * returns -DWC_E_INVALID having already torn the transfer down, and the
 * free itself happens outside the lock. Confirm intended ordering against
 * upstream. */
817 int dwc_otg_pcd_iso_ep_stop(dwc_otg_pcd_t *pcd, void *ep_handle,
820 dwc_irqflags_t flags = 0;
821 dwc_otg_pcd_ep_t *ep;
824 ep = get_ep_from_handle(pcd, ep_handle);
825 if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
826 DWC_WARN("bad ep\n");
827 return -DWC_E_INVALID;
829 dwc_ep = &ep->dwc_ep;
831 dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep);
833 DWC_FREE(dwc_ep->pkt_info);
834 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
835 if (ep->iso_req_handle != req_handle) {
836 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
837 return -DWC_E_INVALID;
840 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
842 ep->iso_req_handle = 0;
847 * This function is used for periodic data exchange between PCD and gadget drivers.
848 * for Isochronous EPs
850 * - Every time a sync period completes this function is called to
851 * perform data exchange between PCD and gadget
/* Called when one half of the double buffer completes: notifies the gadget
 * via isoc_complete (lock dropped around the callback) with the index of
 * the buffer that just finished (proc_buf_num XOR 1), then clears the
 * per-packet status array for reuse. */
853 void dwc_otg_iso_buffer_done(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep,
859 dwc_ep = &ep->dwc_ep;
861 DWC_SPINUNLOCK(ep->pcd->lock);
862 pcd->fops->isoc_complete(pcd, ep->priv, ep->iso_req_handle,
863 dwc_ep->proc_buf_num ^ 0x1);
864 DWC_SPINLOCK(ep->pcd->lock);
866 for (i = 0; i < dwc_ep->pkt_cnt; ++i) {
867 dwc_ep->pkt_info[i].status = 0;
868 dwc_ep->pkt_info[i].offset = 0;
869 dwc_ep->pkt_info[i].length = 0;
/* Returns the number of packets in the EP's current ISO transfer, or
 * -DWC_E_INVALID for a bad handle.
 * NOTE(review): unlike dwc_otg_pcd_iso_ep_start/stop, the result of
 * get_ep_from_handle() is dereferenced (ep->desc) without a NULL check —
 * an unknown handle would dereference NULL. Confirm callers guarantee a
 * valid handle, or add the `!ep` guard used elsewhere. */
873 int dwc_otg_pcd_get_iso_packet_count(dwc_otg_pcd_t *pcd, void *ep_handle,
874 void *iso_req_handle)
876 dwc_otg_pcd_ep_t *ep;
879 ep = get_ep_from_handle(pcd, ep_handle);
880 if (!ep->desc || ep->dwc_ep.num == 0) {
881 DWC_WARN("bad ep\n");
882 return -DWC_E_INVALID;
884 dwc_ep = &ep->dwc_ep;
886 return dwc_ep->pkt_cnt;
/* Copies one packet's completion status, actual length and buffer offset
 * out of the EP's pkt_info array into the caller's out-parameters.
 * NOTE(review): `packet` is not range-checked against pkt_cnt in the
 * visible lines — callers must pass a valid index. */
889 void dwc_otg_pcd_get_iso_packet_params(dwc_otg_pcd_t *pcd, void *ep_handle,
890 void *iso_req_handle, int packet,
891 int *status, int *actual, int *offset)
893 dwc_otg_pcd_ep_t *ep;
896 ep = get_ep_from_handle(pcd, ep_handle);
898 DWC_WARN("bad ep\n");
900 dwc_ep = &ep->dwc_ep;
902 *status = dwc_ep->pkt_info[packet].status;
903 *actual = dwc_ep->pkt_info[packet].length;
904 *offset = dwc_ep->pkt_info[packet].offset;
907 #endif /* DWC_EN_ISOC */
/* Resets one PCD endpoint structure to its post-enumeration defaults:
 * direction/number from the arguments, control type until activation,
 * all transfer bookkeeping zeroed, and an empty request queue. */
909 static void dwc_otg_pcd_init_ep(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *pcd_ep,
910 uint32_t is_in, uint32_t ep_num)
912 /* Init EP structure */
916 pcd_ep->queue_sof = 0;
918 /* Init DWC ep structure */
919 pcd_ep->dwc_ep.is_in = is_in;
920 pcd_ep->dwc_ep.num = ep_num;
921 pcd_ep->dwc_ep.active = 0;
922 pcd_ep->dwc_ep.tx_fifo_num = 0;
923 /* Control until ep is activated */
924 pcd_ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
925 pcd_ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
926 pcd_ep->dwc_ep.dma_addr = 0;
927 pcd_ep->dwc_ep.start_xfer_buff = 0;
928 pcd_ep->dwc_ep.xfer_buff = 0;
929 pcd_ep->dwc_ep.xfer_len = 0;
930 pcd_ep->dwc_ep.xfer_count = 0;
931 pcd_ep->dwc_ep.sent_zlp = 0;
932 pcd_ep->dwc_ep.total_len = 0;
933 pcd_ep->dwc_ep.desc_addr = 0;
934 pcd_ep->dwc_ep.dma_desc_addr = 0;
935 DWC_CIRCLEQ_INIT(&pcd_ep->queue);
/* (Re)initializes all PCD endpoints: EP0 plus the IN and OUT arrays.
 * HWCFG1 encodes per-EP direction capability in 2-bit fields; the shifted
 * copy is walked one EP at a time, and an EP is populated when its
 * direction bit is 0 (capable). Finishes by forcing EP0 into the
 * disconnect/control state with the EP0 max packet size. */
941 static void dwc_otg_pcd_reinit(dwc_otg_pcd_t *pcd)
945 dwc_otg_pcd_ep_t *ep;
946 int in_ep_cntr, out_ep_cntr;
947 uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
948 uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
950 * Initialize the EP0 structure.
953 dwc_otg_pcd_init_ep(pcd, ep, 0, 0);
/* IN endpoints: start at the IN-direction bit of EP1 (>> 3). */
956 hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3;
957 for (i = 1; in_ep_cntr < num_in_eps; i++) {
958 if ((hwcfg1 & 0x1) == 0) {
959 dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr];
962 * @todo NGS: Add direction to EP, based on contents
963 * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
966 dwc_otg_pcd_init_ep(pcd, ep, 1 /* IN */ , i);
968 DWC_CIRCLEQ_INIT(&ep->queue);
/* OUT endpoints: start at the OUT-direction bit of EP1 (>> 2). */
974 hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2;
975 for (i = 1; out_ep_cntr < num_out_eps; i++) {
976 if ((hwcfg1 & 0x1) == 0) {
977 dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr];
980 * @todo NGS: Add direction to EP, based on contents
981 * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
984 dwc_otg_pcd_init_ep(pcd, ep, 0 /* OUT */ , i);
985 DWC_CIRCLEQ_INIT(&ep->queue);
990 pcd->ep0state = EP0_DISCONNECT;
991 pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE;
992 pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
996 * This function is called when the SRP timer expires. The SRP should
997 * complete within 6 seconds.
/* Timer callback for Session Request Protocol timeout. Behavior branches on
 * configuration: with ADP enabled, either powers down / re-arms ADP probing
 * (B-session invalid) or restarts the core as a B-peripheral (B-session
 * valid); with FS PHY + I2C, success resumes the PCD and clears the session
 * request, failure logs an error; otherwise a pending session request is
 * simply aborted. */
999 static void srp_timeout(void *ptr)
1001 gotgctl_data_t gotgctl;
1002 dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *) ptr;
1003 volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;
1005 gotgctl.d32 = DWC_READ_REG32(addr);
1007 core_if->srp_timer_started = 0;
1009 if (core_if->adp_enable) {
1010 if (gotgctl.b.bsesvld == 0) {
1011 gpwrdn_data_t gpwrdn = {.d32 = 0 };
1012 DWC_PRINTF("SRP Timeout BSESSVLD = 0\n");
1013 /* Power off the core */
1014 if (core_if->power_down == 2) {
1015 gpwrdn.b.pwrdnswtch = 1;
1016 DWC_MODIFY_REG32(&core_if->core_global_regs->
1017 gpwrdn, gpwrdn.d32, 0);
/* Re-enable the power-management unit and restart ADP sensing. */
1021 gpwrdn.b.pmuintsel = 1;
1022 gpwrdn.b.pmuactv = 1;
1023 DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
1025 dwc_otg_adp_probe_start(core_if);
1027 DWC_PRINTF("SRP Timeout BSESSVLD = 1\n");
1028 core_if->op_state = B_PERIPHERAL;
1029 dwc_otg_core_init(core_if);
1030 dwc_otg_enable_global_interrupts(core_if);
1031 cil_pcd_start(core_if);
1035 if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
1036 (core_if->core_params->i2c_enable)) {
1037 DWC_PRINTF("SRP Timeout\n");
1039 if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
1040 if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
1041 core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->
1045 /* Clear Session Request */
1047 gotgctl.b.sesreq = 1;
1048 DWC_MODIFY_REG32(&core_if->core_global_regs->gotgctl,
1051 core_if->srp_success = 0;
1053 __DWC_ERROR("Device not connected/responding\n");
1054 gotgctl.b.sesreq = 0;
1055 DWC_WRITE_REG32(addr, gotgctl.d32);
1057 } else if (gotgctl.b.sesreq) {
1058 DWC_PRINTF("SRP Timeout\n");
1060 __DWC_ERROR("Device not connected/responding\n");
1061 gotgctl.b.sesreq = 0;
1062 DWC_WRITE_REG32(addr, gotgctl.d32);
1064 DWC_PRINTF(" SRP GOTGCTL=%0x\n", gotgctl.d32);
1072 extern void start_next_request(dwc_otg_pcd_ep_t *ep);
/* Tasklet body: for EP0 and each IN endpoint whose queue_sof flag is set,
 * clears the flag and kicks the next queued request. The DIEPCTL reads
 * appear unused in the visible lines — possibly leftover debug reads, or
 * used in elided lines; TODO confirm. Note the inner `diepctl` shadows the
 * outer one. */
1074 static void start_xfer_tasklet_func(void *data)
1076 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) data;
1077 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1080 depctl_data_t diepctl;
1082 DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
1084 diepctl.d32 = DWC_READ_REG32(&core_if->dev_if->in_ep_regs[0]->diepctl);
1086 if (pcd->ep0.queue_sof) {
1087 pcd->ep0.queue_sof = 0;
1088 start_next_request(&pcd->ep0);
1092 for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
1093 depctl_data_t diepctl;
1095 DWC_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl);
1097 if (pcd->in_ep[i].queue_sof) {
1098 pcd->in_ep[i].queue_sof = 0;
1099 start_next_request(&pcd->in_ep[i]);
1108 * This function initialized the PCD portion of the driver.
/* Allocates and initializes the PCD: the structure itself, its spinlock
 * (shared with the CIL via core_if->lock), CIL callbacks, SETUP-packet and
 * status DMA buffers (DMA mode) or plain heap buffers (slave mode), the
 * four EP0 DMA descriptors (descriptor-DMA mode), endpoint state, the CFI
 * object, tasklets, the SRP timer, and optional per-EP OUT-NAK timers.
 * Error paths free everything acquired so far (the goto targets are mostly
 * elided in this extraction). Returns the pcd, or presumably NULL on
 * failure — TODO confirm. */
1111 dwc_otg_pcd_t *dwc_otg_pcd_init(dwc_otg_core_if_t *core_if)
1113 dwc_otg_pcd_t *pcd = NULL;
1114 dwc_otg_dev_if_t *dev_if;
1118 * Allocate PCD structure
1120 pcd = DWC_ALLOC(sizeof(dwc_otg_pcd_t));
1126 pcd->lock = DWC_SPINLOCK_ALLOC();
1128 DWC_ERROR("Could not allocate lock for pcd");
1132 /* Set core_if's lock pointer to hcd->lock */
1133 core_if->lock = pcd->lock;
1134 pcd->core_if = core_if;
1136 dev_if = core_if->dev_if;
1137 dev_if->isoc_ep = NULL;
1139 if (core_if->hwcfg4.b.ded_fifo_en) {
1140 DWC_PRINTF("Dedicated Tx FIFOs mode\n");
1142 DWC_PRINTF("Shared Tx FIFO mode\n");
1146 * Initialized the Core for Device mode here if there is no ADP support.
1147 * Otherwise it will be done later in dwc_otg_adp_start routine.
1149 /* if (dwc_otg_is_device_mode(core_if) ) { */
1150 /* dwc_otg_core_dev_init(core_if); */
1154 * Register the PCD Callbacks.
1156 dwc_otg_cil_register_pcd_callbacks(core_if, &pcd_callbacks, pcd);
1159 * Initialize the DMA buffer for SETUP packets
1161 if (GET_CORE_IF(pcd)->dma_enable) {
/* Room for 5 back-to-back SETUP packets (hardware may pipeline them). */
1163 DWC_DMA_ALLOC_ATOMIC(sizeof(*pcd->setup_pkt) * 5,
1164 &pcd->setup_pkt_dma_handle);
1165 if (pcd->setup_pkt == NULL) {
1171 DWC_DMA_ALLOC_ATOMIC(sizeof(uint16_t),
1172 &pcd->status_buf_dma_handle);
1173 if (pcd->status_buf == NULL) {
1174 DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
1175 pcd->setup_pkt, pcd->setup_pkt_dma_handle);
/* Descriptor-DMA mode additionally needs two SETUP descriptors plus one
 * IN and one OUT descriptor for EP0. */
1180 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1181 dev_if->setup_desc_addr[0] =
1182 dwc_otg_ep_alloc_desc_chain
1183 (&dev_if->dma_setup_desc_addr[0], 1);
1184 dev_if->setup_desc_addr[1] =
1185 dwc_otg_ep_alloc_desc_chain
1186 (&dev_if->dma_setup_desc_addr[1], 1);
1187 dev_if->in_desc_addr =
1188 dwc_otg_ep_alloc_desc_chain
1189 (&dev_if->dma_in_desc_addr, 1);
1190 dev_if->out_desc_addr =
1191 dwc_otg_ep_alloc_desc_chain
1192 (&dev_if->dma_out_desc_addr, 1);
1193 pcd->data_terminated = 0;
/* Roll back every successful allocation if any of the four failed. */
1195 if (dev_if->setup_desc_addr[0] == 0
1196 || dev_if->setup_desc_addr[1] == 0
1197 || dev_if->in_desc_addr == 0
1198 || dev_if->out_desc_addr == 0) {
1200 if (dev_if->out_desc_addr)
1201 dwc_otg_ep_free_desc_chain
1202 (dev_if->out_desc_addr,
1203 dev_if->dma_out_desc_addr, 1);
1204 if (dev_if->in_desc_addr)
1205 dwc_otg_ep_free_desc_chain
1206 (dev_if->in_desc_addr,
1207 dev_if->dma_in_desc_addr, 1);
1208 if (dev_if->setup_desc_addr[1])
1209 dwc_otg_ep_free_desc_chain
1210 (dev_if->setup_desc_addr[1],
1211 dev_if->dma_setup_desc_addr[1], 1);
1212 if (dev_if->setup_desc_addr[0])
1213 dwc_otg_ep_free_desc_chain
1214 (dev_if->setup_desc_addr[0],
1215 dev_if->dma_setup_desc_addr[0], 1);
1217 DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
1219 pcd->setup_pkt_dma_handle);
1220 DWC_DMA_FREE(sizeof(*pcd->status_buf),
1222 pcd->status_buf_dma_handle);
/* Slave mode: plain (non-DMA) allocations for the same buffers. */
1230 pcd->setup_pkt = DWC_ALLOC(sizeof(*pcd->setup_pkt) * 5);
1231 if (pcd->setup_pkt == NULL) {
1236 pcd->status_buf = DWC_ALLOC(sizeof(uint16_t));
1237 if (pcd->status_buf == NULL) {
1238 DWC_FREE(pcd->setup_pkt);
1244 dwc_otg_pcd_reinit(pcd);
1246 /* Allocate the cfi object for the PCD */
1248 pcd->cfi = DWC_ALLOC(sizeof(cfiobject_t));
1249 if (NULL == pcd->cfi)
1251 if (init_cfi(pcd->cfi)) {
1252 CFI_INFO("%s: Failed to init the CFI object\n", __func__);
1257 /* Initialize tasklets */
1258 pcd->start_xfer_tasklet = DWC_TASK_ALLOC("xfer_tasklet",
1259 start_xfer_tasklet_func, pcd);
1260 pcd->test_mode_tasklet = DWC_TASK_ALLOC("test_mode_tasklet",
1263 /* Initialize SRP timer */
1264 core_if->srp_timer = DWC_TIMER_ALLOC("SRP TIMER", srp_timeout, core_if);
1266 if (core_if->core_params->dev_out_nak) {
1268 * Initialize xfer timeout timer. Implemented for
1269 * 2.93a feature "Device DDMA OUT NAK Enhancement"
1271 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1272 pcd->core_if->ep_xfer_timer[i] =
1273 DWC_TIMER_ALLOC("ep timer", ep_xfer_timeout,
1274 &pcd->core_if->ep_xfer_info[i]);
/* Shared error-exit cleanup (labels elided in this extraction). */
1283 DWC_FREE(pcd->setup_pkt);
1284 if (pcd->status_buf)
1285 DWC_FREE(pcd->status_buf);
1297 * Remove PCD specific data
/* Tears down everything dwc_otg_pcd_init() created, in roughly reverse
 * order: per-EP OUT-NAK timers, SETUP/status buffers (DMA or heap), the
 * EP0 descriptor chains, the lock (clearing core_if->lock), tasklets, and
 * the CFI object. */
1299 void dwc_otg_pcd_remove(dwc_otg_pcd_t *pcd)
1301 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
1303 if (pcd->core_if->core_params->dev_out_nak) {
1304 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1305 DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[i]);
1306 pcd->core_if->ep_xfer_info[i].state = 0;
1310 if (GET_CORE_IF(pcd)->dma_enable) {
1311 DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt,
1312 pcd->setup_pkt_dma_handle);
1313 DWC_DMA_FREE(sizeof(uint16_t), pcd->status_buf,
1314 pcd->status_buf_dma_handle);
1315 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1316 dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0],
1317 dev_if->dma_setup_desc_addr
1319 dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1],
1320 dev_if->dma_setup_desc_addr
1322 dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr,
1323 dev_if->dma_in_desc_addr, 1);
1324 dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr,
1325 dev_if->dma_out_desc_addr,
1329 DWC_FREE(pcd->setup_pkt);
1330 DWC_FREE(pcd->status_buf);
1332 DWC_SPINLOCK_FREE(pcd->lock);
1333 /* Set core_if's lock pointer to NULL */
1334 pcd->core_if->lock = NULL;
1336 DWC_TASK_FREE(pcd->start_xfer_tasklet);
1337 DWC_TASK_FREE(pcd->test_mode_tasklet);
1338 if (pcd->core_if->core_params->dev_out_nak) {
1339 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1340 if (pcd->core_if->ep_xfer_timer[i]) {
1341 DWC_TIMER_FREE(pcd->core_if->ep_xfer_timer[i]);
1346 /* Release the CFI object's dynamic memory */
1348 if (pcd->cfi->ops.release) {
1349 pcd->cfi->ops.release(pcd->cfi);
1357 * Returns whether registered pcd is dual speed or not
/* The PCD is NOT dual-speed when forced full-speed by parameter, or when
 * the PHY combination is dedicated-FS (hs_phy_type==2 UTMI+/ULPI with
 * fs_phy_type==1 and ulpi_fs_ls set). Return values are in elided lines. */
1359 uint32_t dwc_otg_pcd_is_dualspeed(dwc_otg_pcd_t *pcd)
1361 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1363 if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) ||
1364 ((core_if->hwcfg2.b.hs_phy_type == 2) &&
1365 (core_if->hwcfg2.b.fs_phy_type == 1) &&
1366 (core_if->core_params->ulpi_fs_ls))) {
1374 * Returns whether registered pcd is OTG capable or not
/* OTG capability test based on GUSBCFG: pre-3.6 kernels require both SRP
 * and HNP capability; newer kernels check SRP, HNP and ADP separately
 * (the #if branches' return statements are elided here). */
1376 uint32_t dwc_otg_pcd_is_otg(dwc_otg_pcd_t *pcd)
1378 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1379 gusbcfg_data_t usbcfg = {.d32 = 0 };
1380 uint32_t retval = 0;
1382 usbcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->gusbcfg);
1383 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
1384 if (!usbcfg.b.srpcap || !usbcfg.b.hnpcap)
1389 if (!usbcfg.b.srpcap)
1394 if (usbcfg.b.hnpcap)
1397 if (core_if->adp_enable)
1405 * This function assigns periodic Tx FIFO to an periodic EP
1406 * in shared Tx FIFO mode
1408 static uint32_t assign_tx_fifo(dwc_otg_core_if_t *core_if)
1413 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
1414 if ((TxMsk & core_if->tx_msk) == 0) {
1415 core_if->tx_msk |= TxMsk;
1424 * This function assigns periodic Tx FIFO to an periodic EP
1425 * in shared Tx FIFO mode
1427 static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t *core_if)
1429 uint32_t PerTxMsk = 1;
1431 for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
1432 if ((PerTxMsk & core_if->p_tx_msk) == 0) {
1433 core_if->p_tx_msk |= PerTxMsk;
1442 * This function releases periodic Tx FIFO
1443 * in shared Tx FIFO mode
1445 static void release_perio_tx_fifo(dwc_otg_core_if_t *core_if,
1449 (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
1453 * This function releases periodic Tx FIFO
1454 * in shared Tx FIFO mode
1456 static void release_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num)
1459 (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
/*
 * Called from the gadget layer to enable a PCD endpoint.  Validates
 * the endpoint descriptor, locates the matching in_ep/out_ep slot,
 * assigns a Tx FIFO for IN endpoints, optionally allocates descriptor-
 * DMA chains, activates the endpoint in hardware and notifies the CFI
 * extension.  Returns 0 on success or a negative DWC_E_* error code.
 *
 * NOTE(review): this extract is line-sampled; braces, blank lines and
 * some statements sit on elided numbered lines.  Code text below is
 * untouched; only comments were added.
 */
1463 * This function is being called from gadget
1464 * to enable PCD endpoint.
1466 int dwc_otg_pcd_ep_enable(dwc_otg_pcd_t *pcd,
1467 const uint8_t *ep_desc, void *usb_ep)
1470 dwc_otg_pcd_ep_t *ep = NULL;
1471 const usb_endpoint_descriptor_t *desc;
1472 dwc_irqflags_t flags;
1473 /* fifosize_data_t dptxfsiz = {.d32 = 0 }; */
1474 /* gdfifocfg_data_t gdfifocfg = {.d32 = 0 }; */
1475 /* gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 }; */
1479 desc = (const usb_endpoint_descriptor_t *)ep_desc;
/* EP0 is owned by the PCD itself; only its priv handle is recorded. */
1482 pcd->ep0.priv = usb_ep;
1484 retval = -DWC_E_INVALID;
/* Decode endpoint number and direction from bEndpointAddress. */
1488 num = UE_GET_ADDR(desc->bEndpointAddress);
1489 dir = UE_GET_DIR(desc->bEndpointAddress);
1491 if (!desc->wMaxPacketSize) {
1492 DWC_WARN("bad maxpacketsize\n");
1493 retval = -DWC_E_INVALID;
/* Find the pcd EP container whose number matches the descriptor, in
 * the direction-appropriate array. */
1497 if (dir == UE_DIR_IN) {
1498 epcount = pcd->core_if->dev_if->num_in_eps;
1499 for (i = 0; i < epcount; i++) {
1500 if (num == pcd->in_ep[i].dwc_ep.num) {
1501 ep = &pcd->in_ep[i];
1506 epcount = pcd->core_if->dev_if->num_out_eps;
1507 for (i = 0; i < epcount; i++) {
1508 if (num == pcd->out_ep[i].dwc_ep.num) {
1509 ep = &pcd->out_ep[i];
1516 DWC_WARN("bad address\n");
1517 retval = -DWC_E_INVALID;
/* Configure the endpoint state under the PCD lock. */
1521 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
1531 ep->dwc_ep.is_in = (dir == UE_DIR_IN);
1532 ep->dwc_ep.maxpacket = UGETW(desc->wMaxPacketSize);
1534 ep->dwc_ep.type = desc->bmAttributes & UE_XFERTYPE;
/* IN endpoints need a Tx FIFO: shared-FIFO cores use FIFO 0 except
 * for periodic (ISOC) EPs; dedicated-FIFO cores get one per EP. */
1536 if (ep->dwc_ep.is_in) {
1537 if (!GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1538 ep->dwc_ep.tx_fifo_num = 0;
1540 if (ep->dwc_ep.type == UE_ISOCHRONOUS) {
1542 * if ISOC EP then assign a Periodic Tx FIFO.
1544 ep->dwc_ep.tx_fifo_num =
1545 assign_perio_tx_fifo(GET_CORE_IF(pcd));
1549 * if Dedicated FIFOs mode is on then assign a Tx FIFO.
1551 ep->dwc_ep.tx_fifo_num =
1552 assign_tx_fifo(GET_CORE_IF(pcd));
1555 /* Calculating EP info controller base address */
/* For cores up to rev 2.94a, GDFIFOCFG.EpInfoBase must be bumped by
 * the size of the newly assigned dedicated FIFO. */
1557 if (ep->dwc_ep.tx_fifo_num
1558 && GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1560 DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->
1562 gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
1565 (&GET_CORE_IF(pcd)->
1566 core_global_regs->dtxfsiz[ep->dwc_ep.tx_fifo_num -
1568 gdfifocfg.b.epinfobase =
1569 gdfifocfgbase.d32 + dptxfsiz.d32;
1570 if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
1571 DWC_WRITE_REG32(&GET_CORE_IF
1572 (pcd)->core_global_regs->
1573 gdfifocfg, gdfifocfg.d32);
1578 /* Set initial data PID. */
1579 if (ep->dwc_ep.type == UE_BULK) {
1580 ep->dwc_ep.data_pid_start = 0;
1583 /* Alloc DMA Descriptors */
/* Non-ISOC EPs in descriptor-DMA mode get their chain here; ISOC
 * descriptors are handled by the per-IO extension (DWC_UTE_PER_IO). */
1584 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1585 #ifndef DWC_UTE_PER_IO
1586 if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
1588 ep->dwc_ep.desc_addr =
1589 dwc_otg_ep_alloc_desc_chain(&ep->dwc_ep.
1592 if (!ep->dwc_ep.desc_addr) {
1593 DWC_WARN("%s, can't allocate DMA descriptor\n",
1595 retval = -DWC_E_SHUTDOWN;
/* Allocation failed: drop the lock before bailing out. */
1596 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1599 #ifndef DWC_UTE_PER_IO
1604 DWC_DEBUGPL(DBG_PCD, "Activate %s: type=%d, mps=%d desc=%p\n",
1605 (ep->dwc_ep.is_in ? "IN" : "OUT"),
1606 ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);
/* Cache the (micro)frame interval for ISOC scheduling. */
1607 #ifdef DWC_UTE_PER_IO
1608 ep->dwc_ep.xiso_bInterval = 1 << (ep->desc->bInterval - 1);
1610 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
1611 ep->dwc_ep.bInterval = 1 << (ep->desc->bInterval - 1);
1612 ep->dwc_ep.frame_num = 0xFFFFFFFF;
/* Program the endpoint control registers. */
1615 dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
/* Let the CFI extension hook the newly enabled endpoint. */
1618 if (pcd->cfi->ops.ep_enable) {
1619 pcd->cfi->ops.ep_enable(pcd->cfi, pcd, ep);
1623 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
/*
 * Called from the gadget layer to disable a PCD endpoint.  Completes
 * all queued requests with an error, deactivates the EP in hardware,
 * cancels its NAK-workaround timer, releases/flushes its Tx FIFO (IN
 * EPs), rewinds GDFIFOCFG.EpInfoBase on old cores, and frees the
 * descriptor-DMA chain.  Returns 0 or -DWC_E_INVALID for a bad handle.
 *
 * NOTE(review): this extract is line-sampled; braces, blank lines and
 * some statements sit on elided numbered lines.  Code text below is
 * untouched; only comments were added.
 */
1630 * This function is being called from gadget
1631 * to disable PCD endpoint.
1633 int dwc_otg_pcd_ep_disable(dwc_otg_pcd_t *pcd, void *ep_handle)
1635 dwc_otg_pcd_ep_t *ep;
1636 dwc_irqflags_t flags;
1637 dwc_otg_dev_dma_desc_t *desc_addr;
1638 dwc_dma_t dma_desc_addr;
1639 gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
1640 gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
1641 /* fifosize_data_t dptxfsiz = {.d32 = 0 }; */
1643 ep = get_ep_from_handle(pcd, ep_handle);
/* An endpoint without a descriptor was never enabled. */
1645 if (!ep || !ep->desc) {
1646 DWC_DEBUGPL(DBG_PCD, "bad ep address\n");
1647 return -DWC_E_INVALID;
1650 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
/* Fail every request still queued on this endpoint. */
1652 dwc_otg_request_nuke(ep);
1654 dwc_otg_ep_deactivate(GET_CORE_IF(pcd), &ep->dwc_ep);
/* Stop the per-EP transfer timer of the dev_out_nak workaround. */
1655 if (pcd->core_if->core_params->dev_out_nak) {
1656 DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[ep->dwc_ep.num]);
1657 pcd->core_if->ep_xfer_info[ep->dwc_ep.num].state = 0;
1663 DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg);
1664 gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
/* IN endpoints: flush and give back the Tx FIFO claimed at enable. */
1666 if (ep->dwc_ep.is_in) {
1667 if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1668 /* Flush the Tx FIFO */
1669 dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd),
1670 ep->dwc_ep.tx_fifo_num);
1672 release_perio_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
1673 release_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
1675 if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1676 /* Decreasing EPinfo Base Addr */
1679 (&GET_CORE_IF(pcd)->core_global_regs->
1680 dtxfsiz[ep->dwc_ep.tx_fifo_num - 1]) >> 16);
1681 gdfifocfg.b.epinfobase =
1682 gdfifocfgbase.d32 - dptxfsiz.d32;
/* Mirrors the EpInfoBase bump done in ep_enable on rev <= 2.94a. */
1683 if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
1684 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
1685 core_global_regs->gdfifocfg,
1692 /* Free DMA Descriptors */
1693 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1694 if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
/* Snapshot the chain pointers so the lock can be dropped first. */
1695 desc_addr = ep->dwc_ep.desc_addr;
1696 dma_desc_addr = ep->dwc_ep.dma_desc_addr;
1698 /* Cannot call dma_free_coherent() with IRQs disabled */
1699 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1700 dwc_otg_ep_free_desc_chain(desc_addr, dma_desc_addr,
1706 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1709 DWC_DEBUGPL(DBG_PCD, "%d %s disabled\n", ep->dwc_ep.num,
1710 ep->dwc_ep.is_in ? "IN" : "OUT");
1715 /******************************************************************************/
1716 #ifdef DWC_UTE_PER_IO
/*
 * Free a per-IO (extended ISOC) request's dynamic parts: releases the
 * per-frame descriptor array allocated by
 * dwc_otg_pcd_xiso_create_pkt_descs().
 *
 * NOTE(review): line-sampled extract — the request object itself is
 * presumably freed on the elided following line; confirm against the
 * full source.
 */
1719 * Free the request and its extended parts
1722 void dwc_pcd_xiso_ereq_free(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req)
1724 DWC_FREE(req->ext_req.per_io_frame_descs);
/*
 * Start the next per-IO ISOC request in the endpoint's queue.  If no
 * transfer is active, takes the first queued request, fills the
 * endpoint's DMA descriptor chain from the request's per-frame
 * descriptors (IN or OUT layout), programs the D{I,O}EPDMA register
 * and enables the endpoint.
 *
 * NOTE(review): this extract is line-sampled; braces, blank lines and
 * some statements sit on elided numbered lines.  Code text below is
 * untouched; only comments were added.
 */
1729 * Start the next request in the endpoint's queue.
1732 int dwc_otg_pcd_xiso_start_next_request(dwc_otg_pcd_t *pcd,
1733 dwc_otg_pcd_ep_t *ep)
1736 dwc_otg_pcd_request_t *req = NULL;
1737 dwc_ep_t *dwcep = NULL;
1738 struct dwc_iso_xreq_port *ereq = NULL;
1739 struct dwc_iso_pkt_desc_port *ddesc_iso;
1741 depctl_data_t diepctl;
1743 dwcep = &ep->dwc_ep;
/* Only one transfer may be in flight per endpoint at a time. */
1745 if (dwcep->xiso_active_xfers > 0) {
1747 /* Disable this to decrease s/w overhead
1748 * that is crucial for Isoc transfers */
1749 DWC_WARN("There are currently active transfers for EP%d \
1750 (active=%d; queued=%d)", dwcep->num,
1751 dwcep->xiso_active_xfers, dwcep->xiso_queued_xfers);
/* Additional-transactions-per-microframe field (wMaxPacketSize
 * bits 12:11 per the USB 2.0 spec). */
1756 nat = UGETW(ep->desc->wMaxPacketSize);
1757 nat = (nat >> 11) & 0x03;
1759 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1760 req = DWC_CIRCLEQ_FIRST(&ep->queue);
1761 ereq = &req->ext_req;
1764 /* Get the frame number */
1765 dwcep->xiso_frame_num =
1766 dwc_otg_get_frame_number(GET_CORE_IF(pcd));
1767 DWC_DEBUG("FRM_NUM=%d", dwcep->xiso_frame_num);
1769 ddesc_iso = ereq->per_io_frame_descs;
1772 /* Setup DMA Descriptor chain for IN Isoc request */
1773 for (i = 0; i < ereq->pio_pkt_count; i++) {
1774 /* if ((i % (nat + 1)) == 0) */
/* Schedule each packet bInterval frames after the previous one;
 * frame numbers wrap at 14 bits. */
1776 dwcep->xiso_frame_num =
1777 (dwcep->xiso_bInterval +
1778 dwcep->xiso_frame_num) & 0x3FFF;
1779 dwcep->desc_addr[i].buf =
1780 req->dma + ddesc_iso[i].offset;
1781 dwcep->desc_addr[i].status.b_iso_in.txbytes =
1782 ddesc_iso[i].length;
1783 dwcep->desc_addr[i].status.b_iso_in.framenum =
1784 dwcep->xiso_frame_num;
1785 dwcep->desc_addr[i].status.b_iso_in.bs =
1787 dwcep->desc_addr[i].status.b_iso_in.txsts = 0;
/* Short packet flag when the payload is not a maxpacket multiple. */
1788 dwcep->desc_addr[i].status.b_iso_in.sp =
1789 (ddesc_iso[i].length %
1790 dwcep->maxpacket) ? 1 : 0;
1791 dwcep->desc_addr[i].status.b_iso_in.ioc = 0;
1792 dwcep->desc_addr[i].status.b_iso_in.pid =
1794 dwcep->desc_addr[i].status.b_iso_in.l = 0;
1796 /* Process the last descriptor */
1797 if (i == ereq->pio_pkt_count - 1) {
1798 dwcep->desc_addr[i].status.b_iso_in.
1800 dwcep->desc_addr[i].status.b_iso_in.l =
1805 /* Setup and start the transfer for this endpoint */
1806 dwcep->xiso_active_xfers++;
/* Point the core at the descriptor chain, then set EPENA/CNAK. */
1807 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
1808 dev_if->in_ep_regs[dwcep->num]->diepdma,
1809 dwcep->dma_desc_addr);
1811 diepctl.b.epena = 1;
1813 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
1814 dev_if->in_ep_regs[dwcep->num]->
1815 diepctl, 0, diepctl.d32);
1817 /* Setup DMA Descriptor chain for OUT Isoc request */
1818 for (i = 0; i < ereq->pio_pkt_count; i++) {
1819 /* if ((i % (nat + 1)) == 0) */
1820 dwcep->xiso_frame_num = (dwcep->xiso_bInterval +
1824 dwcep->desc_addr[i].buf =
1825 req->dma + ddesc_iso[i].offset;
1826 dwcep->desc_addr[i].status.b_iso_out.rxbytes =
1827 ddesc_iso[i].length;
1828 dwcep->desc_addr[i].status.b_iso_out.framenum =
1829 dwcep->xiso_frame_num;
1830 dwcep->desc_addr[i].status.b_iso_out.bs =
1832 dwcep->desc_addr[i].status.b_iso_out.rxsts = 0;
1833 dwcep->desc_addr[i].status.b_iso_out.sp =
1834 (ddesc_iso[i].length %
1835 dwcep->maxpacket) ? 1 : 0;
1836 dwcep->desc_addr[i].status.b_iso_out.ioc = 0;
1837 dwcep->desc_addr[i].status.b_iso_out.pid =
1839 dwcep->desc_addr[i].status.b_iso_out.l = 0;
1841 /* Process the last descriptor */
1842 if (i == ereq->pio_pkt_count - 1) {
1843 dwcep->desc_addr[i].status.b_iso_out.
1845 dwcep->desc_addr[i].status.b_iso_out.l =
1850 /* Setup and start the transfer for this endpoint */
1851 dwcep->xiso_active_xfers++;
1852 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->dev_if->
1853 out_ep_regs[dwcep->num]->doepdma,
1854 dwcep->dma_desc_addr);
1856 diepctl.b.epena = 1;
1858 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
1859 out_ep_regs[dwcep->num]->doepctl, 0,
/*
 * Complete the current per-IO ISOC transfer on an endpoint: pop the
 * finished request from the queue, copy per-frame actual lengths and
 * statuses back from the DMA descriptors into the portable extended
 * request, invoke the gadget's xisoc completion callback (with the PCD
 * lock dropped), free the request, and kick off the next one.
 *
 * NOTE(review): this extract is line-sampled; braces, blank lines and
 * some statements sit on elided numbered lines.  Code text below is
 * untouched; only comments were added.
 */
1871 * - Remove the request from the queue
1873 void complete_xiso_ep(dwc_otg_pcd_ep_t *ep)
1875 dwc_otg_pcd_request_t *req = NULL;
1876 struct dwc_iso_xreq_port *ereq = NULL;
1877 struct dwc_iso_pkt_desc_port *ddesc_iso = NULL;
1878 dwc_ep_t *dwcep = NULL;
1882 dwcep = &ep->dwc_ep;
1884 /* Get the first pending request from the queue */
1885 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1886 req = DWC_CIRCLEQ_FIRST(&ep->queue);
1888 DWC_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
/* Transfer finished: adjust the bookkeeping counters. */
1891 dwcep->xiso_active_xfers--;
1892 dwcep->xiso_queued_xfers--;
1893 /* Remove this request from the queue */
1894 DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
1896 DWC_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
1901 ereq = &req->ext_req;
1902 ddesc_iso = ereq->per_io_frame_descs;
/* Counter underflow indicates a completion without a matching start. */
1904 if (dwcep->xiso_active_xfers < 0) {
1905 DWC_WARN("EP#%d (xiso_active_xfers=%d)", dwcep->num,
1906 dwcep->xiso_active_xfers);
1909 /* Fill the Isoc descs of portable extended req from dma descriptors */
1910 for (i = 0; i < ereq->pio_pkt_count; i++) {
1911 if (dwcep->is_in) { /* IN endpoints */
/* txbytes/rxbytes hold the residue; actual = requested - residue. */
1912 ddesc_iso[i].actual_length = ddesc_iso[i].length -
1913 dwcep->desc_addr[i].status.b_iso_in.txbytes;
1914 ddesc_iso[i].status =
1915 dwcep->desc_addr[i].status.b_iso_in.txsts;
1916 } else { /* OUT endpoints */
1917 ddesc_iso[i].actual_length = ddesc_iso[i].length -
1918 dwcep->desc_addr[i].status.b_iso_out.rxbytes;
1919 ddesc_iso[i].status =
1920 dwcep->desc_addr[i].status.b_iso_out.rxsts;
/* Drop the lock across the gadget callback to avoid re-entry deadlock. */
1924 DWC_SPINUNLOCK(ep->pcd->lock);
1926 /* Call the completion function in the non-portable logic */
1927 ep->pcd->fops->xisoc_complete(ep->pcd, ep->priv, req->priv, 0,
1930 DWC_SPINLOCK(ep->pcd->lock);
1932 /* Free the request - specific freeing needed for extended request object */
1933 dwc_pcd_xiso_ereq_free(ep, req);
1935 /* Start the next request */
1936 dwc_otg_pcd_xiso_start_next_request(ep->pcd, ep);
/*
 * Create and initialize the ISOC packet descriptors of the extended
 * request, copying them from the non-portable request structure passed
 * in by the gadget layer.  Allocation is atomic or sleeping depending
 * on the caller's context flag.  Returns 0 or -DWC_E_NO_MEMORY.
 *
 * NOTE(review): this extract is line-sampled; braces, blank lines and
 * some statements sit on elided numbered lines.  Code text below is
 * untouched; only comments were added.
 */
1942 * Create and initialize the Isoc pkt descriptors of the extended request.
1945 static int dwc_otg_pcd_xiso_create_pkt_descs(dwc_otg_pcd_request_t *req,
1949 struct dwc_iso_xreq_port *ereq = NULL;
1950 struct dwc_iso_xreq_port *req_mapped = NULL;
1951 struct dwc_iso_pkt_desc_port *ipds = NULL; /* To be created in this function */
1955 ereq = &req->ext_req;
1956 req_mapped = (struct dwc_iso_xreq_port *)ereq_nonport;
1957 pkt_count = req_mapped->pio_pkt_count;
1959 /* Create the isoc descs */
/* Atomic vs. sleeping allocation, chosen by the caller's context. */
1961 ipds = DWC_ALLOC_ATOMIC(sizeof(*ipds) * pkt_count);
1963 ipds = DWC_ALLOC(sizeof(*ipds) * pkt_count);
1967 DWC_ERROR("Failed to allocate isoc descriptors");
1968 return -DWC_E_NO_MEMORY;
1971 /* Initialize the extended request fields */
1972 ereq->per_io_frame_descs = ipds;
1973 ereq->error_count = 0;
1974 ereq->pio_alloc_pkt_count = pkt_count;
1975 ereq->pio_pkt_count = pkt_count;
1976 ereq->tr_sub_flags = req_mapped->tr_sub_flags;
1978 /* Init the Isoc descriptors */
1979 for (i = 0; i < pkt_count; i++) {
1980 ipds[i].length = req_mapped->per_io_frame_descs[i].length;
1981 ipds[i].offset = req_mapped->per_io_frame_descs[i].offset;
1982 ipds[i].status = req_mapped->per_io_frame_descs[i].status; /* 0 */
1983 ipds[i].actual_length =
1984 req_mapped->per_io_frame_descs[i].actual_length;
1990 static void prn_ext_request(struct dwc_iso_xreq_port *ereq)
1992 struct dwc_iso_pkt_desc_port *xfd = NULL;
1995 DWC_DEBUG("per_io_frame_descs=%p", ereq->per_io_frame_descs);
1996 DWC_DEBUG("tr_sub_flags=%d", ereq->tr_sub_flags);
1997 DWC_DEBUG("error_count=%d", ereq->error_count);
1998 DWC_DEBUG("pio_alloc_pkt_count=%d", ereq->pio_alloc_pkt_count);
1999 DWC_DEBUG("pio_pkt_count=%d", ereq->pio_pkt_count);
2000 DWC_DEBUG("res=%d", ereq->res);
2002 for (i = 0; i < ereq->pio_pkt_count; i++) {
2003 xfd = &ereq->per_io_frame_descs[0];
2004 DWC_DEBUG("FD #%d", i);
2006 DWC_DEBUG("xfd->actual_length=%d", xfd->actual_length);
2007 DWC_DEBUG("xfd->length=%d", xfd->length);
2008 DWC_DEBUG("xfd->offset=%d", xfd->offset);
2009 DWC_DEBUG("xfd->status=%d", xfd->status);
/*
 * Queue a per-IO (extended) ISOC request on an endpoint.  Only valid
 * in descriptor-DMA mode.  Allocates the portable request, clones the
 * gadget's per-frame descriptors into it, records the buffer, links
 * the request onto the endpoint queue, and — for ASAP sub-mode —
 * starts it immediately when no transfer is active.
 *
 * NOTE(review): this extract is line-sampled; braces, blank lines and
 * some statements sit on elided numbered lines.  Code text below is
 * untouched; only comments were added.
 */
2016 int dwc_otg_pcd_xiso_ep_queue(dwc_otg_pcd_t *pcd, void *ep_handle,
2017 uint8_t *buf, dwc_dma_t dma_buf, uint32_t buflen,
2018 int zero, void *req_handle, int atomic_alloc,
2021 dwc_otg_pcd_request_t *req = NULL;
2022 dwc_otg_pcd_ep_t *ep;
2023 dwc_irqflags_t flags;
2026 ep = get_ep_from_handle(pcd, ep_handle);
2028 DWC_WARN("bad ep\n");
2029 return -DWC_E_INVALID;
2032 /* We support this extension only for DDMA mode */
2033 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC)
2034 if (!GET_CORE_IF(pcd)->dma_desc_enable)
2035 return -DWC_E_INVALID;
2037 /* Create a dwc_otg_pcd_request_t object */
/* Atomic vs. sleeping allocation, per the caller's context flag. */
2039 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2041 req = DWC_ALLOC(sizeof(*req));
2045 return -DWC_E_NO_MEMORY;
2048 /* Create the Isoc descs for this request which shall be the exact match
2049 * of the structure sent to us from the non-portable logic */
2051 dwc_otg_pcd_xiso_create_pkt_descs(req, ereq_nonport, atomic_alloc);
2053 DWC_WARN("Failed to init the Isoc descriptors");
2058 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2060 DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2063 req->length = buflen;
2064 req->sent_zlp = zero;
2065 req->priv = req_handle;
2067 /* DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags); */
/* Record the transfer buffer on the endpoint; per-frame offsets in
 * the descriptors are relative to dma_buf. */
2068 ep->dwc_ep.dma_addr = dma_buf;
2069 ep->dwc_ep.start_xfer_buff = buf;
2070 ep->dwc_ep.xfer_buff = buf;
2071 ep->dwc_ep.xfer_len = 0;
2072 ep->dwc_ep.xfer_count = 0;
2073 ep->dwc_ep.sent_zlp = 0;
2074 ep->dwc_ep.total_len = buflen;
2076 /* Add this request to the tail */
2077 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2078 ep->dwc_ep.xiso_queued_xfers++;
2080 /* DWC_DEBUG("CP_0"); */
2081 /* DWC_DEBUG("req->ext_req.tr_sub_flags=%d", req->ext_req.tr_sub_flags); */
2082 /* prn_ext_request((struct dwc_iso_xreq_port *) ereq_nonport); */
2083 /* prn_ext_request(&req->ext_req); */
2085 /* DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags); */
2087 /* If the req->status == ASAP then check if there is any active transfer
2088 * for this endpoint. If no active transfers, then get the first entry
2089 * from the queue and start that transfer
2091 if (req->ext_req.tr_sub_flags == DWC_EREQ_TF_ASAP) {
2092 res = dwc_otg_pcd_xiso_start_next_request(pcd, ep);
2094 DWC_WARN("Failed to start the next Isoc transfer");
2095 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2101 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2106 /* END ifdef DWC_UTE_PER_IO ***************************************************/
/*
 * Queue a transfer request on an endpoint (the PCD side of the
 * gadget's usb_ep_queue).  Allocates a request object, optionally
 * bounces the buffer when DMA needs 4-byte alignment, and either
 * starts the transfer immediately (idle, non-stopped EP — EP0 via its
 * state machine, other EPs directly) or just enqueues it; for stopped
 * slave-mode IN EPs the IN-token interrupt is unmasked so the transfer
 * starts on the next token.
 *
 * NOTE(review): this extract is line-sampled; braces, blank lines and
 * some statements sit on elided numbered lines.  Code text below is
 * untouched; only comments were added.
 */
2107 int dwc_otg_pcd_ep_queue(dwc_otg_pcd_t *pcd, void *ep_handle,
2108 uint8_t *buf, dwc_dma_t dma_buf, uint32_t buflen,
2109 int zero, void *req_handle, int atomic_alloc)
2111 dwc_irqflags_t flags;
2112 dwc_otg_pcd_request_t *req;
2113 dwc_otg_pcd_ep_t *ep;
2114 uint32_t max_transfer;
2116 ep = get_ep_from_handle(pcd, ep_handle);
/* EP0 has no descriptor but is still a valid target. */
2117 if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2118 DWC_WARN("bad ep\n");
2119 return -DWC_E_INVALID;
2123 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2125 req = DWC_ALLOC(sizeof(*req));
2129 return -DWC_E_NO_MEMORY;
2131 DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2132 if (!GET_CORE_IF(pcd)->core_params->opt) {
2133 if (ep->dwc_ep.num != 0) {
2134 DWC_ERROR("queue req %p, len %d buf %p\n",
2135 req_handle, buflen, buf);
2141 req->length = buflen;
2142 req->sent_zlp = zero;
2143 req->priv = req_handle;
2144 req->dw_align_buf = NULL;
/* Buffer DMA needs 4-byte alignment; allocate a bounce buffer when
 * buffer-DMA mode gets an unaligned address. */
2145 if ((dma_buf & 0x3) && GET_CORE_IF(pcd)->dma_enable
2146 && !GET_CORE_IF(pcd)->dma_desc_enable)
2147 req->dw_align_buf = DWC_DMA_ALLOC(buflen,
2148 &req->dw_align_buf_dma);
2149 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2152 * After adding request to the queue for IN ISOC wait for In Token Received
2153 * when TX FIFO is empty interrupt and for OUT ISOC wait for OUT Token
2154 * Received when EP is disabled interrupt to obtain starting microframe
2155 * (odd/even) start transfer
2157 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
2159 depctl_data_t depctl = {.d32 =
2160 DWC_READ_REG32(&pcd->core_if->
2161 dev_if->in_ep_regs[ep->
2165 ++pcd->request_pending;
2167 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2168 if (ep->dwc_ep.is_in) {
2170 DWC_WRITE_REG32(&pcd->core_if->
2171 dev_if->in_ep_regs[ep->dwc_ep.
2176 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2182 * For EP0 IN without premature status, zlp is required?
2184 if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
2185 DWC_DEBUGPL(DBG_PCDV, "%d-OUT ZLP\n", ep->dwc_ep.num);
2186 /* _req->zero = 1; */
2189 /* Start the transfer */
/* Idle, non-stopped endpoint: start the transfer right away. */
2190 if (DWC_CIRCLEQ_EMPTY(&ep->queue) && !ep->stopped) {
/* EP0 is driven by the control-transfer state machine. */
2192 if (ep->dwc_ep.num == 0) {
2193 switch (pcd->ep0state) {
2194 case EP0_IN_DATA_PHASE:
2195 DWC_DEBUGPL(DBG_PCD,
2196 "%s ep0: EP0_IN_DATA_PHASE\n",
2200 case EP0_OUT_DATA_PHASE:
2201 DWC_DEBUGPL(DBG_PCD,
2202 "%s ep0: EP0_OUT_DATA_PHASE\n",
/* A queued request during SET_CONFIGURATION processing is the
 * status-phase completion: flip to IN status phase. */
2204 if (pcd->request_config) {
2205 /* Complete STATUS PHASE */
2206 ep->dwc_ep.is_in = 1;
2207 pcd->ep0state = EP0_IN_STATUS_PHASE;
2211 case EP0_IN_STATUS_PHASE:
2212 DWC_DEBUGPL(DBG_PCD,
2213 "%s ep0: EP0_IN_STATUS_PHASE\n",
2218 DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
2220 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2221 return -DWC_E_SHUTDOWN;
2224 ep->dwc_ep.dma_addr = dma_buf;
2225 ep->dwc_ep.start_xfer_buff = buf;
2226 ep->dwc_ep.xfer_buff = buf;
2227 ep->dwc_ep.xfer_len = buflen;
2228 ep->dwc_ep.xfer_count = 0;
2229 ep->dwc_ep.sent_zlp = 0;
2230 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
/* A maxpacket-multiple transfer needs a trailing ZLP terminator. */
2233 if ((ep->dwc_ep.xfer_len %
2234 ep->dwc_ep.maxpacket == 0)
2235 && (ep->dwc_ep.xfer_len != 0)) {
2236 ep->dwc_ep.sent_zlp = 1;
2241 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
2243 } /* non-ep0 endpoints */
/* CFI-managed buffer modes build their own descriptor layout. */
2246 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2247 /* store the request length */
2248 ep->dwc_ep.cfi_req_len = buflen;
2249 pcd->cfi->ops.build_descriptors(pcd->cfi, pcd,
2254 GET_CORE_IF(ep->pcd)->
2255 core_params->max_transfer_size;
2257 /* Setup and start the Transfer */
/* Unaligned-DMA case: stage IN data through the bounce buffer. */
2258 if (req->dw_align_buf) {
2259 if (ep->dwc_ep.is_in)
2260 dwc_memcpy(req->dw_align_buf,
2262 ep->dwc_ep.dma_addr =
2263 req->dw_align_buf_dma;
2264 ep->dwc_ep.start_xfer_buff =
2266 ep->dwc_ep.xfer_buff =
2269 ep->dwc_ep.dma_addr = dma_buf;
2270 ep->dwc_ep.start_xfer_buff = buf;
2271 ep->dwc_ep.xfer_buff = buf;
2273 ep->dwc_ep.xfer_len = 0;
2274 ep->dwc_ep.xfer_count = 0;
2275 ep->dwc_ep.sent_zlp = 0;
2276 ep->dwc_ep.total_len = buflen;
2278 ep->dwc_ep.maxxfer = max_transfer;
/* Descriptor-DMA bounds one transfer; OUT also rounds down to a
 * 4-byte multiple. */
2279 if (GET_CORE_IF(pcd)->dma_desc_enable) {
2280 uint32_t out_max_xfer =
2281 DDMA_MAX_TRANSFER_SIZE -
2282 (DDMA_MAX_TRANSFER_SIZE % 4);
2283 if (ep->dwc_ep.is_in) {
2284 if (ep->dwc_ep.maxxfer >
2285 DDMA_MAX_TRANSFER_SIZE) {
2286 ep->dwc_ep.maxxfer =
2287 DDMA_MAX_TRANSFER_SIZE;
2290 if (ep->dwc_ep.maxxfer >
2292 ep->dwc_ep.maxxfer =
/* Keep partial transfers a whole number of max-packets. */
2297 if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
2298 ep->dwc_ep.maxxfer -=
2299 (ep->dwc_ep.maxxfer %
2300 ep->dwc_ep.maxpacket);
2304 if ((ep->dwc_ep.total_len %
2305 ep->dwc_ep.maxpacket == 0)
2306 && (ep->dwc_ep.total_len != 0)) {
2307 ep->dwc_ep.sent_zlp = 1;
2313 dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
2319 ++pcd->request_pending;
2320 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
/* Slave mode, stopped IN EP: unmask the IN-token-received-with-
 * empty-TxFIFO interrupt so the transfer starts on the next token. */
2321 if (ep->dwc_ep.is_in && ep->stopped
2322 && !(GET_CORE_IF(pcd)->dma_enable)) {
2323 /** @todo NGS Create a function for this. */
2324 diepmsk_data_t diepmsk = {.d32 = 0 };
2325 diepmsk.b.intktxfemp = 1;
2326 if (GET_CORE_IF(pcd)->multiproc_int_enable) {
2327 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
2329 diepeachintmsk[ep->dwc_ep.num],
2332 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
2333 dev_global_regs->diepmsk, 0,
2339 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
/*
 * Dequeue (cancel) a previously queued request, identified by the
 * gadget's request handle.  If the request is found on the endpoint's
 * queue it is completed with -DWC_E_RESTART; otherwise -DWC_E_INVALID
 * is returned.
 *
 * NOTE(review): this extract is line-sampled; braces, blank lines and
 * some statements sit on elided numbered lines.  Code text below is
 * untouched; only comments were added.
 */
2344 int dwc_otg_pcd_ep_dequeue(dwc_otg_pcd_t *pcd, void *ep_handle,
2347 dwc_irqflags_t flags;
2348 dwc_otg_pcd_request_t *req;
2349 dwc_otg_pcd_ep_t *ep;
2351 ep = get_ep_from_handle(pcd, ep_handle);
2352 if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2353 DWC_WARN("bad argument\n");
2354 return -DWC_E_INVALID;
2357 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2359 /* make sure it's actually queued on this endpoint */
2360 DWC_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
2361 if (req->priv == (void *)req_handle) {
/* Loop fell through: the handle was not found on this EP's queue. */
2366 if (req->priv != (void *)req_handle) {
2367 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2368 return -DWC_E_INVALID;
/* Complete the cancelled request back to the gadget with -RESTART. */
2371 if (!DWC_CIRCLEQ_EMPTY_ENTRY(req, queue_entry)) {
2372 dwc_otg_request_done(ep, req, -DWC_E_RESTART);
2377 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2379 return req ? 0 : -DWC_E_SHUTDOWN;
/*
 * Set or clear the halt (STALL) state of an endpoint.
 * value == 0 clears the stall, 1 sets it, 2/3 clear/set only the
 * stall_clear_flag.  Fails with -DWC_E_AGAIN when transfers are still
 * queued, or (DDMA IN) when data is still sitting in the Tx FIFO.
 * ISOC endpoints cannot be halted.
 *
 * NOTE(review): this extract is line-sampled; braces, blank lines and
 * some statements sit on elided numbered lines.  Code text below is
 * untouched; only comments were added.
 */
2383 int dwc_otg_pcd_ep_halt(dwc_otg_pcd_t *pcd, void *ep_handle, int value)
2385 dwc_otg_pcd_ep_t *ep;
2386 dwc_irqflags_t flags;
2389 ep = get_ep_from_handle(pcd, ep_handle);
2391 if (!ep || (!ep->desc && ep != &pcd->ep0) ||
2392 (ep->desc && (ep->desc->bmAttributes == UE_ISOCHRONOUS))) {
2393 DWC_WARN("%s, bad ep\n", __func__);
2394 return -DWC_E_INVALID;
2397 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
/* Refuse to (un)halt while transfers are still pending. */
2398 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
2399 DWC_WARN("%d %s XFer In process\n", ep->dwc_ep.num,
2400 ep->dwc_ep.is_in ? "IN" : "OUT");
2401 retval = -DWC_E_AGAIN;
2402 } else if (value == 0) {
2403 ep->dwc_ep.stall_clear_flag = 0;
2404 dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
2405 } else if (value == 1) {
/* DDMA IN EP: only stall once the Tx FIFO has fully drained. */
2407 if (ep->dwc_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
2408 dtxfsts_data_t txstatus;
2409 fifosize_data_t txfifosize;
2412 DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->
2413 dtxfsiz[ep->dwc_ep.tx_fifo_num]);
2415 DWC_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
2416 in_ep_regs[ep->dwc_ep.num]->dtxfsts);
/* Free space below FIFO depth means data is still queued. */
2418 if (txstatus.b.txfspcavail < txfifosize.b.depth) {
2419 DWC_WARN("%s() Data In Tx Fifo\n", __func__);
2420 retval = -DWC_E_AGAIN;
/* An EP0 stall also resets the control state machine. */
2422 if (ep->dwc_ep.num == 0) {
2423 pcd->ep0state = EP0_STALL;
2427 dwc_otg_ep_set_stall(GET_CORE_IF(pcd),
2431 if (ep->dwc_ep.num == 0) {
2432 pcd->ep0state = EP0_STALL;
2436 dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
/* values 2/3: manipulate only the deferred stall-clear flag. */
2438 } else if (value == 2) {
2439 ep->dwc_ep.stall_clear_flag = 0;
2440 } else if (value == 3) {
2441 ep->dwc_ep.stall_clear_flag = 1;
2445 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
/*
 * This function initiates remote wakeup of the host from suspend
 * state.  Only acts when the bus is actually suspended and the host
 * enabled DEVICE_REMOTE_WAKEUP; with ADP active it first re-arms the
 * core for device mode (or fires SRP), then toggles DCTL.RmtWkUpSig.
 *
 * NOTE(review): this extract is line-sampled; braces, blank lines and
 * some statements sit on elided numbered lines.  Code text below is
 * untouched; only comments were added.
 */
2451 * This function initiates remote wakeup of the host from suspend state.
2453 void dwc_otg_pcd_rem_wkup_from_suspend(dwc_otg_pcd_t *pcd, int set)
2455 dctl_data_t dctl = { 0 };
2456 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2459 dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
/* Remote wakeup is only meaningful while the bus is suspended. */
2460 if (!dsts.b.suspsts) {
2461 DWC_WARN("Remote wakeup while is not in suspend state\n");
2463 /* Check if DEVICE_REMOTE_WAKEUP feature enabled */
2464 if (pcd->remote_wakeup_enable) {
/* With ADP running, the power-down logic must be disabled and the
 * core re-initialized for device mode before signaling. */
2467 if (core_if->adp_enable) {
2468 gpwrdn_data_t gpwrdn;
2470 dwc_otg_adp_probe_stop(core_if);
2472 /* Mask SRP detected interrupt from Power Down Logic */
2474 gpwrdn.b.srp_det_msk = 1;
2475 DWC_MODIFY_REG32(&core_if->core_global_regs->
2476 gpwrdn, gpwrdn.d32, 0);
2478 /* Disable Power Down Logic */
2480 gpwrdn.b.pmuactv = 1;
2481 DWC_MODIFY_REG32(&core_if->core_global_regs->
2482 gpwrdn, gpwrdn.d32, 0);
2485 * Initialize the Core for Device mode.
2487 core_if->op_state = B_PERIPHERAL;
2488 dwc_otg_core_init(core_if);
2489 dwc_otg_enable_global_interrupts(core_if);
2490 cil_pcd_start(core_if);
2492 dwc_otg_initiate_srp(core_if);
/* Toggle DCTL.RmtWkUpSig: set to start signaling, clear to stop. */
2495 dctl.b.rmtwkupsig = 1;
2496 DWC_MODIFY_REG32(&core_if->dev_if->
2497 dev_global_regs->dctl, 0, dctl.d32);
2498 DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
2501 DWC_MODIFY_REG32(&core_if->dev_if->
2502 dev_global_regs->dctl, dctl.d32, 0);
2503 DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
2506 DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
2510 #ifdef CONFIG_USB_DWC_OTG_LPM
/*
 * This function initiates remote wakeup of the host from the L1 (LPM)
 * sleep state.  Verifies the port is in L1, that the host enabled
 * remote wakeup and that resume is permitted, then disables UTMI
 * sleep/L1 clock gating and pulses DCTL.RmtWkUpSig.
 *
 * NOTE(review): this extract is line-sampled; braces, blank lines and
 * some statements sit on elided numbered lines.  Code text below is
 * untouched; only comments were added.
 */
2512 * This function initiates remote wakeup of the host from L1 sleep state.
2514 void dwc_otg_pcd_rem_wkup_from_sleep(dwc_otg_pcd_t *pcd, int set)
2516 glpmcfg_data_t lpmcfg;
2517 pcgcctl_data_t pcgcctl = {.d32 = 0 };
2519 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2521 lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
2523 /* Check if we are in L1 state */
2524 if (!lpmcfg.b.prt_sleep_sts) {
2525 DWC_DEBUGPL(DBG_PCD, "Device is not in sleep state\n");
2529 /* Check if host allows remote wakeup */
2530 if (!lpmcfg.b.rem_wkup_en) {
2531 DWC_DEBUGPL(DBG_PCD, "Host does not allow remote wakeup\n");
2535 /* Check if Resume OK */
2536 if (!lpmcfg.b.sleep_state_resumeok) {
2537 DWC_DEBUGPL(DBG_PCD, "Sleep state resume is not OK\n");
/* Leave L1: disable UTMI sleep and drop the HIRD threshold bit. */
2541 lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
2542 lpmcfg.b.en_utmi_sleep = 0;
2543 lpmcfg.b.hird_thres &= (~(1 << 4));
2545 /* Clear Enbl_L1Gating bit. */
2546 pcgcctl.b.enbl_sleep_gating = 1;
2547 DWC_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
2549 DWC_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
2552 dctl_data_t dctl = {.d32 = 0 };
2553 dctl.b.rmtwkupsig = 1;
2554 /* Set RmtWkUpSig bit to start remote wakup signaling.
2555 * Hardware will automatically clear this bit.
2557 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
2559 DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
2566 * Performs remote wakeup.
2568 void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set)
2570 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2571 dwc_irqflags_t flags;
2572 if (dwc_otg_is_device_mode(core_if)) {
2573 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2574 #ifdef CONFIG_USB_DWC_OTG_LPM
2575 if (core_if->lx_state == DWC_OTG_L1) {
2576 dwc_otg_pcd_rem_wkup_from_sleep(pcd, set);
2579 dwc_otg_pcd_rem_wkup_from_suspend(pcd, set);
2580 #ifdef CONFIG_USB_DWC_OTG_LPM
2583 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2588 void dwc_otg_pcd_disconnect_us(dwc_otg_pcd_t *pcd, int no_of_usecs)
2590 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2591 dctl_data_t dctl = { 0 };
2593 if (dwc_otg_is_device_mode(core_if)) {
2594 dctl.b.sftdiscon = 1;
2595 DWC_PRINTF("Soft disconnect for %d useconds\n", no_of_usecs);
2596 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0,
2598 dwc_udelay(no_of_usecs);
2599 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
2603 DWC_PRINTF("NOT SUPPORTED IN HOST MODE\n");
2609 int dwc_otg_pcd_wakeup(dwc_otg_pcd_t *pcd)
2612 gotgctl_data_t gotgctl;
2615 * This function starts the Protocol if no session is in progress. If
2616 * a session is already in progress, but the device is suspended,
2617 * remote wakeup signaling is started.
2620 /* Check if valid session */
2622 DWC_READ_REG32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
2623 if (gotgctl.b.bsesvld) {
2624 /* Check if suspend state */
2627 (GET_CORE_IF(pcd)->dev_if->dev_global_regs->
2629 if (dsts.b.suspsts) {
2630 dwc_otg_pcd_remote_wakeup(pcd, 1);
2633 dwc_otg_pcd_initiate_srp(pcd);
2641 * Implement Soft-Connect and Soft-Disconnect function
2644 void dwc_otg_pcd_pullup_enable(dwc_otg_pcd_t *pcd)
2647 DWC_MODIFY_REG32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl), 2,
2651 void dwc_otg_pcd_pullup_disable(dwc_otg_pcd_t *pcd)
2654 DWC_MODIFY_REG32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl), 0,
2658 void dwc_pcd_reset(dwc_otg_pcd_t *pcd)
2660 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2661 dwc_otg_disable_global_interrupts(core_if);
2662 dwc_otg_core_init(core_if);
2663 dwc_otg_pcd_reinit(pcd);
2664 dwc_otg_core_dev_init(core_if);
2665 dwc_otg_enable_global_interrupts(core_if);
2669 * Start the SRP timer to detect when the SRP does not complete within
2672 * @param pcd the pcd structure.
2674 void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd)
2676 dwc_irqflags_t flags;
2677 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2678 dwc_otg_initiate_srp(GET_CORE_IF(pcd));
2679 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2682 int dwc_otg_pcd_get_frame_number(dwc_otg_pcd_t *pcd)
2684 return dwc_otg_get_frame_number(GET_CORE_IF(pcd));
2687 int dwc_otg_pcd_is_lpm_enabled(dwc_otg_pcd_t *pcd)
2689 return GET_CORE_IF(pcd)->core_params->lpm_enable;
2692 int dwc_otg_pcd_is_besl_enabled(dwc_otg_pcd_t *pcd)
2694 return GET_CORE_IF(pcd)->core_params->besl_enable;
2697 int dwc_otg_pcd_get_param_baseline_besl(dwc_otg_pcd_t *pcd)
2699 return GET_CORE_IF(pcd)->core_params->baseline_besl;
2702 int dwc_otg_pcd_get_param_deep_besl(dwc_otg_pcd_t *pcd)
2704 return GET_CORE_IF(pcd)->core_params->deep_besl;
2707 uint32_t get_b_hnp_enable(dwc_otg_pcd_t *pcd)
2709 return pcd->b_hnp_enable;
2712 uint32_t get_a_hnp_support(dwc_otg_pcd_t *pcd)
2714 return pcd->a_hnp_support;
2717 uint32_t get_a_alt_hnp_support(dwc_otg_pcd_t *pcd)
2719 return pcd->a_alt_hnp_support;
2722 int dwc_otg_pcd_get_rmwkup_enable(dwc_otg_pcd_t *pcd)
2724 return pcd->remote_wakeup_enable;
2727 #endif /* DWC_HOST_ONLY */