1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_intr.c $
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 * ========================================================================== */
33 #ifndef DWC_DEVICE_ONLY
35 #include "dwc_otg_hcd.h"
36 #include "dwc_otg_regs.h"
37 #include <linux/usb.h>
38 #include "dwc_otg_driver.h"
39 #include "usbdev_rk.h"
40 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
41 #include <../drivers/usb/core/hcd.h>
43 #include <linux/usb/hcd.h>
46 * This file contains the implementation of the HCD Interrupt handlers.
49 /** This function handles interrupts for the HCD. */
50 int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd)
54 	dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
55 	gintsts_data_t gintsts;
57 	dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
/* Bail out early if the HAPS platform helper reports the core disconnected. */
60 	if (dwc_otg_check_haps_status(core_if) == -1) {
61 		DWC_WARN("HAPS is disconnected");
65 	/* Exit from ISR if core is hibernated */
66 	if (core_if->hibernation_suspend == 1) {
/* Serialize with the rest of the HCD while sampling and servicing interrupts. */
69 	DWC_SPINLOCK(dwc_otg_hcd->lock);
70 	/* Check if HOST Mode */
71 	if (dwc_otg_is_host_mode(core_if)) {
/* Pending causes already masked by GINTMSK (see the debug text below). */
72 		gintsts.d32 = dwc_otg_read_core_intr(core_if);
74 			DWC_SPINUNLOCK(dwc_otg_hcd->lock);
78 		/* Don't print debug message in the interrupt handler on SOF */
80 		if (gintsts.d32 != DWC_SOF_INTR_MASK)
82 			DWC_DEBUGPL(DBG_HCD, "\n");
87 		if (gintsts.d32 != DWC_SOF_INTR_MASK)
90 				    "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
/* Fan out to one handler per asserted cause; results are OR-ed into retval. */
94 		if (gintsts.b.sofintr) {
95 			retval |= dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd);
97 		if (gintsts.b.rxstsqlvl) {
99 			    dwc_otg_hcd_handle_rx_status_q_level_intr
102 		if (gintsts.b.nptxfempty) {
104 			    dwc_otg_hcd_handle_np_tx_fifo_empty_intr
107 		if (gintsts.b.i2cintr) {
108 			/** @todo Implement i2cintr handler. */
110 		if (gintsts.b.portintr) {
111 			retval |= dwc_otg_hcd_handle_port_intr(dwc_otg_hcd);
113 		if (gintsts.b.hcintr) {
114 			retval |= dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd);
116 		if (gintsts.b.ptxfempty) {
118 			    dwc_otg_hcd_handle_perio_tx_fifo_empty_intr
/* Verbose exit trace, suppressed for the SOF-only case to avoid log flooding. */
123 		if (gintsts.d32 != DWC_SOF_INTR_MASK)
127 				    "DWC OTG HCD Finished Servicing Interrupts\n");
128 			DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n",
129 				    DWC_READ_REG32(&global_regs->gintsts));
130 			DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n",
131 				    DWC_READ_REG32(&global_regs->gintmsk));
137 		if (gintsts.d32 != DWC_SOF_INTR_MASK)
139 			DWC_DEBUGPL(DBG_HCD, "\n");
143 	DWC_SPINUNLOCK(dwc_otg_hcd->lock);
147 #ifdef DWC_TRACK_MISSED_SOFS
148 #warning Compiling code to track missed SOFs
149 #define FRAME_NUM_ARRAY_SIZE 1000
151 * This function is for debug only.
153 static inline void track_missed_sofs(uint16_t curr_frame_number)
155 	static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE];
156 	static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE];
157 	static int frame_num_idx;
/* Seeded with the max frame number so the very first SOF looks contiguous. */
158 	static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM;
159 	static int dumped_frame_num_array;
/* While the capture buffer has room, record any frame-number step that is not
 * exactly +1 modulo DWC_HFNUM_MAX_FRNUM (i.e. a missed SOF). */
161 	if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
162 		if (((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) !=
164 			frame_num_array[frame_num_idx] = curr_frame_number;
165 			last_frame_num_array[frame_num_idx++] = last_frame_num;
/* Buffer full: print the collected (current, previous) pairs exactly once. */
167 	} else if (!dumped_frame_num_array) {
169 		DWC_PRINTF("Frame     Last Frame\n");
170 		DWC_PRINTF("-----     ----------\n");
171 		for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
172 			DWC_PRINTF("0x%04x    0x%04x\n",
173 				   frame_num_array[i], last_frame_num_array[i]);
175 		dumped_frame_num_array = 1;
177 	last_frame_num = curr_frame_number;
182 * Handles the start-of-frame interrupt in host mode. Non-periodic
183 * transactions may be queued to the DWC_otg controller for the current
184 * (micro)frame. Periodic transactions may be queued to the controller for the
187 int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t *hcd)
190 	dwc_list_link_t *qh_entry;
192 	dwc_otg_transaction_type_e tr_type;
193 	gintsts_data_t gintsts = {.d32 = 0 };
/* Sample HFNUM to learn the current (micro)frame number. */
196 	    DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hfnum);
199 	DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n");
201 	hcd->frame_number = hfnum.b.frnum;
/* Accumulate the frame-remaining field for averaging/diagnostics. */
204 	hcd->frrem_accum += hfnum.b.frrem;
205 	hcd->frrem_samples++;
208 #ifdef DWC_TRACK_MISSED_SOFS
209 	track_missed_sofs(hcd->frame_number);
211 	/* Determine whether any periodic QHs should be executed. */
212 	qh_entry = DWC_LIST_FIRST(&hcd->periodic_sched_inactive);
213 	while (qh_entry != &hcd->periodic_sched_inactive) {
214 		qh = DWC_LIST_ENTRY(qh_entry, dwc_otg_qh_t, qh_list_entry);
/* Advance before moving the QH so list surgery doesn't break the walk. */
215 		qh_entry = qh_entry->next;
216 		if (dwc_frame_num_le(qh->sched_frame, hcd->frame_number)) {
218 			 * Move QH to the ready list to be executed next
221 			DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_ready,
/* Queue any transactions that became runnable in this frame. */
225 	tr_type = dwc_otg_hcd_select_transactions(hcd);
226 	if (tr_type != DWC_OTG_TRANSACTION_NONE) {
227 		dwc_otg_hcd_queue_transactions(hcd, tr_type);
230 	/* Clear interrupt */
231 	gintsts.b.sofintr = 1;
232 	DWC_WRITE_REG32(&hcd->core_if->core_global_regs->gintsts, gintsts.d32);
237 /** Handles the Rx Status Queue Level Interrupt, which indicates that there is at
238 * least one packet in the Rx FIFO. The packets are moved from the FIFO to
239 * memory if the DWC_otg controller is operating in Slave mode. */
240 int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *dwc_otg_hcd)
242 	host_grxsts_data_t grxsts;
245 	DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n");
/* Pop the top Rx status entry (GRXSTSP is read-and-pop). */
248 	    DWC_READ_REG32(&dwc_otg_hcd->core_if->core_global_regs->grxstsp);
/* Map the status entry's channel number back to our channel object. */
250 	hc = dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum];
252 		DWC_ERROR("Unable to get corresponding channel\n");
257 	DWC_DEBUGPL(DBG_HCDV, "    Ch num = %d\n", grxsts.b.chnum);
258 	DWC_DEBUGPL(DBG_HCDV, "    Count = %d\n", grxsts.b.bcnt);
259 	DWC_DEBUGPL(DBG_HCDV, "    DPID = %d, hc.dpid = %d\n", grxsts.b.dpid,
261 	DWC_DEBUGPL(DBG_HCDV, "    PStatus = %d\n", grxsts.b.pktsts);
263 	switch (grxsts.b.pktsts) {
264 	case DWC_GRXSTS_PKTSTS_IN:
265 		/* Read the data into the host buffer. */
266 		if (grxsts.b.bcnt > 0) {
267 			dwc_otg_read_packet(dwc_otg_hcd->core_if,
268 					    hc->xfer_buff, grxsts.b.bcnt);
270 			/* Update the HC fields for the next packet received. */
271 			hc->xfer_count += grxsts.b.bcnt;
272 			hc->xfer_buff += grxsts.b.bcnt;
275 	case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
276 	case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR:
277 	case DWC_GRXSTS_PKTSTS_CH_HALTED:
278 		/* Handled in interrupt, just ignore data */
281 		DWC_ERROR("RX_STS_Q Interrupt: Unknown status %d\n",
289 /** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
290 * data packets may be written to the FIFO for OUT transfers. More requests
291 * may be written to the non-periodic request queue for IN transfers. This
292 * interrupt is enabled only in Slave mode. */
293 int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd)
295 	DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n");
/* FIFO has space again: push more queued non-periodic transactions. */
296 	dwc_otg_hcd_queue_transactions(dwc_otg_hcd,
297 				       DWC_OTG_TRANSACTION_NON_PERIODIC);
301 /** This interrupt occurs when the periodic Tx FIFO is half-empty. More data
302 * packets may be written to the FIFO for OUT transfers. More requests may be
303 * written to the periodic request queue for IN transfers. This interrupt is
304 * enabled only in Slave mode. */
305 int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd)
307 	DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n");
/* FIFO has space again: push more queued periodic transactions. */
308 	dwc_otg_hcd_queue_transactions(dwc_otg_hcd,
309 				       DWC_OTG_TRANSACTION_PERIODIC);
313 /** There are multiple conditions that can cause a port interrupt. This function
314 * determines which interrupt conditions have occurred and handles them
316 int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t *dwc_otg_hcd)
320 	hprt0_data_t hprt0_modify;
321 	struct usb_hcd *hcd = dwc_otg_hcd_get_priv_data(dwc_otg_hcd);
322 	struct usb_bus *bus = hcd_to_bus(hcd);
/* Take two snapshots of HPRT0: hprt0 for testing, hprt0_modify for the
 * write-back that acknowledges the serviced conditions. */
324 	hprt0.d32 = DWC_READ_REG32(dwc_otg_hcd->core_if->host_if->hprt0);
325 	hprt0_modify.d32 = DWC_READ_REG32(dwc_otg_hcd->core_if->host_if->hprt0);
327 	/* Clear appropriate bits in HPRT0 to clear the interrupt bit in
/* Zero the write-1-to-clear bits first; only the ones actually handled
 * below get set back to 1 before the final write. */
330 	hprt0_modify.b.prtena = 0;
331 	hprt0_modify.b.prtconndet = 0;
332 	hprt0_modify.b.prtenchng = 0;
333 	hprt0_modify.b.prtovrcurrchng = 0;
335 	/* Port Connect Detected
336 	 * Set flag and clear if detected */
337 	if (dwc_otg_hcd->core_if->hibernation_suspend == 1) {
338 		/* Don't modify port status if we are in hibernation state */
339 		hprt0_modify.b.prtconndet = 1;
340 		hprt0_modify.b.prtenchng = 1;
341 		DWC_WRITE_REG32(dwc_otg_hcd->core_if->host_if->hprt0,
344 		    DWC_READ_REG32(dwc_otg_hcd->core_if->host_if->hprt0);
348 	if (hprt0.b.prtconndet) {
349 		/** @todo - check if steps performed in 'else' block should be performed regardless of adp */
350 		if (dwc_otg_hcd->core_if->adp_enable &&
351 		    dwc_otg_hcd->core_if->adp.vbuson_timer_started == 1) {
/* ADP probing was waiting on VBUS: a real connect arrived, stop the timer. */
352 			DWC_PRINTF("PORT CONNECT DETECTED ----------------\n");
353 			DWC_TIMER_CANCEL(dwc_otg_hcd->core_if->adp.
355 			dwc_otg_hcd->core_if->adp.vbuson_timer_started = 0;
356 			/* TODO - check if this is required, as
357 			 * host initialization was already performed
358 			 * after initial ADP probing
360 			/*dwc_otg_hcd->core_if->adp.vbuson_timer_started = 0;
361 			dwc_otg_core_init(dwc_otg_hcd->core_if);
362 			dwc_otg_enable_global_interrupts(dwc_otg_hcd->core_if);
363 			cil_hcd_start(dwc_otg_hcd->core_if); */
365 			hprt0_data_t hprt0_local;
366 			/* check if root hub is in suspend state
367 			 * if root hub in suspend, resume it.
370 			    && (hcd->state == HC_STATE_SUSPENDED)) {
372 				    ("%s: hcd->state = %d, hcd->flags = %ld\n",
373 				     __func__, hcd->state, hcd->flags);
374 				usb_hcd_resume_root_hub(hcd);
376 			DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x "
377 				    "Port Connect Detected--\n", hprt0.d32);
/* Record the connect for the hub driver's GetPortStatus path. */
378 			dwc_otg_hcd->flags.b.port_connect_status_change = 1;
379 			dwc_otg_hcd->flags.b.port_connect_status = 1;
380 			hprt0_modify.b.prtconndet = 1;
/* Electrical test mode 7 (per otg_ver): drive a port reset immediately.
 * NOTE(review): test_mode semantics inferred from value only — confirm. */
382 			if (dwc_otg_hcd->core_if->otg_ver
383 			    && (dwc_otg_hcd->core_if->test_mode == 7)) {
385 				    dwc_otg_read_hprt0(dwc_otg_hcd->core_if);
386 				hprt0_local.b.prtrst = 1;
387 				DWC_WRITE_REG32(dwc_otg_hcd->core_if->host_if->
388 						hprt0, hprt0_local.d32);
391 				    dwc_otg_read_hprt0(dwc_otg_hcd->core_if);
393 				DWC_WRITE_REG32(dwc_otg_hcd->core_if->host_if->
397 			/* B-Device has connected, Delete the connection timer. */
398 			DWC_TIMER_CANCEL(dwc_otg_hcd->conn_timer);
400 		/* The Hub driver asserts a reset when it sees port connect
401 		 * status change flag */
405 	/* Port Enable Changed
406 	 * Clear if detected - Set internal flag if disabled */
407 	if (hprt0.b.prtenchng) {
408 		DWC_DEBUGPL(DBG_HCD, "  --Port Interrupt HPRT0=0x%08x "
409 			    "Port Enable Changed--\n", hprt0.d32);
410 		hprt0_modify.b.prtenchng = 1;
411 		if (hprt0.b.prtena == 1) {
414 			dwc_otg_core_params_t *params =
415 			    dwc_otg_hcd->core_if->core_params;
416 			dwc_otg_core_global_regs_t *global_regs =
417 			    dwc_otg_hcd->core_if->core_global_regs;
418 			dwc_otg_host_if_t *host_if =
419 			    dwc_otg_hcd->core_if->host_if;
421 			/* Every time when port enables calculate
/* Recompute HFIR (frame interval) for the newly negotiated port speed. */
425 			    DWC_READ_REG32(&host_if->host_global_regs->hfir);
427 			    calc_frame_interval(dwc_otg_hcd->core_if);
428 			DWC_WRITE_REG32(&host_if->host_global_regs->hfir,
431 			/* Check if we need to adjust the PHY clock speed for
432 			 * low power and adjust it */
433 			if (params->host_support_fs_ls_low_power) {
434 				gusbcfg_data_t usbcfg;
437 				    DWC_READ_REG32(&global_regs->gusbcfg);
439 				if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED
441 				    DWC_HPRT0_PRTSPD_FULL_SPEED) {
446 					if (usbcfg.b.phylpwrclksel == 0) {
447 						/* Set PHY low power clock select for FS/LS devices */
448 						usbcfg.b.phylpwrclksel = 1;
450 						    (&global_regs->gusbcfg,
457 					    (&host_if->host_global_regs->hcfg);
/* LS device behind an FS PHY may run the 6 MHz low-power clock if the
 * host_ls_low_power_phy_clk parameter asks for it. */
459 					if (hprt0.b.prtspd ==
460 					    DWC_HPRT0_PRTSPD_LOW_SPEED
461 					    && params->host_ls_low_power_phy_clk
463 					    DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
466 						     "FS_PHY programming HCFG to 6 MHz (Low Power)\n");
467 						if (hcfg.b.fslspclksel !=
/* Otherwise fall back to the 48 MHz FS/LS PHY clock. */
480 						     "FS_PHY programming HCFG to 48 MHz ()\n");
481 						if (hcfg.b.fslspclksel !=
/* HS device: make sure the PHY low-power clock select is turned back off. */
496 					if (usbcfg.b.phylpwrclksel == 1) {
497 						usbcfg.b.phylpwrclksel = 0;
499 						    (&global_regs->gusbcfg,
506 				DWC_TASK_SCHEDULE(dwc_otg_hcd->
512 			/* Port has been enabled set the reset change flag */
513 			dwc_otg_hcd->flags.b.port_reset_change = 1;
516 			dwc_otg_hcd->flags.b.port_enable_change = 1;
521 	/** Overcurrent Change Interrupt */
522 	if (hprt0.b.prtovrcurrchng) {
523 		DWC_DEBUGPL(DBG_HCD, "  --Port Interrupt HPRT0=0x%08x "
524 			    "Port Overcurrent Changed--\n", hprt0.d32);
525 		dwc_otg_hcd->flags.b.port_over_current_change = 1;
526 		hprt0_modify.b.prtovrcurrchng = 1;
530 	/* Clear Port Interrupts */
531 	DWC_WRITE_REG32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32);
536 /** This interrupt indicates that one or more host channels has a pending
537 * interrupt. There are multiple conditions that can cause each host channel
538 * interrupt. This function determines which conditions have occurred for each
539 * host channel interrupt and handles them appropriately. */
540 int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t *dwc_otg_hcd)
546 	/* Clear appropriate bits in HCINTn to clear the interrupt bit in
/* HAINT is a bitmask of channels with a pending interrupt. */
549 	haint.d32 = dwc_otg_read_host_all_channels_intr(dwc_otg_hcd->core_if);
/* Walk every implemented channel and service the ones flagged in HAINT. */
551 	for (i = 0; i < dwc_otg_hcd->core_if->core_params->host_channels; i++) {
552 		if (haint.b2.chint & (1 << i)) {
553 			retval |= dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd, i);
561 * Gets the actual length of a transfer after the transfer halts. _halt_status
562 * holds the reason for the halt.
564 * For IN transfers where halt_status is DWC_OTG_HC_XFER_COMPLETE,
565 * *short_read is set to 1 upon return if less than the requested
566 * number of bytes were transferred. Otherwise, *short_read is set to 0 upon
567 * return. short_read may also be NULL on entry, in which case it remains
570 static uint32_t get_actual_xfer_length(dwc_hc_t *hc,
571 				       dwc_otg_hc_regs_t *hc_regs,
573 				       dwc_otg_halt_status_e halt_status,
576 	hctsiz_data_t hctsiz;
/* Default *short_read to 0 so the out-parameter is always well-defined. */
579 	if (short_read != NULL) {
582 	hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
584 	if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
/* IN complete: bytes actually received = programmed length minus the
 * residual xfersize; a nonzero residual means a short read. */
586 			length = hc->xfer_len - hctsiz.b.xfersize;
587 			if (short_read != NULL) {
588 				*short_read = (hctsiz.b.xfersize != 0);
590 		} else if (hc->qh->do_split) {
591 			length = qtd->ssplit_out_xfer_count;
593 			length = hc->xfer_len;
597 		 * Must use the hctsiz.pktcnt field to determine how much data
598 		 * has been transferred. This field reflects the number of
599 		 * packets that have been transferred via the USB. This is
600 		 * always an integral number of packets if the transfer was
601 		 * halted before its normal completion. (Can't use the
602 		 * hctsiz.xfersize field because that reflects the number of
603 		 * bytes transferred via the AHB, not the USB).
606 		    (hc->start_pkt_count - hctsiz.b.pktcnt) * hc->max_packet;
613 * Updates the state of the URB after a Transfer Complete interrupt on the
614 * host channel. Updates the actual_length field of the URB based on the
615 * number of bytes transferred via the host channel. Sets the URB status
616 * if the data transfer is finished.
618 * @return 1 if the data transfer specified by the URB is completely finished,
621 static int update_urb_state_xfer_comp(dwc_hc_t *hc,
622 				      dwc_otg_hc_regs_t *hc_regs,
623 				      dwc_otg_hcd_urb_t *urb,
/* How many bytes actually moved on this channel (also reports short read). */
631 	xfer_length = get_actual_xfer_length(hc, hc_regs, qtd,
632 					     DWC_OTG_HC_XFER_COMPLETE,
635 	/* non DWORD-aligned buffer case handling. */
636 	if (hc->align_buff && xfer_length && hc->ep_is_in) {
/* IN data landed in the bounce buffer; copy it into the caller's URB buffer. */
637 		dwc_memcpy(urb->buf + urb->actual_length, hc->qh->dw_align_buf,
641 	urb->actual_length += xfer_length;
/* Bulk OUT that filled the request exactly on a packet boundary with
 * URB_SEND_ZERO_PACKET set still owes a zero-length packet: not done yet. */
643 	if (xfer_length && (hc->ep_type == DWC_OTG_EP_TYPE_BULK) &&
644 	    (urb->flags & URB_SEND_ZERO_PACKET)
645 	    && (urb->actual_length == urb->length)
646 	    && !(urb->length % hc->max_packet)) {
648 	} else if (short_read || urb->actual_length >= urb->length) {
654 		hctsiz_data_t hctsiz;
655 		hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
656 		DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
657 			    __func__, (hc->ep_is_in ? "IN" : "OUT"),
659 		DWC_DEBUGPL(DBG_HCDV, "  hc->xfer_len %d\n", hc->xfer_len);
660 		DWC_DEBUGPL(DBG_HCDV, "  hctsiz.xfersize %d\n",
662 		DWC_DEBUGPL(DBG_HCDV, "  urb->transfer_buffer_length %d\n",
664 		DWC_DEBUGPL(DBG_HCDV, "  urb->actual_length %d\n",
666 		DWC_DEBUGPL(DBG_HCDV, "  short_read %d, xfer_done %d\n",
667 			    short_read, xfer_done);
675 * Save the starting data toggle for the next transfer. The data toggle is
676 * saved in the QH for non-control transfers and it's saved in the QTD for
679 void dwc_otg_hcd_save_data_toggle(dwc_hc_t *hc,
680 				  dwc_otg_hc_regs_t *hc_regs,
683 	hctsiz_data_t hctsiz;
/* The PID left in HCTSIZ is the toggle to resume the endpoint with. */
684 	hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
/* Non-control endpoints keep the toggle in the QH; control transfers keep
 * it per-QTD (each control transfer manages its own data stage toggle). */
686 	if (hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) {
687 		dwc_otg_qh_t *qh = hc->qh;
688 		if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
689 			qh->data_toggle = DWC_OTG_HC_PID_DATA0;
691 			qh->data_toggle = DWC_OTG_HC_PID_DATA1;
694 		if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
695 			qtd->data_toggle = DWC_OTG_HC_PID_DATA0;
697 			qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
703 * Updates the state of an Isochronous URB when the transfer is stopped for
704 * any reason. The fields of the current entry in the frame descriptor array
705 * are set based on the transfer state and the input _halt_status. Completes
706 * the Isochronous URB if all the URB frames have been completed.
708 * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be
709 * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE.
711 static dwc_otg_halt_status_e
712 update_isoc_urb_state(dwc_otg_hcd_t *hcd,
714 		      dwc_otg_hc_regs_t *hc_regs,
715 		      dwc_otg_qtd_t *qtd, dwc_otg_halt_status_e halt_status)
717 	dwc_otg_hcd_urb_t *urb = qtd->urb;
718 	dwc_otg_halt_status_e ret_val = halt_status;
719 	struct dwc_otg_hcd_iso_packet_desc *frame_desc;
/* Descriptor for the isoc frame the channel just finished (or failed). */
721 	frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
722 	switch (halt_status) {
723 	case DWC_OTG_HC_XFER_COMPLETE:
724 		frame_desc->status = 0;
725 		frame_desc->actual_length =
726 		    get_actual_xfer_length(hc, hc_regs, qtd, halt_status, NULL);
728 		/* non DWORD-aligned buffer case handling. */
729 		if (hc->align_buff && frame_desc->actual_length && hc->ep_is_in) {
730 			dwc_memcpy(urb->buf + frame_desc->offset +
731 				   qtd->isoc_split_offset, hc->qh->dw_align_buf,
732 				   frame_desc->actual_length);
736 	case DWC_OTG_HC_XFER_FRAME_OVERRUN:
739 			frame_desc->status = -DWC_E_NO_STREAM_RES;
741 			frame_desc->status = -DWC_E_COMMUNICATION;
743 		frame_desc->actual_length = 0;
745 	case DWC_OTG_HC_XFER_BABBLE_ERR:
747 		frame_desc->status = -DWC_E_OVERFLOW;
748 		/* Don't need to update actual_length in this case. */
750 	case DWC_OTG_HC_XFER_XACT_ERR:
752 		frame_desc->status = -DWC_E_PROTOCOL;
753 		frame_desc->actual_length =
754 		    get_actual_xfer_length(hc, hc_regs, qtd, halt_status, NULL);
756 		/* non DWORD-aligned buffer case handling. */
757 		if (hc->align_buff && frame_desc->actual_length && hc->ep_is_in) {
758 			dwc_memcpy(urb->buf + frame_desc->offset +
759 				   qtd->isoc_split_offset, hc->qh->dw_align_buf,
760 				   frame_desc->actual_length);
762 		/* Skip whole frame */
/* Split isoc IN under DMA: abandon this frame's split bookkeeping so the
 * next frame starts a fresh start-split. */
763 		if (hc->qh->do_split && (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) &&
764 		    hc->ep_is_in && hcd->core_if->dma_enable) {
765 			qtd->complete_split = 0;
766 			qtd->isoc_split_offset = 0;
771 		DWC_ASSERT(1, "Unhandled _halt_status (%d)\n", halt_status);
/* Advance to the next isoc frame; complete the URB once all frames done. */
774 	if (++qtd->isoc_frame_index == urb->packet_count) {
776 		 * urb->status is not used for isoc transfers.
777 		 * The individual frame_desc statuses are used instead.
779 		hcd->fops->complete(hcd, urb->priv, urb, 0);
780 		ret_val = DWC_OTG_HC_XFER_URB_COMPLETE;
782 		ret_val = DWC_OTG_HC_XFER_COMPLETE;
788 * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
789 * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
790 * still linked to the QH, the QH is added to the end of the inactive
791 * non-periodic schedule. For periodic QHs, removes the QH from the periodic
792 * schedule if no more QTDs are linked to the QH.
794 static void deactivate_qh(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int free_qtd)
796 	int continue_split = 0;
799 	DWC_DEBUGPL(DBG_HCDV, "  %s(%p,%p,%d)\n", __func__, hcd, qh, free_qtd);
801 	qtd = DWC_CIRCLEQ_FIRST(&qh->qtd_list);
/* A transaction mid-split (pending complete-split, or an isoc split past
 * its first segment) must keep the QH scheduled so the split can finish. */
803 	if (qtd->complete_split) {
805 	} else if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID ||
806 		   qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END) {
811 		dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
816 	dwc_otg_hcd_qh_deactivate(hcd, qh, continue_split);
820 * Releases a host channel for use by other transfers. Attempts to select and
821 * queue more transactions since at least one host channel is available.
823 * @param hcd The HCD state structure.
824 * @param hc The host channel to release.
825 * @param qtd The QTD associated with the host channel. This QTD may be freed
826 * if the transfer is complete or an error has occurred.
827 * @param halt_status Reason the channel is being released. This status
828 * determines the actions taken by this function.
830 static void release_channel(dwc_otg_hcd_t *hcd,
833 			    dwc_otg_halt_status_e halt_status)
835 	dwc_otg_transaction_type_e tr_type;
/* Assume we should try to queue more work after releasing the channel. */
837 	int continue_trans = 1;
839 	DWC_DEBUGPL(DBG_HCDV, "  %s: channel %d, halt_status %d\n",
840 		    __func__, hc->hc_num, halt_status);
/* Decide, per halt reason, whether the head QTD should be freed and
 * whether to keep scheduling (free_qtd handling partly elided here). */
842 	switch (halt_status) {
843 	case DWC_OTG_HC_XFER_URB_COMPLETE:
846 	case DWC_OTG_HC_XFER_AHB_ERR:
847 	case DWC_OTG_HC_XFER_STALL:
848 	case DWC_OTG_HC_XFER_BABBLE_ERR:
851 	case DWC_OTG_HC_XFER_XACT_ERR:
/* Three strikes: give up on the URB and complete it with a protocol error. */
852 		if (qtd->error_count >= 3) {
853 			DWC_DEBUGPL(DBG_HCDV,
854 				    "  Complete URB with transaction error\n");
856 			qtd->urb->status = -DWC_E_PROTOCOL;
857 			hcd->fops->complete(hcd, qtd->urb->priv,
858 					    qtd->urb, -DWC_E_PROTOCOL);
863 	case DWC_OTG_HC_XFER_URB_DEQUEUE:
865 		 * The QTD has already been removed and the QH has been
866 		 * deactivated. Don't want to do anything except release the
867 		 * host channel and try to queue more transfers.
871 	case DWC_OTG_HC_XFER_NO_HALT_STATUS:
874 	case DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE:
875 		DWC_DEBUGPL(DBG_HCDV, "  Complete URB with I/O error\n");
877 		qtd->urb->status = -DWC_E_IO;
878 		hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, -DWC_E_IO);
885 	if (hc->csplit_nak) {
889 	deactivate_qh(hcd, hc->qh, free_qtd);
893 	 * Release the host channel for use by other transfers. The cleanup
894 	 * function clears the channel interrupt enables and conditions, so
895 	 * there's no need to clear the Channel Halted interrupt separately.
897 	dwc_otg_hc_cleanup(hcd->core_if, hc);
898 	DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
900 	switch (hc->ep_type) {
901 	case DWC_OTG_EP_TYPE_CONTROL:
902 	case DWC_OTG_EP_TYPE_BULK:
903 		hcd->non_periodic_channels--;
908 		 * Don't release reservations for periodic channels here.
909 		 * That's done when a periodic transfer is descheduled (i.e.
910 		 * when the QH is removed from the periodic schedule).
915 	/* Try to queue more transfers now that there's a free channel. */
916 	if (continue_trans) {
917 		tr_type = dwc_otg_hcd_select_transactions(hcd);
918 		if (tr_type != DWC_OTG_TRANSACTION_NONE)
919 			dwc_otg_hcd_queue_transactions(hcd, tr_type);
924 * Halts a host channel. If the channel cannot be halted immediately because
925 * the request queue is full, this function ensures that the FIFO empty
926 * interrupt for the appropriate queue is enabled so that the halt request can
927 * be queued when there is space in the request queue.
929 * This function may also be called in DMA mode. In that case, the channel is
930 * simply released since the core always halts the channel automatically in
933 static void halt_channel(dwc_otg_hcd_t *hcd,
935 			 dwc_otg_qtd_t *qtd, dwc_otg_halt_status_e halt_status)
/* DMA mode: the core halts the channel itself, so just release it. */
937 	if (hcd->core_if->dma_enable) {
938 		release_channel(hcd, hc, qtd, halt_status);
942 	/* Slave mode processing... */
943 	dwc_otg_hc_halt(hcd->core_if, hc, halt_status);
/* Request queue was full: the halt is queued instead of issued, so make
 * sure the matching FIFO-empty interrupt will fire to flush it out. */
945 	if (hc->halt_on_queue) {
946 		gintmsk_data_t gintmsk = {.d32 = 0 };
947 		dwc_otg_core_global_regs_t *global_regs;
948 		global_regs = hcd->core_if->core_global_regs;
950 		if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
951 		    hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
953 			 * Make sure the Non-periodic Tx FIFO empty interrupt
954 			 * is enabled so that the non-periodic schedule will
957 			gintmsk.b.nptxfempty = 1;
958 			DWC_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
961 			 * Move the QH from the periodic queued schedule to
962 			 * the periodic assigned schedule. This allows the
963 			 * halt to be queued when the periodic schedule is
966 			DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
967 					   &hc->qh->qh_list_entry);
970 			 * Make sure the Periodic Tx FIFO Empty interrupt is
971 			 * enabled so that the periodic schedule will be
974 			gintmsk.b.ptxfempty = 1;
975 			DWC_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
981 * Performs common cleanup for non-periodic transfers after a Transfer
982 * Complete interrupt. This function should be called after any endpoint type
983 * specific handling is finished to release the host channel.
985 static void complete_non_periodic_xfer(dwc_otg_hcd_t *hcd,
987 				       dwc_otg_hc_regs_t *hc_regs,
989 				       dwc_otg_halt_status_e halt_status)
/* Transfer finished cleanly: reset the per-QTD error counter. */
993 	qtd->error_count = 0;
995 	hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
998 		 * Got a NYET on the last transaction of the transfer. This
999 		 * means that the endpoint should be in the PING state at the
1000 		 * beginning of the next transfer.
1002 		hc->qh->ping_state = 1;
1003 		clear_hc_int(hc_regs, nyet);
1007 	 * Always halt and release the host channel to make it available for
1008 	 * more transfers. There may still be more phases for a control
1009 	 * transfer or more data packets for a bulk transfer at this point,
1010 	 * but the host channel is still halted. A channel will be reassigned
1011 	 * to the transfer when the non-periodic schedule is processed after
1012 	 * the channel is released. This allows transactions to be queued
1013 	 * properly via dwc_otg_hcd_queue_transactions, which also enables the
1014 	 * Tx FIFO Empty interrupt if necessary.
1018 		 * IN transfers in Slave mode require an explicit disable to
1019 		 * halt the channel. (In DMA mode, this call simply releases
1022 		halt_channel(hcd, hc, qtd, halt_status);
1025 		 * The channel is automatically disabled by the core for OUT
1026 		 * transfers in Slave mode.
1028 		release_channel(hcd, hc, qtd, halt_status);
1033 * Performs common cleanup for periodic transfers after a Transfer Complete
1034 * interrupt. This function should be called after any endpoint type specific
1035 * handling is finished to release the host channel.
1037 static void complete_periodic_xfer(dwc_otg_hcd_t *hcd,
1039 				   dwc_otg_hc_regs_t *hc_regs,
1041 				   dwc_otg_halt_status_e halt_status)
1043 	hctsiz_data_t hctsiz;
/* Transfer finished cleanly: reset the per-QTD error counter. */
1044 	qtd->error_count = 0;
1046 	hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
/* OUT, or IN with no packets left: the core halts the channel itself.
 * Otherwise explicitly halt to flush leftover requests from the Tx queue. */
1047 	if (!hc->ep_is_in || hctsiz.b.pktcnt == 0) {
1048 		/* Core halts channel in these cases. */
1049 		release_channel(hcd, hc, qtd, halt_status);
1051 		/* Flush any outstanding requests from the Tx queue. */
1053 		halt_channel(hcd, hc, qtd, halt_status);
1056 static int32_t handle_xfercomp_isoc_split_in(dwc_otg_hcd_t *hcd,
1058 					     dwc_otg_hc_regs_t *hc_regs,
/* Completes one complete-split segment of a split isochronous IN transfer;
 * releases the channel and returns 1 to tell the caller it is done. */
1062 	struct dwc_otg_hcd_iso_packet_desc *frame_desc;
1063 	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
1065 	len = get_actual_xfer_length(hc, hc_regs, qtd,
1066 				     DWC_OTG_HC_XFER_COMPLETE, NULL);
/* Zero-length segment: restart the split sequence for this frame. */
1069 		qtd->complete_split = 0;
1070 		qtd->isoc_split_offset = 0;
1073 	frame_desc->actual_length += len;
/* Bounce-buffer copy happens BEFORE isoc_split_offset is advanced, so the
 * data lands at this segment's position in the frame. */
1075 	if (hc->align_buff && len)
1076 		dwc_memcpy(qtd->urb->buf + frame_desc->offset +
1077 			   qtd->isoc_split_offset, hc->qh->dw_align_buf, len);
1078 	qtd->isoc_split_offset += len;
/* Frame fully assembled: mark it good and move on to the next frame. */
1080 	if (frame_desc->length == frame_desc->actual_length) {
1081 		frame_desc->status = 0;
1082 		qtd->isoc_frame_index++;
1083 		qtd->complete_split = 0;
1084 		qtd->isoc_split_offset = 0;
1087 	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
1088 		hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
1089 		release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
1091 		release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
1094 	return 1;		/* Indicates that channel released */
1098 * Handles a host channel Transfer Complete interrupt. This handler may be
1099 * called in either DMA mode or Slave mode.
1101 static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t *hcd,
1103 				       dwc_otg_hc_regs_t *hc_regs,
1107 	dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_COMPLETE;
1108 	dwc_otg_hcd_urb_t *urb;
1111 	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1112 		    "Transfer Complete--\n", hc->hc_num);
/* URB already gone (dequeued): just release the channel and get out. */
1115 		release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_DEQUEUE);
1116 		goto handle_xfercomp_done;
1121 		release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_DEQUEUE);
1122 		goto handle_xfercomp_done;
1125 	pipe_type = dwc_otg_hcd_get_pipe_type(&urb->pipe_info);
/* Descriptor-DMA mode has its own completion path. */
1127 	if (hcd->core_if->dma_desc_enable) {
1128 		dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs, halt_status);
1129 		if (pipe_type == UE_ISOCHRONOUS) {
1130 			/* Do not disable the interrupt, just clear it */
1131 			clear_hc_int(hc_regs, xfercomp);
1134 		goto handle_xfercomp_done;
1138 	 * Handle xfer complete on CSPLIT.
1141 	if (hc->qh->do_split) {
1142 		if ((hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && hc->ep_is_in
1143 		    && hcd->core_if->dma_enable) {
1144 			if (qtd->complete_split
1145 			    && handle_xfercomp_isoc_split_in(hcd, hc, hc_regs,
/* Channel already released inside the split-in helper. */
1147 				goto handle_xfercomp_done;
1149 			qtd->complete_split = 0;
1153 	/* Update the QTD and URB states. */
1154 	switch (pipe_type) {
/* Control transfers step through SETUP -> DATA -> STATUS phases. */
1156 		switch (qtd->control_phase) {
1157 		case DWC_OTG_CONTROL_SETUP:
1158 			if (urb->length > 0) {
1159 				qtd->control_phase = DWC_OTG_CONTROL_DATA;
1161 				qtd->control_phase = DWC_OTG_CONTROL_STATUS;
1163 			DWC_DEBUGPL(DBG_HCDV,
1164 				    "  Control setup transaction done\n");
1165 			halt_status = DWC_OTG_HC_XFER_COMPLETE;
1167 		case DWC_OTG_CONTROL_DATA:{
1169 				    update_urb_state_xfer_comp(hc, hc_regs, urb,
1171 				if (urb_xfer_done) {
1172 					qtd->control_phase =
1173 					    DWC_OTG_CONTROL_STATUS;
1174 					DWC_DEBUGPL(DBG_HCDV,
1175 						    "  Control data transfer done\n");
1177 					dwc_otg_hcd_save_data_toggle(hc,
1181 				halt_status = DWC_OTG_HC_XFER_COMPLETE;
1184 		case DWC_OTG_CONTROL_STATUS:
1185 			DWC_DEBUGPL(DBG_HCDV, "  Control transfer complete\n");
1186 			if (urb->status == -DWC_E_IN_PROGRESS) {
1189 			hcd->fops->complete(hcd, urb->priv, urb, urb->status);
1190 			halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
1194 		complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
1197 		DWC_DEBUGPL(DBG_HCDV, "  Bulk transfer complete\n");
1199 		    update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
1200 		if (urb_xfer_done) {
1201 			hcd->fops->complete(hcd, urb->priv, urb, urb->status);
1202 			halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
1204 			halt_status = DWC_OTG_HC_XFER_COMPLETE;
1207 		dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
1208 		complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
1211 		DWC_DEBUGPL(DBG_HCDV, "  Interrupt transfer complete\n");
1213 		    update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
1216 		 * Interrupt URB is done on the first transfer complete
1219 		if (urb_xfer_done) {
1220 			hcd->fops->complete(hcd, urb->priv, urb, urb->status);
1221 			halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
1223 			halt_status = DWC_OTG_HC_XFER_COMPLETE;
1226 		dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
1227 		complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
1229 	case UE_ISOCHRONOUS:
1230 		DWC_DEBUGPL(DBG_HCDV, "  Isochronous transfer complete\n");
1231 		if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL) {
1233 			    update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1234 						  DWC_OTG_HC_XFER_COMPLETE);
1236 		complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
1240 handle_xfercomp_done:
1241 	disable_hc_int(hc_regs, xfercompl);
/**
 * Handles a host channel STALL interrupt. This handler may be called in
 * either DMA mode or Slave mode.
 *
 * NOTE(review): this excerpt elides several original lines (remaining
 * parameters, closing braces, the handle_stall_done label and return) —
 * documented as-is; confirm against the full file.
 */
static int32_t handle_hc_stall_intr(dwc_otg_hcd_t *hcd,
				    dwc_otg_hc_regs_t *hc_regs,
	dwc_otg_hcd_urb_t *urb = qtd->urb;
	int pipe_type = dwc_otg_hcd_get_pipe_type(&urb->pipe_info);

	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
		    "STALL Received--\n", hc->hc_num);

	/* Descriptor DMA mode: the DDMA completion path does all the work;
	 * skip the Slave/Buffer-DMA handling below. */
	if (hcd->core_if->dma_desc_enable) {
		dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
					       DWC_OTG_HC_XFER_STALL);
		goto handle_stall_done;

	/* A STALL on a control pipe is reported to the upper layer as a
	 * pipe error (-DWC_E_PIPE); the URB is completed immediately. */
	if (pipe_type == UE_CONTROL) {
		hcd->fops->complete(hcd, urb->priv, urb, -DWC_E_PIPE);

	if (pipe_type == UE_BULK || pipe_type == UE_INTERRUPT) {
		hcd->fops->complete(hcd, urb->priv, urb, -DWC_E_PIPE);
		/*
		 * USB protocol requires resetting the data toggle for bulk
		 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
		 * setup command is issued to the endpoint. Anticipate the
		 * CLEAR_FEATURE command since a STALL has occurred and reset
		 * the data toggle now.
		 */
	hc->qh->data_toggle = 0;

	/* Halt the channel so it can be released/re-queued. */
	halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_STALL);

	disable_hc_int(hc_regs, stall);
/**
 * Updates the state of the URB when a transfer has been stopped due to an
 * abnormal condition before the transfer completes. Modifies the
 * actual_length field of the URB to reflect the number of bytes that have
 * actually been transferred via the host channel.
 *
 * NOTE(review): excerpt elides some lines (qtd parameter, call arguments,
 * braces around the debug block) — confirm against the full file.
 */
static void update_urb_state_xfer_intr(dwc_hc_t *hc,
				       dwc_otg_hc_regs_t *hc_regs,
				       dwc_otg_hcd_urb_t *urb,
				       dwc_otg_halt_status_e halt_status)
	uint32_t bytes_transferred = get_actual_xfer_length(hc, hc_regs, qtd,
	/* non DWORD-aligned buffer case handling: for IN transfers that used
	 * the bounce buffer, copy the received bytes back into the URB's
	 * real buffer at the current offset. */
	if (hc->align_buff && bytes_transferred && hc->ep_is_in) {
		dwc_memcpy(urb->buf + urb->actual_length, hc->qh->dw_align_buf,
	urb->actual_length += bytes_transferred;
		/* Verbose debug dump of the channel/transfer state. */
		hctsiz_data_t hctsiz;
		hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
		DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
			    __func__, (hc->ep_is_in ? "IN" : "OUT"),
		DWC_DEBUGPL(DBG_HCDV, "  hc->start_pkt_count %d\n",
			    hc->start_pkt_count);
		DWC_DEBUGPL(DBG_HCDV, "  hctsiz.pktcnt %d\n", hctsiz.b.pktcnt);
		DWC_DEBUGPL(DBG_HCDV, "  hc->max_packet %d\n", hc->max_packet);
		DWC_DEBUGPL(DBG_HCDV, "  bytes_transferred %d\n",
		DWC_DEBUGPL(DBG_HCDV, "  urb->actual_length %d\n",
			    urb->actual_length);
		DWC_DEBUGPL(DBG_HCDV, "  urb->transfer_buffer_length %d\n",
/**
 * Handles a host channel NAK interrupt. This handler may be called in either
 * DMA mode or Slave mode.
 *
 * NOTE(review): excerpt elides some lines (case labels, breaks, braces,
 * handle_nak_done label/return) — confirm against the full file.
 */
static int32_t handle_hc_nak_intr(dwc_otg_hcd_t *hcd,
				  dwc_otg_hc_regs_t *hc_regs,
	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
		    "NAK Received--\n", hc->hc_num);

	/*
	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
	 * interrupt. Re-start the SSPLIT transfer.
	 */
	if (hc->complete_split) {
		qtd->error_count = 0;
		qtd->complete_split = 0;
		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
		goto handle_nak_done;

	switch (dwc_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
		if (hcd->core_if->dma_enable && hc->ep_is_in) {
			/*
			 * NAK interrupts are enabled on bulk/control IN
			 * transfers in DMA mode for the sole purpose of
			 * resetting the error count after a transaction error
			 * occurs. The core will continue transferring data.
			 */
			qtd->error_count = 0;
			goto handle_nak_done;
		/*
		 * NAK interrupts normally occur during OUT transfers in DMA
		 * or Slave mode. For IN transfers, more requests will be
		 * queued as request queue space is available.
		 */
		qtd->error_count = 0;

		if (!hc->qh->ping_state) {
			/* Rewind the URB to the last completed packet and
			 * record the data toggle before the retry. */
			update_urb_state_xfer_intr(hc, hc_regs,
						   DWC_OTG_HC_XFER_NAK);
			dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
			/* High-speed OUT retries resume via the PING protocol. */
			if (hc->speed == DWC_OTG_EP_SPEED_HIGH)
				hc->qh->ping_state = 1;
		/*
		 * Halt the channel so the transfer can be re-started from
		 * the appropriate point or the PING protocol will
		 */
		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
		qtd->error_count = 0;
		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
	case UE_ISOCHRONOUS:
		/* Should never get called for isochronous transfers. */
		DWC_ASSERT(1, "NACK interrupt for ISOC transfer\n");

	disable_hc_int(hc_regs, nak);
/**
 * Handles a host channel ACK interrupt. This interrupt is enabled when
 * performing the PING protocol in Slave mode, when errors occur during
 * either Slave mode or DMA mode, and during Start Split transactions.
 *
 * NOTE(review): excerpt elides some lines (breaks, braces, frame_desc
 * declaration tail) — confirm against the full file.
 */
static int32_t handle_hc_ack_intr(dwc_otg_hcd_t *hcd,
				  dwc_otg_hc_regs_t *hc_regs,
	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
		    "ACK Received--\n", hc->hc_num);

	/*
	 * Handle ACK on SSPLIT.
	 * ACK should not occur in CSPLIT.
	 */
	if (!hc->ep_is_in && hc->data_pid_start != DWC_OTG_HC_PID_SETUP) {
		qtd->ssplit_out_xfer_count = hc->xfer_len;
	if (!(hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in)) {
		/* Don't need complete for isochronous out transfers. */
		qtd->complete_split = 1;

	/* ISOC OUT split: advance the split position state machine. */
	if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) {
		switch (hc->xact_pos) {
		case DWC_HCSPLIT_XACTPOS_ALL:
		case DWC_HCSPLIT_XACTPOS_END:
			qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
			qtd->isoc_split_offset = 0;
		case DWC_HCSPLIT_XACTPOS_BEGIN:
		case DWC_HCSPLIT_XACTPOS_MID:
			/*
			 * For BEGIN or MID, calculate the length for
			 * the next microframe to determine the correct
			 * SSPLIT token, either MID or END.
			 */
				struct dwc_otg_hcd_iso_packet_desc
				    &qtd->urb->iso_descs[qtd->
				/* Each split carries at most 188 bytes. */
				qtd->isoc_split_offset += 188;

				if ((frame_desc->length -
				     qtd->isoc_split_offset) <= 188) {
					qtd->isoc_split_pos =
					    DWC_HCSPLIT_XACTPOS_END;
					qtd->isoc_split_pos =
					    DWC_HCSPLIT_XACTPOS_MID;
		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK);
		qtd->error_count = 0;

		if (hc->qh->ping_state) {
			hc->qh->ping_state = 0;
			/*
			 * Halt the channel so the transfer can be re-started
			 * from the appropriate point. This only happens in
			 * Slave mode. In DMA mode, the ping_state is cleared
			 * when the transfer is started because the core
			 * automatically executes the PING, then the transfer.
			 */
			halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK);

	/*
	 * If the ACK occurred when _not_ in the PING state, let the channel
	 * continue transferring data after clearing the error count.
	 */
	disable_hc_int(hc_regs, ack);
/**
 * Handles a host channel NYET interrupt. This interrupt should only occur on
 * Bulk and Control OUT endpoints and for complete split transactions. If a
 * NYET occurs at the same time as a Transfer Complete interrupt, it is
 * handled in the xfercomp interrupt handler, not here. This handler may be
 * called in either DMA mode or Slave mode.
 *
 * NOTE(review): excerpt elides some lines (braces, handle_nyet_done
 * label/return) — confirm against the full file.
 */
static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t *hcd,
				   dwc_otg_hc_regs_t *hc_regs,
	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
		    "NYET Received--\n", hc->hc_num);

	/*
	 * re-do the CSPLIT immediately on non-periodic
	 */
	if (hc->do_split && hc->complete_split) {
		/* ISOC IN split in DMA mode: a NYET here means the frame's
		 * data is done; advance (or complete) the ISOC URB. */
		if (hc->ep_is_in && (hc->ep_type == DWC_OTG_EP_TYPE_ISOC)
		    && hcd->core_if->dma_enable) {
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
			if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
				hcd->fops->complete(hcd, qtd->urb->priv,
				release_channel(hcd, hc, qtd,
						DWC_OTG_HC_XFER_URB_COMPLETE);
				release_channel(hcd, hc, qtd,
						DWC_OTG_HC_XFER_NO_HALT_STATUS);
			goto handle_nyet_done;

		if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
		    hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
			int frnum = dwc_otg_hcd_get_frame_number(hcd);

			if (dwc_full_frame_num(frnum) !=
			    dwc_full_frame_num(hc->qh->sched_frame)) {
				/*
				 * No longer in the same full speed frame.
				 * Treat this as a transaction error.
				 */
				/** @todo Fix system performance so this can
				 * be treated as an error. Right now complete
				 * splits cannot be scheduled precisely enough
				 * due to other system activity, so this error
				 * occurs regularly in Slave mode.
				 */
				qtd->complete_split = 0;
				halt_channel(hcd, hc, qtd,
					     DWC_OTG_HC_XFER_XACT_ERR);
				/** @todo add support for isoc release */
				goto handle_nyet_done;

		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET);
		goto handle_nyet_done;

	/* Non-split NYET: enter PING state and retry from the rewound point. */
	hc->qh->ping_state = 1;
	qtd->error_count = 0;

	update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, qtd,
				   DWC_OTG_HC_XFER_NYET);
	dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);

	/*
	 * Halt the channel and re-start the transfer so the PING
	 * protocol will start.
	 */
	halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET);

	disable_hc_int(hc_regs, nyet);
/**
 * Handles a host channel babble interrupt. This handler may be called in
 * either DMA mode or Slave mode.
 *
 * NOTE(review): excerpt elides some lines (remaining parameters, braces,
 * handle_babble_done label/return) — confirm against the full file.
 */
static int32_t handle_hc_babble_intr(dwc_otg_hcd_t *hcd,
				     dwc_otg_hc_regs_t *hc_regs,
	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
		    "Babble Error--\n", hc->hc_num);

	/* Descriptor DMA mode: the DDMA completion path handles the error. */
	if (hcd->core_if->dma_desc_enable) {
		dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
					       DWC_OTG_HC_XFER_BABBLE_ERR);
		goto handle_babble_done;

	if (hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
		/* Non-ISOC: fail the URB with -DWC_E_OVERFLOW and halt. */
		hcd->fops->complete(hcd, qtd->urb->priv,
				    qtd->urb, -DWC_E_OVERFLOW);
		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_BABBLE_ERR);
		/* ISOC: record the error for this frame and continue with
		 * the halt status the URB-state update decides. */
		dwc_otg_halt_status_e halt_status;
		halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
						    DWC_OTG_HC_XFER_BABBLE_ERR);
		halt_channel(hcd, hc, qtd, halt_status);

	disable_hc_int(hc_regs, bblerr);
/**
 * Handles a host channel AHB error interrupt. This handler is only called in
 *
 * NOTE(review): excerpt elides some lines (hcdma declaration, case labels,
 * breaks, speed string assignments, handle_ahberr_done label/return) —
 * confirm against the full file.
 */
static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t *hcd,
				     dwc_otg_hc_regs_t *hc_regs,
	hcchar_data_t hcchar;
	hcsplt_data_t hcsplt;
	hctsiz_data_t hctsiz;
	char *pipetype, *speed;
	dwc_otg_hcd_urb_t *urb = qtd->urb;

	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
		    "AHB Error--\n", hc->hc_num);

	/* Snapshot the channel registers for the diagnostic dump below. */
	hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
	hcsplt.d32 = DWC_READ_REG32(&hc_regs->hcsplt);
	hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
	hcdma = DWC_READ_REG32(&hc_regs->hcdma);

	DWC_ERROR("AHB ERROR, Channel %d\n", hc->hc_num);
	DWC_ERROR("  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
	DWC_ERROR("  hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n");
	DWC_ERROR("  Device address: %d\n",
		  dwc_otg_hcd_get_dev_addr(&urb->pipe_info));
	DWC_ERROR("  Endpoint: %d, %s\n",
		  dwc_otg_hcd_get_ep_num(&urb->pipe_info),
		  (dwc_otg_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT"));

	switch (dwc_otg_hcd_get_pipe_type(&urb->pipe_info)) {
		pipetype = "CONTROL";
		pipetype = "INTERRUPT";
	case UE_ISOCHRONOUS:
		pipetype = "ISOCHRONOUS";
		pipetype = "UNKNOWN";

	DWC_ERROR("  Endpoint type: %s\n", pipetype);

	switch (hc->speed) {
	case DWC_OTG_EP_SPEED_HIGH:
	case DWC_OTG_EP_SPEED_FULL:
	case DWC_OTG_EP_SPEED_LOW:

	DWC_ERROR("  Speed: %s\n", speed);

	DWC_ERROR("  Max packet size: %d\n",
		  dwc_otg_hcd_get_mps(&urb->pipe_info));
	DWC_ERROR("  Data buffer length: %d\n", urb->length);
	DWC_ERROR("  Transfer buffer: %p, Transfer DMA: %p\n",
		  urb->buf, (void *)urb->dma);
	DWC_ERROR("  Setup buffer: %p, Setup DMA: %p\n",
		  urb->setup_packet, (void *)urb->setup_dma);
	DWC_ERROR("  Interval: %d\n", urb->interval);

	/* Core haltes the channel for Descriptor DMA mode */
	if (hcd->core_if->dma_desc_enable) {
		dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
					       DWC_OTG_HC_XFER_AHB_ERR);
		goto handle_ahberr_done;

	/* Fail the URB with an I/O error. */
	hcd->fops->complete(hcd, urb->priv, urb, -DWC_E_IO);

	/*
	 * Force a channel halt. Don't call halt_channel because that won't
	 * write to the HCCHARn register in DMA mode to force the halt.
	 */
	dwc_otg_hc_halt(hcd->core_if, hc, DWC_OTG_HC_XFER_AHB_ERR);

	disable_hc_int(hc_regs, ahberr);
/**
 * Handles a host channel transaction error interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 *
 * NOTE(review): excerpt elides some lines (case labels, breaks, braces) —
 * in particular the ISOC path shows halt_status used without a visible
 * assignment; the assignment line is presumably elided, not absent. Confirm
 * against the full file.
 */
static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t *hcd,
				      dwc_otg_hc_regs_t *hc_regs,
	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
		    "Transaction Error--\n", hc->hc_num);

	/* Descriptor DMA mode: the DDMA completion path handles the error. */
	if (hcd->core_if->dma_desc_enable) {
		dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
					       DWC_OTG_HC_XFER_XACT_ERR);
		goto handle_xacterr_done;

	switch (dwc_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
		if (!hc->qh->ping_state) {
			/* Rewind the URB and save the toggle before retrying. */
			update_urb_state_xfer_intr(hc, hc_regs,
						   DWC_OTG_HC_XFER_XACT_ERR);
			dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
			/* High-speed OUT retries resume via PING. */
			if (!hc->ep_is_in && hc->speed == DWC_OTG_EP_SPEED_HIGH) {
				hc->qh->ping_state = 1;

		/*
		 * Halt the channel so the transfer can be re-started from
		 * the appropriate point or the PING protocol will start.
		 */
		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
		if (hc->do_split && hc->complete_split) {
			qtd->complete_split = 0;
		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
	case UE_ISOCHRONOUS:
			dwc_otg_halt_status_e halt_status;
			update_isoc_urb_state(hcd, hc, hc_regs, qtd,
					      DWC_OTG_HC_XFER_XACT_ERR);
			halt_channel(hcd, hc, qtd, halt_status);

handle_xacterr_done:
	disable_hc_int(hc_regs, xacterr);
/**
 * Handles a host channel frame overrun interrupt. This handler may be called
 * in either DMA mode or Slave mode.
 *
 * NOTE(review): excerpt elides some lines (case labels, breaks, braces,
 * return). The ISOC path shows halt_status used without a visible
 * assignment; the assignment line is presumably elided. Confirm against the
 * full file.
 */
static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t *hcd,
				       dwc_otg_hc_regs_t *hc_regs,
	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
		    "Frame Overrun--\n", hc->hc_num);

	switch (dwc_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
		/* Periodic transfer missed its frame: halt and reschedule. */
		halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN);
	case UE_ISOCHRONOUS:
			dwc_otg_halt_status_e halt_status;
			update_isoc_urb_state(hcd, hc, hc_regs, qtd,
					      DWC_OTG_HC_XFER_FRAME_OVERRUN);
			halt_channel(hcd, hc, qtd, halt_status);

	disable_hc_int(hc_regs, frmovrun);
/**
 * Handles a host channel data toggle error interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 *
 * NOTE(review): excerpt elides some lines (braces, return) — confirm
 * against the full file.
 */
static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t *hcd,
					 dwc_otg_hc_regs_t *hc_regs,
	DWC_ERROR("--Host Channel %d Interrupt: "
		  "Data Toggle Error--\n", hc->hc_num);
	if (!hcd->flags.b.port_connect_status) {
		/* No longer connected. */
		DWC_ERROR("Not connected\n");

	/* Bump past the retry threshold so the URB errors out immediately. */
	qtd->error_count += 3;	/* Complete the error URB immediately */
	DWC_ERROR("Data Toggle Error on OUT transfer,"
		  "channel %d\n", hc->hc_num);

	dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
	halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
	clear_hc_int(hc_regs, chhltd);
/**
 * This function is for debug only. It checks that a valid halt status is set
 * and that HCCHARn.chdis is clear. If there's a problem, corrective action is
 * taken and a warning is issued.
 * @return 1 if halt status is ok, 0 otherwise.
 *
 * NOTE(review): excerpt elides some lines (hcint declaration, DWC_WARN call
 * opener, returns, closing braces) — confirm against the full file.
 */
static inline int halt_status_ok(dwc_otg_hcd_t *hcd,
				 dwc_otg_hc_regs_t *hc_regs,
	hcchar_data_t hcchar;
	hctsiz_data_t hctsiz;
	hcintmsk_data_t hcintmsk;
	hcsplt_data_t hcsplt;

	if (hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) {
		/*
		 * This code is here only as a check. This condition should
		 * never happen. Ignore the halt if it does occur.
		 */
		hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
		hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
		hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
		hcintmsk.d32 = DWC_READ_REG32(&hc_regs->hcintmsk);
		hcsplt.d32 = DWC_READ_REG32(&hc_regs->hcsplt);
		    ("%s: hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS, "
		     "channel %d, hcchar 0x%08x, hctsiz 0x%08x, "
		     "hcint 0x%08x, hcintmsk 0x%08x, "
		     "hcsplt 0x%08x, qtd->complete_split %d\n", __func__,
		     hc->hc_num, hcchar.d32, hctsiz.d32, hcint.d32,
		     hcintmsk.d32, hcsplt.d32, qtd->complete_split);

		DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n",
			 __func__, hc->hc_num);
		/* Swallow the spurious halt: clear chhltd and bail out. */
		clear_hc_int(hc_regs, chhltd);

	/*
	 * This code is here only as a check. hcchar.chdis should
	 * never be set when the halt interrupt occurs. Halt the
	 * channel again if it does occur.
	 */
	hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
	if (hcchar.b.chdis) {
		DWC_WARN("%s: hcchar.chdis set unexpectedly, "
			 "hcchar 0x%08x, trying to halt again\n",
			 __func__, hcchar.d32);
		clear_hc_int(hc_regs, chhltd);
		hc->halt_pending = 0;
		halt_channel(hcd, hc, qtd, hc->halt_status);
/**
 * Handles a host Channel Halted interrupt in DMA mode. This handler
 * determines the reason the channel halted and proceeds accordingly.
 *
 * Dispatches to the specific per-cause handler based on which HCINT bits are
 * set when the channel halts.
 *
 * NOTE(review): excerpt elides some lines (hcint declaration, out_nak_enh
 * assignment, several braces/returns) — confirm against the full file.
 */
static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t *hcd,
				      dwc_otg_hc_regs_t *hc_regs,
	hcintmsk_data_t hcintmsk;
	int out_nak_enh = 0;
	struct dwc_otg_platform_data *pldata = hcd->core_if->otg_dev->pldata;

	/* For core with OUT NAK enhancement, the flow for high-
	 * speed CONTROL/BULK OUT is handled a little differently.
	 */
	if (hcd->core_if->snpsid >= OTG_CORE_REV_2_71a) {
		if (hc->speed == DWC_OTG_EP_SPEED_HIGH && !hc->ep_is_in &&
		    (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
		     hc->ep_type == DWC_OTG_EP_TYPE_BULK)) {

	if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
	    (hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR
	     && !hcd->core_if->dma_desc_enable)) {
		/*
		 * Just release the channel. A dequeue can happen on a
		 * transfer timeout. In the case of an AHB Error, the channel
		 * was forced to halt because there's no way to gracefully
		 */
		if (hcd->core_if->dma_desc_enable)
			dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
		release_channel(hcd, hc, qtd, hc->halt_status);

	/* Read the HCINTn register to determine the cause for the halt. */
	hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
	hcintmsk.d32 = DWC_READ_REG32(&hc_regs->hcintmsk);

	if (hcint.b.xfercomp) {
		/** @todo This is here because of a possible hardware bug. Spec
		 * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
		 * interrupt w/ACK bit set should occur, but I only see the
		 * XFERCOMP bit, even with it masked out. This is a workaround
		 * for that behavior. Should fix this when hardware is fixed.
		 */
		if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) {
			handle_hc_ack_intr(hcd, hc, hc_regs, qtd);
		handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd);
	} else if (hcint.b.stall) {
		handle_hc_stall_intr(hcd, hc, hc_regs, qtd);
	} else if (hcint.b.xacterr && !hcd->core_if->dma_desc_enable) {
		if (hcint.b.nyet || hcint.b.nak || hcint.b.ack) {
			DWC_DEBUG("XactErr with NYET/NAK/ACK\n");
			qtd->error_count = 0;
			DWC_DEBUG("XactErr without NYET/NAK/ACK\n");

		/*
		 * Must handle xacterr before nak or ack. Could get a xacterr
		 * at the same time as either of these on a BULK/CONTROL OUT
		 * that started with a PING. The xacterr takes precedence.
		 */
		handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
	} else if (hcint.b.xcs_xact && hcd->core_if->dma_desc_enable) {
		handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
	} else if (hcint.b.ahberr && hcd->core_if->dma_desc_enable) {
		handle_hc_ahberr_intr(hcd, hc, hc_regs, qtd);
	} else if (hcint.b.bblerr) {
		handle_hc_babble_intr(hcd, hc, hc_regs, qtd);
	} else if (hcint.b.frmovrun) {
		handle_hc_frmovrun_intr(hcd, hc, hc_regs, qtd);
	} else if (hcint.b.datatglerr) {
		handle_hc_datatglerr_intr(hcd, hc, hc_regs, qtd);
	} else if (!out_nak_enh) {
		/*
		 * Must handle nyet before nak or ack. Could get a nyet at the
		 * same time as either of those on a BULK/CONTROL OUT that
		 * started with a PING. The nyet takes precedence.
		 */
		handle_hc_nyet_intr(hcd, hc, hc_regs, qtd);
	} else if (hcint.b.nak && !hcintmsk.b.nak) {
		/*
		 * If nak is not masked, it's because a non-split IN transfer
		 * is in an error state. In that case, the nak is handled by
		 * the nak interrupt handler, not here. Handle nak here for
		 * BULK/CONTROL OUT transfers, which halt on a NAK to allow
		 * rewinding the buffer pointer.
		 */
		handle_hc_nak_intr(hcd, hc, hc_regs, qtd);
	} else if (hcint.b.ack && !hcintmsk.b.ack) {
		/*
		 * If ack is not masked, it's because a non-split IN transfer
		 * is in an error state. In that case, the ack is handled by
		 * the ack interrupt handler, not here. Handle ack here for
		 * split transfers. Start splits halt on ACK.
		 */
		handle_hc_ack_intr(hcd, hc, hc_regs, qtd);
		if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
		    hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
			/*
			 * A periodic transfer halted with no other channel
			 * interrupts set. Assume it was halted by the core
			 * because it could not be completed in its scheduled
			 */
			    ("%s: Halt channel %d (assume incomplete periodic transfer)\n",
			     __func__, hc->hc_num);
			halt_channel(hcd, hc, qtd,
				     DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE);
			    ("%s: Channel %d, DMA Mode -- ChHltd set, but reason "
			     "for halting is unknown, hcint 0x%08x, intsts 0x%08x\n",
			     __func__, hc->hc_num, hcint.d32,
			     DWC_READ_REG32(&hcd->core_if->
					    core_global_regs->gintsts));
			clear_hc_int(hc_regs, chhltd);
		/* Rockchip-specific recovery: unexplained halt with handshake
		 * bits set triggers a soft reset of the controller. */
		DWC_PRINTF("NYET/NAK/ACK/other in non-error case, 0x%08x\n",
		pldata->soft_reset(pldata, RST_CHN_HALT);
	if (!hcint.b.nyet && !hcint.b.nak && !hcint.b.ack)
		clear_hc_int(hc_regs, chhltd);
/**
 * Handles a host channel Channel Halted interrupt.
 *
 * In slave mode, this handler is called only when the driver specifically
 * requests a halt. This occurs during handling other host channel interrupts
 * (e.g. nak, xacterr, stall, nyet, etc.).
 *
 * In DMA mode, this is the interrupt that occurs when the core has finished
 * processing a transfer on a channel. Other host channel interrupts (except
 * ahberr) are disabled in DMA mode.
 *
 * NOTE(review): excerpt elides some lines (remaining parameters, braces,
 * return) — confirm against the full file.
 */
static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t *hcd,
				     dwc_otg_hc_regs_t *hc_regs,
	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
		    "Channel Halted--\n", hc->hc_num);

	/* DMA mode has its own dispatch; Slave mode releases the channel
	 * using the halt status set by the requesting handler. */
	if (hcd->core_if->dma_enable) {
		handle_hc_chhltd_intr_dma(hcd, hc, hc_regs, qtd);
		if (!halt_status_ok(hcd, hc, hc_regs, qtd)) {
		release_channel(hcd, hc, qtd, hc->halt_status);
/**
 * Handles interrupt for a specific Host Channel.
 *
 * Reads and masks HCINTn, then dispatches each pending cause to its handler.
 *
 * NOTE(review): excerpt elides some lines (retval declaration, several
 * condition lines such as the nak/ack/nyet checks, braces, return) —
 * confirm against the full file.
 */
int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num)
	hcintmsk_data_t hcintmsk;
	dwc_otg_hc_regs_t *hc_regs;

	DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", num);

	hc = dwc_otg_hcd->hc_ptr_array[num];
	hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[num];
	if (DWC_CIRCLEQ_EMPTY(&hc->qh->qtd_list)) {
		/* All transfer had been killed, clear panding interrupts */
		/* (i.e. acknowledge everything and release the channel —
		 * there is no qtd left to process) */
		hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
		DWC_WRITE_REG32(&hc_regs->hcint, hcint.d32);
		release_channel(dwc_otg_hcd, hc, NULL,
				DWC_OTG_HC_XFER_URB_DEQUEUE);

	qtd = DWC_CIRCLEQ_FIRST(&hc->qh->qtd_list);

	hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
	hcintmsk.d32 = DWC_READ_REG32(&hc_regs->hcintmsk);
	DWC_DEBUGPL(DBG_HCDV,
		    "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
		    hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32));
	/* Only act on unmasked interrupt causes. */
	hcint.d32 = hcint.d32 & hcintmsk.d32;

	if (!dwc_otg_hcd->core_if->dma_enable) {
		/* Slave mode: 0x2 is a pure chhltd; anything else pending
		 * alongside chhltd is handled by the per-cause code below. */
		if (hcint.b.chhltd && hcint.d32 != 0x2) {

	if (hcint.b.chhltd) {
		retval |= handle_hc_chhltd_intr(dwc_otg_hcd, hc, hc_regs, qtd);
	if (hcint.b.xfercomp) {
		handle_hc_xfercomp_intr(dwc_otg_hcd, hc, hc_regs, qtd);
		/*
		 * If NYET occurred at same time as Xfer Complete, the NYET is
		 * handled by the Xfer Complete interrupt handler. Don't want
		 * to call the NYET interrupt handler in this case.
		 */
	if (hcint.b.ahberr) {
		retval |= handle_hc_ahberr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
	if (hcint.b.stall) {
		retval |= handle_hc_stall_intr(dwc_otg_hcd, hc, hc_regs, qtd);
		retval |= handle_hc_nak_intr(dwc_otg_hcd, hc, hc_regs, qtd);
		if (!hcint.b.chhltd)
			handle_hc_ack_intr(dwc_otg_hcd, hc, hc_regs, qtd);
		retval |= handle_hc_nyet_intr(dwc_otg_hcd, hc, hc_regs, qtd);
	if (hcint.b.xacterr) {
		retval |= handle_hc_xacterr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
	if (hcint.b.bblerr) {
		retval |= handle_hc_babble_intr(dwc_otg_hcd, hc, hc_regs, qtd);
	if (hcint.b.frmovrun) {
		handle_hc_frmovrun_intr(dwc_otg_hcd, hc, hc_regs, qtd);
	if (hcint.b.datatglerr) {
		/* Toggle errors are not recovered per-qtd here; just disable
		 * further datatglerr interrupts and ack the halt. */
		disable_hc_int(hc_regs, datatglerr);
		clear_hc_int(hc_regs, chhltd);
2180 #endif /* DWC_DEVICE_ONLY */