/* Initialize Host Configuration Register */
init_fslspclksel(_core_if);
+ /* Full-speed-only host mode is not needed on this platform, so skip
+ * forcing HCFG.FSLSSupp. */
+ #if 0
if (_core_if->core_params->speed == DWC_SPEED_PARAM_FULL)
{
hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);
hcfg.b.fslssupp = 1;
dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32);
}
+ #endif
/* Configure data FIFO sizes */
if (_core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo)
{
dwc_write_reg32 (&(in_regs->diepdma),
(uint32_t)_ep->dma_addr);
- _ep->dma_addr += _ep->xfer_len;
+ /* EP0 transfer size may be more than one packet, so the DMA address
+ * has to be updated between packets. kever@rk 20111120 */
+ _ep->dma_addr += _ep->xfer_len;
}
/* EP enable, IN data in FIFO */
kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_inactive);
kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_active);
kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_inactive);
- kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_ready);
- kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_assigned);
- kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_queued);
+// kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_ready);
+// kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_assigned);
+// kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_queued);
}
/**
/* Initialize the periodic schedule. */
INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive);
- INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready);
- INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned);
- INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued);
+// INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready);
+// INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned);
+// INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued);
/*
* Create a host channel descriptor for each host channel implemented
/* Initialize the periodic schedule. */
INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive);
- INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready);
- INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned);
- INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued);
+// INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready);
+// INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned);
+// INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued);
/*
* Create a host channel descriptor for each host channel implemented
/* Initialize the periodic schedule. */
INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive);
- INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready);
- INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned);
- INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued);
+// INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready);
+// INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned);
+// INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued);
/*
* Create a host channel descriptor for each host channel implemented
qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_inactive);
qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active);
qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive);
- qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready);
- qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned);
- qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued);
+// qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready);
+// qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned);
+// qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued);
/* Free memory for the host channels. */
for (i = 0; i < MAX_EPS_CHANNELS; i++) {
#endif
/* Process entries in the periodic ready list. */
- qh_ptr = _hcd->periodic_sched_ready.next;
- while (qh_ptr != &_hcd->periodic_sched_ready &&
- !list_empty(&_hcd->free_hc_list)) {
-
+ qh_ptr = _hcd->periodic_sched_inactive.next;
+ while (qh_ptr != &_hcd->periodic_sched_inactive) {
qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+ if(qh->qh_state != QH_READY){
+ qh_ptr = qh_ptr->next;
+ continue;
+ }
+
assign_and_init_hc(_hcd, qh);
/*
* periodic assigned schedule.
*/
qh_ptr = qh_ptr->next;
- list_move_tail(&qh->qh_list_entry, &_hcd->periodic_sched_assigned);
+ //list_move_tail(&qh->qh_list_entry, &_hcd->periodic_sched_assigned);
+ qh->qh_state = QH_ASSIGNED;
ret_val = DWC_OTG_TRANSACTION_PERIODIC;
}
tx_status.b.ptxfspcavail);
#endif
- qh_ptr = _hcd->periodic_sched_assigned.next;
- while (qh_ptr != &_hcd->periodic_sched_assigned) {
+ qh_ptr = _hcd->periodic_sched_inactive.next;
+ while (qh_ptr != &_hcd->periodic_sched_inactive) {
tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
if (tx_status.b.ptxqspcavail == 0) {
no_queue_space = 1;
}
qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+ if(qh->qh_state != QH_ASSIGNED){
+ qh_ptr = qh_ptr->next;
+ continue;
+ }
/*
* Set a flag if we're queuing high-bandwidth in slave mode.
* Move the QH from the periodic assigned schedule to
* the periodic queued schedule.
*/
- list_move_tail(&qh->qh_list_entry, &_hcd->periodic_sched_queued);
+ //list_move_tail(&qh->qh_list_entry, &_hcd->periodic_sched_queued);
+ qh->qh_state = QH_QUEUED;
/* done queuing high bandwidth */
_hcd->core_if->queuing_high_bandwidth = 0;
DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (after queue): %d\n",
tx_status.b.ptxfspcavail);
#endif
- if (!(list_empty(&_hcd->periodic_sched_assigned)) ||
+ if (//!(list_empty(&_hcd->periodic_sched_assigned)) ||
no_queue_space || no_fifo_space) {
/*
* May need to queue more transactions as the request
#endif
/* Process host channels associated with periodic transfers. */
if ((_tr_type == DWC_OTG_TRANSACTION_PERIODIC ||
- _tr_type == DWC_OTG_TRANSACTION_ALL) &&
- !list_empty(&_hcd->periodic_sched_assigned)) {
+ _tr_type == DWC_OTG_TRANSACTION_ALL) //&&
+ //!list_empty(&_hcd->periodic_sched_assigned)
+ ) {
process_periodic_channels(_hcd);
}
DWC_OTG_TRANSACTION_ALL
} dwc_otg_transaction_type_e;
+/** QH scheduling states — replace membership in the removed
+ * periodic_sched_ready/assigned/queued lists. */
+typedef enum qh_status_type {
+ QH_INACTIVE,
+ QH_ACTIVE,
+ QH_READY,
+ QH_ASSIGNED,
+ QH_QUEUED
+} qh_status_type_e;
+
/**
* A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
* interrupt, or isochronous transfer. A single QTD is created for each URB
/** Ping state if 1. */
uint8_t ping_state;
-
+
+ /**
+ * Scheduling state of this QH (a qh_status_type_e value). Replaces the
+ * removed periodic ready/assigned/queued list membership: periodic QHs
+ * now remain on periodic_sched_inactive and are filtered by this state.
+ */
+ uint16_t qh_state;
/**
* List of QTDs for this QH.
*/
* Items move from this list to periodic_sched_assigned as host
* channels become available during the current frame.
*/
- struct list_head periodic_sched_ready;
+ //struct list_head periodic_sched_ready;
/**
* List of periodic QHs to be executed in the next frame that are
* Items move from this list to periodic_sched_queued as the
* transactions for the QH are queued to the DWC_otg controller.
*/
- struct list_head periodic_sched_assigned;
+ //struct list_head periodic_sched_assigned;
/**
* List of periodic QHs that have been queued for execution.
* periodic_sched_ready because it must be rescheduled for the next
* frame. Otherwise, the item moves to periodic_sched_inactive.
*/
- struct list_head periodic_sched_queued;
+ //struct list_head periodic_sched_queued;
/**
* Total bandwidth claimed so far for periodic transfers. This value
#include "dwc_otg_driver.h"
#include "dwc_otg_hcd.h"
#include "dwc_otg_regs.h"
-
+int csplit_nak = 0;
/** @file
* This file contains the implementation of the HCD Interrupt handlers.
*/
qh_entry = _hcd->periodic_sched_inactive.next;
while (qh_entry != &_hcd->periodic_sched_inactive) {
qh = list_entry(qh_entry, dwc_otg_qh_t, qh_list_entry);
+ if(qh->qh_state != QH_INACTIVE){
+ qh_entry = qh_entry->next;
+ continue;
+ }
qh_entry = qh_entry->next;
if (dwc_frame_num_le(qh->sched_frame, _hcd->frame_number)) {
#if 1
* Move QH to the ready list to be executed next
* (micro)frame.
*/
- list_move_tail(&qh->qh_list_entry, &_hcd->periodic_sched_ready);
+ //list_move_tail(&qh->qh_list_entry, &_hcd->periodic_sched_ready);
+ qh->qh_state = QH_READY;
}
}
}
if (tr_type != DWC_OTG_TRANSACTION_NONE) {
dwc_otg_hcd_queue_transactions(_hcd, tr_type);
#if 1
- } else if (list_empty(&_hcd->periodic_sched_inactive) &&
- list_empty(&_hcd->periodic_sched_ready) &&
- list_empty(&_hcd->periodic_sched_assigned) &&
- list_empty(&_hcd->periodic_sched_queued)) {
+ } else if (list_empty(&_hcd->periodic_sched_inactive) //&&
+// list_empty(&_hcd->periodic_sched_ready) &&
+// list_empty(&_hcd->periodic_sched_assigned) &&
+// list_empty(&_hcd->periodic_sched_queued)
+ ) {
/*
* We don't have USB data to send. Unfortunately the
* Synopsis block continues to generate interrupts at
* in the same relative order as the corresponding start-split transactions
* were issued.
*/
-
- qh_entry = _dwc_otg_hcd->periodic_sched_queued.next;
- while (qh_entry != &_dwc_otg_hcd->periodic_sched_queued) {
+ qh_entry = _dwc_otg_hcd->periodic_sched_inactive.next;
+ while (qh_entry != &_dwc_otg_hcd->periodic_sched_inactive) {
qh = list_entry(qh_entry, dwc_otg_qh_t, qh_list_entry);
qh_entry = qh_entry->next;
+ if(qh->qh_state != QH_QUEUED)
+ continue;
hcnum = qh->channel->hc_num;
if (haint.b2.chint & (1 << hcnum)) {
retval |= dwc_otg_hcd_handle_hc_n_intr (_dwc_otg_hcd, hcnum);
{
dwc_otg_transaction_type_e tr_type;
int free_qtd;
+ int continue_trans = 1;
+
if((!_qtd)|(_qtd->urb == NULL))
{
goto cleanup;
free_qtd = 0;
break;
}
+ if(csplit_nak)
+ {
+ continue_trans = 0;
+ csplit_nak = 0;
+ }
deactivate_qh(_hcd, _hc->qh, free_qtd);
*/
break;
}
-
- /* Try to queue more transfers now that there's a free channel. */
- tr_type = dwc_otg_hcd_select_transactions(_hcd);
- if (tr_type != DWC_OTG_TRANSACTION_NONE) {
- dwc_otg_hcd_queue_transactions(_hcd, tr_type);
+ if(continue_trans)
+ {
+ /* Try to queue more transfers now that there's a free channel. */
+ tr_type = dwc_otg_hcd_select_transactions(_hcd);
+ if (tr_type != DWC_OTG_TRANSACTION_NONE) {
+ dwc_otg_hcd_queue_transactions(_hcd, tr_type);
+ }
}
/*
* Make sure the start of frame interrupt is enabled now that
* halt to be queued when the periodic schedule is
* processed.
*/
- list_move_tail(&_hc->qh->qh_list_entry,
- &_hcd->periodic_sched_assigned);
+ //list_move_tail(&_hc->qh->qh_list_entry,
+ // &_hcd->periodic_sched_assigned);
/*
* Make sure the Periodic Tx FIFO Empty interrupt is
if (_hc->complete_split) {
_qtd->error_count = 0;
}
+ csplit_nak = 1;
_qtd->complete_split = 0;
halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK);
goto handle_nak_done;
handle_hc_ack_intr(_hcd, _hc, _hc_regs, _qtd);
} else if(hcint.b.datatglerr){
DWC_PRINT("%s, DATA toggle error, Channel %d\n",__func__, _hc->hc_num);
+ save_data_toggle(_hc, _hc_regs, _qtd); //hzb: the device's first USB data packet carries a data toggle that violates the USB protocol, so resynchronize the toggle here
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
clear_hc_int(_hc_regs,chhltd);
} else {
if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
* NOT virtual root hub */
_qh->do_split = 0;
- /* yk@rk 20100625
- * _urb->dev->tt->hub may be null
- */
- if((_urb->dev->tt)&&(!_urb->dev->tt->hub))
- DWC_PRINT("%s tt->hub null!\n",__func__);
if (((_urb->dev->speed == USB_SPEED_LOW) ||
(_urb->dev->speed == USB_SPEED_FULL)) &&
(_urb->dev->tt) && (_urb->dev->tt->hub)&&
/* Always start in the inactive schedule. */
list_add_tail(&_qh->qh_list_entry, &_hcd->periodic_sched_inactive);
+ _qh->qh_state = QH_INACTIVE;
/* Reserve the periodic channel. */
_hcd->periodic_channels++;
* appropriate queue.
*/
if (_qh->sched_frame == frame_number) {
- list_move_tail(&_qh->qh_list_entry,
- &_hcd->periodic_sched_ready);
+ //list_move_tail(&_qh->qh_list_entry,
+ // &_hcd->periodic_sched_ready);
+ _qh->qh_state = QH_READY;
} else {
- list_move_tail(&_qh->qh_list_entry,
- &_hcd->periodic_sched_inactive);
+ //list_move_tail(&_qh->qh_list_entry,
+ // &_hcd->periodic_sched_inactive);
+ _qh->qh_state = QH_INACTIVE;
}
}
}