2 * Copyright (c) 2001-2004 by David Brownell
3 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software Foundation,
17 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 /* this file is part of ehci-hcd.c */
22 /*-------------------------------------------------------------------------*/
25 * EHCI scheduled transaction support: interrupt, iso, split iso
26 * These are called "periodic" transactions in the EHCI spec.
28 * Note that for interrupt transfers, the QH/QTD manipulation is shared
29 * with the "asynchronous" transaction support (control/bulk transfers).
30 * The only real difference is in how interrupt transfers are scheduled.
32 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
33 * It keeps track of every ITD (or SITD) that's linked, and holds enough
34 * pre-calculated schedule data to make appending to the queue be quick.
37 static int ehci_get_frame (struct usb_hcd *hcd);
40 * periodic_next_shadow - return "next" pointer on shadow list
41 * @periodic: host pointer to qh/itd/sitd
42 * @tag: hardware tag for type of this record
44 static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
48 switch (hc32_to_cpu(ehci, tag)) {
50 return &periodic->qh->qh_next;
52 return &periodic->fstn->fstn_next;
54 return &periodic->itd->itd_next;
57 return &periodic->sitd->sitd_next;
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
65 switch (hc32_to_cpu(ehci, tag)) {
66 /* our ehci_shadow.qh is actually software part */
68 return &periodic->qh->hw->hw_next;
69 /* others are hw parts */
71 return periodic->hw_next;
75 /* caller must hold ehci->lock */
76 static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
78 union ehci_shadow *prev_p = &ehci->pshadow[frame];
79 __hc32 *hw_p = &ehci->periodic[frame];
80 union ehci_shadow here = *prev_p;
82 /* find predecessor of "ptr"; hw and shadow lists are in sync */
83 while (here.ptr && here.ptr != ptr) {
84 prev_p = periodic_next_shadow(ehci, prev_p,
85 Q_NEXT_TYPE(ehci, *hw_p));
86 hw_p = shadow_next_periodic(ehci, &here,
			Q_NEXT_TYPE(ehci, *hw_p));
	here = *prev_p;
90 /* an interrupt entry (at list end) could have been shared */
94 /* update shadow and hardware lists ... the old "next" pointers
95 * from ptr may still be in use, the caller updates them.
97 *prev_p = *periodic_next_shadow(ehci, &here,
98 Q_NEXT_TYPE(ehci, *hw_p));
100 if (!ehci->use_dummy_qh ||
101 *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
102 != EHCI_LIST_END(ehci))
103 *hw_p = *shadow_next_periodic(ehci, &here,
104 Q_NEXT_TYPE(ehci, *hw_p));
else
	*hw_p = ehci->dummy->qh_dma;
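/*
 * Note on the dummy QH above: when ehci->use_dummy_qh is set, an
 * emptied schedule slot is pointed at an inert dummy QH rather than
 * being marked EHCI_LIST_END, a workaround for host controllers that
 * prefetch frame-list entries and mishandle terminated links.
 */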
109 static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
110 struct ehci_per_sched *ps)
112 dev_dbg(&ps->udev->dev,
113 "ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
114 ps->ep->desc.bEndpointAddress,
115 (sign >= 0 ? "reserve" : "release"), type,
116 (ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
117 ps->phase, ps->phase_uf, ps->period,
118 ps->usecs, ps->c_usecs, ps->cs_mask);
121 static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
122 struct ehci_qh *qh, int sign)
126 int usecs = qh->ps.usecs;
127 int c_usecs = qh->ps.c_usecs;
if (qh->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
	return;
131 start_uf = qh->ps.bw_phase << 3;
133 bandwidth_dbg(ehci, sign, "intr", &qh->ps);
if (sign < 0) {		/* Release bandwidth */
	usecs = -usecs;
	c_usecs = -c_usecs;
}
140 /* Entire transaction (high speed) or start-split (full/low speed) */
141 for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
142 i += qh->ps.bw_uperiod)
143 ehci->bandwidth[i] += usecs;
145 /* Complete-split (full/low speed) */
146 if (qh->ps.c_usecs) {
147 /* NOTE: adjustments needed for FSTN */
148 for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
149 i += qh->ps.bw_uperiod) {
150 for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
151 if (qh->ps.cs_mask & m)
152 ehci->bandwidth[i+j] += c_usecs;
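/*
 * Illustrative sketch (not compiled): how the ehci->bandwidth[] ledger
 * used above behaves.  The names and sizes here are made up for the
 * demo; the real code walks EHCI_BANDWIDTH_SIZE uframes using the
 * qh->ps fields, and releases by adding negated usec values.
 */
#if 0
static void bandwidth_ledger_demo(void)
{
	unsigned short bandwidth[64] = { 0 };	/* usecs claimed per uframe */
	unsigned bw_uperiod = 8;	/* transaction recurs every 8 uframes */
	unsigned phase_uf = 2;		/* chosen starting uframe */
	int usecs = 30;			/* cost of each transaction */
	unsigned i;

	/* reserve: charge every uframe the transfer will occupy */
	for (i = phase_uf; i < 64; i += bw_uperiod)
		bandwidth[i] += usecs;

	/* release: same walk after negating usecs, as sign < 0 does above */
	usecs = -usecs;
	for (i = phase_uf; i < 64; i += bw_uperiod)
		bandwidth[i] += usecs;
}
#endif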
158 /*-------------------------------------------------------------------------*/
160 static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
if (!dev1->tt || !dev2->tt)
	return 0;
if (dev1->tt != dev2->tt)
	return 0;
167 return dev1->ttport == dev2->ttport;
172 #ifdef CONFIG_USB_EHCI_TT_NEWSCHED
174 /* Which uframe does the low/fullspeed transfer start in?
176 * The parameter is the mask of ssplits in "H-frame" terms
177 * and this returns the transfer start uframe in "B-frame" terms,
178 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
179 * will cause a transfer in "B-frame" uframe 0. "B-frames" lag
180 * "H-frames" by 1 uframe. See the EHCI spec sec 4.5 and figure 4.7.
182 static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
184 unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask);
186 ehci_err(ehci, "invalid empty smask!\n");
187 /* uframe 7 can't have bw so this will indicate failure */
190 return ffs(smask) - 1;
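/*
 * Worked example for tt_start_uframe(): an smask of 0x04 has its lowest
 * set bit at position 2, so ffs(0x04) - 1 == 2; the start-split goes out
 * in H-frame uframe 2 and, per the note above, the TT runs the matching
 * downstream transfer in B-frame uframe 2 (one uframe later on the wire).
 */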
193 static const unsigned char
194 max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
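/*
 * The table above is the TT's per-uframe budget for its downstream
 * 1 msec frame: uframes 0-5 may each carry a full 125 usecs of periodic
 * traffic, uframe 6 only 30 usecs, and uframe 7 none, so that full/low
 * speed periodic transfers finish within the frame (see usb 2.0
 * sec 11.18).
 */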
/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
197 static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
200 for (i=0; i<7; i++) {
201 if (max_tt_usecs[i] < tt_usecs[i]) {
202 tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
203 tt_usecs[i] = max_tt_usecs[i];
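/*
 * Worked example for carryover_tt_bandwidth(): a 300 usec transfer
 * booked entirely in uframe 5 starts as tt_usecs[5] = 300.  Uframe 5
 * keeps its 125 usec cap and 175 spill into uframe 6; uframe 6 keeps 30
 * and the remaining 145 land in uframe 7, where max_tt_usecs[7] == 0
 * lets the callers below detect the overrun.
 */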
208 /* How many of the tt's periodic downstream 1000 usecs are allocated?
210 * While this measures the bandwidth in terms of usecs/uframe,
211 * the low/fullspeed bus has no notion of uframes, so any particular
212 * low/fullspeed transfer can "carry over" from one uframe to the next,
213 * since the TT just performs downstream transfers in sequence.
* For example, two separate 100 usec transfers can start in the same uframe,
216 * and the second one would "carry over" 75 usecs into the next uframe.
220 struct ehci_hcd *ehci,
221 struct usb_device *dev,
223 unsigned short tt_usecs[8]
226 __hc32 *hw_p = &ehci->periodic [frame];
227 union ehci_shadow *q = &ehci->pshadow [frame];
230 memset(tt_usecs, 0, 16);
233 switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
235 hw_p = &q->itd->hw_next;
236 q = &q->itd->itd_next;
239 if (same_tt(dev, q->qh->ps.udev)) {
240 uf = tt_start_uframe(ehci, q->qh->hw->hw_info2);
241 tt_usecs[uf] += q->qh->ps.tt_usecs;
243 hw_p = &q->qh->hw->hw_next;
247 if (same_tt(dev, q->sitd->urb->dev)) {
248 uf = tt_start_uframe(ehci, q->sitd->hw_uframe);
249 tt_usecs[uf] += q->sitd->stream->ps.tt_usecs;
251 hw_p = &q->sitd->hw_next;
252 q = &q->sitd->sitd_next;
256 ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n",
258 hw_p = &q->fstn->hw_next;
259 q = &q->fstn->fstn_next;
263 carryover_tt_bandwidth(tt_usecs);
265 if (max_tt_usecs[7] < tt_usecs[7])
266 ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n",
267 frame, tt_usecs[7] - max_tt_usecs[7]);
271 * Return true if the device's tt's downstream bus is available for a
272 * periodic transfer of the specified length (usecs), starting at the
273 * specified frame/uframe. Note that (as summarized in section 11.19
* of the usb 2.0 spec) TTs can buffer multiple transactions for each
* downstream port.
277 * The uframe parameter is when the fullspeed/lowspeed transfer
278 * should be executed in "B-frame" terms, which is the same as the
279 * highspeed ssplit's uframe (which is in "H-frame" terms). For example
280 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
281 * See the EHCI spec sec 4.5 and fig 4.7.
283 * This checks if the full/lowspeed bus, at the specified starting uframe,
284 * has the specified bandwidth available, according to rules listed
285 * in USB 2.0 spec section 11.18.1 fig 11-60.
287 * This does not check if the transfer would exceed the max ssplit
288 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
289 * since proper scheduling limits ssplits to less than 16 per uframe.
291 static int tt_available (
292 struct ehci_hcd *ehci,
294 struct usb_device *dev,
if ((period == 0) || (uframe >= 7))	/* error */
	return 0;
303 for (; frame < ehci->periodic_size; frame += period) {
304 unsigned short tt_usecs[8];
306 periodic_tt_usecs (ehci, dev, frame, tt_usecs);
if (max_tt_usecs[uframe] <= tt_usecs[uframe])
	return 0;
311 /* special case for isoc transfers larger than 125us:
312 * the first and each subsequent fully used uframe
313 * must be empty, so as to not illegally delay
314 * already scheduled transactions
317 int ufs = (usecs / 125);
for (i = uframe; i < (uframe + ufs) && i < 8; i++)
	if (0 < tt_usecs[i])
		return 0;
324 tt_usecs[uframe] += usecs;
326 carryover_tt_bandwidth(tt_usecs);
328 /* fail if the carryover pushed bw past the last uframe's limit */
if (max_tt_usecs[7] < tt_usecs[7])
	return 0;
338 /* return true iff the device's transaction translator is available
339 * for a periodic transfer starting at the specified frame, using
340 * all the uframes in the mask.
342 static int tt_no_collision (
343 struct ehci_hcd *ehci,
345 struct usb_device *dev,
if (period == 0)	/* error */
	return 0;
353 /* note bandwidth wastage: split never follows csplit
354 * (different dev or endpoint) until the next uframe.
355 * calling convention doesn't make that distinction.
357 for (; frame < ehci->periodic_size; frame += period) {
358 union ehci_shadow here;
360 struct ehci_qh_hw *hw;
362 here = ehci->pshadow [frame];
363 type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]);
365 switch (hc32_to_cpu(ehci, type)) {
367 type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
368 here = here.itd->itd_next;
372 if (same_tt(dev, here.qh->ps.udev)) {
mask = hc32_to_cpu(ehci, hw->hw_info2);
377 /* "knows" no gap is needed */
382 type = Q_NEXT_TYPE(ehci, hw->hw_next);
383 here = here.qh->qh_next;
386 if (same_tt (dev, here.sitd->urb->dev)) {
mask = hc32_to_cpu(ehci, here.sitd->hw_uframe);
391 /* FIXME assumes no gap for IN! */
396 type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
397 here = here.sitd->sitd_next;
402 "periodic frame %d bogus type %d\n",
406 /* collision or error */
415 #endif /* CONFIG_USB_EHCI_TT_NEWSCHED */
417 /*-------------------------------------------------------------------------*/
419 static void enable_periodic(struct ehci_hcd *ehci)
if (ehci->periodic_count++)
	return;
424 /* Stop waiting to turn off the periodic schedule */
425 ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);
427 /* Don't start the schedule until PSS is 0 */
429 turn_on_io_watchdog(ehci);
432 static void disable_periodic(struct ehci_hcd *ehci)
if (--ehci->periodic_count)
	return;
437 /* Don't turn off the schedule until PSS is 1 */
441 /*-------------------------------------------------------------------------*/
443 /* periodic schedule slots have iso tds (normal or split) first, then a
444 * sparse tree for active interrupt transfers.
446 * this just links in a qh; caller guarantees uframe masks are set right.
447 * no FSTN support (yet; ehci 0.96+)
449 static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
452 unsigned period = qh->ps.period;
454 dev_dbg(&qh->ps.udev->dev,
455 "link qh%d-%04x/%p start %d [%d/%d us]\n",
456 period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
457 & (QH_CMASK | QH_SMASK),
458 qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
/* high bandwidth, or otherwise every microframe */
if (period == 0)
	period = 1;
464 for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
465 union ehci_shadow *prev = &ehci->pshadow[i];
466 __hc32 *hw_p = &ehci->periodic[i];
467 union ehci_shadow here = *prev;
470 /* skip the iso nodes at list head */
472 type = Q_NEXT_TYPE(ehci, *hw_p);
if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
	break;
475 prev = periodic_next_shadow(ehci, prev, type);
hw_p = shadow_next_periodic(ehci, &here, type);
here = *prev;
480 /* sorting each branch by period (slow-->fast)
481 * enables sharing interior tree nodes
483 while (here.ptr && qh != here.qh) {
if (qh->ps.period > here.qh->ps.period)
	break;
486 prev = &here.qh->qh_next;
hw_p = &here.qh->hw->hw_next;
here = *prev;
490 /* link in this qh, unless some earlier pass did that */
494 qh->hw->hw_next = *hw_p;
497 *hw_p = QH_NEXT (ehci, qh->qh_dma);
500 qh->qh_state = QH_STATE_LINKED;
504 /* update per-qh bandwidth for debugfs */
505 ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
506 ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
507 : (qh->ps.usecs * 8);
509 list_add(&qh->intr_node, &ehci->intr_qh_list);
/* maybe enable periodic schedule processing */
++ehci->intr_count;
513 enable_periodic(ehci);
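/*
 * Illustrative sketch (not compiled) of the insertion rule used by
 * qh_link_periodic() above: each frame's branch is kept sorted by
 * descending period, so a qh visited from many frames is spliced in at
 * an equivalent point everywhere and interior nodes can be shared.
 * The demo_* names are made up; the real code walks union ehci_shadow
 * and the matching hardware links in lockstep.
 */
#if 0
struct demo_qh {
	struct demo_qh	*next;
	unsigned	period;		/* frames between visits */
};

static void demo_sorted_insert(struct demo_qh **head, struct demo_qh *qh)
{
	/* stop at the first entry with a shorter period, or at qh
	 * itself if an earlier frame's pass already linked it
	 */
	while (*head && *head != qh && (*head)->period >= qh->period)
		head = &(*head)->next;

	if (*head != qh) {	/* link in, unless already linked */
		qh->next = *head;
		*head = qh;
	}
}
#endif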
516 static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
522 * If qh is for a low/full-speed device, simply unlinking it
523 * could interfere with an ongoing split transaction. To unlink
524 * it safely would require setting the QH_INACTIVATE bit and
525 * waiting at least one frame, as described in EHCI 4.12.2.5.
527 * We won't bother with any of this. Instead, we assume that the
528 * only reason for unlinking an interrupt QH while the current URB
* is still active is to dequeue all the URBs (flush the whole
* endpoint).
532 * If rebalancing the periodic schedule is ever implemented, this
533 * approach will no longer be valid.
536 /* high bandwidth, or otherwise part of every microframe */
537 period = qh->ps.period ? : 1;
539 for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
540 periodic_unlink (ehci, i, qh);
542 /* update per-qh bandwidth for debugfs */
543 ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
544 ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
545 : (qh->ps.usecs * 8);
547 dev_dbg(&qh->ps.udev->dev,
548 "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
550 hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
551 qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
553 /* qh->qh_next still "live" to HC */
554 qh->qh_state = QH_STATE_UNLINK;
555 qh->qh_next.ptr = NULL;
557 if (ehci->qh_scan_next == qh)
558 ehci->qh_scan_next = list_entry(qh->intr_node.next,
559 struct ehci_qh, intr_node);
560 list_del(&qh->intr_node);
563 static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
565 if (qh->qh_state != QH_STATE_LINKED ||
		list_empty(&qh->unlink_node))
	return;
569 list_del_init(&qh->unlink_node);
* TODO: disable the EHCI_HRTIMER_START_UNLINK_INTR event to avoid
* an unnecessary CPU wakeup
577 static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
579 /* If the QH isn't linked then there's nothing we can do. */
if (qh->qh_state != QH_STATE_LINKED)
	return;
583 /* if the qh is waiting for unlink, cancel it now */
584 cancel_unlink_wait_intr(ehci, qh);
586 qh_unlink_periodic (ehci, qh);
/* Make sure the unlinks are visible before starting the timer */
wmb();
592 * The EHCI spec doesn't say how long it takes the controller to
593 * stop accessing an unlinked interrupt QH. The timer delay is
594 * 9 uframes; presumably that will be long enough.
596 qh->unlink_cycle = ehci->intr_unlink_cycle;
598 /* New entries go at the end of the intr_unlink list */
599 list_add_tail(&qh->unlink_node, &ehci->intr_unlink);
601 if (ehci->intr_unlinking)
602 ; /* Avoid recursive calls */
603 else if (ehci->rh_state < EHCI_RH_RUNNING)
604 ehci_handle_intr_unlinks(ehci);
605 else if (ehci->intr_unlink.next == &qh->unlink_node) {
606 ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
607 ++ehci->intr_unlink_cycle;
* It is common for only one interrupt URB to be scheduled on a qh, and
* since complete() is run in tasklet context, introduce a short delay
* to avoid unlinking the qh too early.
616 static void start_unlink_intr_wait(struct ehci_hcd *ehci,
619 qh->unlink_cycle = ehci->intr_unlink_wait_cycle;
621 /* New entries go at the end of the intr_unlink_wait list */
622 list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);
624 if (ehci->rh_state < EHCI_RH_RUNNING)
625 ehci_handle_start_intr_unlinks(ehci);
626 else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
627 ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
628 ++ehci->intr_unlink_wait_cycle;
632 static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
634 struct ehci_qh_hw *hw = qh->hw;
637 qh->qh_state = QH_STATE_IDLE;
638 hw->hw_next = EHCI_LIST_END(ehci);
640 if (!list_empty(&qh->qtd_list))
641 qh_completions(ehci, qh);
643 /* reschedule QH iff another request is queued */
644 if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
rc = qh_schedule(ehci, qh);
if (rc == 0) {
	qh_refresh(ehci, qh);
	qh_link_periodic(ehci, qh);
}
651 /* An error here likely indicates handshake failure
652 * or no space left in the schedule. Neither fault
653 * should happen often ...
655 * FIXME kill the now-dysfunctional queued urbs
ehci_err(ehci, "can't reschedule qh %p, err %d\n",
		qh, rc);
/* maybe turn off periodic schedule */
--ehci->intr_count;
665 disable_periodic(ehci);
668 /*-------------------------------------------------------------------------*/
670 static int check_period (
671 struct ehci_hcd *ehci,
677 /* complete split running into next frame?
678 * given FSTN support, we could sometimes check...
683 /* convert "usecs we need" to "max already claimed" */
684 usecs = ehci->uframe_periodic_max - usecs;
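	/* e.g. with the default 100 usec cap (80% of a 125 usec uframe),
	 * needing 30 usecs becomes "fail any uframe already claiming more
	 * than 70 usecs": one compare per uframe in the loop below
	 */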
for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
		uframe += uperiod) {
	if (ehci->bandwidth[uframe] > usecs)
		return 0;
}
696 static int check_intr_schedule (
697 struct ehci_hcd *ehci,
700 const struct ehci_qh *qh,
704 int retval = -ENOSPC;
if (qh->ps.c_usecs && uframe >= 6)	/* FSTN territory? */
	goto done;
if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
	goto done;
if (!qh->ps.c_usecs) {
	retval = 0;
	*c_maskp = 0;
	goto done;
}
718 #ifdef CONFIG_USB_EHCI_TT_NEWSCHED
if (tt_available(ehci, qh->ps.bw_period, qh->ps.udev, frame, uframe,
		qh->ps.tt_usecs)) {
	unsigned i;
/* TODO: this may need FSTN for SSPLIT in uframe 5. */
724 for (i = uframe+2; i < 8 && i <= uframe+4; i++)
725 if (!check_period(ehci, frame, i,
			qh->ps.bw_uperiod, qh->ps.c_usecs))
	goto done;
else
	mask |= 1 << i;
736 /* Make sure this tt's buffer is also available for CSPLITs.
737 * We pessimize a bit; probably the typical full speed case
738 * doesn't need the second CSPLIT.
* NOTE: both SPLIT and CSPLIT could be checked in just
* one smart pass...
mask = 0x03 << (uframe + qh->gap_uf);
*c_maskp = mask;

mask |= 1 << uframe;
747 if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
748 if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
			qh->ps.bw_uperiod, qh->ps.c_usecs))
	goto done;
751 if (!check_period(ehci, frame, uframe + qh->gap_uf,
			qh->ps.bw_uperiod, qh->ps.c_usecs))
	goto done;
761 /* "first fit" scheduling policy used the first time through,
762 * or when the previous schedule slot can't be re-used.
764 static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
769 struct ehci_qh_hw *hw = qh->hw;
771 hw->hw_next = EHCI_LIST_END(ehci);
773 /* reuse the previous schedule slots, if we can */
774 if (qh->ps.phase != NO_FRAME) {
775 ehci_dbg(ehci, "reused qh %p schedule\n", qh);
783 /* else scan the schedule to find a group of slots such that all
784 * uframes have enough periodic bandwidth available.
786 /* "normal" case, uframing flexible except with splits */
787 if (qh->ps.bw_period) {
791 for (i = qh->ps.bw_period; status && i > 0; --i) {
792 frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
793 for (uframe = 0; uframe < 8; uframe++) {
794 status = check_intr_schedule(ehci,
795 frame, uframe, qh, &c_mask);
801 /* qh->ps.bw_period == 0 means every uframe */
803 status = check_intr_schedule(ehci, 0, 0, qh, &c_mask);
807 qh->ps.phase = (qh->ps.period ? ehci->random_frame &
808 (qh->ps.period - 1) : 0);
809 qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
810 qh->ps.phase_uf = uframe;
811 qh->ps.cs_mask = qh->ps.period ?
		(c_mask << 8) | (1 << uframe) :
		QH_SMASK;
815 /* reset S-frame and (maybe) C-frame masks */
816 hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
817 hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
818 reserve_release_intr_bandwidth(ehci, qh, 1);
824 static int intr_submit (
825 struct ehci_hcd *ehci,
827 struct list_head *qtd_list,
834 struct list_head empty;
836 /* get endpoint and transfer/schedule data */
837 epnum = urb->ep->desc.bEndpointAddress;
839 spin_lock_irqsave (&ehci->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
	status = -ESHUTDOWN;
	goto done_not_linked;
845 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
846 if (unlikely(status))
847 goto done_not_linked;
849 /* get qh and force any scheduling errors */
850 INIT_LIST_HEAD (&empty);
851 qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
856 if (qh->qh_state == QH_STATE_IDLE) {
if ((status = qh_schedule (ehci, qh)) != 0)
	goto done;
861 /* then queue the urb's tds to the qh */
862 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
865 /* stuff into the periodic schedule */
866 if (qh->qh_state == QH_STATE_IDLE) {
867 qh_refresh(ehci, qh);
868 qh_link_periodic(ehci, qh);
870 /* cancel unlink wait for the qh */
871 cancel_unlink_wait_intr(ehci, qh);
874 /* ... update usbfs periodic stats */
875 ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
878 if (unlikely(status))
879 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
881 spin_unlock_irqrestore (&ehci->lock, flags);
883 qtd_list_free (ehci, urb, qtd_list);
888 static void scan_intr(struct ehci_hcd *ehci)
list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
		intr_node) {
895 /* clean any finished work for this qh */
896 if (!list_empty(&qh->qtd_list)) {
900 * Unlinks could happen here; completion reporting
901 * drops the lock. That's why ehci->qh_scan_next
902 * always holds the next qh to scan; if the next qh
903 * gets unlinked then ehci->qh_scan_next is adjusted
904 * in qh_unlink_periodic().
906 temp = qh_completions(ehci, qh);
908 start_unlink_intr(ehci, qh);
909 else if (unlikely(list_empty(&qh->qtd_list) &&
910 qh->qh_state == QH_STATE_LINKED))
911 start_unlink_intr_wait(ehci, qh);
916 /*-------------------------------------------------------------------------*/
918 /* ehci_iso_stream ops work with both ITD and SITD */
920 static struct ehci_iso_stream *
921 iso_stream_alloc (gfp_t mem_flags)
923 struct ehci_iso_stream *stream;
925 stream = kzalloc(sizeof *stream, mem_flags);
926 if (likely (stream != NULL)) {
927 INIT_LIST_HEAD(&stream->td_list);
928 INIT_LIST_HEAD(&stream->free_list);
929 stream->next_uframe = NO_FRAME;
930 stream->ps.phase = NO_FRAME;
937 struct ehci_hcd *ehci,
938 struct ehci_iso_stream *stream,
942 static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
944 struct usb_device *dev = urb->dev;
946 unsigned epnum, maxp;
951 * this might be a "high bandwidth" highspeed endpoint,
* as encoded in the ep descriptor's wMaxPacketSize field
954 epnum = usb_pipeendpoint(urb->pipe);
955 is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
956 maxp = usb_endpoint_maxp(&urb->ep->desc);
963 /* knows about ITD vs SITD */
964 if (dev->speed == USB_SPEED_HIGH) {
965 unsigned multi = hb_mult(maxp);
967 stream->highspeed = 1;
969 maxp = max_packet(maxp);
973 stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
974 stream->buf1 = cpu_to_hc32(ehci, buf1);
975 stream->buf2 = cpu_to_hc32(ehci, multi);
977 /* usbfs wants to report the average usecs per frame tied up
978 * when transfers on this endpoint are scheduled ...
980 stream->ps.usecs = HS_USECS_ISO(maxp);
982 /* period for bandwidth allocation */
983 tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
984 1 << (urb->ep->desc.bInterval - 1));
986 /* Allow urb->interval to override */
987 stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
989 stream->uperiod = urb->interval;
990 stream->ps.period = urb->interval >> 3;
991 stream->bandwidth = stream->ps.usecs * 8 /
992 stream->ps.bw_uperiod;
999 addr = dev->ttport << 24;
if (!ehci_is_TDI(ehci)
		|| (dev->tt->hub !=
			ehci_to_hcd(ehci)->self.root_hub))
1003 addr |= dev->tt->hub->devnum << 16;
1005 addr |= dev->devnum;
1006 stream->ps.usecs = HS_USECS_ISO(maxp);
1007 think_time = dev->tt ? dev->tt->think_time : 0;
1008 stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
1009 dev->speed, is_input, 1, maxp));
1010 hs_transfers = max (1u, (maxp + 187) / 188);
1015 stream->ps.c_usecs = stream->ps.usecs;
1016 stream->ps.usecs = HS_USECS_ISO(1);
1017 stream->ps.cs_mask = 1;
1019 /* c-mask as specified in USB 2.0 11.18.4 3.c */
1020 tmp = (1 << (hs_transfers + 2)) - 1;
1021 stream->ps.cs_mask |= tmp << (8 + 2);
1023 stream->ps.cs_mask = smask_out[hs_transfers - 1];
1025 /* period for bandwidth allocation */
1026 tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
1027 1 << (urb->ep->desc.bInterval - 1));
1029 /* Allow urb->interval to override */
1030 stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
1031 stream->ps.bw_uperiod = stream->ps.bw_period << 3;
1033 stream->ps.period = urb->interval;
1034 stream->uperiod = urb->interval << 3;
1035 stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
1036 stream->ps.bw_period;
1038 /* stream->splits gets created from cs_mask later */
1039 stream->address = cpu_to_hc32(ehci, addr);
1042 stream->ps.udev = dev;
1043 stream->ps.ep = urb->ep;
1045 stream->bEndpointAddress = is_input | epnum;
1046 stream->maxp = maxp;
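/*
 * Worked example for the high-speed branch above, assuming the usual
 * hb_mult()/max_packet() encodings: a wMaxPacketSize of 0x1400 has bits
 * 12:11 equal to 2, so hb_mult() yields 3 transactions per microframe,
 * with a base payload of 0x400 == 1024 bytes, i.e. a "high bandwidth"
 * endpoint moving up to 3072 bytes per uframe.
 */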
1049 static struct ehci_iso_stream *
1050 iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
1053 struct ehci_iso_stream *stream;
1054 struct usb_host_endpoint *ep;
1055 unsigned long flags;
1057 epnum = usb_pipeendpoint (urb->pipe);
1058 if (usb_pipein(urb->pipe))
1059 ep = urb->dev->ep_in[epnum];
1061 ep = urb->dev->ep_out[epnum];
1063 spin_lock_irqsave (&ehci->lock, flags);
1064 stream = ep->hcpriv;
1066 if (unlikely (stream == NULL)) {
1067 stream = iso_stream_alloc(GFP_ATOMIC);
1068 if (likely (stream != NULL)) {
1069 ep->hcpriv = stream;
1070 iso_stream_init(ehci, stream, urb);
1073 /* if dev->ep [epnum] is a QH, hw is set */
1074 } else if (unlikely (stream->hw != NULL)) {
1075 ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
1076 urb->dev->devpath, epnum,
1077 usb_pipein(urb->pipe) ? "in" : "out");
1081 spin_unlock_irqrestore (&ehci->lock, flags);
1085 /*-------------------------------------------------------------------------*/
1087 /* ehci_iso_sched ops can be ITD-only or SITD-only */
1089 static struct ehci_iso_sched *
1090 iso_sched_alloc (unsigned packets, gfp_t mem_flags)
1092 struct ehci_iso_sched *iso_sched;
1093 int size = sizeof *iso_sched;
1095 size += packets * sizeof (struct ehci_iso_packet);
1096 iso_sched = kzalloc(size, mem_flags);
1097 if (likely (iso_sched != NULL)) {
1098 INIT_LIST_HEAD (&iso_sched->td_list);
1105 struct ehci_hcd *ehci,
1106 struct ehci_iso_sched *iso_sched,
1107 struct ehci_iso_stream *stream,
1112 dma_addr_t dma = urb->transfer_dma;
1114 /* how many uframes are needed for these transfers */
1115 iso_sched->span = urb->number_of_packets * stream->uperiod;
1117 /* figure out per-uframe itd fields that we'll need later
1118 * when we fit new itds into the schedule.
1120 for (i = 0; i < urb->number_of_packets; i++) {
1121 struct ehci_iso_packet *uframe = &iso_sched->packet [i];
1126 length = urb->iso_frame_desc [i].length;
1127 buf = dma + urb->iso_frame_desc [i].offset;
1129 trans = EHCI_ISOC_ACTIVE;
1130 trans |= buf & 0x0fff;
1131 if (unlikely (((i + 1) == urb->number_of_packets))
1132 && !(urb->transfer_flags & URB_NO_INTERRUPT))
1133 trans |= EHCI_ITD_IOC;
1134 trans |= length << 16;
1135 uframe->transaction = cpu_to_hc32(ehci, trans);
1137 /* might need to cross a buffer page within a uframe */
1138 uframe->bufp = (buf & ~(u64)0x0fff);
if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
	uframe->cross = 1;
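/*
 * Layout note for the hw_transaction words built above: bits 31:28 hold
 * the status (EHCI_ISOC_ACTIVE), bits 27:16 the transfer length, bit 15
 * the IOC flag, and bits 11:0 the offset within the buffer page;
 * itd_patch() later ORs in the page-select field at bits 14:12.
 */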
1147 struct ehci_iso_stream *stream,
1148 struct ehci_iso_sched *iso_sched
1153 // caller must hold ehci->lock!
1154 list_splice (&iso_sched->td_list, &stream->free_list);
1159 itd_urb_transaction (
1160 struct ehci_iso_stream *stream,
1161 struct ehci_hcd *ehci,
1166 struct ehci_itd *itd;
1170 struct ehci_iso_sched *sched;
1171 unsigned long flags;
1173 sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
1174 if (unlikely (sched == NULL))
1177 itd_sched_init(ehci, sched, stream, urb);
1179 if (urb->interval < 8)
1180 num_itds = 1 + (sched->span + 7) / 8;
1182 num_itds = urb->number_of_packets;
1184 /* allocate/init ITDs */
1185 spin_lock_irqsave (&ehci->lock, flags);
1186 for (i = 0; i < num_itds; i++) {
1189 * Use iTDs from the free list, but not iTDs that may
1190 * still be in use by the hardware.
1192 if (likely(!list_empty(&stream->free_list))) {
1193 itd = list_first_entry(&stream->free_list,
1194 struct ehci_itd, itd_list);
1195 if (itd->frame == ehci->now_frame)
1197 list_del (&itd->itd_list);
1198 itd_dma = itd->itd_dma;
1201 spin_unlock_irqrestore (&ehci->lock, flags);
itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
		&itd_dma);
1204 spin_lock_irqsave (&ehci->lock, flags);
1206 iso_sched_free(stream, sched);
1207 spin_unlock_irqrestore(&ehci->lock, flags);
1212 memset (itd, 0, sizeof *itd);
1213 itd->itd_dma = itd_dma;
1214 itd->frame = NO_FRAME;
1215 list_add (&itd->itd_list, &sched->td_list);
1217 spin_unlock_irqrestore (&ehci->lock, flags);
1219 /* temporarily store schedule info in hcpriv */
1220 urb->hcpriv = sched;
1221 urb->error_count = 0;
1225 /*-------------------------------------------------------------------------*/
1227 static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
1228 struct ehci_iso_stream *stream, int sign)
1232 unsigned s_mask, c_mask, m;
1233 int usecs = stream->ps.usecs;
1234 int c_usecs = stream->ps.c_usecs;
if (stream->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
	return;
1238 uframe = stream->ps.bw_phase << 3;
1240 bandwidth_dbg(ehci, sign, "iso", &stream->ps);
if (sign < 0) {		/* Release bandwidth */
	usecs = -usecs;
	c_usecs = -c_usecs;
}
1247 if (!stream->splits) { /* High speed */
1248 for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
1249 i += stream->ps.bw_uperiod)
1250 ehci->bandwidth[i] += usecs;
1252 } else { /* Full speed */
1253 s_mask = stream->ps.cs_mask;
1254 c_mask = s_mask >> 8;
1256 /* NOTE: adjustment needed for frame overflow */
1257 for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
1258 i += stream->ps.bw_uperiod) {
1259 for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
1262 ehci->bandwidth[i+j] += usecs;
1263 else if (c_mask & m)
1264 ehci->bandwidth[i+j] += c_usecs;
1272 struct ehci_hcd *ehci,
1273 struct ehci_iso_stream *stream,
1279 /* convert "usecs we need" to "max already claimed" */
1280 usecs = ehci->uframe_periodic_max - stream->ps.usecs;
1282 for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
1283 uframe += stream->ps.bw_uperiod) {
if (ehci->bandwidth[uframe] > usecs)
	return 0;
1292 struct ehci_hcd *ehci,
1293 struct ehci_iso_stream *stream,
1295 struct ehci_iso_sched *sched
1301 mask = stream->ps.cs_mask << (uframe & 7);
1303 /* for OUT, don't wrap SSPLIT into H-microframe 7 */
if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
	return 0;
1307 /* for IN, don't wrap CSPLIT into the next frame */
1311 /* check bandwidth */
1312 uframe &= stream->ps.bw_uperiod - 1;
1313 frame = uframe >> 3;
1315 #ifdef CONFIG_USB_EHCI_TT_NEWSCHED
1316 /* The tt's fullspeed bus bandwidth must be available.
1317 * tt_available scheduling guarantees 10+% for control/bulk.
1320 if (!tt_available(ehci, stream->ps.bw_period,
1321 stream->ps.udev, frame, uf, stream->ps.tt_usecs))
1324 /* tt must be idle for start(s), any gap, and csplit.
1325 * assume scheduling slop leaves 10+% for control/bulk.
1327 if (!tt_no_collision(ehci, stream->ps.bw_period,
1328 stream->ps.udev, frame, mask))
1336 /* check starts (OUT uses more than one) */
1338 max_used = ehci->uframe_periodic_max - stream->ps.usecs;
1339 for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
if (ehci->bandwidth[uf] > max_used)
	return 0;
1344 /* for IN, check CSPLIT */
1345 if (stream->ps.c_usecs) {
1346 max_used = ehci->uframe_periodic_max -
1350 for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
if ((stream->ps.cs_mask & tmp) == 0)
	continue;
if (ehci->bandwidth[uf+i] > max_used)
	return 0;
1358 uframe += stream->ps.bw_uperiod;
1359 } while (uframe < EHCI_BANDWIDTH_SIZE);
1361 stream->ps.cs_mask <<= uframe & 7;
1362 stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
1367 * This scheduler plans almost as far into the future as it has actual
1368 * periodic schedule slots. (Affected by TUNE_FLS, which defaults to
1369 * "as small as possible" to be cache-friendlier.) That limits the size
* of transfers you can stream reliably; avoid more than 64 msec per urb.
1371 * Also avoid queue depths of less than ehci's worst irq latency (affected
1372 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
1373 * and other factors); or more than about 230 msec total (for portability,
1374 * given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler!
1377 #define SCHEDULING_DELAY 40 /* microframes */
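/*
 * Illustrative sketch (not compiled) of the wraparound-safe arithmetic
 * iso_stream_schedule() relies on: positions are reduced modulo the
 * schedule span, so "has this uframe passed?" becomes an unsigned
 * comparison of offsets from a common base.  The numbers are made up;
 * the real code derives mod from ehci->periodic_size.
 */
#if 0
static void demo_schedule_window(void)
{
	unsigned mod = 1024 << 3;	/* schedule span, in uframes */
	unsigned base = 8000;		/* ehci->last_iso_frame << 3 */
	unsigned now = 8040;		/* current uframe index */
	unsigned start = 8100;		/* candidate start uframe */

	/* offsets from base wrap into 0..mod-1 instead of going negative */
	unsigned now2 = (now - base) & (mod - 1);	/* 40 */
	unsigned s = (start - base) & (mod - 1);	/* 100 */

	int usable = (s >= now2);	/* 1 here: start is still in the future */
	(void)usable;
}
#endif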
1380 iso_stream_schedule (
1381 struct ehci_hcd *ehci,
1383 struct ehci_iso_stream *stream
1386 u32 now, base, next, start, period, span, now2;
1387 u32 wrap = 0, skip = 0;
1389 unsigned mod = ehci->periodic_size << 3;
1390 struct ehci_iso_sched *sched = urb->hcpriv;
1391 bool empty = list_empty(&stream->td_list);
1393 period = stream->uperiod;
1395 if (!stream->highspeed)
1398 now = ehci_read_frame_index(ehci) & (mod - 1);
1400 /* Take the isochronous scheduling threshold into account */
1402 next = now + ehci->i_thresh; /* uframe cache */
1404 next = (now + 2 + 7) & ~0x07; /* full frame cache */
1407 * Use ehci->last_iso_frame as the base. There can't be any
1408 * TDs scheduled for earlier than that.
1410 base = ehci->last_iso_frame << 3;
1411 next = (next - base) & (mod - 1);
1413 /* Start a new isochronous stream? */
1414 if (unlikely(empty && !hcd_periodic_completion_in_progress(
1415 ehci_to_hcd(ehci), urb->ep))) {
1417 /* Schedule the endpoint */
1418 if (stream->ps.phase == NO_FRAME) {
1421 start = (now & ~0x07) + SCHEDULING_DELAY;
1423 /* find a uframe slot with enough bandwidth.
1424 * Early uframes are more precious because full-speed
1425 * iso IN transfers can't use late uframes,
1426 * and therefore they should be allocated last.
1432 /* check schedule: enough space? */
1433 if (stream->highspeed) {
if (itd_slot_ok(ehci, stream, start))
	done = 1;
} else {
if ((start % 8) >= 6)
	continue;
if (sitd_slot_ok(ehci, stream, start,
		sched, period))
	done = 1;
}
1443 } while (start > next && !done);
1445 /* no room in the schedule */
1447 ehci_dbg(ehci, "iso sched full %p", urb);
1451 stream->ps.phase = (start >> 3) &
1452 (stream->ps.period - 1);
1453 stream->ps.bw_phase = stream->ps.phase &
1454 (stream->ps.bw_period - 1);
1455 stream->ps.phase_uf = start & 7;
1456 reserve_release_iso_bandwidth(ehci, stream, 1);
1459 /* New stream is already scheduled; use the upcoming slot */
1461 start = (stream->ps.phase << 3) + stream->ps.phase_uf;
1464 start = (start - base) & (mod - 1);
1469 * Typical case: reuse current schedule, stream may still be active.
1470 * Hopefully there are no gaps from the host falling behind
1471 * (irq delays etc). If there are, the behavior depends on
1472 * whether URB_ISO_ASAP is set.
1474 start = (stream->next_uframe - base) & (mod - 1);
1475 now2 = (now - base) & (mod - 1);
1477 /* Is the schedule already full? */
1478 if (unlikely(!empty && start < period)) {
1479 ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
1480 urb, stream->next_uframe, base, period, mod);
1485 /* Is the next packet scheduled after the base time? */
1486 if (likely(!empty || start <= now2 + period)) {
1488 /* URB_ISO_ASAP: make sure that start >= next */
1489 if (unlikely(start < next &&
1490 (urb->transfer_flags & URB_ISO_ASAP)))
1493 /* Otherwise use start, if it's not in the past */
1494 if (likely(start >= now2))
1497 /* Otherwise we got an underrun while the queue was empty */
1499 if (urb->transfer_flags & URB_ISO_ASAP)
1505 /* How many uframes and packets do we need to skip? */
1506 skip = (now2 - start + period - 1) & -period;
1507 if (skip >= span) { /* Entirely in the past? */
1508 ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
1509 urb, start + base, span - period, now2 + base,
1512 /* Try to keep the last TD intact for scanning later */
1513 skip = span - period;
1515 /* Will it come before the current scan position? */
1517 skip = span; /* Skip the entire URB */
1518 status = 1; /* and give it back immediately */
1519 iso_sched_free(stream, sched);
1523 urb->error_count = skip / period;
1525 sched->first_packet = urb->error_count;
1529 /* Use the first slot after "next" */
1530 start = next + ((start - next) & (period - 1));
1533 /* Tried to schedule too far into the future? */
1534 if (unlikely(start + span - period >= mod + wrap)) {
1535 ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
1536 urb, start, span - period, mod + wrap);
1542 stream->next_uframe = (start + skip) & (mod - 1);
1544 /* report high speed start in uframes; full speed, in frames */
1545 urb->start_frame = start & (mod - 1);
1546 if (!stream->highspeed)
1547 urb->start_frame >>= 3;
1549 /* Make sure scan_isoc() sees these */
1550 if (ehci->isoc_count == 0)
1551 ehci->last_iso_frame = now >> 3;
1555 iso_sched_free(stream, sched);
1560 /*-------------------------------------------------------------------------*/
1563 itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
1564 struct ehci_itd *itd)
1568 /* it's been recently zeroed */
1569 itd->hw_next = EHCI_LIST_END(ehci);
1570 itd->hw_bufp [0] = stream->buf0;
1571 itd->hw_bufp [1] = stream->buf1;
1572 itd->hw_bufp [2] = stream->buf2;
1574 for (i = 0; i < 8; i++)
1577 /* All other fields are filled when scheduling */
1582 struct ehci_hcd *ehci,
1583 struct ehci_itd *itd,
1584 struct ehci_iso_sched *iso_sched,
1589 struct ehci_iso_packet *uf = &iso_sched->packet [index];
1590 unsigned pg = itd->pg;
1592 // BUG_ON (pg == 6 && uf->cross);
1595 itd->index [uframe] = index;
1597 itd->hw_transaction[uframe] = uf->transaction;
1598 itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
1599 itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
1600 itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
1602 /* iso_frame_desc[].offset must be strictly increasing */
1603 if (unlikely (uf->cross)) {
1604 u64 bufp = uf->bufp + 4096;
1607 itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
1608 itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
1613 itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1615 union ehci_shadow *prev = &ehci->pshadow[frame];
1616 __hc32 *hw_p = &ehci->periodic[frame];
1617 union ehci_shadow here = *prev;
1620 /* skip any iso nodes which might belong to previous microframes */
1622 type = Q_NEXT_TYPE(ehci, *hw_p);
if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
	break;
1625 prev = periodic_next_shadow(ehci, prev, type);
hw_p = shadow_next_periodic(ehci, &here, type);
here = *prev;
1630 itd->itd_next = here;
1631 itd->hw_next = *hw_p;
prev->itd = itd;
itd->frame = frame;
wmb();
*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
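/*
 * Note the ordering in itd_link() above: the iTD's hw_next and the
 * shadow list are set up first, then wmb() makes those stores visible
 * before the final write exposes the iTD in ehci->periodic[], so the
 * HC never follows a half-built link.
 */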
1638 /* fit urb's itds into the selected schedule slot; activate as needed */
1639 static void itd_link_urb(
1640 struct ehci_hcd *ehci,
1643 struct ehci_iso_stream *stream
1647 unsigned next_uframe, uframe, frame;
1648 struct ehci_iso_sched *iso_sched = urb->hcpriv;
1649 struct ehci_itd *itd;
1651 next_uframe = stream->next_uframe & (mod - 1);
1653 if (unlikely (list_empty(&stream->td_list)))
1654 ehci_to_hcd(ehci)->self.bandwidth_allocated
1655 += stream->bandwidth;
1657 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1658 if (ehci->amd_pll_fix == 1)
1659 usb_amd_quirk_pll_disable();
1662 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
1664 /* fill iTDs uframe by uframe */
1665 for (packet = iso_sched->first_packet, itd = NULL;
1666 packet < urb->number_of_packets;) {
1668 /* ASSERT: we have all necessary itds */
1669 // BUG_ON (list_empty (&iso_sched->td_list));
1671 /* ASSERT: no itds for this endpoint in this uframe */
1673 itd = list_entry (iso_sched->td_list.next,
1674 struct ehci_itd, itd_list);
1675 list_move_tail (&itd->itd_list, &stream->td_list);
1676 itd->stream = stream;
1678 itd_init (ehci, stream, itd);
1681 uframe = next_uframe & 0x07;
1682 frame = next_uframe >> 3;
1684 itd_patch(ehci, itd, iso_sched, packet, uframe);
1686 next_uframe += stream->uperiod;
1687 next_uframe &= mod - 1;
1690 /* link completed itds into the schedule */
1691 if (((next_uframe >> 3) != frame)
1692 || packet == urb->number_of_packets) {
1693 itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
1697 stream->next_uframe = next_uframe;
1699 /* don't need that schedule data any more */
1700 iso_sched_free (stream, iso_sched);
1701 urb->hcpriv = stream;
1704 enable_periodic(ehci);
1707 #define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
1709 /* Process and recycle a completed ITD. Return true iff its urb completed,
1710 * and hence its completion callback probably added things to the hardware
1713 * Note that we carefully avoid recycling this descriptor until after any
1714 * completion callback runs, so that it won't be reused quickly. That is,
1715 * assuming (a) no more than two urbs per frame on this endpoint, and also
1716 * (b) only this endpoint's completions submit URBs. It seems some silicon
1717 * corrupts things if you reuse completed descriptors very quickly...
1719 static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
1721 struct urb *urb = itd->urb;
1722 struct usb_iso_packet_descriptor *desc;
1726 struct ehci_iso_stream *stream = itd->stream;
1727 struct usb_device *dev;
1728 bool retval = false;
1730 /* for each uframe with a packet */
1731 for (uframe = 0; uframe < 8; uframe++) {
if (likely (itd->index[uframe] == -1))
	continue;
1734 urb_index = itd->index[uframe];
1735 desc = &urb->iso_frame_desc [urb_index];
1737 t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]);
1738 itd->hw_transaction [uframe] = 0;
1740 /* report transfer status */
if (unlikely (t & ISO_ERRS)) {
	urb->error_count++;
1743 if (t & EHCI_ISOC_BUF_ERR)
1744 desc->status = usb_pipein (urb->pipe)
1745 ? -ENOSR /* hc couldn't read */
1746 : -ECOMM; /* hc couldn't write */
1747 else if (t & EHCI_ISOC_BABBLE)
1748 desc->status = -EOVERFLOW;
1749 else /* (t & EHCI_ISOC_XACTERR) */
1750 desc->status = -EPROTO;
1752 /* HC need not update length with this error */
1753 if (!(t & EHCI_ISOC_BABBLE)) {
1754 desc->actual_length = EHCI_ITD_LENGTH(t);
1755 urb->actual_length += desc->actual_length;
1757 } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
desc->status = 0;
desc->actual_length = EHCI_ITD_LENGTH(t);
1760 urb->actual_length += desc->actual_length;
1762 /* URB was too late */
1767 /* handle completion now? */
1768 if (likely ((urb_index + 1) != urb->number_of_packets))
1771 /* ASSERT: it's really the last itd for this urb
1772 list_for_each_entry (itd, &stream->td_list, itd_list)
1773 BUG_ON (itd->urb == urb);
1776 /* give urb back to the driver; completion often (re)submits */
1778 ehci_urb_done(ehci, urb, 0);
1783 disable_periodic(ehci);
1785 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1786 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1787 if (ehci->amd_pll_fix == 1)
1788 usb_amd_quirk_pll_enable();
1791 if (unlikely(list_is_singular(&stream->td_list)))
1792 ehci_to_hcd(ehci)->self.bandwidth_allocated
1793 -= stream->bandwidth;
1798 /* Add to the end of the free list for later reuse */
1799 list_move_tail(&itd->itd_list, &stream->free_list);
1801 /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
1802 if (list_empty(&stream->td_list)) {
1803 list_splice_tail_init(&stream->free_list,
1804 &ehci->cached_itd_list);
1805 start_free_itds(ehci);
1811 /*-------------------------------------------------------------------------*/
1813 static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
1816 int status = -EINVAL;
1817 unsigned long flags;
1818 struct ehci_iso_stream *stream;
1820 /* Get iso_stream head */
1821 stream = iso_stream_find (ehci, urb);
1822 if (unlikely (stream == NULL)) {
1823 ehci_dbg (ehci, "can't get iso stream\n");
1826 if (unlikely(urb->interval != stream->uperiod)) {
1827 ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
1828 stream->uperiod, urb->interval);
1832 #ifdef EHCI_URB_TRACE
1834 "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
1835 __func__, urb->dev->devpath, urb,
1836 usb_pipeendpoint (urb->pipe),
1837 usb_pipein (urb->pipe) ? "in" : "out",
1838 urb->transfer_buffer_length,
1839 urb->number_of_packets, urb->interval,
1843 /* allocate ITDs w/o locking anything */
1844 status = itd_urb_transaction (stream, ehci, urb, mem_flags);
1845 if (unlikely (status < 0)) {
1846 ehci_dbg (ehci, "can't init itds\n");
1850 /* schedule ... need to lock */
1851 spin_lock_irqsave (&ehci->lock, flags);
1852 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
1853 status = -ESHUTDOWN;
1854 goto done_not_linked;
1856 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1857 if (unlikely(status))
1858 goto done_not_linked;
1859 status = iso_stream_schedule(ehci, urb, stream);
1860 if (likely(status == 0)) {
1861 itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
1862 } else if (status > 0) {
status = 0;
ehci_urb_done(ehci, urb, 0);
} else {
	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1869 spin_unlock_irqrestore (&ehci->lock, flags);
1874 /*-------------------------------------------------------------------------*/
1877 * "Split ISO TDs" ... used for USB 1.1 devices going through the
1878 * TTs in USB 2.0 hubs. These need microframe scheduling.
1883 struct ehci_hcd *ehci,
1884 struct ehci_iso_sched *iso_sched,
1885 struct ehci_iso_stream *stream,
1890 dma_addr_t dma = urb->transfer_dma;
1892 /* how many frames are needed for these transfers */
1893 iso_sched->span = urb->number_of_packets * stream->ps.period;
1895 /* figure out per-frame sitd fields that we'll need later
1896 * when we fit new sitds into the schedule.
1898 for (i = 0; i < urb->number_of_packets; i++) {
1899 struct ehci_iso_packet *packet = &iso_sched->packet [i];
1904 length = urb->iso_frame_desc [i].length & 0x03ff;
1905 buf = dma + urb->iso_frame_desc [i].offset;
1907 trans = SITD_STS_ACTIVE;
1908 if (((i + 1) == urb->number_of_packets)
1909 && !(urb->transfer_flags & URB_NO_INTERRUPT))
1911 trans |= length << 16;
1912 packet->transaction = cpu_to_hc32(ehci, trans);
1914 /* might need to cross a buffer page within a td */
1916 packet->buf1 = (buf + length) & ~0x0fff;
1917 if (packet->buf1 != (buf & ~(u64)0x0fff))
1920 /* OUT uses multiple start-splits */
1921 if (stream->bEndpointAddress & USB_DIR_IN)
1923 length = (length + 187) / 188;
1924 if (length > 1) /* BEGIN vs ALL */
1926 packet->buf1 |= length;
1931 sitd_urb_transaction (
1932 struct ehci_iso_stream *stream,
1933 struct ehci_hcd *ehci,
1938 struct ehci_sitd *sitd;
1939 dma_addr_t sitd_dma;
1941 struct ehci_iso_sched *iso_sched;
1942 unsigned long flags;
1944 iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
1945 if (iso_sched == NULL)
1948 sitd_sched_init(ehci, iso_sched, stream, urb);
1950 /* allocate/init sITDs */
1951 spin_lock_irqsave (&ehci->lock, flags);
1952 for (i = 0; i < urb->number_of_packets; i++) {
1954 /* NOTE: for now, we don't try to handle wraparound cases
1955 * for IN (using sitd->hw_backpointer, like a FSTN), which
1956 * means we never need two sitds for full speed packets.
1960 * Use siTDs from the free list, but not siTDs that may
1961 * still be in use by the hardware.
1963 if (likely(!list_empty(&stream->free_list))) {
1964 sitd = list_first_entry(&stream->free_list,
1965 struct ehci_sitd, sitd_list);
1966 if (sitd->frame == ehci->now_frame)
1968 list_del (&sitd->sitd_list);
1969 sitd_dma = sitd->sitd_dma;
1972 spin_unlock_irqrestore (&ehci->lock, flags);
sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
		&sitd_dma);
1975 spin_lock_irqsave (&ehci->lock, flags);
1977 iso_sched_free(stream, iso_sched);
1978 spin_unlock_irqrestore(&ehci->lock, flags);
1983 memset (sitd, 0, sizeof *sitd);
1984 sitd->sitd_dma = sitd_dma;
1985 sitd->frame = NO_FRAME;
1986 list_add (&sitd->sitd_list, &iso_sched->td_list);
1989 /* temporarily store schedule info in hcpriv */
1990 urb->hcpriv = iso_sched;
1991 urb->error_count = 0;
1993 spin_unlock_irqrestore (&ehci->lock, flags);
1997 /*-------------------------------------------------------------------------*/
2001 struct ehci_hcd *ehci,
2002 struct ehci_iso_stream *stream,
2003 struct ehci_sitd *sitd,
2004 struct ehci_iso_sched *iso_sched,
2008 struct ehci_iso_packet *uf = &iso_sched->packet [index];
2009 u64 bufp = uf->bufp;
2011 sitd->hw_next = EHCI_LIST_END(ehci);
2012 sitd->hw_fullspeed_ep = stream->address;
2013 sitd->hw_uframe = stream->splits;
2014 sitd->hw_results = uf->transaction;
2015 sitd->hw_backpointer = EHCI_LIST_END(ehci);
2018 sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
2019 sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
2021 sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
2024 sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
2025 sitd->index = index;
2029 sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
2031 /* note: sitd ordering could matter (CSPLIT then SSPLIT) */
2032 sitd->sitd_next = ehci->pshadow [frame];
2033 sitd->hw_next = ehci->periodic [frame];
2034 ehci->pshadow [frame].sitd = sitd;
2035 sitd->frame = frame;
2037 ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
2040 /* fit urb's sitds into the selected schedule slot; activate as needed */
2041 static void sitd_link_urb(
2042 struct ehci_hcd *ehci,
2045 struct ehci_iso_stream *stream
2049 unsigned next_uframe;
2050 struct ehci_iso_sched *sched = urb->hcpriv;
2051 struct ehci_sitd *sitd;
2053 next_uframe = stream->next_uframe;
2055 if (list_empty(&stream->td_list))
2056 /* usbfs ignores TT bandwidth */
2057 ehci_to_hcd(ehci)->self.bandwidth_allocated
2058 += stream->bandwidth;
2060 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2061 if (ehci->amd_pll_fix == 1)
2062 usb_amd_quirk_pll_disable();
2065 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
2067 /* fill sITDs frame by frame */
2068 for (packet = sched->first_packet, sitd = NULL;
2069 packet < urb->number_of_packets;
2072 /* ASSERT: we have all necessary sitds */
2073 BUG_ON (list_empty (&sched->td_list));
2075 /* ASSERT: no itds for this endpoint in this frame */
2077 sitd = list_entry (sched->td_list.next,
2078 struct ehci_sitd, sitd_list);
2079 list_move_tail (&sitd->sitd_list, &stream->td_list);
2080 sitd->stream = stream;
2083 sitd_patch(ehci, stream, sitd, sched, packet);
2084 sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
2087 next_uframe += stream->uperiod;
2089 stream->next_uframe = next_uframe & (mod - 1);
2091 /* don't need that schedule data any more */
2092 iso_sched_free (stream, sched);
2093 urb->hcpriv = stream;
2096 enable_periodic(ehci);
2099 /*-------------------------------------------------------------------------*/
2101 #define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
2102 | SITD_STS_XACT | SITD_STS_MMF)
2104 /* Process and recycle a completed SITD. Return true iff its urb completed,
2105 * and hence its completion callback probably added things to the hardware
2108 * Note that we carefully avoid recycling this descriptor until after any
2109 * completion callback runs, so that it won't be reused quickly. That is,
2110 * assuming (a) no more than two urbs per frame on this endpoint, and also
2111 * (b) only this endpoint's completions submit URBs. It seems some silicon
2112 * corrupts things if you reuse completed descriptors very quickly...
2114 static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
2116 struct urb *urb = sitd->urb;
2117 struct usb_iso_packet_descriptor *desc;
2120 struct ehci_iso_stream *stream = sitd->stream;
2121 struct usb_device *dev;
2122 bool retval = false;
2124 urb_index = sitd->index;
2125 desc = &urb->iso_frame_desc [urb_index];
2126 t = hc32_to_cpup(ehci, &sitd->hw_results);
2128 /* report transfer status */
if (unlikely(t & SITD_ERRS)) {
	urb->error_count++;
2131 if (t & SITD_STS_DBE)
2132 desc->status = usb_pipein (urb->pipe)
2133 ? -ENOSR /* hc couldn't read */
2134 : -ECOMM; /* hc couldn't write */
2135 else if (t & SITD_STS_BABBLE)
2136 desc->status = -EOVERFLOW;
2137 else /* XACT, MMF, etc */
2138 desc->status = -EPROTO;
2139 } else if (unlikely(t & SITD_STS_ACTIVE)) {
2140 /* URB was too late */
2144 desc->actual_length = desc->length - SITD_LENGTH(t);
2145 urb->actual_length += desc->actual_length;
2148 /* handle completion now? */
2149 if ((urb_index + 1) != urb->number_of_packets)
2152 /* ASSERT: it's really the last sitd for this urb
2153 list_for_each_entry (sitd, &stream->td_list, sitd_list)
2154 BUG_ON (sitd->urb == urb);
2157 /* give urb back to the driver; completion often (re)submits */
2159 ehci_urb_done(ehci, urb, 0);
2164 disable_periodic(ehci);
2166 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
2167 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2168 if (ehci->amd_pll_fix == 1)
2169 usb_amd_quirk_pll_enable();
2172 if (list_is_singular(&stream->td_list))
2173 ehci_to_hcd(ehci)->self.bandwidth_allocated
2174 -= stream->bandwidth;
2179 /* Add to the end of the free list for later reuse */
2180 list_move_tail(&sitd->sitd_list, &stream->free_list);
2182 /* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
2183 if (list_empty(&stream->td_list)) {
2184 list_splice_tail_init(&stream->free_list,
2185 &ehci->cached_sitd_list);
2186 start_free_itds(ehci);
2193 static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
2196 int status = -EINVAL;
2197 unsigned long flags;
2198 struct ehci_iso_stream *stream;
2200 /* Get iso_stream head */
2201 stream = iso_stream_find (ehci, urb);
2202 if (stream == NULL) {
2203 ehci_dbg (ehci, "can't get iso stream\n");
2206 if (urb->interval != stream->ps.period) {
2207 ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
2208 stream->ps.period, urb->interval);
2212 #ifdef EHCI_URB_TRACE
2214 "submit %p dev%s ep%d%s-iso len %d\n",
2215 urb, urb->dev->devpath,
2216 usb_pipeendpoint (urb->pipe),
2217 usb_pipein (urb->pipe) ? "in" : "out",
2218 urb->transfer_buffer_length);
2221 /* allocate SITDs */
2222 status = sitd_urb_transaction (stream, ehci, urb, mem_flags);
if (status < 0) {
	ehci_dbg (ehci, "can't init sitds\n");
2228 /* schedule ... need to lock */
2229 spin_lock_irqsave (&ehci->lock, flags);
2230 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
2231 status = -ESHUTDOWN;
2232 goto done_not_linked;
2234 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
2235 if (unlikely(status))
2236 goto done_not_linked;
2237 status = iso_stream_schedule(ehci, urb, stream);
2238 if (likely(status == 0)) {
2239 sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
2240 } else if (status > 0) {
status = 0;
ehci_urb_done(ehci, urb, 0);
} else {
	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
2247 spin_unlock_irqrestore (&ehci->lock, flags);
2252 /*-------------------------------------------------------------------------*/
2254 static void scan_isoc(struct ehci_hcd *ehci)
2256 unsigned uf, now_frame, frame;
2257 unsigned fmask = ehci->periodic_size - 1;
2258 bool modified, live;
2261 * When running, scan from last scan point up to "now"
2262 * else clean up by scanning everything that's left.
2263 * Touches as few pages as possible: cache-friendly.
2265 if (ehci->rh_state >= EHCI_RH_RUNNING) {
2266 uf = ehci_read_frame_index(ehci);
	now_frame = (uf >> 3) & fmask;
	live = true;
} else {
	now_frame = (ehci->last_iso_frame - 1) & fmask;
	live = false;
}
2273 ehci->now_frame = now_frame;
2275 frame = ehci->last_iso_frame;
2277 union ehci_shadow q, *q_p;
2281 /* scan each element in frame's queue for completions */
2282 q_p = &ehci->pshadow [frame];
2283 hw_p = &ehci->periodic [frame];
2285 type = Q_NEXT_TYPE(ehci, *hw_p);
2288 while (q.ptr != NULL) {
2289 switch (hc32_to_cpu(ehci, type)) {
2291 /* If this ITD is still active, leave it for
2292 * later processing ... check the next entry.
2293 * No need to check for activity unless the
2296 if (frame == now_frame && live) {
2298 for (uf = 0; uf < 8; uf++) {
	if (q.itd->hw_transaction[uf] &
			ITD_ACTIVE(ehci))
		break;
}
if (uf < 8) {
2304 q_p = &q.itd->itd_next;
2305 hw_p = &q.itd->hw_next;
type = Q_NEXT_TYPE(ehci,
		q.itd->hw_next);
q = *q_p;
break;
2313 /* Take finished ITDs out of the schedule
2314 * and process them: recycle, maybe report
2315 * URB completion. HC won't cache the
2316 * pointer for much longer, if at all.
2318 *q_p = q.itd->itd_next;
2319 if (!ehci->use_dummy_qh ||
2320 q.itd->hw_next != EHCI_LIST_END(ehci))
2321 *hw_p = q.itd->hw_next;
else
	*hw_p = ehci->dummy->qh_dma;
2324 type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
2326 modified = itd_complete (ehci, q.itd);
2330 /* If this SITD is still active, leave it for
2331 * later processing ... check the next entry.
2332 * No need to check for activity unless the
2335 if (((frame == now_frame) ||
2336 (((frame + 1) & fmask) == now_frame))
2338 && (q.sitd->hw_results &
2339 SITD_ACTIVE(ehci))) {
2341 q_p = &q.sitd->sitd_next;
2342 hw_p = &q.sitd->hw_next;
type = Q_NEXT_TYPE(ehci,
		q.sitd->hw_next);
q = *q_p;
break;
2349 /* Take finished SITDs out of the schedule
2350 * and process them: recycle, maybe report
2353 *q_p = q.sitd->sitd_next;
2354 if (!ehci->use_dummy_qh ||
2355 q.sitd->hw_next != EHCI_LIST_END(ehci))
2356 *hw_p = q.sitd->hw_next;
else
	*hw_p = ehci->dummy->qh_dma;
2359 type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
2361 modified = sitd_complete (ehci, q.sitd);
2365 ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
2366 type, frame, q.ptr);
2371 /* End of the iTDs and siTDs */
2376 /* assume completion callbacks modify the queue */
2377 if (unlikely(modified && ehci->isoc_count > 0))
2381 /* Stop when we have reached the current frame */
if (frame == now_frame)
	break;
2385 /* The last frame may still have active siTDs */
2386 ehci->last_iso_frame = frame;
2387 frame = (frame + 1) & fmask;