/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: the "usec" timeout elapses
 * (major hardware flakeout), or the register reads as all-ones (hardware
 * removed).
 */
int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
	result = xhci_readl(xhci, ptr);
	if (result == ~(u32)0)		/* card removed */
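/*
 * Illustrative sketch (not part of the driver source): a typical caller
 * polls a status bit with handshake() and treats a nonzero return as
 * either a timeout (-ETIMEDOUT) or a dead controller (-ENODEV), e.g.:
 *
 *	ret = handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 *	if (ret)
 *		xhci_warn(xhci, "Controller did not halt\n");
 */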
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
	xhci_dbg(xhci, "// Halt the HC\n");
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_warn(xhci, "Host not halted after %u microseconds.\n",
/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
	xhci->xhc_state &= ~XHCI_STATE_HALTED;
/*
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
static int xhci_free_msi(struct xhci_hcd *xhci)
	if (!xhci->msix_entries)

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
static int xhci_setup_msi(struct xhci_hcd *xhci)
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
		xhci_dbg(xhci, "failed to allocate MSI entry\n");

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		xhci_dbg(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
/*
 * Free all requested IRQs.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)

	ret = xhci_free_msi(xhci);
		free_irq(pdev->irq, xhci_to_hcd(xhci));
static int xhci_setup_msix(struct xhci_hcd *xhci)
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   based on the number of interrupters in the xHCI HCSPARAMS1.
	 * - num_online_cpus: one MSI-X vector per CPU core.
	 * Add one additional vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));
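	/*
	 * Worked example (illustrative): on a 4-core system whose
	 * HCSPARAMS1 reports 8 interrupters, msix_count =
	 * min(4 + 1, 8) = 5 vectors.
	 */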
	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
		xhci_dbg(xhci, "Failed to enable MSI-X\n");

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));

	hcd->msix_enabled = 1;

	xhci_dbg(xhci, "disable MSI-X interrupt\n");
	pci_disable_msix(pdev);
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else
		pci_disable_msi(pdev);

	hcd->msix_enabled = 0;
static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
static int xhci_try_enable_msi(struct usb_hcd *hcd)
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)

	/* unregister the legacy interrupt */
		free_irq(hcd->irq, hcd);

	ret = xhci_setup_msix(xhci);
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

		/* hcd->irq is 0, we have MSI */

	xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
		xhci_err(xhci, "request interrupt %d failed\n",
	hcd->irq = pdev->irq;
static int xhci_try_enable_msi(struct usb_hcd *hcd)

static void xhci_cleanup_msix(struct xhci_hcd *xhci)

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");
/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (unsigned long) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
	spin_unlock_irqrestore(&xhci->lock, flags);

	mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
		xhci_dbg(xhci, "Quit polling the event ring.\n");
static int xhci_run_finished(struct xhci_hcd *xhci)
	if (xhci_start(xhci)) {

	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);
	xhci_dbg(xhci, "xhci_run\n");

	ret = xhci_try_enable_msi(hcd);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (unsigned long) temp_64);
	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
static void xhci_only_stop_hcd(struct usb_hcd *hcd)
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
/*
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	del_timer_sync(&xhci->event_ring_timer);

	if (xhci->quirks & XHCI_AMD_PLL_FIX)

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

	spin_lock_irq(&xhci->lock);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
static void xhci_save_registers(struct xhci_hcd *xhci)
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);

static void xhci_restore_registers(struct xhci_hcd *xhci)
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
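/*
 * Illustrative sketch of the masking above (assumes CMD_RING_RSVD_BITS
 * covers the low bits of the register): the write keeps the register's
 * reserved/status bits, installs the 64-byte-aligned dequeue address in
 * the pointer bits, and ORs the consumer cycle state into bit 0.  For a
 * dequeue DMA address of 0x12345040 and cycle state 1:
 *
 *	val_64 = (old_crcr & CMD_RING_RSVD_BITS)
 *		| (0x12345040 & ~CMD_RING_RSVD_BITS)
 *		| 1;
 */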
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
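	/*
	 * Worked example (illustrative, assuming TRBS_PER_SEGMENT == 64):
	 * with one segment, the link TRB is not usable and one slot is
	 * kept empty to tell a full ring from an empty one, so
	 * num_trbs_free = 1 * (64 - 1) - 1 = 62.
	 */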
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
	struct usb_hcd *hcd = xhci_to_hcd(xhci);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming that port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);
/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
			      STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");

		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		del_timer_sync(&xhci->event_ring_timer);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));
		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		xhci_dbg(xhci, "Start the secondary HCD\n");
		retval = xhci_run(secondary_hcd);
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	usb_hcd_resume_root_hub(hcd);
	usb_hcd_resume_root_hub(xhci->shared_hcd);

#endif	/* CONFIG_PM */
/*-------------------------------------------------------------------------*/

/*
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  To build the
 * endpoint bitmask, shift 1 left by (the return value + 1), since bit 0 is
 * the slot context.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 *
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
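/*
 * Worked examples (illustrative): for ep 0x81 (epnum 1, IN),
 * index = (1 * 2) + 1 - 1 = 2; for ep 0x02 (epnum 2, OUT),
 * index = (2 * 2) + 0 - 1 = 3; for the control endpoint 0, index = 0.
 */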
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
	return 1 << (xhci_get_endpoint_index(desc) + 1);

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
	return 1 << (ep_index + 1);
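/*
 * Worked example (illustrative): ep 1 IN has endpoint index 2, so its
 * control-context flag is 1 << (2 + 1) = 0b1000, leaving bit 0 for the
 * slot context and bit 1 for endpoint 0.
 */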
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
	return fls(added_ctxs) - 1;
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
	struct xhci_hcd *xhci;
	struct xhci_virt_device	*virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",

	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_HALTED)

	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev and "
					"virt_dev that do not match\n", func);
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
/*
 * non-error returns are a promise to giveback() the urb later;
 * we drop ownership so the next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	unsigned int slot_id, ep_index;
	struct urb_priv	*urb_priv;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				size * sizeof(struct xhci_td *), mem_flags);

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
				xhci_urb_free_priv(xhci, urb_priv);

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
					EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
					EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
		spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	xhci_urb_free_priv(xhci, urb_priv);
	spin_unlock_irqrestore(&xhci->lock, flags);
/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			ep->stream_info->num_streams - 1,
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are a few options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
	unsigned long flags;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);

	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx\n",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
	     cpu_to_le32(EP_STATE_DISABLED)) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	struct xhci_virt_device *virt_dev;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
		/* So we won't queue a reset ep command for a root hub */

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	out_ctx = virt_dev->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);

	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) &
				xhci_get_endpoint_flag(&ep->desc))) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	/* Store the usb_device pointer for later use */

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->tx_info = 0;
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
	switch (*cmd_status) {
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		/* FIXME: can we allocate more resources for the HC? */
	case COMP_2ND_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		/* FIXME: can we go back to the old state? */
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"and endpoint is not disabled.\n");
		dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
				"configure command.\n");
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
				"context command.\n");
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = ctrl_ctx->add_flags >> 2;
	valid_drop_flags = ctrl_ctx->drop_flags >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
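/*
 * Worked example (illustrative): with add_flags = 0b11100 (three
 * endpoint flags after the slot and ep0 bits are shifted off) and
 * drop_flags = 0b00100, one endpoint is both added and dropped
 * (changed), so the count of genuinely new endpoints is
 * hweight32(0b111) - hweight32(0b111 & 0b001) = 3 - 1 = 2.
 */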
static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	valid_add_flags = ctrl_ctx->add_flags >> 2;
	valid_drop_flags = ctrl_ctx->drop_flags >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though there aren't enough resources
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
	added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.\n",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
	xhci->num_active_eps += added_eps;
	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
			xhci->num_active_eps);
/*
 * The configure endpoint command failed in the xHC for some other reason, so
 * we need to revert the resources that the failed configuration would have
 * used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
	num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
			num_failed_eps,
			xhci->num_active_eps);

/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
				num_dropped_eps,
				xhci->num_active_eps);
static unsigned int xhci_get_block_size(struct usb_device *udev)
	switch (udev->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		return FS_BLOCK;
	case USB_SPEED_HIGH:
		return HS_BLOCK;
	case USB_SPEED_SUPER:
		return SS_BLOCK;
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
	default:
		/* Should never happen */
		return 1;

static unsigned int
xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
		return LS_OVERHEAD;
	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
		return FS_OVERHEAD;
	return HS_OVERHEAD;
/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
	struct xhci_interval_bw_table *bw_table;
	struct xhci_tt_bw_info *tt_info;

	/* Find the bandwidth table for the root port this TT is attached to. */
	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
	tt_info = virt_dev->tt_info;
	/* If this TT already had active endpoints, the bandwidth for this TT
	 * has already been added.  Removing all periodic endpoints (and thus
	 * making the TT inactive) will only decrease the bandwidth used.
	 */
	if (old_active_eps == 0 && tt_info->active_eps != 0) {
		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)

	/* Not sure why we would have no new active endpoints...
	 *
	 * Maybe because of an Evaluate Context change for a hub update or a
	 * control endpoint 0 max packet size change?
	 * FIXME: skip the bandwidth calculation in that case.
	 */
static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
	unsigned int bw_reserved;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval.  The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the bandwidth scheduling.  This only takes into account periodic endpoints.
 *
 * Obviously, we can't solve an NP-complete problem to find the minimum worst
 * case scenario.  Instead, we come up with an estimate that is no less than
 * the worst case bandwidth used for any one microframe, but may be an
 * overestimate.
 *
 * We walk the requirements for each endpoint by interval, starting with the
 * smallest interval, and place packets in the schedule where there is only one
 * possible way to schedule packets for that interval.  In order to simplify
 * this algorithm, we record the largest max packet size for each interval, and
 * assume all packets will be that size.
 *
 * For interval 0, we obviously must schedule all packets for each interval.
 * The bandwidth for interval 0 is just the amount of data to be transmitted
 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
 * the number of packets).
 *
 * For interval 1, we have two possible microframes to schedule those packets
 * in.  For this algorithm, if we can schedule the same number of packets for
 * each possible scheduling opportunity (each microframe), we will do so.  The
 * remaining number of packets will be saved to be transmitted in the gaps in
 * the next interval's scheduling sequence.
 *
 * As we move those remaining packets to be scheduled with interval 2 packets,
 * we have to double the number of remaining packets to transmit.  This is
 * because the intervals are actually powers of 2, and we would be transmitting
 * the previous interval's packets twice in this interval.  We also have to be
 * sure that when we look at the largest max packet size for this interval, we
 * also look at the largest max packet size for the remaining packets and take
 * the greater of the two.
 *
 * The algorithm continues to evenly distribute packets in each scheduling
 * opportunity, and push the remaining packets out, until we get to the last
 * interval.  Then those packets and their associated overhead are just added
 * to the bandwidth used.
 */
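/*
 * Worked example (illustrative, following the arithmetic in the loop
 * below): entering the i == 1 pass with 1 packet left over from
 * interval 0 and 2 interval-1 packets, packets_remaining = 2 * 1 + 2 =
 * 4.  Across 1 << (1 + 1) = 4 scheduling opportunities, 4 >> 2 = 1
 * packet is placed per opportunity and 4 % 4 = 0 packets carry into
 * interval 2.
 */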
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;

	if (virt_dev->udev->speed == USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	bw_table = virt_dev->bw_table;
	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);

	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
	 */
	if (virt_dev->tt_info) {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
					"newly activated TT.\n");
		xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
2003 /* Add in how much bandwidth will be used for interval zero, or the
2004 * rounded max ESIT payload + number of packets * largest overhead.
2006 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2007 bw_table->interval_bw[0].num_packets *
2008 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2010 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2011 unsigned int bw_added;
2012 unsigned int largest_mps;
2013 unsigned int interval_overhead;
2016 * How many packets could we transmit in this interval?
2017 * If packets didn't fit in the previous interval, we will need
2018 * to transmit that many packets twice within this interval.
2020 packets_remaining = 2 * packets_remaining +
2021 bw_table->interval_bw[i].num_packets;
2023 /* Find the largest max packet size of this or the previous
2026 if (list_empty(&bw_table->interval_bw[i].endpoints))
2029 struct xhci_virt_ep *virt_ep;
2030 struct list_head *ep_entry;
2032 ep_entry = bw_table->interval_bw[i].endpoints.next;
2033 virt_ep = list_entry(ep_entry,
2034 struct xhci_virt_ep, bw_endpoint_list);
2035 /* Convert to blocks, rounding up */
2036 largest_mps = DIV_ROUND_UP(
2037 virt_ep->bw_info.max_packet_size,
2040 if (largest_mps > packet_size)
2041 packet_size = largest_mps;
2043 /* Use the larger overhead of this or the previous interval. */
2044 interval_overhead = xhci_get_largest_overhead(
2045 &bw_table->interval_bw[i]);
2046 if (interval_overhead > overhead)
2047 overhead = interval_overhead;
2049 /* How many packets can we evenly distribute across
2050 * (1 << (i + 1)) possible scheduling opportunities?
2052 packets_transmitted = packets_remaining >> (i + 1);
2054 /* Add in the bandwidth used for those scheduled packets */
2055 bw_added = packets_transmitted * (overhead + packet_size);
2057 /* How many packets do we have remaining to transmit? */
2058 packets_remaining = packets_remaining % (1 << (i + 1));
2060 /* What largest max packet size should those packets have? */
2061 /* If we've transmitted all packets, don't carry over the
2062 * largest packet size.
2064 if (packets_remaining == 0) {
2067 } else if (packets_transmitted > 0) {
2068 /* Otherwise if we do have remaining packets, and we've
2069 * scheduled some packets in this interval, take the
2070 * largest max packet size from endpoints with this
2073 packet_size = largest_mps;
2074 overhead = interval_overhead;
2076 /* Otherwise carry over packet_size and overhead from the last
2077 * time we had a remainder.
2079 bw_used += bw_added;
2080 if (bw_used > max_bandwidth) {
2081 xhci_warn(xhci, "Not enough bandwidth. "
2082 "Proposed: %u, Max: %u\n",
2083 bw_used, max_bandwidth);
2088 * Ok, we know we have some packets left over after even-handedly
2089 * scheduling interval 15. We don't know which microframes they will
2090 * fit into, so we over-schedule and say they will be scheduled every
2093 if (packets_remaining > 0)
2094 bw_used += overhead + packet_size;
2096 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2097 unsigned int port_index = virt_dev->real_port - 1;
2099 /* OK, we're manipulating a HS device attached to a
2100 * root port bandwidth domain. Include the number of active TTs
2101 * in the bandwidth used.
2103 bw_used += TT_HS_OVERHEAD *
2104 xhci->rh_bw[port_index].num_active_tts;
2107 xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2108 "Available: %u " "percent\n",
2109 bw_used, max_bandwidth, bw_reserved,
2110 (max_bandwidth - bw_used - bw_reserved) * 100 /
2113 bw_used += bw_reserved;
2114 if (bw_used > max_bandwidth) {
2115 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2116 bw_used, max_bandwidth);
2120 bw_table->bw_used = bw_used;
2124 static bool xhci_is_async_ep(unsigned int ep_type)
2126 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2127 ep_type != ISOC_IN_EP &&
2128 ep_type != INT_IN_EP);
2131 static bool xhci_is_sync_in_ep(unsigned int ep_type)
2133 return (ep_type == ISOC_IN_EP || ep_type != INT_IN_EP);
2136 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2138 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2140 if (ep_bw->ep_interval == 0)
2141 return SS_OVERHEAD_BURST +
2142 (ep_bw->mult * ep_bw->num_packets *
2143 (SS_OVERHEAD + mps));
2144 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2145 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2146 1 << ep_bw->ep_interval);
2150 void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2151 struct xhci_bw_info *ep_bw,
2152 struct xhci_interval_bw_table *bw_table,
2153 struct usb_device *udev,
2154 struct xhci_virt_ep *virt_ep,
2155 struct xhci_tt_bw_info *tt_info)
2157 struct xhci_interval_bw *interval_bw;
2158 int normalized_interval;
2160 if (xhci_is_async_ep(ep_bw->type))
2163 if (udev->speed == USB_SPEED_SUPER) {
2164 if (xhci_is_sync_in_ep(ep_bw->type))
2165 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2166 xhci_get_ss_bw_consumed(ep_bw);
2168 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2169 xhci_get_ss_bw_consumed(ep_bw);
2173 /* SuperSpeed endpoints never get added to intervals in the table, so
2174 * this check is only valid for HS/FS/LS devices.
2176 if (list_empty(&virt_ep->bw_endpoint_list))
2178 /* For LS/FS devices, we need to translate the interval expressed in
2179 * microframes to frames.
2181 if (udev->speed == USB_SPEED_HIGH)
2182 normalized_interval = ep_bw->ep_interval;
2184 normalized_interval = ep_bw->ep_interval - 3;
2186 if (normalized_interval == 0)
2187 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2188 interval_bw = &bw_table->interval_bw[normalized_interval];
2189 interval_bw->num_packets -= ep_bw->num_packets;
2190 switch (udev->speed) {
2192 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2194 case USB_SPEED_FULL:
2195 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2197 case USB_SPEED_HIGH:
2198 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2200 case USB_SPEED_SUPER:
2201 case USB_SPEED_UNKNOWN:
2202 case USB_SPEED_WIRELESS:
2203 /* Should never happen because only LS/FS/HS endpoints will get
2204 * added to the endpoint list.
2209 tt_info->active_eps -= 1;
2210 list_del_init(&virt_ep->bw_endpoint_list);
2213 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2214 struct xhci_bw_info *ep_bw,
2215 struct xhci_interval_bw_table *bw_table,
2216 struct usb_device *udev,
2217 struct xhci_virt_ep *virt_ep,
2218 struct xhci_tt_bw_info *tt_info)
2220 struct xhci_interval_bw *interval_bw;
2221 struct xhci_virt_ep *smaller_ep;
2222 int normalized_interval;
2224 if (xhci_is_async_ep(ep_bw->type))
2227 if (udev->speed == USB_SPEED_SUPER) {
2228 if (xhci_is_sync_in_ep(ep_bw->type))
2229 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2230 xhci_get_ss_bw_consumed(ep_bw);
2232 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2233 xhci_get_ss_bw_consumed(ep_bw);
2237 /* For LS/FS devices, we need to translate the interval expressed in
2238 * microframes to frames.
2240 if (udev->speed == USB_SPEED_HIGH)
2241 normalized_interval = ep_bw->ep_interval;
2243 normalized_interval = ep_bw->ep_interval - 3;
2245 if (normalized_interval == 0)
2246 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2247 interval_bw = &bw_table->interval_bw[normalized_interval];
2248 interval_bw->num_packets += ep_bw->num_packets;
2249 switch (udev->speed) {
2251 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2253 case USB_SPEED_FULL:
2254 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2256 case USB_SPEED_HIGH:
2257 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2259 case USB_SPEED_SUPER:
2260 case USB_SPEED_UNKNOWN:
2261 case USB_SPEED_WIRELESS:
2262 /* Should never happen because only LS/FS/HS endpoints will get
2263 * added to the endpoint list.
2269 tt_info->active_eps += 1;
2270 /* Insert the endpoint into the list, largest max packet size first. */
2271 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2273 if (ep_bw->max_packet_size >=
2274 smaller_ep->bw_info.max_packet_size) {
2275 /* Add the new ep before the smaller endpoint */
2276 list_add_tail(&virt_ep->bw_endpoint_list,
2277 &smaller_ep->bw_endpoint_list);
2281 /* Add the new endpoint at the end of the list. */
2282 list_add_tail(&virt_ep->bw_endpoint_list,
2283 &interval_bw->endpoints);
2286 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2287 struct xhci_virt_device *virt_dev,
2290 struct xhci_root_port_bw_info *rh_bw_info;
2291 if (!virt_dev->tt_info)
2294 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2295 if (old_active_eps == 0 &&
2296 virt_dev->tt_info->active_eps != 0) {
2297 rh_bw_info->num_active_tts += 1;
2298 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2299 } else if (old_active_eps != 0 &&
2300 virt_dev->tt_info->active_eps == 0) {
2301 rh_bw_info->num_active_tts -= 1;
2302 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2306 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2307 struct xhci_virt_device *virt_dev,
2308 struct xhci_container_ctx *in_ctx)
2310 struct xhci_bw_info ep_bw_info[31];
2312 struct xhci_input_control_ctx *ctrl_ctx;
2313 int old_active_eps = 0;
2315 if (virt_dev->tt_info)
2316 old_active_eps = virt_dev->tt_info->active_eps;
2318 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2320 for (i = 0; i < 31; i++) {
2321 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2324 /* Make a copy of the BW info in case we need to revert this */
2325 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2326 sizeof(ep_bw_info[i]));
2327 /* Drop the endpoint from the interval table if the endpoint is
2328 * being dropped or changed.
2330 if (EP_IS_DROPPED(ctrl_ctx, i))
2331 xhci_drop_ep_from_interval_table(xhci,
2332 &virt_dev->eps[i].bw_info,
2338 /* Overwrite the information stored in the endpoints' bw_info */
2339 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2340 for (i = 0; i < 31; i++) {
2341 /* Add any changed or added endpoints to the interval table */
2342 if (EP_IS_ADDED(ctrl_ctx, i))
2343 xhci_add_ep_to_interval_table(xhci,
2344 &virt_dev->eps[i].bw_info,
2351 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2352 /* Ok, this fits in the bandwidth we have.
2353 * Update the number of active TTs.
2355 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2359 /* We don't have enough bandwidth for this, revert the stored info. */
2360 for (i = 0; i < 31; i++) {
2361 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2364 /* Drop the new copies of any added or changed endpoints from
2365 * the interval table.
2367 if (EP_IS_ADDED(ctrl_ctx, i)) {
2368 xhci_drop_ep_from_interval_table(xhci,
2369 &virt_dev->eps[i].bw_info,
2375 /* Revert the endpoint back to its old information */
2376 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2377 sizeof(ep_bw_info[i]));
2378 /* Add any changed or dropped endpoints back into the table */
2379 if (EP_IS_DROPPED(ctrl_ctx, i))
2380 xhci_add_ep_to_interval_table(xhci,
2381 &virt_dev->eps[i].bw_info,
2391 /* Issue a configure endpoint command or evaluate context command
2392 * and wait for it to finish.
2394 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2395 struct usb_device *udev,
2396 struct xhci_command *command,
2397 bool ctx_change, bool must_succeed)
2401 unsigned long flags;
2402 struct xhci_container_ctx *in_ctx;
2403 struct completion *cmd_completion;
2405 struct xhci_virt_device *virt_dev;
2406 union xhci_trb *cmd_trb;
2408 spin_lock_irqsave(&xhci->lock, flags);
2409 virt_dev = xhci->devs[udev->slot_id];
2412 in_ctx = command->in_ctx;
2414 in_ctx = virt_dev->in_ctx;
2416 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2417 xhci_reserve_host_resources(xhci, in_ctx)) {
2418 spin_unlock_irqrestore(&xhci->lock, flags);
2419 xhci_warn(xhci, "Not enough host resources, "
2420 "active endpoint contexts = %u\n",
2421 xhci->num_active_eps);
2424 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2425 xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
2426 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2427 xhci_free_host_resources(xhci, in_ctx);
2428 spin_unlock_irqrestore(&xhci->lock, flags);
2429 xhci_warn(xhci, "Not enough bandwidth\n");
2434 cmd_completion = command->completion;
2435 cmd_status = &command->status;
2436 command->command_trb = xhci->cmd_ring->enqueue;
2438 /* Enqueue pointer can be left pointing to the link TRB,
2439 * we must handle that
2441 if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
2442 command->command_trb =
2443 xhci->cmd_ring->enq_seg->next->trbs;
2445 list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
2447 cmd_completion = &virt_dev->cmd_completion;
2448 cmd_status = &virt_dev->cmd_status;
2450 init_completion(cmd_completion);
2452 cmd_trb = xhci->cmd_ring->dequeue;
2454 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2455 udev->slot_id, must_succeed);
2457 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
2458 udev->slot_id, must_succeed);
2461 list_del(&command->cmd_list);
2462 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2463 xhci_free_host_resources(xhci, in_ctx);
2464 spin_unlock_irqrestore(&xhci->lock, flags);
2465 xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
2468 xhci_ring_cmd_db(xhci);
2469 spin_unlock_irqrestore(&xhci->lock, flags);
2471 /* Wait for the configure endpoint command to complete */
2472 timeleft = wait_for_completion_interruptible_timeout(
2474 XHCI_CMD_DEFAULT_TIMEOUT);
2475 if (timeleft <= 0) {
2476 xhci_warn(xhci, "%s while waiting for %s command\n",
2477 timeleft == 0 ? "Timeout" : "Signal",
2479 "configure endpoint" :
2480 "evaluate context");
2481 /* cancel the configure endpoint command */
2482 ret = xhci_cancel_cmd(xhci, command, cmd_trb);
2489 ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
2491 ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
2493 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2494 spin_lock_irqsave(&xhci->lock, flags);
2495 /* If the command failed, remove the reserved resources.
2496 * Otherwise, clean up the estimate to include dropped eps.
2499 xhci_free_host_resources(xhci, in_ctx);
2501 xhci_finish_resource_reservation(xhci, in_ctx);
2502 spin_unlock_irqrestore(&xhci->lock, flags);
2507 /* Called after one or more calls to xhci_add_endpoint() or
2508 * xhci_drop_endpoint(). If this call fails, the USB core is expected
2509 * to call xhci_reset_bandwidth().
2511 * Since we are in the middle of changing either configuration or
2512 * installing a new alt setting, the USB core won't allow URBs to be
2513 * enqueued for any endpoint on the old config or interface. Nothing
2514 * else should be touching the xhci->devs[slot_id] structure, so we
2515 * don't need to take the xhci->lock for manipulating that.
2517 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2521 struct xhci_hcd *xhci;
2522 struct xhci_virt_device *virt_dev;
2523 struct xhci_input_control_ctx *ctrl_ctx;
2524 struct xhci_slot_ctx *slot_ctx;
2526 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2529 xhci = hcd_to_xhci(hcd);
2530 if (xhci->xhc_state & XHCI_STATE_DYING)
2533 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2534 virt_dev = xhci->devs[udev->slot_id];
2536 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2537 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
2538 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2539 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2540 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2542 /* Don't issue the command if there's no endpoints to update. */
2543 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2544 ctrl_ctx->drop_flags == 0)
2547 xhci_dbg(xhci, "New Input Control Context:\n");
2548 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2549 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
2550 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2552 ret = xhci_configure_endpoint(xhci, udev, NULL,
2555 /* Callee should call reset_bandwidth() */
2559 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
2560 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
2561 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2563 /* Free any rings that were dropped, but not changed. */
2564 for (i = 1; i < 31; ++i) {
2565 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2566 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
2567 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2569 xhci_zero_in_ctx(xhci, virt_dev);
2571 * Install any rings for completely new endpoints or changed endpoints,
2572 * and free or cache any old rings from changed endpoints.
2574 for (i = 1; i < 31; ++i) {
2575 if (!virt_dev->eps[i].new_ring)
2577 /* Only cache or free the old ring if it exists.
2578 * It may not if this is the first add of an endpoint.
2580 if (virt_dev->eps[i].ring) {
2581 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2583 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2584 virt_dev->eps[i].new_ring = NULL;
2590 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2592 struct xhci_hcd *xhci;
2593 struct xhci_virt_device *virt_dev;
2596 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2599 xhci = hcd_to_xhci(hcd);
2601 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2602 virt_dev = xhci->devs[udev->slot_id];
2603 /* Free any rings allocated for added endpoints */
2604 for (i = 0; i < 31; ++i) {
2605 if (virt_dev->eps[i].new_ring) {
2606 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2607 virt_dev->eps[i].new_ring = NULL;
2610 xhci_zero_in_ctx(xhci, virt_dev);
2613 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2614 struct xhci_container_ctx *in_ctx,
2615 struct xhci_container_ctx *out_ctx,
2616 u32 add_flags, u32 drop_flags)
2618 struct xhci_input_control_ctx *ctrl_ctx;
2619 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2620 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2621 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2622 xhci_slot_copy(xhci, in_ctx, out_ctx);
2623 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2625 xhci_dbg(xhci, "Input Context:\n");
2626 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
2629 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2630 unsigned int slot_id, unsigned int ep_index,
2631 struct xhci_dequeue_state *deq_state)
2633 struct xhci_container_ctx *in_ctx;
2634 struct xhci_ep_ctx *ep_ctx;
2638 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2639 xhci->devs[slot_id]->out_ctx, ep_index);
2640 in_ctx = xhci->devs[slot_id]->in_ctx;
2641 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2642 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2643 deq_state->new_deq_ptr);
2645 xhci_warn(xhci, "WARN Cannot submit config ep after "
2646 "reset ep command\n");
2647 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2648 deq_state->new_deq_seg,
2649 deq_state->new_deq_ptr);
2652 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2654 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2655 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2656 xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
2659 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2660 struct usb_device *udev, unsigned int ep_index)
2662 struct xhci_dequeue_state deq_state;
2663 struct xhci_virt_ep *ep;
2665 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
2666 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2667 /* We need to move the HW's dequeue pointer past this TD,
2668 * or it will attempt to resend it on the next doorbell ring.
2670 xhci_find_new_dequeue_state(xhci, udev->slot_id,
2671 ep_index, ep->stopped_stream, ep->stopped_td,
2674 /* HW with the reset endpoint quirk will use the saved dequeue state to
2675 * issue a configure endpoint command later.
2677 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2678 xhci_dbg(xhci, "Queueing new dequeue state\n");
2679 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2680 ep_index, ep->stopped_stream, &deq_state);
2682 /* Better hope no one uses the input context between now and the
2683 * reset endpoint completion!
2684 * XXX: No idea how this hardware will react when stream rings
2687 xhci_dbg(xhci, "Setting up input context for "
2688 "configure endpoint command\n");
2689 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2690 ep_index, &deq_state);
2694 /* Deal with stalled endpoints. The core should have sent the control message
2695 * to clear the halt condition. However, we need to make the xHCI hardware
2696 * reset its sequence number, since a device will expect a sequence number of
2697 * zero after the halt condition is cleared.
2698 * Context: in_interrupt
2700 void xhci_endpoint_reset(struct usb_hcd *hcd,
2701 struct usb_host_endpoint *ep)
2703 struct xhci_hcd *xhci;
2704 struct usb_device *udev;
2705 unsigned int ep_index;
2706 unsigned long flags;
2708 struct xhci_virt_ep *virt_ep;
2710 xhci = hcd_to_xhci(hcd);
2711 udev = (struct usb_device *) ep->hcpriv;
2712 /* Called with a root hub endpoint (or an endpoint that wasn't added
2713 * with xhci_add_endpoint()
2717 ep_index = xhci_get_endpoint_index(&ep->desc);
2718 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2719 if (!virt_ep->stopped_td) {
2720 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
2721 ep->desc.bEndpointAddress);
2724 if (usb_endpoint_xfer_control(&ep->desc)) {
2725 xhci_dbg(xhci, "Control endpoint stall already handled.\n");
2729 xhci_dbg(xhci, "Queueing reset endpoint command\n");
2730 spin_lock_irqsave(&xhci->lock, flags);
2731 ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
2733 * Can't change the ring dequeue pointer until it's transitioned to the
2734 * stopped state, which is only upon a successful reset endpoint
2735 * command. Better hope that last command worked!
2738 xhci_cleanup_stalled_ring(xhci, udev, ep_index);
2739 kfree(virt_ep->stopped_td);
2740 xhci_ring_cmd_db(xhci);
2742 virt_ep->stopped_td = NULL;
2743 virt_ep->stopped_trb = NULL;
2744 virt_ep->stopped_stream = 0;
2745 spin_unlock_irqrestore(&xhci->lock, flags);
2748 xhci_warn(xhci, "FIXME allocate a new ring segment\n");
2751 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2752 struct usb_device *udev, struct usb_host_endpoint *ep,
2753 unsigned int slot_id)
2756 unsigned int ep_index;
2757 unsigned int ep_state;
2761 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
2764 if (ep->ss_ep_comp.bmAttributes == 0) {
2765 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2766 " descriptor for ep 0x%x does not support streams\n",
2767 ep->desc.bEndpointAddress);
2771 ep_index = xhci_get_endpoint_index(&ep->desc);
2772 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2773 if (ep_state & EP_HAS_STREAMS ||
2774 ep_state & EP_GETTING_STREAMS) {
2775 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
2776 "already has streams set up.\n",
2777 ep->desc.bEndpointAddress);
2778 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
2779 "dynamic stream context array reallocation.\n");
2782 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
2783 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
2784 "endpoint 0x%x; URBs are pending.\n",
2785 ep->desc.bEndpointAddress);
2791 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
2792 unsigned int *num_streams, unsigned int *num_stream_ctxs)
2794 unsigned int max_streams;
2796 /* The stream context array size must be a power of two */
2797 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
2799 * Find out how many primary stream array entries the host controller
2800 * supports. Later we may use secondary stream arrays (similar to 2nd
2801 * level page entries), but that's an optional feature for xHCI host
2802 * controllers. xHCs must support at least 4 stream IDs.
2804 max_streams = HCC_MAX_PSA(xhci->hcc_params);
2805 if (*num_stream_ctxs > max_streams) {
2806 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
2808 *num_stream_ctxs = max_streams;
2809 *num_streams = max_streams;
2813 /* Returns an error code if one of the endpoint already has streams.
2814 * This does not change any data structures, it only checks and gathers
2817 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
2818 struct usb_device *udev,
2819 struct usb_host_endpoint **eps, unsigned int num_eps,
2820 unsigned int *num_streams, u32 *changed_ep_bitmask)
2822 unsigned int max_streams;
2823 unsigned int endpoint_flag;
2827 for (i = 0; i < num_eps; i++) {
2828 ret = xhci_check_streams_endpoint(xhci, udev,
2829 eps[i], udev->slot_id);
2833 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
2834 if (max_streams < (*num_streams - 1)) {
2835 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
2836 eps[i]->desc.bEndpointAddress,
2838 *num_streams = max_streams+1;
2841 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
2842 if (*changed_ep_bitmask & endpoint_flag)
2844 *changed_ep_bitmask |= endpoint_flag;
2849 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
2850 struct usb_device *udev,
2851 struct usb_host_endpoint **eps, unsigned int num_eps)
2853 u32 changed_ep_bitmask = 0;
2854 unsigned int slot_id;
2855 unsigned int ep_index;
2856 unsigned int ep_state;
2859 slot_id = udev->slot_id;
2860 if (!xhci->devs[slot_id])
2863 for (i = 0; i < num_eps; i++) {
2864 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2865 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2866 /* Are streams already being freed for the endpoint? */
2867 if (ep_state & EP_GETTING_NO_STREAMS) {
2868 xhci_warn(xhci, "WARN Can't disable streams for "
2870 "streams are being disabled already.",
2871 eps[i]->desc.bEndpointAddress);
2874 /* Are there actually any streams to free? */
2875 if (!(ep_state & EP_HAS_STREAMS) &&
2876 !(ep_state & EP_GETTING_STREAMS)) {
2877 xhci_warn(xhci, "WARN Can't disable streams for "
2879 "streams are already disabled!",
2880 eps[i]->desc.bEndpointAddress);
2881 xhci_warn(xhci, "WARN xhci_free_streams() called "
2882 "with non-streams endpoint\n");
2885 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
2887 return changed_ep_bitmask;
2891 * The USB device drivers use this function (though the HCD interface in USB
2892 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
2893 * coordinate mass storage command queueing across multiple endpoints (basically
2894 * a stream ID == a task ID).
2896 * Setting up streams involves allocating the same size stream context array
2897 * for each endpoint and issuing a configure endpoint command for all endpoints.
2899 * Don't allow the call to succeed if one endpoint only supports one stream
2900 * (which means it doesn't support streams at all).
2902 * Drivers may get less stream IDs than they asked for, if the host controller
2903 * hardware or endpoints claim they can't support the number of requested
2906 int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
2907 struct usb_host_endpoint **eps, unsigned int num_eps,
2908 unsigned int num_streams, gfp_t mem_flags)
2911 struct xhci_hcd *xhci;
2912 struct xhci_virt_device *vdev;
2913 struct xhci_command *config_cmd;
2914 unsigned int ep_index;
2915 unsigned int num_stream_ctxs;
2916 unsigned long flags;
2917 u32 changed_ep_bitmask = 0;
2922 /* Add one to the number of streams requested to account for
2923 * stream 0 that is reserved for xHCI usage.
2926 xhci = hcd_to_xhci(hcd);
2927 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
2930 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
2932 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
2936 /* Check to make sure all endpoints are not already configured for
2937 * streams. While we're at it, find the maximum number of streams that
2938 * all the endpoints will support and check for duplicate endpoints.
2940 spin_lock_irqsave(&xhci->lock, flags);
2941 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
2942 num_eps, &num_streams, &changed_ep_bitmask);
2944 xhci_free_command(xhci, config_cmd);
2945 spin_unlock_irqrestore(&xhci->lock, flags);
2948 if (num_streams <= 1) {
2949 xhci_warn(xhci, "WARN: endpoints can't handle "
2950 "more than one stream.\n");
2951 xhci_free_command(xhci, config_cmd);
2952 spin_unlock_irqrestore(&xhci->lock, flags);
2955 vdev = xhci->devs[udev->slot_id];
2956 /* Mark each endpoint as being in transition, so
2957 * xhci_urb_enqueue() will reject all URBs.
2959 for (i = 0; i < num_eps; i++) {
2960 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2961 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
2963 spin_unlock_irqrestore(&xhci->lock, flags);
2965 /* Setup internal data structures and allocate HW data structures for
2966 * streams (but don't install the HW structures in the input context
2967 * until we're sure all memory allocation succeeded).
2969 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
2970 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
2971 num_stream_ctxs, num_streams);
2973 for (i = 0; i < num_eps; i++) {
2974 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2975 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
2977 num_streams, mem_flags);
2978 if (!vdev->eps[ep_index].stream_info)
2980 /* Set maxPstreams in endpoint context and update deq ptr to
2981 * point to stream context array. FIXME
2985 /* Set up the input context for a configure endpoint command. */
2986 for (i = 0; i < num_eps; i++) {
2987 struct xhci_ep_ctx *ep_ctx;
2989 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2990 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
2992 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
2993 vdev->out_ctx, ep_index);
2994 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
2995 vdev->eps[ep_index].stream_info);
2997 /* Tell the HW to drop its old copy of the endpoint context info
2998 * and add the updated copy from the input context.
3000 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3001 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3003 /* Issue and wait for the configure endpoint command */
3004 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3007 /* xHC rejected the configure endpoint command for some reason, so we
3008 * leave the old ring intact and free our internal streams data
3014 spin_lock_irqsave(&xhci->lock, flags);
3015 for (i = 0; i < num_eps; i++) {
3016 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3017 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3018 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3019 udev->slot_id, ep_index);
3020 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3022 xhci_free_command(xhci, config_cmd);
3023 spin_unlock_irqrestore(&xhci->lock, flags);
3025 /* Subtract 1 for stream 0, which drivers can't use */
3026 return num_streams - 1;
3029 /* If it didn't work, free the streams! */
3030 for (i = 0; i < num_eps; i++) {
3031 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3032 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3033 vdev->eps[ep_index].stream_info = NULL;
3034 /* FIXME Unset maxPstreams in endpoint context and
3035 * update deq ptr to point to normal string ring.
3037 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3038 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3039 xhci_endpoint_zero(xhci, vdev, eps[i]);
3041 xhci_free_command(xhci, config_cmd);
3045 /* Transition the endpoint from using streams to being a "normal" endpoint
3048 * Modify the endpoint context state, submit a configure endpoint command,
3049 * and free all endpoint rings for streams if that completes successfully.
3051 int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3052 struct usb_host_endpoint **eps, unsigned int num_eps,
3056 struct xhci_hcd *xhci;
3057 struct xhci_virt_device *vdev;
3058 struct xhci_command *command;
3059 unsigned int ep_index;
3060 unsigned long flags;
3061 u32 changed_ep_bitmask;
3063 xhci = hcd_to_xhci(hcd);
3064 vdev = xhci->devs[udev->slot_id];
3066 /* Set up a configure endpoint command to remove the streams rings */
3067 spin_lock_irqsave(&xhci->lock, flags);
3068 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3069 udev, eps, num_eps);
3070 if (changed_ep_bitmask == 0) {
3071 spin_unlock_irqrestore(&xhci->lock, flags);
3075 /* Use the xhci_command structure from the first endpoint. We may have
3076 * allocated too many, but the driver may call xhci_free_streams() for
3077 * each endpoint it grouped into one call to xhci_alloc_streams().
3079 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3080 command = vdev->eps[ep_index].stream_info->free_streams_command;
3081 for (i = 0; i < num_eps; i++) {
3082 struct xhci_ep_ctx *ep_ctx;
3084 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3085 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3086 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3087 EP_GETTING_NO_STREAMS;
3089 xhci_endpoint_copy(xhci, command->in_ctx,
3090 vdev->out_ctx, ep_index);
3091 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
3092 &vdev->eps[ep_index]);
3094 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3095 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3096 spin_unlock_irqrestore(&xhci->lock, flags);
3098 /* Issue and wait for the configure endpoint command,
3099 * which must succeed.
3101 ret = xhci_configure_endpoint(xhci, udev, command,
3104 /* xHC rejected the configure endpoint command for some reason, so we
3105 * leave the streams rings intact.
3110 spin_lock_irqsave(&xhci->lock, flags);
3111 for (i = 0; i < num_eps; i++) {
3112 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3113 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3114 vdev->eps[ep_index].stream_info = NULL;
3115 /* FIXME Unset maxPstreams in endpoint context and
3116 * update deq ptr to point to normal string ring.
3118 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3119 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3121 spin_unlock_irqrestore(&xhci->lock, flags);
3127 * Deletes endpoint resources for endpoints that were active before a Reset
3128 * Device command, or a Disable Slot command. The Reset Device command leaves
3129 * the control endpoint intact, whereas the Disable Slot command deletes it.
3131 * Must be called with xhci->lock held.
3133 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3134 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3137 unsigned int num_dropped_eps = 0;
3138 unsigned int drop_flags = 0;
3140 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3141 if (virt_dev->eps[i].ring) {
3142 drop_flags |= 1 << i;
3146 xhci->num_active_eps -= num_dropped_eps;
3147 if (num_dropped_eps)
3148 xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
3150 num_dropped_eps, drop_flags,
3151 xhci->num_active_eps);
3155 * This submits a Reset Device Command, which will set the device state to 0,
3156 * set the device address to 0, and disable all the endpoints except the default
3157 * control endpoint. The USB core should come back and call
3158 * xhci_address_device(), and then re-set up the configuration. If this is
3159 * called because of a usb_reset_and_verify_device(), then the old alternate
3160 * settings will be re-installed through the normal bandwidth allocation
3163 * Wait for the Reset Device command to finish. Remove all structures
3164 * associated with the endpoints that were disabled. Clear the input device
3165 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
3167 * If the virt_dev to be reset does not exist or does not match the udev,
3168 * it means the device is lost, possibly due to the xHC restore error and
3169 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3170 * re-allocate the device.
3172 int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3175 unsigned long flags;
3176 struct xhci_hcd *xhci;
3177 unsigned int slot_id;
3178 struct xhci_virt_device *virt_dev;
3179 struct xhci_command *reset_device_cmd;
3181 int last_freed_endpoint;
3182 struct xhci_slot_ctx *slot_ctx;
3183 int old_active_eps = 0;
3185 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3188 xhci = hcd_to_xhci(hcd);
3189 slot_id = udev->slot_id;
3190 virt_dev = xhci->devs[slot_id];
3192 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3193 "not exist. Re-allocate the device\n", slot_id);
3194 ret = xhci_alloc_dev(hcd, udev);
3201 if (virt_dev->udev != udev) {
3202 /* If the virt_dev and the udev does not match, this virt_dev
3203 * may belong to another udev.
3204 * Re-allocate the device.
3206 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3207 "not match the udev. Re-allocate the device\n",
3209 ret = xhci_alloc_dev(hcd, udev);
3216 /* If device is not setup, there is no point in resetting it */
3217 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3218 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3219 SLOT_STATE_DISABLED)
3222 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3223 /* Allocate the command structure that holds the struct completion.
3224 * Assume we're in process context, since the normal device reset
3225 * process has to wait for the device anyway. Storage devices are
3226 * reset as part of error handling, so use GFP_NOIO instead of
3229 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3230 if (!reset_device_cmd) {
3231 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3235 /* Attempt to submit the Reset Device command to the command ring */
3236 spin_lock_irqsave(&xhci->lock, flags);
3237 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
3239 /* Enqueue pointer can be left pointing to the link TRB,
3240 * we must handle that
3242 if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
3243 reset_device_cmd->command_trb =
3244 xhci->cmd_ring->enq_seg->next->trbs;
3246 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
3247 ret = xhci_queue_reset_device(xhci, slot_id);
3249 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3250 list_del(&reset_device_cmd->cmd_list);
3251 spin_unlock_irqrestore(&xhci->lock, flags);
3252 goto command_cleanup;
3254 xhci_ring_cmd_db(xhci);
3255 spin_unlock_irqrestore(&xhci->lock, flags);
3257 /* Wait for the Reset Device command to finish */
3258 timeleft = wait_for_completion_interruptible_timeout(
3259 reset_device_cmd->completion,
3260 USB_CTRL_SET_TIMEOUT);
3261 if (timeleft <= 0) {
3262 xhci_warn(xhci, "%s while waiting for reset device command\n",
3263 timeleft == 0 ? "Timeout" : "Signal");
3264 spin_lock_irqsave(&xhci->lock, flags);
3265 /* The timeout might have raced with the event ring handler, so
3266 * only delete from the list if the item isn't poisoned.
3268 if (reset_device_cmd->cmd_list.next != LIST_POISON1)
3269 list_del(&reset_device_cmd->cmd_list);
3270 spin_unlock_irqrestore(&xhci->lock, flags);
3272 goto command_cleanup;
3275 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3276 * unless we tried to reset a slot ID that wasn't enabled,
3277 * or the device wasn't in the addressed or configured state.
3279 ret = reset_device_cmd->status;
3281 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
3282 case COMP_CTX_STATE: /* 0.96 completion code for same thing */
3283 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
3285 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3286 xhci_info(xhci, "Not freeing device rings.\n");
3287 /* Don't treat this as an error. May change my mind later. */
3289 goto command_cleanup;
3291 xhci_dbg(xhci, "Successful reset device command.\n");
3294 if (xhci_is_vendor_info_code(xhci, ret))
3296 xhci_warn(xhci, "Unknown completion code %u for "
3297 "reset device command.\n", ret);
3299 goto command_cleanup;
3302 /* Free up host controller endpoint resources */
3303 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3304 spin_lock_irqsave(&xhci->lock, flags);
3305 /* Don't delete the default control endpoint resources */
3306 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3307 spin_unlock_irqrestore(&xhci->lock, flags);
3310 /* Everything but endpoint 0 is disabled, so free or cache the rings. */
3311 last_freed_endpoint = 1;
3312 for (i = 1; i < 31; ++i) {
3313 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3315 if (ep->ep_state & EP_HAS_STREAMS) {
3316 xhci_free_stream_info(xhci, ep->stream_info);
3317 ep->stream_info = NULL;
3318 ep->ep_state &= ~EP_HAS_STREAMS;
3322 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3323 last_freed_endpoint = i;
3325 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3326 xhci_drop_ep_from_interval_table(xhci,
3327 &virt_dev->eps[i].bw_info,
3332 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3334 /* If necessary, update the number of active TTs on this root port */
3335 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3337 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3338 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3342 xhci_free_command(xhci, reset_device_cmd);
3347 * At this point, the struct usb_device is about to go away, the device has
3348 * disconnected, and all traffic has been stopped and the endpoints have been
3349 * disabled. Free any HC data structures associated with that device.
3351 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3353 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3354 struct xhci_virt_device *virt_dev;
3355 unsigned long flags;
3359 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3360 /* If the host is halted due to driver unload, we still need to free the
3363 if (ret <= 0 && ret != -ENODEV)
3366 virt_dev = xhci->devs[udev->slot_id];
3368 /* Stop any wayward timer functions (which may grab the lock) */
3369 for (i = 0; i < 31; ++i) {
3370 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
3371 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3374 if (udev->usb2_hw_lpm_enabled) {
3375 xhci_set_usb2_hardware_lpm(hcd, udev, 0);
3376 udev->usb2_hw_lpm_enabled = 0;
3379 spin_lock_irqsave(&xhci->lock, flags);
3380 /* Don't disable the slot if the host controller is dead. */
3381 state = xhci_readl(xhci, &xhci->op_regs->status);
3382 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3383 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3384 xhci_free_virt_device(xhci, udev->slot_id);
3385 spin_unlock_irqrestore(&xhci->lock, flags);
3389 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
3390 spin_unlock_irqrestore(&xhci->lock, flags);
3391 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3394 xhci_ring_cmd_db(xhci);
3395 spin_unlock_irqrestore(&xhci->lock, flags);
3397 * Event command completion handler will free any data structures
3398 * associated with the slot. XXX Can free sleep?
3403 * Checks if we have enough host controller resources for the default control
3406 * Must be called with xhci->lock held.
3408 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3410 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3411 xhci_dbg(xhci, "Not enough ep ctxs: "
3412 "%u active, need to add 1, limit is %u.\n",
3413 xhci->num_active_eps, xhci->limit_active_eps);
3416 xhci->num_active_eps += 1;
3417 xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
3418 xhci->num_active_eps);
3424 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3425 * timed out, or allocating memory failed. Returns 1 on success.
3427 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3429 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3430 unsigned long flags;
3433 union xhci_trb *cmd_trb;
3435 spin_lock_irqsave(&xhci->lock, flags);
3436 cmd_trb = xhci->cmd_ring->dequeue;
3437 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
3439 spin_unlock_irqrestore(&xhci->lock, flags);
3440 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3443 xhci_ring_cmd_db(xhci);
3444 spin_unlock_irqrestore(&xhci->lock, flags);
3446 /* XXX: how much time for xHC slot assignment? */
3447 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3448 XHCI_CMD_DEFAULT_TIMEOUT);
3449 if (timeleft <= 0) {
3450 xhci_warn(xhci, "%s while waiting for a slot\n",
3451 timeleft == 0 ? "Timeout" : "Signal");
3452 /* cancel the enable slot request */
3453 return xhci_cancel_cmd(xhci, NULL, cmd_trb);
3456 if (!xhci->slot_id) {
3457 xhci_err(xhci, "Error while assigning device slot ID\n");
3461 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3462 spin_lock_irqsave(&xhci->lock, flags);
3463 ret = xhci_reserve_host_control_ep_resources(xhci);
3465 spin_unlock_irqrestore(&xhci->lock, flags);
3466 xhci_warn(xhci, "Not enough host resources, "
3467 "active endpoint contexts = %u\n",
3468 xhci->num_active_eps);
3471 spin_unlock_irqrestore(&xhci->lock, flags);
3473 /* Use GFP_NOIO, since this function can be called from
3474 * xhci_discover_or_reset_device(), which may be called as part of
3475 * mass storage driver error handling.
3477 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
3478 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3481 udev->slot_id = xhci->slot_id;
3482 /* Is this a LS or FS device under a HS hub? */
3483 /* Hub or peripherial? */
3487 /* Disable slot, if we can do it without mem alloc */
3488 spin_lock_irqsave(&xhci->lock, flags);
3489 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
3490 xhci_ring_cmd_db(xhci);
3491 spin_unlock_irqrestore(&xhci->lock, flags);
3496 * Issue an Address Device command (which will issue a SetAddress request to
3498 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
3499 * we should only issue and wait on one address command at the same time.
3501 * We add one to the device address issued by the hardware because the USB core
3502 * uses address 1 for the root hubs (even though they're not really devices).
3504 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3506 unsigned long flags;
3508 struct xhci_virt_device *virt_dev;
3510 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3511 struct xhci_slot_ctx *slot_ctx;
3512 struct xhci_input_control_ctx *ctrl_ctx;
3514 union xhci_trb *cmd_trb;
3516 if (!udev->slot_id) {
3517 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
3521 virt_dev = xhci->devs[udev->slot_id];
3523 if (WARN_ON(!virt_dev)) {
3525 * In plug/unplug torture test with an NEC controller,
3526 * a zero-dereference was observed once due to virt_dev = 0.
3527 * Print useful debug rather than crash if it is observed again!
3529 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3534 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3536 * If this is the first Set Address since device plug-in or
3537 * virt_device realloaction after a resume with an xHCI power loss,
3538 * then set up the slot context.
3540 if (!slot_ctx->dev_info)
3541 xhci_setup_addressable_virt_dev(xhci, udev);
3542 /* Otherwise, update the control endpoint ring enqueue pointer. */
3544 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3545 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
3546 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3547 ctrl_ctx->drop_flags = 0;
3549 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3550 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3552 spin_lock_irqsave(&xhci->lock, flags);
3553 cmd_trb = xhci->cmd_ring->dequeue;
3554 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
3557 spin_unlock_irqrestore(&xhci->lock, flags);
3558 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3561 xhci_ring_cmd_db(xhci);
3562 spin_unlock_irqrestore(&xhci->lock, flags);
3564 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
3565 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3566 XHCI_CMD_DEFAULT_TIMEOUT);
3567 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
3568 * the SetAddress() "recovery interval" required by USB and aborting the
3569 * command on a timeout.
3571 if (timeleft <= 0) {
3572 xhci_warn(xhci, "%s while waiting for address device command\n",
3573 timeleft == 0 ? "Timeout" : "Signal");
3574 /* cancel the address device command */
3575 ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
3581 switch (virt_dev->cmd_status) {
3582 case COMP_CTX_STATE:
3584 xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
3589 dev_warn(&udev->dev, "Device not responding to set address.\n");
3593 dev_warn(&udev->dev, "ERROR: Incompatible device for address "
3594 "device command.\n");
3598 xhci_dbg(xhci, "Successful Address Device command\n");
3601 xhci_err(xhci, "ERROR: unexpected command completion "
3602 "code 0x%x.\n", virt_dev->cmd_status);
3603 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3604 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3611 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3612 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
3613 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
3615 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3616 (unsigned long long)
3617 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
3618 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
3619 (unsigned long long)virt_dev->out_ctx->dma);
3620 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3621 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3622 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3623 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3625 * USB core uses address 1 for the roothubs, so we add one to the
3626 * address given back to us by the HC.
3628 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3629 /* Use kernel assigned address for devices; store xHC assigned
3630 * address locally. */
3631 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
3633 /* Zero the input context control for later use */
3634 ctrl_ctx->add_flags = 0;
3635 ctrl_ctx->drop_flags = 0;
3637 xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
3642 #ifdef CONFIG_USB_SUSPEND
3644 /* BESL to HIRD Encoding array for USB2 LPM */
3645 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
3646 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
3648 /* Calculate HIRD/BESL for USB2 PORTPMSC*/
3649 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
3650 struct usb_device *udev)
3652 int u2del, besl, besl_host;
3653 int besl_device = 0;
3656 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
3657 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
3659 if (field & USB_BESL_SUPPORT) {
3660 for (besl_host = 0; besl_host < 16; besl_host++) {
3661 if (xhci_besl_encoding[besl_host] >= u2del)
3664 /* Use baseline BESL value as default */
3665 if (field & USB_BESL_BASELINE_VALID)
3666 besl_device = USB_GET_BESL_BASELINE(field);
3667 else if (field & USB_BESL_DEEP_VALID)
3668 besl_device = USB_GET_BESL_DEEP(field);
3673 besl_host = (u2del - 51) / 75 + 1;
3676 besl = besl_host + besl_device;
3683 static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
3684 struct usb_device *udev)
3686 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3687 struct dev_info *dev_info;
3688 __le32 __iomem **port_array;
3689 __le32 __iomem *addr, *pm_addr;
3691 unsigned int port_num;
3692 unsigned long flags;
3696 if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
3700 /* we only support lpm for non-hub device connected to root hub yet */
3701 if (!udev->parent || udev->parent->parent ||
3702 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3705 spin_lock_irqsave(&xhci->lock, flags);
3707 /* Look for devices in lpm_failed_devs list */
3708 dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
3709 le16_to_cpu(udev->descriptor.idProduct);
3710 list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
3711 if (dev_info->dev_id == dev_id) {
3717 port_array = xhci->usb2_ports;
3718 port_num = udev->portnum - 1;
3720 if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
3721 xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
3727 * Test USB 2.0 software LPM.
3728 * FIXME: some xHCI 1.0 hosts may implement a new register to set up
3729 * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
3730 * in the June 2011 errata release.
3732 xhci_dbg(xhci, "test port %d software LPM\n", port_num);
3734 * Set L1 Device Slot and HIRD/BESL.
3735 * Check device's USB 2.0 extension descriptor to determine whether
3736 * HIRD or BESL shoule be used. See USB2.0 LPM errata.
3738 pm_addr = port_array[port_num] + 1;
3739 hird = xhci_calculate_hird_besl(xhci, udev);
3740 temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
3741 xhci_writel(xhci, temp, pm_addr);
3743 /* Set port link state to U2(L1) */
3744 addr = port_array[port_num];
3745 xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);
3748 spin_unlock_irqrestore(&xhci->lock, flags);
3750 spin_lock_irqsave(&xhci->lock, flags);
3752 /* Check L1 Status */
3753 ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
3754 if (ret != -ETIMEDOUT) {
3755 /* enter L1 successfully */
3756 temp = xhci_readl(xhci, addr);
3757 xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
3761 temp = xhci_readl(xhci, pm_addr);
3762 xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
3763 port_num, temp & PORT_L1S_MASK);
3767 /* Resume the port */
3768 xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);
3770 spin_unlock_irqrestore(&xhci->lock, flags);
3772 spin_lock_irqsave(&xhci->lock, flags);
3775 xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);
3777 /* Check PORTSC to make sure the device is in the right state */
3779 temp = xhci_readl(xhci, addr);
3780 xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
3781 if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
3782 (temp & PORT_PLS_MASK) != XDEV_U0) {
3783 xhci_dbg(xhci, "port L1 resume fail\n");
3789 /* Insert dev to lpm_failed_devs list */
3790 xhci_warn(xhci, "device LPM test failed, may disconnect and "
3792 dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
3797 dev_info->dev_id = dev_id;
3798 INIT_LIST_HEAD(&dev_info->list);
3799 list_add(&dev_info->list, &xhci->lpm_failed_devs);
3801 xhci_ring_device(xhci, udev->slot_id);
3805 spin_unlock_irqrestore(&xhci->lock, flags);
3809 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
3810 struct usb_device *udev, int enable)
3812 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3813 __le32 __iomem **port_array;
3814 __le32 __iomem *pm_addr;
3816 unsigned int port_num;
3817 unsigned long flags;
3820 if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
3824 if (!udev->parent || udev->parent->parent ||
3825 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3828 if (udev->usb2_hw_lpm_capable != 1)
3831 spin_lock_irqsave(&xhci->lock, flags);
3833 port_array = xhci->usb2_ports;
3834 port_num = udev->portnum - 1;
3835 pm_addr = port_array[port_num] + 1;
3836 temp = xhci_readl(xhci, pm_addr);
3838 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
3839 enable ? "enable" : "disable", port_num);
3841 hird = xhci_calculate_hird_besl(xhci, udev);
3844 temp &= ~PORT_HIRD_MASK;
3845 temp |= PORT_HIRD(hird) | PORT_RWE;
3846 xhci_writel(xhci, temp, pm_addr);
3847 temp = xhci_readl(xhci, pm_addr);
3849 xhci_writel(xhci, temp, pm_addr);
3851 temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
3852 xhci_writel(xhci, temp, pm_addr);
3855 spin_unlock_irqrestore(&xhci->lock, flags);
3859 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
3861 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3864 ret = xhci_usb2_software_lpm_test(hcd, udev);
3866 xhci_dbg(xhci, "software LPM test succeed\n");
3867 if (xhci->hw_lpm_support == 1) {
3868 udev->usb2_hw_lpm_capable = 1;
3869 ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
3871 udev->usb2_hw_lpm_enabled = 1;
#else

int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
			struct usb_device *udev, int enable)
{
	return 0;
}

int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

#endif /* CONFIG_USB_SUSPEND */
/*---------------------- USB 3.0 Link PM functions ------------------------*/

#ifdef CONFIG_PM
/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
static unsigned long long xhci_service_interval_to_ns(
		struct usb_endpoint_descriptor *desc)
{
	return (1 << (desc->bInterval - 1)) * 125 * 1000;
}
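/*
 * Worked example (illustrative bInterval, not from any particular device):
 * bInterval = 4 gives 2^(4 - 1) * 125 us = 1 ms, so the helper above
 * returns 1,000,000 ns.
 */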
static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
		enum usb3_link_state state)
{
	unsigned long long sel;
	unsigned long long pel;
	unsigned int max_sel_pel;
	char *state_name;

	switch (state) {
	case USB3_LPM_U1:
		/* Convert SEL and PEL stored in nanoseconds to microseconds */
		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
		state_name = "U1";
		break;
	case USB3_LPM_U2:
		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
		state_name = "U2";
		break;
	default:
		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
				__func__);
		return USB3_LPM_DISABLED;
	}

	if (sel <= max_sel_pel && pel <= max_sel_pel)
		return USB3_LPM_DEVICE_INITIATED;

	if (sel > max_sel_pel)
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long SEL %llu us\n",
				state_name, sel);
	else
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long PEL %llu us\n",
				state_name, pel);
	return USB3_LPM_DISABLED;
}
/* Returns the hub-encoded U1 timeout value.
 * The U1 timeout should be the maximum of the following values:
 *  - For control endpoints, U1 system exit latency (SEL) * 3
 *  - For bulk endpoints, U1 SEL * 5
 *  - For interrupt endpoints:
 *    - Notification EPs, U1 SEL * 3
 *    - Periodic EPs, max(105% of the service interval, U1 SEL * 2)
 *  - For isochronous endpoints, max(105% of the service interval, U1 SEL * 2)
 */
static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	int ep_type;
	int intr_type;

	ep_type = usb_endpoint_type(desc);
	switch (ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		timeout_ns = udev->u1_params.sel * 3;
		break;
	case USB_ENDPOINT_XFER_BULK:
		timeout_ns = udev->u1_params.sel * 5;
		break;
	case USB_ENDPOINT_XFER_INT:
		intr_type = usb_endpoint_interrupt_type(desc);
		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
			timeout_ns = udev->u1_params.sel * 3;
			break;
		}
		/* Otherwise the calculation is the same as isoc eps */
	case USB_ENDPOINT_XFER_ISOC:
		timeout_ns = xhci_service_interval_to_ns(desc);
		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
		if (timeout_ns < udev->u1_params.sel * 2)
			timeout_ns = udev->u1_params.sel * 2;
		break;
	default:
		return 0;
	}

	/* The U1 timeout is encoded in 1us intervals. */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
	/* Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */
	if (timeout_ns == USB3_LPM_DISABLED)
		timeout_ns++;

	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U1.
	 */
	if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
			"due to long timeout %llu us\n", timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
}
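/*
 * Worked example (illustrative values): a bulk endpoint on a device that
 * reports U1 SEL = 20 us (20,000 ns) needs 5 * 20,000 = 100,000 ns, which
 * encodes as 100 in 1 us units; assuming that fits under
 * USB3_LPM_U1_MAX_TIMEOUT, hub-initiated U1 stays enabled with a 100 us
 * timeout.
 */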
/* Returns the hub-encoded U2 timeout value.
 * The U2 timeout should be the maximum of:
 *  - 10 ms (to avoid the bandwidth impact on the scheduler)
 *  - largest bInterval of any active periodic endpoint (to avoid going
 *    into lower power link states between intervals)
 *  - the U2 Exit Latency of the device
 */
static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	unsigned long long u2_del_ns;

	timeout_ns = 10 * 1000 * 1000;

	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
			(xhci_service_interval_to_ns(desc) > timeout_ns))
		timeout_ns = xhci_service_interval_to_ns(desc);

	u2_del_ns = udev->bos->ss_cap->bU2DevExitLat * 1000;
	if (u2_del_ns > timeout_ns)
		timeout_ns = u2_del_ns;

	/* The U2 timeout is encoded in 256us intervals */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U2.
	 */
	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
			"due to long timeout %llu (in 256 us units)\n",
			timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
}
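/*
 * Worked example (illustrative values): with no periodic endpoints and a
 * reported U2 exit latency of 2 ms, the timeout is max(10 ms, 2 ms) =
 * 10,000,000 ns, which encodes as DIV_ROUND_UP(10000 us, 256 us) = 40.
 */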
static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	if (state == USB3_LPM_U1) {
		if (xhci->quirks & XHCI_INTEL_HOST)
			return xhci_calculate_intel_u1_timeout(udev, desc);
	} else {
		if (xhci->quirks & XHCI_INTEL_HOST)
			return xhci_calculate_intel_u2_timeout(udev, desc);
	}

	return USB3_LPM_DISABLED;
}
static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	u16 alt_timeout;

	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
		desc, state, timeout);

	/* If we found we can't enable hub-initiated LPM, or
	 * the U1 or U2 exit latency was too high to allow
	 * device-initiated LPM as well, just stop searching.
	 */
	if (alt_timeout == USB3_LPM_DISABLED ||
			alt_timeout == USB3_LPM_DEVICE_INITIATED) {
		*timeout = alt_timeout;
		return -E2BIG;
	}
	if (alt_timeout > *timeout)
		*timeout = alt_timeout;
	return 0;
}
static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_interface *alt,
		enum usb3_link_state state,
		u16 *timeout)
{
	int j;

	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
		if (xhci_update_timeout_for_endpoint(xhci, udev,
				&alt->endpoint[j].desc, state, timeout))
			return -E2BIG;
	}
	return 0;
}
static int xhci_check_intel_tier_policy(struct usb_device *udev,
		enum usb3_link_state state)
{
	struct usb_device *parent;
	unsigned int num_hubs;

	if (state == USB3_LPM_U2)
		return 0;

	/* Don't enable U1 if the device is on a 2nd tier hub or lower. */
	for (parent = udev->parent, num_hubs = 0; parent->parent;
			parent = parent->parent)
		num_hubs++;

	if (num_hubs < 2)
		return 0;

	dev_dbg(&udev->dev, "Disabling U1 link state for device"
			" below second-tier hub.\n");
	dev_dbg(&udev->dev, "Plug device into first-tier hub "
			"to decrease power consumption.\n");
	return -E2BIG;
}
static int xhci_check_tier_policy(struct xhci_hcd *xhci,
		struct usb_device *udev,
		enum usb3_link_state state)
{
	if (xhci->quirks & XHCI_INTEL_HOST)
		return xhci_check_intel_tier_policy(udev, state);
	return -EINVAL;
}
/* Returns the U1 or U2 timeout that should be enabled.
 * If the tier check or timeout setting functions return with a non-zero exit
 * code, that means the timeout value has been finalized and we shouldn't look
 * at any more endpoints.
 */
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_host_config *config;
	char *state_name;
	int i;
	u16 timeout = USB3_LPM_DISABLED;

	if (state == USB3_LPM_U1)
		state_name = "U1";
	else if (state == USB3_LPM_U2)
		state_name = "U2";
	else {
		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
				state);
		return timeout;
	}

	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return timeout;

	/* Gather some information about the currently installed configuration
	 * and alternate interface settings.
	 */
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
			state, &timeout))
		return timeout;

	config = udev->actconfig;
	if (!config)
		return timeout;

	for (i = 0; i < USB_MAXINTERFACES; i++) {
		struct usb_driver *driver;
		struct usb_interface *intf = config->interface[i];

		if (!intf)
			continue;

		/* Check if any currently bound drivers want hub-initiated LPM
		 * disabled.
		 */
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
						"at request of driver %s\n",
						state_name, driver->name);
				return xhci_get_timeout_no_hub_lpm(udev, state);
			}
		}

		/* Not sure how this could happen... */
		if (!intf->cur_altsetting)
			continue;

		if (xhci_update_timeout_for_interface(xhci, udev,
					intf->cur_altsetting,
					state, &timeout))
			return timeout;
	}
	return timeout;
}
/*
 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
 * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
 */
static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
			struct usb_device *udev, u16 max_exit_latency)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}

	/* Attempt to issue an Evaluate Context command to change the MEL. */
	virt_dev = xhci->devs[udev->slot_id];
	command = xhci->lpm_command;
	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);

	xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
	xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, command->in_ctx, 0);

	/* Issue and wait for the evaluate context command. */
	ret = xhci_configure_endpoint(xhci, udev, command,
			true, true);
	xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);

	if (!ret) {
		spin_lock_irqsave(&xhci->lock, flags);
		virt_dev->current_mel = max_exit_latency;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}
static int calculate_max_exit_latency(struct usb_device *udev,
		enum usb3_link_state state_changed,
		u16 hub_encoded_timeout)
{
	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us = 0;
	bool disabling_u1;
	bool disabling_u2;
	bool enabling_u1;
	bool enabling_u2;

	disabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);
	disabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);

	enabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);
	enabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);

	/* If U1 was already enabled and we're not disabling it,
	 * or we're going to enable U1, account for the U1 max exit latency.
	 */
	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
			enabling_u1)
		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
			enabling_u2)
		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);

	if (u1_mel_us > u2_mel_us)
		mel_us = u1_mel_us;
	else
		mel_us = u2_mel_us;

	/* xHCI host controller max exit latency field is only 16 bits wide. */
	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
				"is too big.\n", mel_us);
		return -E2BIG;
	}
	return mel_us;
}
/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd	*xhci;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}
int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd	*xhci;
	u16 mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	return ret;
}

#else /* CONFIG_PM */
int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}

#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * high-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
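		/*
		 * Worked example (illustrative value): a hub reporting a TT
		 * think time of 1332 ns (two units of 666 ns, i.e. 16 FS bit
		 * times) encodes as (1332 / 666) - 1 = 1.
		 */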
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size.  Why? */
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}
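/*
 * Note on the shift above: MFINDEX counts 125 us microframes, and there are
 * eight microframes per 1 ms frame, so ">> 3" converts the microframe index
 * into the frame number the USB core expects.
 */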
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	struct device *dev = hcd->self.controller;
	int retval;
	u32 temp;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;
	/* xHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
		if (!xhci)
			return -ENOMEM;
		*((struct xhci_hcd **) hcd->hcd_priv) = xhci;
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * The USB 2.0 roothub under xHCI has an integrated TT
		 * (rate matching hub), as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/* xHCI private pointer was set in xhci_pci_probe for the second
		 * registered roothub.
		 */
		xhci = hcd_to_xhci(hcd);
		temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
		if (HCC_64BIT_ADDR(temp)) {
			xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
		} else {
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
		}
		return 0;
	}

	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
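	/*
	 * The next two reads are not redundant: HCIVERSION lives in the upper
	 * 16 bits of the combined CAPLENGTH/HCIVERSION register (hc_capbase),
	 * so the version is pulled out of hc_capbase before hcc_params is
	 * overwritten with the real HCCPARAMS value.
	 */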
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	xhci_print_registers(xhci);

	get_quirks(dev, xhci);

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		goto error;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Reset complete\n");

	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	if (HCC_64BIT_ADDR(temp)) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
	} else {
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Called HCD init\n");

	return 0;
error:
	kfree(xhci);
	return retval;
}
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
static int __init xhci_hcd_init(void)
{
	int retval;

	retval = xhci_register_pci();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.\n");
		return retval;
	}
	retval = xhci_register_plat();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering platform driver.\n");
		goto unreg_pci;
	}
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
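	/*
	 * The size expressions read "number of 32-bit fields * 32 bits / 8
	 * bits per byte"; e.g. the slot context must be exactly 8*32/8 = 32
	 * bytes or the hardware would see its fields at the wrong offsets.
	 */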
	return 0;
unreg_pci:
	xhci_unregister_pci();
	return retval;
}
module_init(xhci_hcd_init);
static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
	xhci_unregister_plat();
}
module_exit(xhci_hcd_cleanup);