/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>

#include "xhci.h"
#include "xhci-trace.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
                                        unsigned int cycle_state, gfp_t flags)
{
        struct xhci_segment *seg;
        dma_addr_t dma;
        int i;

        seg = kzalloc(sizeof *seg, flags);
        if (!seg)
                return NULL;

        seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
        if (!seg->trbs) {
                kfree(seg);
                return NULL;
        }

        memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
        /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
        if (cycle_state == 0) {
                for (i = 0; i < TRBS_PER_SEGMENT; i++)
                        seg->trbs[i].link.control |= TRB_CYCLE;
        }
        seg->dma = dma;
        seg->next = NULL;

        return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
        if (seg->trbs) {
                dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
                seg->trbs = NULL;
        }
        kfree(seg);
}

static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
                                struct xhci_segment *first)
{
        struct xhci_segment *seg;

        seg = first->next;
        while (seg != first) {
                struct xhci_segment *next = seg->next;
                xhci_segment_free(xhci, seg);
                seg = next;
        }
        xhci_segment_free(xhci, first);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
                struct xhci_segment *next, enum xhci_ring_type type)
{
        u32 val;

        if (!prev || !next)
                return;
        prev->next = next;
        if (type != TYPE_EVENT) {
                prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
                        cpu_to_le64(next->dma);

                /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
                val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
                val &= ~TRB_TYPE_BITMASK;
                val |= TRB_TYPE(TRB_LINK);
                /* Always set the chain bit with 0.95 hardware */
                /* Set chain bit for isoc rings on AMD 0.96 host */
                if (xhci_link_trb_quirk(xhci) ||
                                (type == TYPE_ISOC &&
                                 (xhci->quirks & XHCI_AMD_0x96_HOST)))
                        val |= TRB_CHAIN;
                prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
        }
}

/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
                struct xhci_segment *first, struct xhci_segment *last,
                unsigned int num_segs)
{
        struct xhci_segment *next;

        if (!ring || !first || !last)
                return;

        next = ring->enq_seg->next;
        xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
        xhci_link_segments(xhci, last, next, ring->type);
        ring->num_segs += num_segs;
        ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

        if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
                ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
                        &= ~cpu_to_le32(LINK_TOGGLE);
                last->trbs[TRBS_PER_SEGMENT-1].link.control
                        |= cpu_to_le32(LINK_TOGGLE);
                ring->last_seg = last;
        }
}

/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
        if (!ring)
                return;
        if (ring->first_seg)
                xhci_free_segments_for_ring(xhci, ring->first_seg);
        kfree(ring);
}

static void xhci_initialize_ring_info(struct xhci_ring *ring,
                                        unsigned int cycle_state)
{
        /* The ring is empty, so the enqueue pointer == dequeue pointer */
        ring->enqueue = ring->first_seg->trbs;
        ring->enq_seg = ring->first_seg;
        ring->dequeue = ring->enqueue;
        ring->deq_seg = ring->first_seg;
        /* The ring is initialized to 0.  The producer must write 1 to the cycle
         * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
         * compare CCS to the cycle bit to check ownership, so CCS = 1.
         *
         * New rings are initialized with cycle state equal to 1; if we are
         * handling ring expansion, set the cycle state equal to the old ring.
         */
        ring->cycle_state = cycle_state;
        /* Not necessary for new rings, but needed for re-initialized rings */
        ring->enq_updates = 0;
        ring->deq_updates = 0;

        /*
         * Each segment has a link TRB, and leave an extra TRB for SW
         * accounting purpose
         */
        ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
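
/*
 * Worked example (illustrative, not part of the original source): with the
 * usual TRBS_PER_SEGMENT of 64, a freshly initialized two-segment ring starts
 * with 2 * (64 - 1) - 1 = 125 free TRBs -- one TRB per segment is consumed by
 * the link TRB, and one more is held back so the enqueue pointer can never
 * catch up to the dequeue pointer.
 */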

/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
                struct xhci_segment **first, struct xhci_segment **last,
                unsigned int num_segs, unsigned int cycle_state,
                enum xhci_ring_type type, gfp_t flags)
{
        struct xhci_segment *prev;

        prev = xhci_segment_alloc(xhci, cycle_state, flags);
        if (!prev)
                return -ENOMEM;
        num_segs--;

        *first = prev;
        while (num_segs > 0) {
                struct xhci_segment *next;

                next = xhci_segment_alloc(xhci, cycle_state, flags);
                if (!next) {
                        prev = *first;
                        while (prev) {
                                next = prev->next;
                                xhci_segment_free(xhci, prev);
                                prev = next;
                        }
                        return -ENOMEM;
                }
                xhci_link_segments(xhci, prev, next, type);

                prev = next;
                num_segs--;
        }
        xhci_link_segments(xhci, prev, *first, type);
        *last = prev;

        return 0;
}

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
                unsigned int num_segs, unsigned int cycle_state,
                enum xhci_ring_type type, gfp_t flags)
{
        struct xhci_ring *ring;
        int ret;

        ring = kzalloc(sizeof *(ring), flags);
        if (!ring)
                return NULL;

        ring->num_segs = num_segs;
        INIT_LIST_HEAD(&ring->td_list);
        ring->type = type;
        if (num_segs == 0)
                return ring;

        ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
                        &ring->last_seg, num_segs, cycle_state, type, flags);
        if (ret)
                goto fail;

        /* Only the event ring does not use link TRBs */
        if (type != TYPE_EVENT) {
                /* See section 4.9.2.1 and 6.4.4.1 */
                ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
                        cpu_to_le32(LINK_TOGGLE);
        }
        xhci_initialize_ring_info(ring, cycle_state);
        return ring;

fail:
        kfree(ring);
        return NULL;
}

void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                unsigned int ep_index)
{
        int rings_cached;

        rings_cached = virt_dev->num_rings_cached;
        if (rings_cached < XHCI_MAX_RINGS_CACHED) {
                virt_dev->ring_cache[rings_cached] =
                        virt_dev->eps[ep_index].ring;
                virt_dev->num_rings_cached++;
                xhci_dbg(xhci, "Cached old ring, "
                                "%d ring%s cached\n",
                                virt_dev->num_rings_cached,
                                (virt_dev->num_rings_cached > 1) ? "s" : "");
        } else {
                xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
                xhci_dbg(xhci, "Ring cache full (%d rings), "
                                "freeing ring\n",
                                virt_dev->num_rings_cached);
        }
        virt_dev->eps[ep_index].ring = NULL;
}

/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
 * pointers to the beginning of the ring.
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
                struct xhci_ring *ring, unsigned int cycle_state,
                enum xhci_ring_type type)
{
        struct xhci_segment *seg = ring->first_seg;
        int i;

        do {
                memset(seg->trbs, 0,
                                sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
                if (cycle_state == 0) {
                        for (i = 0; i < TRBS_PER_SEGMENT; i++)
                                seg->trbs[i].link.control |= TRB_CYCLE;
                }
                /* All endpoint rings have link TRBs */
                xhci_link_segments(xhci, seg, seg->next, type);
                seg = seg->next;
        } while (seg != ring->first_seg);
        ring->type = type;
        xhci_initialize_ring_info(ring, cycle_state);
        /* td list should be empty since all URBs have been cancelled,
         * but just in case...
         */
        INIT_LIST_HEAD(&ring->td_list);
}

/*
 * Expand an existing ring.
 * Allocate a new set of segments -- at least as many as the ring already has,
 * or enough to hold num_trbs, whichever is larger -- and link the new segments
 * into the existing ring.
 */
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
                                unsigned int num_trbs, gfp_t flags)
{
        struct xhci_segment *first;
        struct xhci_segment *last;
        unsigned int num_segs;
        unsigned int num_segs_needed;
        int ret;

        num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
                                (TRBS_PER_SEGMENT - 1);
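
        /*
         * Worked example (illustrative only): with TRBS_PER_SEGMENT == 64,
         * each segment holds 63 usable TRBs, so a request for 200 TRBs
         * rounds up to (200 + 62) / 63 = 4 new segments.
         */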

        /* Allocate the number of segments we need, or double the ring size */
        num_segs = ring->num_segs > num_segs_needed ?
                        ring->num_segs : num_segs_needed;

        ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
                        num_segs, ring->cycle_state, ring->type, flags);
        if (ret)
                return -ENOMEM;

        xhci_link_rings(xhci, ring, first, last, num_segs);
        xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
                        "ring expansion succeeded, now has %d segments",
                        ring->num_segs);

        return 0;
}

#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
                                                    int type, gfp_t flags)
{
        struct xhci_container_ctx *ctx;

        if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
                return NULL;

        ctx = kzalloc(sizeof(*ctx), flags);
        if (!ctx)
                return NULL;

        ctx->type = type;
        ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
        if (type == XHCI_CTX_TYPE_INPUT)
                ctx->size += CTX_SIZE(xhci->hcc_params);
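
        /*
         * Size illustration (not part of the original code): with 32-byte
         * contexts, a device context is 32 entries * 32 bytes = 1024 bytes
         * (one slot context plus 31 endpoint contexts); an input context
         * prepends one more entry, the input control context, for 1056
         * bytes total.  The 64-byte-context figures are double that.
         */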

        ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
        if (!ctx->bytes) {
                kfree(ctx);
                return NULL;
        }
        memset(ctx->bytes, 0, ctx->size);
        return ctx;
}

static void xhci_free_container_ctx(struct xhci_hcd *xhci,
                             struct xhci_container_ctx *ctx)
{
        if (!ctx)
                return;
        dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
        kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
                                              struct xhci_container_ctx *ctx)
{
        if (ctx->type != XHCI_CTX_TYPE_INPUT)
                return NULL;

        return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
                                        struct xhci_container_ctx *ctx)
{
        if (ctx->type == XHCI_CTX_TYPE_DEVICE)
                return (struct xhci_slot_ctx *)ctx->bytes;

        return (struct xhci_slot_ctx *)
                (ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
                                    struct xhci_container_ctx *ctx,
                                    unsigned int ep_index)
{
        /* Increment ep index by the offset of the start of the ep ctx array */
        ep_index++;
        if (ctx->type == XHCI_CTX_TYPE_INPUT)
                ep_index++;

        return (struct xhci_ep_ctx *)
                (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
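
/*
 * Worked example (illustrative only): for ep_index 0 (endpoint 0) with
 * 32-byte contexts, the slot context occupies entry 0 of a device context,
 * so EP0 lives at byte offset 1 * 32 = 32; in an input context the input
 * control context shifts everything down one more entry, putting EP0 at
 * byte offset 2 * 32 = 64.
 */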

/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
                unsigned int num_stream_ctxs,
                struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
                dma_free_coherent(&pdev->dev,
                                sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
                                stream_ctx, dma);
        else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
                return dma_pool_free(xhci->small_streams_pool,
                                stream_ctx, dma);
        else
                return dma_pool_free(xhci->medium_streams_pool,
                                stream_ctx, dma);
}

/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
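
/*
 * Size illustration (not from the original source): stream contexts are
 * 16 bytes each, so the 64-byte minimum array holds 4 entries and the 1MB
 * maximum holds 65536 entries, matching the 16-bit stream ID space.
 */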
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
                unsigned int num_stream_ctxs, dma_addr_t *dma,
                gfp_t mem_flags)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
                return dma_alloc_coherent(&pdev->dev,
                                sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
                                dma, mem_flags);
        else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
                return dma_pool_alloc(xhci->small_streams_pool,
                                mem_flags, dma);
        else
                return dma_pool_alloc(xhci->medium_streams_pool,
                                mem_flags, dma);
}

struct xhci_ring *xhci_dma_to_transfer_ring(
                struct xhci_virt_ep *ep,
                u64 address)
{
        if (ep->ep_state & EP_HAS_STREAMS)
                return radix_tree_lookup(&ep->stream_info->trb_address_map,
                                address >> TRB_SEGMENT_SHIFT);
        return ep->ring;
}

struct xhci_ring *xhci_stream_id_to_ring(
                struct xhci_virt_device *dev,
                unsigned int ep_index,
                unsigned int stream_id)
{
        struct xhci_virt_ep *ep = &dev->eps[ep_index];

        if (stream_id == 0)
                return ep->ring;
        if (!ep->stream_info)
                return NULL;

        if (stream_id > ep->stream_info->num_streams)
                return NULL;
        return ep->stream_info->stream_rings[stream_id];
}

/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number of
 * stream context array entries must be a power of two.
 *
 * We need a radix tree for mapping physical addresses of TRBs to the stream ID
 * they belong to.  We need to do this because the host controller won't tell
 * us which stream ring the TRB came from.  We could store the stream ID in an
 * event data TRB, but that doesn't help us for the cancellation case, since the
 * endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses.  For example, say I
 * have segments of size 1KB, that are always 64-byte aligned.  A segment may
 * start at 0x10c91000 and end at 0x10c913f0.  If I drop the lower 10 bits, the
 * key to the stream ring is 0x43244.  I can use the DMA address of the TRB to
 * pass the radix tree a key to get the right stream ring:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as its key.  On 32-bit systems, an
 * unsigned long will be 32 bits; on a 64-bit system an unsigned long will be
 * 64 bits.  Since we only request 32-bit DMA addresses, we can use that as the
 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
 * extended systems (where the DMA address can be bigger than 32 bits), if we
 * allow the PCI DMA mask to be bigger than 32 bits.  So don't do that.
 */
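
/*
 * Minimal sketch of the mapping (illustrative; it mirrors the code in
 * xhci_alloc_stream_info() below and xhci_dma_to_transfer_ring() above):
 *
 *	unsigned long key = (unsigned long)
 *			(ring->first_seg->dma >> TRB_SEGMENT_SHIFT);
 *	radix_tree_insert(&stream_info->trb_address_map, key, ring);
 *	...
 *	ring = radix_tree_lookup(&stream_info->trb_address_map,
 *			trb_dma >> TRB_SEGMENT_SHIFT);
 *
 * Every TRB DMA address inside a 1KB segment shifts down to the same key,
 * so one insert per segment resolves any TRB in that segment.
 */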
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
                unsigned int num_stream_ctxs,
                unsigned int num_streams, gfp_t mem_flags)
{
        struct xhci_stream_info *stream_info;
        u32 cur_stream;
        struct xhci_ring *cur_ring;
        unsigned long key;
        u64 addr;
        int ret;

        xhci_dbg(xhci, "Allocating %u streams and %u "
                        "stream context array entries.\n",
                        num_streams, num_stream_ctxs);
        if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
                xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
                return NULL;
        }
        xhci->cmd_ring_reserved_trbs++;

        stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
        if (!stream_info)
                goto cleanup_trbs;

        stream_info->num_streams = num_streams;
        stream_info->num_stream_ctxs = num_stream_ctxs;

        /* Initialize the array of virtual pointers to stream rings. */
        stream_info->stream_rings = kzalloc(
                        sizeof(struct xhci_ring *)*num_streams,
                        mem_flags);
        if (!stream_info->stream_rings)
                goto cleanup_info;

        /* Initialize the array of DMA addresses for stream rings for the HW. */
        stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
                        num_stream_ctxs, &stream_info->ctx_array_dma,
                        mem_flags);
        if (!stream_info->stream_ctx_array)
                goto cleanup_ctx;
        memset(stream_info->stream_ctx_array, 0,
                        sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

        /* Allocate everything needed to free the stream rings later */
        stream_info->free_streams_command =
                xhci_alloc_command(xhci, true, true, mem_flags);
        if (!stream_info->free_streams_command)
                goto cleanup_ctx;

        INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

        /* Allocate rings for all the streams that the driver will use,
         * and add their segment DMA addresses to the radix tree.
         * Stream 0 is reserved.
         */
        for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
                stream_info->stream_rings[cur_stream] =
                        xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
                cur_ring = stream_info->stream_rings[cur_stream];
                if (!cur_ring)
                        goto cleanup_rings;
                cur_ring->stream_id = cur_stream;
                /* Set deq ptr, cycle bit, and stream context type */
                addr = cur_ring->first_seg->dma |
                        SCT_FOR_CTX(SCT_PRI_TR) |
                        cur_ring->cycle_state;
                stream_info->stream_ctx_array[cur_stream].stream_ring =
                        cpu_to_le64(addr);
                xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
                                cur_stream, (unsigned long long) addr);

                key = (unsigned long)
                        (cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT);
                ret = radix_tree_insert(&stream_info->trb_address_map,
                                key, cur_ring);
                if (ret) {
                        xhci_ring_free(xhci, cur_ring);
                        stream_info->stream_rings[cur_stream] = NULL;
                        goto cleanup_rings;
                }
        }
        /* Leave the other unused stream ring pointers in the stream context
         * array initialized to zero.  This will cause the xHC to give us an
         * error if the device asks for a stream ID we don't have setup (if it
         * was any other way, the host controller would assume the ring is
         * "empty" and wait forever for data to be queued to that stream ID).
         */

        return stream_info;

cleanup_rings:
        for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
                cur_ring = stream_info->stream_rings[cur_stream];
                if (cur_ring) {
                        addr = cur_ring->first_seg->dma;
                        radix_tree_delete(&stream_info->trb_address_map,
                                        addr >> TRB_SEGMENT_SHIFT);
                        xhci_ring_free(xhci, cur_ring);
                        stream_info->stream_rings[cur_stream] = NULL;
                }
        }
        xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
        kfree(stream_info->stream_rings);
cleanup_info:
        kfree(stream_info);
cleanup_trbs:
        xhci->cmd_ring_reserved_trbs--;
        return NULL;
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
                struct xhci_ep_ctx *ep_ctx,
                struct xhci_stream_info *stream_info)
{
        u32 max_primary_streams;
        /* MaxPStreams is the number of stream context array entries, not the
         * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
         * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9.
         */
        max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
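
        /*
         * Worked example (illustrative only): for num_stream_ctxs == 256,
         * fls(256) == 9, so max_primary_streams == 7 and the xHC decodes
         * the array size as 2^(7 + 1) == 256 entries.
         */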

        xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
                        "Setting number of stream ctx array entries to %u",
                        1 << (max_primary_streams + 1));
        ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
        ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
                                       | EP_HAS_LSA);
        ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
                struct xhci_ep_ctx *ep_ctx,
                struct xhci_virt_ep *ep)
{
        dma_addr_t addr;

        ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
        addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
        ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}

/* Frees all stream contexts associated with the endpoint.
 * The caller should fix up the endpoint context streams fields afterwards.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
                struct xhci_stream_info *stream_info)
{
        int cur_stream;
        struct xhci_ring *cur_ring;
        dma_addr_t addr;

        if (!stream_info)
                return;

        for (cur_stream = 1; cur_stream < stream_info->num_streams;
                        cur_stream++) {
                cur_ring = stream_info->stream_rings[cur_stream];
                if (cur_ring) {
                        addr = cur_ring->first_seg->dma;
                        radix_tree_delete(&stream_info->trb_address_map,
                                        addr >> TRB_SEGMENT_SHIFT);
                        xhci_ring_free(xhci, cur_ring);
                        stream_info->stream_rings[cur_stream] = NULL;
                }
        }
        xhci_free_command(xhci, stream_info->free_streams_command);
        xhci->cmd_ring_reserved_trbs--;
        if (stream_info->stream_ctx_array)
                xhci_free_stream_ctx(xhci,
                                stream_info->num_stream_ctxs,
                                stream_info->stream_ctx_array,
                                stream_info->ctx_array_dma);

        kfree(stream_info->stream_rings);
        kfree(stream_info);
}

/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
                struct xhci_virt_ep *ep)
{
        init_timer(&ep->stop_cmd_timer);
        ep->stop_cmd_timer.data = (unsigned long) ep;
        ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
        ep->xhci = xhci;
}

static void xhci_free_tt_info(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                int slot_id)
{
        struct list_head *tt_list_head;
        struct xhci_tt_bw_info *tt_info, *next;
        bool slot_found = false;

        /* If the device never made it past the Set Address stage,
         * it may not have the real_port set correctly.
         */
        if (virt_dev->real_port == 0 ||
                        virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
                xhci_dbg(xhci, "Bad real port.\n");
                return;
        }

        tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
        list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
                /* Multi-TT hubs will have more than one entry */
                if (tt_info->slot_id == slot_id) {
                        slot_found = true;
                        list_del(&tt_info->tt_list);
                        kfree(tt_info);
                } else if (slot_found) {
                        break;
                }
        }
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct usb_device *hdev,
                struct usb_tt *tt, gfp_t mem_flags)
{
        struct xhci_tt_bw_info *tt_info;
        unsigned int num_ports;
        int i, j;

        if (!tt->multi)
                num_ports = 1;
        else
                num_ports = hdev->maxchild;

        for (i = 0; i < num_ports; i++, tt_info++) {
                struct xhci_interval_bw_table *bw_table;

                tt_info = kzalloc(sizeof(*tt_info), mem_flags);
                if (!tt_info)
                        goto free_tts;
                INIT_LIST_HEAD(&tt_info->tt_list);
                list_add(&tt_info->tt_list,
                                &xhci->rh_bw[virt_dev->real_port - 1].tts);
                tt_info->slot_id = virt_dev->udev->slot_id;
                if (tt->multi)
                        tt_info->ttport = i+1;
                bw_table = &tt_info->bw_table;
                for (j = 0; j < XHCI_MAX_INTERVAL; j++)
                        INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
        }
        return 0;

free_tts:
        xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
        return -ENOMEM;
}

/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the configure endpoint, allocate device, or update
 * hub functions while this function is removing the TT entries from the list.
 */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
        struct xhci_virt_device *dev;
        int i;
        int old_active_eps = 0;

        /* Slot ID 0 is reserved */
        if (slot_id == 0 || !xhci->devs[slot_id])
                return;

        dev = xhci->devs[slot_id];
        xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
        if (!dev)
                return;

        if (dev->tt_info)
                old_active_eps = dev->tt_info->active_eps;

        for (i = 0; i < 31; ++i) {
                if (dev->eps[i].ring)
                        xhci_ring_free(xhci, dev->eps[i].ring);
                if (dev->eps[i].stream_info)
                        xhci_free_stream_info(xhci,
                                        dev->eps[i].stream_info);
                /* Endpoints on the TT/root port lists should have been removed
                 * when usb_disable_device() was called for the device.
                 * We can't drop them anyway, because the udev might have gone
                 * away by this point, and we can't tell what speed it was.
                 */
                if (!list_empty(&dev->eps[i].bw_endpoint_list))
                        xhci_warn(xhci, "Slot %u endpoint %u "
                                        "not removed from BW list!\n",
                                        slot_id, i);
        }
        /* If this is a hub, free the TT(s) from the TT list */
        xhci_free_tt_info(xhci, dev, slot_id);
        /* If necessary, update the number of active TTs on this root port */
        xhci_update_tt_active_eps(xhci, dev, old_active_eps);

        if (dev->ring_cache) {
                for (i = 0; i < dev->num_rings_cached; i++)
                        xhci_ring_free(xhci, dev->ring_cache[i]);
                kfree(dev->ring_cache);
        }

        if (dev->in_ctx)
                xhci_free_container_ctx(xhci, dev->in_ctx);
        if (dev->out_ctx)
                xhci_free_container_ctx(xhci, dev->out_ctx);

        kfree(xhci->devs[slot_id]);
        xhci->devs[slot_id] = NULL;
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
                struct usb_device *udev, gfp_t flags)
{
        struct xhci_virt_device *dev;
        int i;

        /* Slot ID 0 is reserved */
        if (slot_id == 0 || xhci->devs[slot_id]) {
                xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
                return 0;
        }

        xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
        if (!xhci->devs[slot_id])
                return 0;
        dev = xhci->devs[slot_id];

        /* Allocate the (output) device context that will be used in the HC. */
        dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
        if (!dev->out_ctx)
                goto fail;

        xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
                        (unsigned long long)dev->out_ctx->dma);

        /* Allocate the (input) device context for the address device command */
        dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
        if (!dev->in_ctx)
                goto fail;

        xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
                        (unsigned long long)dev->in_ctx->dma);

        /* Initialize the cancellation list and watchdog timers for each ep */
        for (i = 0; i < 31; i++) {
                xhci_init_endpoint_timer(xhci, &dev->eps[i]);
                INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
                INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
        }

        /* Allocate endpoint 0 ring */
        dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
        if (!dev->eps[0].ring)
                goto fail;

        /* Allocate pointers to the ring cache */
        dev->ring_cache = kzalloc(
                        sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
                        flags);
        if (!dev->ring_cache)
                goto fail;
        dev->num_rings_cached = 0;

        init_completion(&dev->cmd_completion);
        INIT_LIST_HEAD(&dev->cmd_list);
        dev->udev = udev;

        /* Point to output device context in dcbaa. */
        xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
        xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
                        slot_id,
                        &xhci->dcbaa->dev_context_ptrs[slot_id],
                        le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

        return 1;
fail:
        xhci_free_virt_device(xhci, slot_id);
        return 0;
}

void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
                struct usb_device *udev)
{
        struct xhci_virt_device *virt_dev;
        struct xhci_ep_ctx *ep0_ctx;
        struct xhci_ring *ep_ring;

        virt_dev = xhci->devs[udev->slot_id];
        ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
        ep_ring = virt_dev->eps[0].ring;
        /*
         * FIXME we don't keep track of the dequeue pointer very well after a
         * Set TR dequeue pointer, so we're setting the dequeue pointer of the
         * host to our enqueue pointer.  This should only be called after a
         * configured device has reset, so all control transfers should have
         * been completed or cancelled before the reset.
         */
        ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
                                                        ep_ring->enqueue)
                                   | ep_ring->cycle_state);
}

/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.  xhci->port_array provides an array of the port speed for
 * each offset into the port status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All we
 * know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Call xhci_find_raw_port_number() to get the real index.
 */
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
                struct usb_device *udev)
{
        struct usb_device *top_dev;
        struct usb_hcd *hcd;

        if (udev->speed == USB_SPEED_SUPER)
                hcd = xhci->shared_hcd;
        else
                hcd = xhci->main_hcd;

        for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
                        top_dev = top_dev->parent)
                /* Found device below root hub */;

        return xhci_find_raw_port_number(hcd, top_dev->portnum);
}

/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
        struct xhci_virt_device *dev;
        struct xhci_ep_ctx *ep0_ctx;
        struct xhci_slot_ctx *slot_ctx;
        u32 port_num;
        u32 max_packets;
        struct usb_device *top_dev;

        dev = xhci->devs[udev->slot_id];
        /* Slot ID 0 is reserved */
        if (udev->slot_id == 0 || !dev) {
                xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
                                udev->slot_id);
                return -EINVAL;
        }
        ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
        slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

        /* 3) Only the control endpoint is valid - one endpoint context */
        slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
        switch (udev->speed) {
        case USB_SPEED_SUPER:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
                max_packets = MAX_PACKET(512);
                break;
        case USB_SPEED_HIGH:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
                max_packets = MAX_PACKET(64);
                break;
        /* USB core guesses at a 64-byte max packet first for FS devices */
        case USB_SPEED_FULL:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
                max_packets = MAX_PACKET(64);
                break;
        case USB_SPEED_LOW:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
                max_packets = MAX_PACKET(8);
                break;
        case USB_SPEED_WIRELESS:
                xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
                return -EINVAL;
        default:
                /* Speed was set earlier, this shouldn't happen. */
                return -EINVAL;
        }
        /* Find the root hub port this device is under */
        port_num = xhci_find_real_port_number(xhci, udev);
        if (!port_num)
                return -EINVAL;
        slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
        /* Set the port number in the virtual_device to the faked port number */
        for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
                        top_dev = top_dev->parent)
                /* Found device below root hub */;
        dev->fake_port = top_dev->portnum;
        dev->real_port = port_num;
        xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
        xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

        /* Find the right bandwidth table that this device will be a part of.
         * If this is a full speed device attached directly to a root port (or a
         * descendant of one), it counts as a primary bandwidth domain, not a
         * secondary bandwidth domain under a TT.  An xhci_tt_info structure
         * will never be created for the HS root hub.
         */
        if (!udev->tt || !udev->tt->hub->parent) {
                dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
        } else {
                struct xhci_root_port_bw_info *rh_bw;
                struct xhci_tt_bw_info *tt_bw;

                rh_bw = &xhci->rh_bw[port_num - 1];
                /* Find the right TT. */
                list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
                        if (tt_bw->slot_id != udev->tt->hub->slot_id)
                                continue;

                        if (!dev->udev->tt->multi ||
                                        (udev->tt->multi &&
                                         tt_bw->ttport == dev->udev->ttport)) {
                                dev->bw_table = &tt_bw->bw_table;
                                dev->tt_info = tt_bw;
                                break;
                        }
                }
                if (!dev->tt_info)
                        xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
        }

        /* Is this a LS/FS device under an external HS hub? */
        if (udev->tt && udev->tt->hub->parent) {
                slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
                                                (udev->ttport << 8));
                if (udev->tt->multi)
                        slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
        }
        xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
        xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

        /* Step 4 - ring already allocated */
        /* Step 5 */
        ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));

        /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
        ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
                                         max_packets);

        ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
                                   dev->eps[0].ring->cycle_state);

        /* Steps 7 and 8 were done in xhci_alloc_virt_device() */

        return 0;
}

/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        unsigned int interval;

        interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
        if (interval != ep->desc.bInterval - 1)
                dev_warn(&udev->dev,
                         "ep %#x - rounding interval to %d %sframes\n",
                         ep->desc.bEndpointAddress,
                         1 << interval,
                         udev->speed == USB_SPEED_FULL ? "" : "micro");

        if (udev->speed == USB_SPEED_FULL) {
                /*
                 * Full speed isoc endpoints specify interval in frames,
                 * not microframes.  We are using microframes everywhere,
                 * so adjust accordingly.
                 */
                interval += 3;  /* 1 frame = 2^3 uframes */
        }

        return interval;
}
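
/*
 * Example (illustrative, not from the original source): a full-speed isoc
 * endpoint with bInterval 4 yields exponent 4 - 1 = 3, i.e. 2^3 = 8 frames;
 * adding 3 converts to microframes, giving 2^6 * 125us = 8ms, the same period.
 */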

/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
                struct usb_host_endpoint *ep, unsigned int desc_interval,
                unsigned int min_exponent, unsigned int max_exponent)
{
        unsigned int interval;

        interval = fls(desc_interval) - 1;
        interval = clamp_val(interval, min_exponent, max_exponent);
        if ((1 << interval) != desc_interval)
                dev_warn(&udev->dev,
                         "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
                         ep->desc.bEndpointAddress,
                         1 << interval,
                         desc_interval);

        return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        if (ep->desc.bInterval == 0)
                return 0;
        return xhci_microframes_to_exponent(udev, ep,
                        ep->desc.bInterval, 0, 15);
}

static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        return xhci_microframes_to_exponent(udev, ep,
                        ep->desc.bInterval * 8, 3, 10);
}
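
/*
 * Example (illustrative only): a full- or low-speed interrupt endpoint with
 * bInterval 10 (frames) becomes 80 microframes; fls(80) - 1 == 6, so the
 * xHC services it every 2^6 * 125us = 8ms, rounded down from 10ms.
 */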

/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        unsigned int interval = 0;

        switch (udev->speed) {
        case USB_SPEED_HIGH:
                /* Max NAK rate */
                if (usb_endpoint_xfer_control(&ep->desc) ||
                    usb_endpoint_xfer_bulk(&ep->desc)) {
                        interval = xhci_parse_microframe_interval(udev, ep);
                        break;
                }
                /* Fall through - SS and HS isoc/int have same decoding */

        case USB_SPEED_SUPER:
                if (usb_endpoint_xfer_int(&ep->desc) ||
                    usb_endpoint_xfer_isoc(&ep->desc)) {
                        interval = xhci_parse_exponent_interval(udev, ep);
                }
                break;

        case USB_SPEED_FULL:
                if (usb_endpoint_xfer_isoc(&ep->desc)) {
                        interval = xhci_parse_exponent_interval(udev, ep);
                        break;
                }
                /*
                 * Fall through for interrupt endpoint interval decoding
                 * since it uses the same rules as low speed interrupt
                 * endpoints.
                 */

        case USB_SPEED_LOW:
                if (usb_endpoint_xfer_int(&ep->desc) ||
                    usb_endpoint_xfer_isoc(&ep->desc)) {
                        interval = xhci_parse_frame_interval(udev, ep);
                }
                break;

        default:
                BUG();
        }
        return EP_INTERVAL(interval);
}

/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        if (udev->speed != USB_SPEED_SUPER ||
                        !usb_endpoint_xfer_isoc(&ep->desc))
                return 0;
        return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_type(struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        int in;
        u32 type;

        in = usb_endpoint_dir_in(&ep->desc);
        if (usb_endpoint_xfer_control(&ep->desc)) {
                type = EP_TYPE(CTRL_EP);
        } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
                if (in)
                        type = EP_TYPE(BULK_IN_EP);
                else
                        type = EP_TYPE(BULK_OUT_EP);
        } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
                if (in)
                        type = EP_TYPE(ISOC_IN_EP);
                else
                        type = EP_TYPE(ISOC_OUT_EP);
        } else if (usb_endpoint_xfer_int(&ep->desc)) {
                if (in)
                        type = EP_TYPE(INT_IN_EP);
                else
                        type = EP_TYPE(INT_OUT_EP);
        } else {
                type = 0;
        }
        return type;
}

/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and the mult size.
 */
static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
                struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        int max_burst;
        int max_packet;

        /* Only applies for interrupt or isochronous endpoints */
        if (usb_endpoint_xfer_control(&ep->desc) ||
                        usb_endpoint_xfer_bulk(&ep->desc))
                return 0;

        if (udev->speed == USB_SPEED_SUPER)
                return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

        max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
        max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
        /* A 0 in max burst means 1 transfer per ESIT */
        return max_packet * (max_burst + 1);
}
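
/*
 * Worked example (illustrative only): a high-speed isoc endpoint with
 * wMaxPacketSize 0x1400 encodes a max packet of 1024 (bits 10:0) and a max
 * burst of 2 (bits 12:11), so the max ESIT payload is 1024 * (2 + 1) = 3072
 * bytes per service interval.
 */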

/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */
int xhci_endpoint_init(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct usb_device *udev,
                struct usb_host_endpoint *ep,
                gfp_t mem_flags)
{
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_ring *ep_ring;
        unsigned int max_packet;
        unsigned int max_burst;
        enum xhci_ring_type type;
        u32 max_esit_payload;
        u32 endpoint_type;

        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

        endpoint_type = xhci_get_endpoint_type(udev, ep);
        if (!endpoint_type)
                return -EINVAL;
        ep_ctx->ep_info2 = cpu_to_le32(endpoint_type);

        type = usb_endpoint_type(&ep->desc);
        /* Set up the endpoint ring */
        virt_dev->eps[ep_index].new_ring =
                xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
        if (!virt_dev->eps[ep_index].new_ring) {
                /* Attempt to use the ring cache */
                if (virt_dev->num_rings_cached == 0)
                        return -ENOMEM;
                virt_dev->eps[ep_index].new_ring =
                        virt_dev->ring_cache[virt_dev->num_rings_cached];
                virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
                virt_dev->num_rings_cached--;
                xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
                                        1, type);
        }
        virt_dev->eps[ep_index].skip = false;
        ep_ring = virt_dev->eps[ep_index].new_ring;
        ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);

        ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
                                      | EP_MULT(xhci_get_endpoint_mult(udev, ep)));

        /* FIXME dig Mult and streams info out of ep companion desc */

        /* Allow 3 retries for everything but isoc;
         * CErr shall be set to 0 for Isoch endpoints.
         */
        if (!usb_endpoint_xfer_isoc(&ep->desc))
                ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(3));
        else
                ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(0));

        /* Set the max packet size and max burst */
        max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
        max_burst = 0;
        switch (udev->speed) {
        case USB_SPEED_SUPER:
                /* dig out max burst from ep companion desc */
                max_burst = ep->ss_ep_comp.bMaxBurst;
                break;
        case USB_SPEED_HIGH:
                /* Some devices get this wrong */
                if (usb_endpoint_xfer_bulk(&ep->desc))
                        max_packet = 512;
                /* bits 11:12 specify the number of additional transaction
                 * opportunities per microframe (USB 2.0, section 9.6.6)
                 */
                if (usb_endpoint_xfer_isoc(&ep->desc) ||
                                usb_endpoint_xfer_int(&ep->desc)) {
                        max_burst = (usb_endpoint_maxp(&ep->desc)
                                     & 0x1800) >> 11;
                }
                break;
        case USB_SPEED_FULL:
        case USB_SPEED_LOW:
                break;
        default:
                BUG();
        }
        ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
                        MAX_BURST(max_burst));
        max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
        ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));

        /*
         * XXX no idea how to calculate the average TRB buffer length for bulk
         * endpoints, as the driver gives us no clue how big each scatter gather
         * list entry (or buffer) is going to be.
         *
         * For isochronous and interrupt endpoints, we set it to the max
         * available, until we have new API in the USB core to allow drivers to
         * declare how much bandwidth they actually need.
         *
         * Normally, it would be calculated by taking the total of the buffer
         * lengths in the TD and then dividing by the number of TRBs in a TD,
         * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
         * use Event Data TRBs, and we don't chain in a link TRB on short
         * transfers, we're basically dividing by 1.
         *
         * xHCI 1.0 specification indicates that the Average TRB Length should
         * be set to 8 for control endpoints.
         */
        if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
                ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
        else
                ep_ctx->tx_info |=
                        cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));

        /* FIXME Debug endpoint context */
        return 0;
}

void xhci_endpoint_zero(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct usb_host_endpoint *ep)
{
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;

        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

        ep_ctx->ep_info = 0;
        ep_ctx->ep_info2 = 0;
        ep_ctx->deq = 0;
        ep_ctx->tx_info = 0;
        /* Don't free the endpoint ring until the set interface or configuration
         * request succeeds.
         */
}

void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
        bw_info->ep_interval = 0;
        bw_info->mult = 0;
        bw_info->num_packets = 0;
        bw_info->max_packet_size = 0;
        bw_info->type = 0;
        bw_info->max_esit_payload = 0;
}

void xhci_update_bw_info(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx,
                struct xhci_input_control_ctx *ctrl_ctx,
                struct xhci_virt_device *virt_dev)
{
        struct xhci_bw_info *bw_info;
        struct xhci_ep_ctx *ep_ctx;
        unsigned int ep_type;
        int i;

        for (i = 1; i < 31; ++i) {
                bw_info = &virt_dev->eps[i].bw_info;

                /* We can't tell what endpoint type is being dropped, but
                 * unconditionally clearing the bandwidth info for non-periodic
                 * endpoints should be harmless because the info will never be
                 * set in the first place.
                 */
                if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
                        /* Dropped endpoint */
                        xhci_clear_endpoint_bw_info(bw_info);
                        continue;
                }

                if (EP_IS_ADDED(ctrl_ctx, i)) {
                        ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
                        ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

                        /* Ignore non-periodic endpoints */
                        if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
                                        ep_type != ISOC_IN_EP &&
                                        ep_type != INT_IN_EP)
                                continue;

                        /* Added or changed endpoint */
                        bw_info->ep_interval = CTX_TO_EP_INTERVAL(
                                        le32_to_cpu(ep_ctx->ep_info));
                        /* Number of packets and mult are zero-based in the
                         * input context, but we want one-based for the
                         * interval table.
                         */
                        bw_info->mult = CTX_TO_EP_MULT(
                                        le32_to_cpu(ep_ctx->ep_info)) + 1;
                        bw_info->num_packets = CTX_TO_MAX_BURST(
                                        le32_to_cpu(ep_ctx->ep_info2)) + 1;
                        bw_info->max_packet_size = MAX_PACKET_DECODED(
                                        le32_to_cpu(ep_ctx->ep_info2));
                        bw_info->type = ep_type;
                        bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
                                        le32_to_cpu(ep_ctx->tx_info));
                }
        }
}

/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx,
                struct xhci_container_ctx *out_ctx,
                unsigned int ep_index)
{
        struct xhci_ep_ctx *out_ep_ctx;
        struct xhci_ep_ctx *in_ep_ctx;

        out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

        in_ep_ctx->ep_info = out_ep_ctx->ep_info;
        in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
        in_ep_ctx->deq = out_ep_ctx->deq;
        in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx,
                struct xhci_container_ctx *out_ctx)
{
        struct xhci_slot_ctx *in_slot_ctx;
        struct xhci_slot_ctx *out_slot_ctx;

        in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
        out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

        in_slot_ctx->dev_info = out_slot_ctx->dev_info;
        in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
        in_slot_ctx->tt_info = out_slot_ctx->tt_info;
        in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
        int i;
        struct device *dev = xhci_to_hcd(xhci)->self.controller;
        int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Allocating %d scratchpad buffers", num_sp);

        if (!num_sp)
                return 0;

        xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
        if (!xhci->scratchpad)
                goto fail_sp;

        xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
                                     num_sp * sizeof(u64),
                                     &xhci->scratchpad->sp_dma, flags);
        if (!xhci->scratchpad->sp_array)
                goto fail_sp2;

        xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
        if (!xhci->scratchpad->sp_buffers)
                goto fail_sp3;

        xhci->scratchpad->sp_dma_buffers =
                kzalloc(sizeof(dma_addr_t) * num_sp, flags);

        if (!xhci->scratchpad->sp_dma_buffers)
                goto fail_sp4;

        xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
        for (i = 0; i < num_sp; i++) {
                dma_addr_t dma;
                void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
                                flags);
                if (!buf)
                        goto fail_sp5;

                xhci->scratchpad->sp_array[i] = dma;
                xhci->scratchpad->sp_buffers[i] = buf;
                xhci->scratchpad->sp_dma_buffers[i] = dma;
        }

        return 0;

 fail_sp5:
        for (i = i - 1; i >= 0; i--) {
                dma_free_coherent(dev, xhci->page_size,
                                    xhci->scratchpad->sp_buffers[i],
                                    xhci->scratchpad->sp_dma_buffers[i]);
        }
        kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
        kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
        dma_free_coherent(dev, num_sp * sizeof(u64),
                            xhci->scratchpad->sp_array,
                            xhci->scratchpad->sp_dma);

 fail_sp2:
        kfree(xhci->scratchpad);
        xhci->scratchpad = NULL;

 fail_sp:
        return -ENOMEM;
}

static void scratchpad_free(struct xhci_hcd *xhci)
{
        int num_sp;
        int i;
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        if (!xhci->scratchpad)
                return;

        num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

        for (i = 0; i < num_sp; i++) {
                dma_free_coherent(&pdev->dev, xhci->page_size,
                                    xhci->scratchpad->sp_buffers[i],
                                    xhci->scratchpad->sp_dma_buffers[i]);
        }
        kfree(xhci->scratchpad->sp_dma_buffers);
        kfree(xhci->scratchpad->sp_buffers);
        dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
                            xhci->scratchpad->sp_array,
                            xhci->scratchpad->sp_dma);
        kfree(xhci->scratchpad);
        xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
                bool allocate_in_ctx, bool allocate_completion,
                gfp_t mem_flags)
{
        struct xhci_command *command;

        command = kzalloc(sizeof(*command), mem_flags);
        if (!command)
                return NULL;

        if (allocate_in_ctx) {
                command->in_ctx =
                        xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
                                                 mem_flags);
                if (!command->in_ctx) {
                        kfree(command);
                        return NULL;
                }
        }

        if (allocate_completion) {
                command->completion =
                        kzalloc(sizeof(struct completion), mem_flags);
                if (!command->completion) {
                        xhci_free_container_ctx(xhci, command->in_ctx);
                        kfree(command);
                        return NULL;
                }
                init_completion(command->completion);
        }

        command->status = 0;
        INIT_LIST_HEAD(&command->cmd_list);
        return command;
}

void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
{
        if (urb_priv) {
                kfree(urb_priv->td[0]);
                kfree(urb_priv);
        }
}

void xhci_free_command(struct xhci_hcd *xhci,
                struct xhci_command *command)
{
        xhci_free_container_ctx(xhci,
                        command->in_ctx);
        kfree(command->completion);
        kfree(command);
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        struct dev_info *dev_info, *next;
        struct xhci_cd *cur_cd, *next_cd;
        unsigned long flags;
        int size;
        int i, j, num_ports;

        /* Free the Event Ring Segment Table and the actual Event Ring */
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
        if (xhci->erst.entries)
                dma_free_coherent(&pdev->dev, size,
                                xhci->erst.entries, xhci->erst.erst_dma_addr);
        xhci->erst.entries = NULL;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
        if (xhci->event_ring)
                xhci_ring_free(xhci, xhci->event_ring);
        xhci->event_ring = NULL;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");

        if (xhci->lpm_command)
                xhci_free_command(xhci, xhci->lpm_command);
        xhci->cmd_ring_reserved_trbs = 0;
        if (xhci->cmd_ring)
                xhci_ring_free(xhci, xhci->cmd_ring);
        xhci->cmd_ring = NULL;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
        list_for_each_entry_safe(cur_cd, next_cd,
                        &xhci->cancel_cmd_list, cancel_cmd_list) {
                list_del(&cur_cd->cancel_cmd_list);
                kfree(cur_cd);
        }

        for (i = 1; i < MAX_HC_SLOTS; ++i)
                xhci_free_virt_device(xhci, i);

        if (xhci->segment_pool)
                dma_pool_destroy(xhci->segment_pool);
        xhci->segment_pool = NULL;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");

        if (xhci->device_pool)
                dma_pool_destroy(xhci->device_pool);
        xhci->device_pool = NULL;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");

        if (xhci->small_streams_pool)
                dma_pool_destroy(xhci->small_streams_pool);
        xhci->small_streams_pool = NULL;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Freed small stream array pool");

        if (xhci->medium_streams_pool)
                dma_pool_destroy(xhci->medium_streams_pool);
        xhci->medium_streams_pool = NULL;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Freed medium stream array pool");

        if (xhci->dcbaa)
                dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
                                xhci->dcbaa, xhci->dcbaa->dma);
        xhci->dcbaa = NULL;

        scratchpad_free(xhci);

        spin_lock_irqsave(&xhci->lock, flags);
        list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
                list_del(&dev_info->list);
                kfree(dev_info);
        }
        spin_unlock_irqrestore(&xhci->lock, flags);

        if (!xhci->rh_bw)
                goto no_bw;

        num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
        for (i = 0; i < num_ports; i++) {
                struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
                for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
                        struct list_head *ep = &bwt->interval_bw[j].endpoints;
                        while (!list_empty(ep))
                                list_del_init(ep->next);
                }
        }

        for (i = 0; i < num_ports; i++) {
                struct xhci_tt_bw_info *tt, *n;
                list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
                        list_del(&tt->tt_list);
                        kfree(tt);
                }
        }

no_bw:
        xhci->num_usb2_ports = 0;
        xhci->num_usb3_ports = 0;
        xhci->num_active_eps = 0;
        kfree(xhci->usb2_ports);
        kfree(xhci->usb3_ports);
        kfree(xhci->port_array);
        kfree(xhci->rh_bw);
        kfree(xhci->ext_caps);

        xhci->page_size = 0;
        xhci->page_shift = 0;
        xhci->bus_state[0].bus_suspended = 0;
        xhci->bus_state[1].bus_suspended = 0;
}

static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
                struct xhci_segment *input_seg,
                union xhci_trb *start_trb,
                union xhci_trb *end_trb,
                dma_addr_t input_dma,
                struct xhci_segment *result_seg,
                char *test_name, int test_number)
{
        unsigned long long start_dma;
        unsigned long long end_dma;
        struct xhci_segment *seg;

        start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
        end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

        seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
        if (seg != result_seg) {
                xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
                                test_name, test_number);
                xhci_warn(xhci, "Tested TRB math w/ seg %p and "
                                "input DMA 0x%llx\n",
                                input_seg,
                                (unsigned long long) input_dma);
                xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
                                "ending TRB %p (0x%llx DMA)\n",
                                start_trb, start_dma,
                                end_trb, end_dma);
                xhci_warn(xhci, "Expected seg %p, got seg %p\n",
                                result_seg, seg);
                return -1;
        }
        return 0;
}

/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
        struct {
                dma_addr_t input_dma;
                struct xhci_segment *result_seg;
        } simple_test_vector [] = {
                /* A zeroed DMA field should fail */
                { 0, NULL },
                /* One TRB before the ring start should fail */
                { xhci->event_ring->first_seg->dma - 16, NULL },
                /* One byte before the ring start should fail */
                { xhci->event_ring->first_seg->dma - 1, NULL },
                /* Starting TRB should succeed */
                { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
                /* Ending TRB should succeed */
                { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
                        xhci->event_ring->first_seg },
                /* One byte after the ring end should fail */
                { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
                /* One TRB after the ring end should fail */
                { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
                /* An address of all ones should fail */
                { (dma_addr_t) (~0), NULL },
        };
        struct {
                struct xhci_segment *input_seg;
                union xhci_trb *start_trb;
                union xhci_trb *end_trb;
                dma_addr_t input_dma;
                struct xhci_segment *result_seg;
        } complex_test_vector [] = {
                /* Test feeding a valid DMA address from a different ring */
                {       .input_seg = xhci->event_ring->first_seg,
                        .start_trb = xhci->event_ring->first_seg->trbs,
                        .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
                        .input_dma = xhci->cmd_ring->first_seg->dma,
                        .result_seg = NULL,
                },
                /* Test feeding a valid end TRB from a different ring */
                {       .input_seg = xhci->event_ring->first_seg,
                        .start_trb = xhci->event_ring->first_seg->trbs,
                        .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
                        .input_dma = xhci->cmd_ring->first_seg->dma,
                        .result_seg = NULL,
                },
                /* Test feeding a valid start and end TRB from a different ring */
                {       .input_seg = xhci->event_ring->first_seg,
                        .start_trb = xhci->cmd_ring->first_seg->trbs,
                        .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
                        .input_dma = xhci->cmd_ring->first_seg->dma,
                        .result_seg = NULL,
                },
                /* TRB in this ring, but after this TD */
                {       .input_seg = xhci->event_ring->first_seg,
                        .start_trb = &xhci->event_ring->first_seg->trbs[0],
                        .end_trb = &xhci->event_ring->first_seg->trbs[3],
                        .input_dma = xhci->event_ring->first_seg->dma + 4*16,
                        .result_seg = NULL,
                },
                /* TRB in this ring, but before this TD */
                {       .input_seg = xhci->event_ring->first_seg,
                        .start_trb = &xhci->event_ring->first_seg->trbs[3],
                        .end_trb = &xhci->event_ring->first_seg->trbs[6],
                        .input_dma = xhci->event_ring->first_seg->dma + 2*16,
                        .result_seg = NULL,
                },
                /* TRB in this ring, but after this wrapped TD */
                {       .input_seg = xhci->event_ring->first_seg,
                        .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
                        .end_trb = &xhci->event_ring->first_seg->trbs[1],
                        .input_dma = xhci->event_ring->first_seg->dma + 2*16,
                        .result_seg = NULL,
                },
                /* TRB in this ring, but before this wrapped TD */
                {       .input_seg = xhci->event_ring->first_seg,
                        .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
                        .end_trb = &xhci->event_ring->first_seg->trbs[1],
                        .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
                        .result_seg = NULL,
                },
                /* TRB not in this ring, and we have a wrapped TD */
                {       .input_seg = xhci->event_ring->first_seg,
                        .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
                        .end_trb = &xhci->event_ring->first_seg->trbs[1],
                        .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
                        .result_seg = NULL,
                },
        };

        unsigned int num_tests;
        int i, ret;

        num_tests = ARRAY_SIZE(simple_test_vector);
        for (i = 0; i < num_tests; i++) {
                ret = xhci_test_trb_in_td(xhci,
                                xhci->event_ring->first_seg,
                                xhci->event_ring->first_seg->trbs,
                                &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
                                simple_test_vector[i].input_dma,
                                simple_test_vector[i].result_seg,
                                "Simple", i);
                if (ret < 0)
                        return ret;
        }

        num_tests = ARRAY_SIZE(complex_test_vector);
        for (i = 0; i < num_tests; i++) {
                ret = xhci_test_trb_in_td(xhci,
                                complex_test_vector[i].input_seg,
                                complex_test_vector[i].start_trb,
                                complex_test_vector[i].end_trb,
                                complex_test_vector[i].input_dma,
                                complex_test_vector[i].result_seg,
                                "Complex", i);
                if (ret < 0)
                        return ret;
        }
        xhci_dbg(xhci, "TRB math tests passed.\n");
        return 0;
}

static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
        u64 temp;
        dma_addr_t deq;

        deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
                        xhci->event_ring->dequeue);
        if (deq == 0 && !in_interrupt())
                xhci_warn(xhci, "WARN something wrong with SW event ring "
                                "dequeue ptr.\n");
        /* Update HC event ring dequeue pointer */
        temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp &= ERST_PTR_MASK;
        /* Don't clear the EHB bit (which is RW1C) because
         * there might be more events to service.
         */
        temp &= ~ERST_EHB;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Write event ring dequeue pointer, "
                        "preserving EHB bit");
        xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
                        &xhci->ir_set->erst_dequeue);
}
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
		__le32 __iomem *addr, u8 major_revision, int max_caps)
{
	u32 temp, port_offset, port_count;
	int i;

	if (major_revision > 0x03) {
		xhci_warn(xhci, "Ignoring unknown port speed, "
				"Ext Cap %p, revision = 0x%x\n",
				addr, major_revision);
		/* Ignoring port protocol we can't understand.  FIXME */
		return;
	}

	/* Port offset and count in the third dword, see section 7.2 */
	temp = xhci_readl(xhci, addr + 2);
	port_offset = XHCI_EXT_PORT_OFF(temp);
	port_count = XHCI_EXT_PORT_COUNT(temp);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Ext Cap %p, port offset = %u, "
			"count = %u, revision = 0x%x",
			addr, port_offset, port_count, major_revision);
	/* Port count includes the current port offset */
	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		/* WTF? "Valid values are ‘1’ to MaxPorts" */
		return;

	/* cache usb2 port capabilities */
	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
		xhci->ext_caps[xhci->num_ext_caps++] = temp;

	/* Check the host's USB2 LPM capability */
	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
			(temp & XHCI_L1C)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI 0.96: support USB2 software lpm");
		xhci->sw_lpm_support = 1;
	}

	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI 1.0: support USB2 software lpm");
		xhci->sw_lpm_support = 1;
		if (temp & XHCI_HLC) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"xHCI 1.0: support USB2 hardware lpm");
			xhci->hw_lpm_support = 1;
		}
	}

	port_offset--;
	for (i = port_offset; i < (port_offset + port_count); i++) {
		/* Duplicate entry.  Ignore the port if the revisions differ. */
		if (xhci->port_array[i] != 0) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
					" port %u\n", addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, "
					"duplicated as USB %u\n",
					xhci->port_array[i], major_revision);
			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
			 */
			if (xhci->port_array[i] != major_revision &&
					xhci->port_array[i] != DUPLICATE_ENTRY) {
				if (xhci->port_array[i] == 0x03)
					xhci->num_usb3_ports--;
				else
					xhci->num_usb2_ports--;
				xhci->port_array[i] = DUPLICATE_ENTRY;
			}
			/* FIXME: Should we disable the port? */
			continue;
		}
		xhci->port_array[i] = major_revision;
		if (major_revision == 0x03)
			xhci->num_usb3_ports++;
		else
			xhci->num_usb2_ports++;
	}
	/* FIXME: Should we disable ports not in the Extended Capabilities? */
}
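
/*
 * Register layout decoded above, for reference (xHCI spec section 7.2,
 * "xHCI Supported Protocol Capability"):
 *
 *	dword 0: bits  7:0  = capability ID (0x02, Supported Protocol)
 *	         bits 15:8  = next capability pointer
 *	         bits 31:24 = major revision (BCD, 0x02 or 0x03 here)
 *	dword 2: bits  7:0  = compatible port offset (1-based)
 *	         bits 15:8  = compatible port count
 *
 * For example, a capability with major revision 0x03, port offset 5, and
 * port count 2 marks roothub ports 5 and 6 (array indices 4 and 5) as
 * USB 3.0.
 */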

/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities"
 * that specify what speeds each port is supposed to be.  We can't count on
 * the port speed bits in the PORTSC register being correct until a device is
 * connected, but we need to set up the two fake roothubs with the correct
 * number of USB 3.0 and USB 2.0 ports at host controller initialization time.
 */
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	__le32 __iomem *addr, *tmp_addr;
	u32 offset, tmp_offset;
	unsigned int num_ports;
	int i, j, port_index;
	int cap_count = 0;

	addr = &xhci->cap_regs->hcc_params;
	offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
	if (offset == 0) {
		xhci_err(xhci, "No Extended Capability registers, "
				"unable to set up roothub.\n");
		return -ENODEV;
	}

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
	if (!xhci->port_array)
		return -ENOMEM;

	xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}

	/*
	 * For whatever reason, the first capability offset is from the
	 * capability register base, not from the HCCPARAMS register.
	 * See section 5.3.6 for offset calculation.
	 */
	addr = &xhci->cap_regs->hc_capbase + offset;

	tmp_addr = addr;
	tmp_offset = offset;

	/* count extended protocol capability entries for later caching */
	do {
		u32 cap_id;

		cap_id = xhci_readl(xhci, tmp_addr);
		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
			cap_count++;
		tmp_offset = XHCI_EXT_CAPS_NEXT(cap_id);
		tmp_addr += tmp_offset;
	} while (tmp_offset);
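
/*
 * Note on the pointer arithmetic above: tmp_addr is a (__le32 __iomem *),
 * so "tmp_addr += tmp_offset" advances by tmp_offset 32-bit words.  That
 * matches the spec's Next Capability Pointer, which is a dword offset
 * relative to the current capability header.  For example, a header value
 * of 0x03000502 decodes as capability ID 0x02 with the next capability
 * 0x05 dwords (20 bytes) further on; a next pointer of 0 terminates the
 * walk.
 */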

	xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
	if (!xhci->ext_caps)
		return -ENOMEM;

	while (1) {
		u32 cap_id;

		cap_id = xhci_readl(xhci, addr);
		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
			xhci_add_in_port(xhci, num_ports, addr,
					(u8) XHCI_EXT_PORT_MAJOR(cap_id),
					cap_count);
		offset = XHCI_EXT_CAPS_NEXT(cap_id);
		if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
				== num_ports)
			break;
		/*
		 * Once you're into the Extended Capabilities, the offset is
		 * always relative to the register holding the offset.
		 */
		addr += offset;
	}

	if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Found %u USB 2.0 ports and %u USB 3.0 ports.",
			xhci->num_usb2_ports, xhci->num_usb3_ports);

	/* Place limits on the number of roothub ports so that the hub
	 * descriptors aren't longer than the USB core will allocate.
	 */
	if (xhci->num_usb3_ports > 15) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 3.0 roothub ports to 15.");
		xhci->num_usb3_ports = 15;
	}
	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 2.0 roothub ports to %u.",
				USB_MAXCHILDREN);
		xhci->num_usb2_ports = USB_MAXCHILDREN;
	}

	/*
	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
	 * Not sure how the USB core will handle a hub with no ports...
	 */
	if (xhci->num_usb2_ports) {
		xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
				xhci->num_usb2_ports, flags);
		if (!xhci->usb2_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++) {
			if (xhci->port_array[i] == 0x03 ||
					xhci->port_array[i] == 0 ||
					xhci->port_array[i] == DUPLICATE_ENTRY)
				continue;

			xhci->usb2_ports[port_index] =
				&xhci->op_regs->port_status_base +
				NUM_PORT_REGS*i;
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"USB 2.0 port at index %u, "
					"addr = %p", i,
					xhci->usb2_ports[port_index]);
			port_index++;
			if (port_index == xhci->num_usb2_ports)
				break;
		}
	}
	if (xhci->num_usb3_ports) {
		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
				xhci->num_usb3_ports, flags);
		if (!xhci->usb3_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++)
			if (xhci->port_array[i] == 0x03) {
				xhci->usb3_ports[port_index] =
					&xhci->op_regs->port_status_base +
					NUM_PORT_REGS*i;
				xhci_dbg_trace(xhci, trace_xhci_dbg_init,
						"USB 3.0 port at index %u, "
						"addr = %p", i,
						xhci->usb3_ports[port_index]);
				port_index++;
				if (port_index == xhci->num_usb3_ports)
					break;
			}
	}
	return 0;
}
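
/*
 * Addressing note: each port owns NUM_PORT_REGS (4) consecutive 32-bit
 * operational registers (PORTSC, PORTPMSC, PORTLI, and a fourth reserved
 * dword), starting at port_status_base, which is 0x400 into the
 * operational registers.  So for zero-based port i the stored pointer is
 *
 *	&op_regs->port_status_base + NUM_PORT_REGS*i
 *
 * e.g. port 2's PORTSC sits at operational offset 0x400 + 2*0x10 = 0x420.
 */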

int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int val, val2;
	u64 val_64;
	struct xhci_segment *seg;
	u32 page_size, temp;
	int i;

	INIT_LIST_HEAD(&xhci->lpm_failed_devs);
	INIT_LIST_HEAD(&xhci->cancel_cmd_list);

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size register = 0x%x", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Supported page size of %iK", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"HCD page size set to %iK", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// xHC can handle at most %d device slots.", val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting Max device slots reg = 0x%x.", val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Section 6.1 - the Device Context Base Address Array must be
	 * physically contiguous and 64-byte aligned.
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			GFP_KERNEL);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Device context base array address = 0x%llx (DMA), %p (virt)",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			TRB_SEGMENT_SIZE, 64, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;
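
/*
 * The 2112-byte pool element above is the worst case for an input context
 * when the controller uses 64-byte context structures (HCCPARAMS CSZ = 1):
 * one input control context plus one slot context plus 31 endpoint
 * contexts, i.e. 33 * 64 = 2112 bytes.
 */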

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent()
	 */

	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocated command ring at %p", xhci->cmd_ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%016llx",
			(unsigned long long)val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
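
/*
 * CRCR composition above, for reference (xHCI spec section 5.4.5): bits
 * 63:6 take the 64-byte-aligned command ring address, bits 5:1 hold
 * control/status bits (CS, CA, CRR) preserved via CMD_RING_RSVD_BITS, and
 * bit 0 is the consumer Ring Cycle State.  With a first segment at
 * 0x7f8000 and an initial cycle state of 1, the value written is 0x7f8001.
 */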

	xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
	if (!xhci->lpm_command)
		goto fail;

	/* Reserve one command ring TRB for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
	xhci->cmd_ring_reserved_trbs++;

	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Doorbell array is located at offset 0x%x"
			" from cap regs base addr", val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = &xhci->run_regs->ir_set[0];

	/*
	 * Event ring setup: Allocate a normal ring, but also set up
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
					flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
		goto fail;

	xhci->erst.entries = dma_alloc_coherent(dev,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
			GFP_KERNEL);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Allocated event ring segment table at 0x%llx",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];

		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
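
/*
 * Each ERST entry written above is 16 bytes (xHCI spec section 6.5): a
 * 64-bit ring segment base address, a segment size in TRBs (the spec
 * allows 16 to 4096 per segment), and reserved fields that software must
 * zero, hence the explicit entry->rsvd = 0.
 */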

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST entries to point to event ring.");
	/* set the segment table base address */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST base address for ir_set 0 = 0x%llx",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wrote ERST address to ir_set 0.");
	xhci_print_ir_set(xhci, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1 ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; ++i) {
		xhci->bus_state[0].resume_done[i] = 0;
		xhci->bus_state[1].resume_done[i] = 0;
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

	/* Enable USB 3.0 device notifications for function remote wake, which
	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
	 * U3 (device suspend).
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	xhci_writel(xhci, temp, &xhci->op_regs->dev_notification);
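
/*
 * DNCTRL note: bits 15:0 enable the corresponding device notification
 * types; Function Wake is notification type 1, so DEV_NOTE_FWAKE is bit 1.
 * For example, a register reading 0xffff0000 becomes 0xffff0002 after the
 * mask-and-or above: reserved upper bits untouched, every notification
 * type except FWAKE disabled.
 */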

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}