1 #include <linux/kernel.h>
2 #include <linux/errno.h>
3 #include <linux/init.h>
4 #include <linux/slab.h>
6 #include <linux/module.h>
7 #include <linux/moduleparam.h>
8 #include <linux/scatterlist.h>
9 #include <linux/mutex.h>
11 #include <linux/usb.h>
14 /*-------------------------------------------------------------------------*/
/* Module parameter "alt": when >= 0, forces get_endpoints() to pick this
 * specific altsetting instead of scanning for a usable one. -1 = auto. */
16 static int override_alt = -1;
17 module_param_named(alt, override_alt, int, 0644);
18 MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection");
20 /*-------------------------------------------------------------------------*/
22 /* FIXME make these public somewhere; usbdevfs.h? */
/* Test request passed in from userspace via the USBTEST_REQUEST ioctl.
 * NOTE(review): listing is decimated — several fields and the closing
 * brace of this struct are not visible here. */
23 struct usbtest_param {
25 	unsigned		test_num;	/* 0..(TEST_CASES-1) */
/* duration is filled in by the driver with the elapsed test time. */
32 	struct timeval		duration;
34 #define USBTEST_REQUEST	_IOWR('U', 100, struct usbtest_param)
36 /*-------------------------------------------------------------------------*/
38 #define	GENERIC		/* let probe() bind using module params */
40 /* Some devices that can be used for testing will have "real" drivers.
41  * Entries for those need to be enabled here by hand, after disabling
 * the other driver.  (NOTE(review): comment tail missing in this listing.)
 */
44 //#define	IBOT2		/* grab iBOT2 webcams */
45 //#define	KEYSPAN_19Qi	/* grab un-renumerated serial adapter */
47 /*-------------------------------------------------------------------------*/
/* NOTE(review): these fields belong to struct usbtest_info (endpoint
 * addresses and capability flags); the struct's opening/closing lines are
 * missing from this listing. */
51 	u8			ep_in;		/* bulk/intr source */
52 	u8			ep_out;		/* bulk/intr sink */
55 	unsigned		iso:1;		/* try iso in/out */
59 /* this is accessed only through usbfs ioctl calls.
60  * one ioctl to issue a test ... one lock per device.
61  * tests create other threads if they need them.
62  * urbs and buffers are allocated dynamically,
63  * and data generated deterministically.
 */
/* NOTE(review): fields of struct usbtest_dev; declaration lines missing. */
66 	struct usb_interface	*intf;
67 	struct usbtest_info	*info;
72 	struct usb_endpoint_descriptor	*iso_in, *iso_out;
/* Map a usbtest_dev back to the usb_device that owns its interface. */
79 static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
81 	return interface_to_usbdev(test->intf);
84 /* set up all urbs so they can be used with either bulk or interrupt */
85 #define	INTERRUPT_RATE		1	/* msec/transfer */
/* Logging helpers bound to the test device's interface. */
87 #define ERROR(tdev, fmt, args...) \
88 	dev_err(&(tdev)->intf->dev , fmt , ## args)
89 #define WARNING(tdev, fmt, args...) \
90 	dev_warn(&(tdev)->intf->dev , fmt , ## args)
/* Pattern byte used to detect buffer over/under-runs and uncopied data. */
92 #define GUARD_BYTE	0xA5
94 /*-------------------------------------------------------------------------*/
/* Scan the interface's altsettings for a usable pair of bulk (or iso)
 * endpoints and record the resulting pipes in *dev.  Honors the
 * "alt" module parameter (override_alt) when >= 0.
 * NOTE(review): listing is decimated — several statements (locals,
 * braces, some assignments) are missing from this view. */
97 get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
100 	struct usb_host_interface	*alt;
101 	struct usb_host_endpoint	*in, *out;
102 	struct usb_host_endpoint	*iso_in, *iso_out;
103 	struct usb_device		*udev;
105 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
109 		iso_in = iso_out = NULL;
110 		alt = intf->altsetting + tmp;
/* skip altsettings that don't match the forced selection, if any */
112 		if (override_alt >= 0 &&
113 				override_alt != alt->desc.bAlternateSetting)
116 		/* take the first altsetting with in-bulk + out-bulk;
117 		 * ignore other endpoints and altsettings.
		 */
119 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
120 			struct usb_host_endpoint	*e;
122 			e = alt->endpoint + ep;
123 			switch (usb_endpoint_type(&e->desc)) {
124 			case USB_ENDPOINT_XFER_BULK:
126 			case USB_ENDPOINT_XFER_ISOC:
133 			if (usb_endpoint_dir_in(&e->desc)) {
142 			if (usb_endpoint_dir_in(&e->desc)) {
150 		if ((in && out)  ||  iso_in || iso_out)
/* found a usable altsetting: select it if it isn't the default */
156 	udev = testdev_to_usbdev(dev);
157 	dev->info->alt = alt->desc.bAlternateSetting;
158 	if (alt->desc.bAlternateSetting != 0) {
159 		tmp = usb_set_interface(udev,
160 				alt->desc.bInterfaceNumber,
161 				alt->desc.bAlternateSetting);
/* record the bulk pipes for later tests */
167 	dev->in_pipe = usb_rcvbulkpipe(udev,
168 			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
169 	dev->out_pipe = usb_sndbulkpipe(udev,
170 			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
/* record iso pipes too, when iso endpoints were found */
173 		dev->iso_in = &iso_in->desc;
174 		dev->in_iso_pipe = usb_rcvisocpipe(udev,
175 				iso_in->desc.bEndpointAddress
176 					& USB_ENDPOINT_NUMBER_MASK);
180 		dev->iso_out = &iso_out->desc;
181 		dev->out_iso_pipe = usb_sndisocpipe(udev,
182 				iso_out->desc.bEndpointAddress
183 					& USB_ENDPOINT_NUMBER_MASK);
188 /*-------------------------------------------------------------------------*/
190 /* Support for testing basic non-queued I/O streams.
192  * These just package urbs as requests that can be easily canceled.
193  * Each urb's data buffer is dynamically allocated; callers can fill
194  * them with non-zero test data (or test for it) when appropriate.
 */
/* URB completion handler: wakes the submitter blocked in simple_io(). */
197 static void simple_callback(struct urb *urb)
199 	complete(urb->context);
/* Allocate a bulk/interrupt urb plus its transfer buffer.  With
 * URB_NO_TRANSFER_DMA_MAP set, the buffer comes from usb_alloc_coherent();
 * otherwise from kmalloc().  A nonzero offset mis-aligns the buffer to
 * exercise unaligned transfers; the skipped bytes are filled with
 * GUARD_BYTE so corruption can be detected later.
 * NOTE(review): listing is decimated — parameter list and some statements
 * are missing from this view. */
202 static struct urb *usbtest_alloc_urb(
203 	struct usb_device	*udev,
206 	unsigned		transfer_flags,
211 	urb = usb_alloc_urb(0, GFP_KERNEL);
214 	usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
/* interrupt interval: high-speed uses microframe units (1<<3 per ms) */
215 	urb->interval = (udev->speed == USB_SPEED_HIGH)
216 			? (INTERRUPT_RATE << 3)
218 	urb->transfer_flags = transfer_flags;
219 	if (usb_pipein(pipe))
220 		urb->transfer_flags |= URB_SHORT_NOT_OK;
222 	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
223 		urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
224 			GFP_KERNEL, &urb->transfer_dma);
226 		urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
228 	if (!urb->transfer_buffer) {
233 	/* To test unaligned transfers add an offset and fill the
234 		unused memory with a guard value */
236 		memset(urb->transfer_buffer, GUARD_BYTE, offset);
237 		urb->transfer_buffer += offset;
238 		if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
239 			urb->transfer_dma += offset;
242 	/* For inbound transfers use guard byte so that test fails if
243 		data not correctly copied */
244 	memset(urb->transfer_buffer,
245 			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
/* Convenience wrapper: aligned, DMA-mapped urb with no offset. */
250 static struct urb *simple_alloc_urb(
251 	struct usb_device	*udev,
255 	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
/* I/O data pattern selector.  The sysfs/modprobe parameter is exposed as
 * "pattern" (backed by the mod_pattern variable); 0 means all-zero data,
 * nonzero selects the mod-63 pattern checked by simple_check_buf(). */
258 static unsigned pattern;
259 static unsigned mod_pattern;
260 module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
/* MODULE_PARM_DESC must name the user-visible parameter ("pattern", as
 * declared by module_param_named above) — not the internal variable —
 * matching the "alt" parameter's description earlier in this file;
 * otherwise modinfo cannot associate the description with the parameter. */
261 MODULE_PARM_DESC(pattern, "i/o pattern (0 == zeroes)");
/* Fill an outbound urb's buffer with the configured test pattern.
 * NOTE(review): the pattern-selection branch is missing from this listing;
 * the visible loop writes the mod-63 sequence. */
263 static inline void simple_fill_buf(struct urb *urb)
266 	u8		*buf = urb->transfer_buffer;
267 	unsigned	len = urb->transfer_buffer_length;
276 		for (i = 0; i < len; i++)
277 			*buf++ = (u8) (i % 63);
/* Offset of buf within its kmalloc minimum-alignment granule; used to
 * recover the original (aligned) allocation pointer. */
282 static inline unsigned long buffer_offset(void *buf)
284 	return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
/* Verify the GUARD_BYTE padding in front of an unaligned transfer buffer
 * was not overwritten by the transfer. */
287 static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
289 	u8 *buf = urb->transfer_buffer;
290 	u8 *guard = buf - buffer_offset(buf);
293 	for (i = 0; guard < buf; i++, guard++) {
294 		if (*guard != GUARD_BYTE) {
295 			ERROR(tdev, "guard byte[%d] %d (not %d)\n",
296 				i, *guard, GUARD_BYTE);
/* Check an inbound urb's data against the expected pattern (and its guard
 * bytes).  Returns 0 on match.
 * NOTE(review): the pattern switch and "expected" computation are partly
 * missing from this listing. */
303 static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
307 	u8		*buf = urb->transfer_buffer;
308 	unsigned	len = urb->actual_length;
310 	int ret = check_guard_bytes(tdev, urb);
314 	for (i = 0; i < len; i++, buf++) {
316 		/* all-zeroes has no synchronization issues */
320 		/* mod63 stays in sync with short-terminated transfers,
321 		 * or otherwise when host and gadget agree on how large
322 		 * each usb transfer request should be. resync is done
323 		 * with set_interface or set_config.
		 */
328 		/* always fail unsupported patterns */
333 		if (*buf == expected)
335 		ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
/* Free a urb and its buffer, undoing any unaligned offset applied by
 * usbtest_alloc_urb() before releasing the allocation. */
341 static void simple_free_urb(struct urb *urb)
343 	unsigned long offset = buffer_offset(urb->transfer_buffer);
345 	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
348 			urb->transfer_buffer_length + offset,
349 			urb->transfer_buffer - offset,
350 			urb->transfer_dma - offset);
352 		kfree(urb->transfer_buffer - offset);
/* Synchronously run one urb for "iterations" round trips, optionally
 * varying the transfer length each pass ("vary"), and compare the final
 * status to "expected".  OUT transfers are (re)filled with the pattern;
 * IN transfers are verified with simple_check_buf().
 * NOTE(review): listing is decimated — some parameters and statements
 * are missing from this view. */
356 static int simple_io(
357 	struct usbtest_dev	*tdev,
365 	struct usb_device	*udev = urb->dev;
366 	int			max = urb->transfer_buffer_length;
367 	struct completion	completion;
370 	urb->context = &completion;
371 	while (retval == 0 && iterations-- > 0) {
372 		init_completion(&completion);
373 		if (usb_pipeout(urb->pipe)) {
374 			simple_fill_buf(urb);
375 			urb->transfer_flags |= URB_ZERO_PACKET;
377 		retval = usb_submit_urb(urb, GFP_KERNEL);
381 		/* NOTE: no timeouts; can't be broken out of by interrupt */
382 		wait_for_completion(&completion);
383 		retval = urb->status;
385 		if (retval == 0 && usb_pipein(urb->pipe))
386 			retval = simple_check_buf(tdev, urb);
/* optionally step the transfer length between iterations */
389 			int len = urb->transfer_buffer_length;
394 				len = (vary < max) ? vary : max;
395 			urb->transfer_buffer_length = len;
398 	/* FIXME if endpoint halted, clear halt (and log) */
400 	urb->transfer_buffer_length = max;
402 	if (expected != retval)
404 			"%s failed, iterations left %d, status %d (not %d)\n",
405 				label, iterations, retval, expected);
410 /*-------------------------------------------------------------------------*/
412 /* We use scatterlist primitives to test queued I/O.
413  * Yes, this also tests the scatterlist primitives.
 */
/* Free every buffer referenced by the scatterlist, then the list itself.
 * NOTE(review): final kfree(sg) line not visible in this listing. */
416 static void free_sglist(struct scatterlist *sg, int nents)
422 	for (i = 0; i < nents; i++) {
423 		if (!sg_page(&sg[i]))
425 		kfree(sg_virt(&sg[i]));
/* Build an nents-entry scatterlist of zeroed buffers, optionally filled
 * with the mod-63 pattern and with per-entry sizes stepped by "vary". */
430 static struct scatterlist *
431 alloc_sglist(int nents, int max, int vary)
433 	struct scatterlist	*sg;
440 	sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
443 	sg_init_table(sg, nents);
445 	for (i = 0; i < nents; i++) {
449 		buf = kzalloc(size, GFP_KERNEL);
455 		/* kmalloc pages are always physically contiguous! */
456 		sg_set_buf(&sg[i], buf, size);
463 			for (j = 0; j < size; j++)
464 				*buf++ = (u8) (j % 63);
/* step the size for the next entry when "vary" is nonzero */
472 				size = (vary < max) ? vary : max;
/* Run a scatter-gather request "iterations" times via usb_sg_init/wait.
 * NOTE(review): listing is decimated — usb_sg_wait() call and some error
 * paths are missing from this view. */
479 static int perform_sglist(
480 	struct usbtest_dev	*tdev,
483 	struct usb_sg_request	*req,
484 	struct scatterlist	*sg,
488 	struct usb_device	*udev = testdev_to_usbdev(tdev);
491 	while (retval == 0 && iterations-- > 0) {
492 		retval = usb_sg_init(req, udev, pipe,
493 				(udev->speed == USB_SPEED_HIGH)
494 					? (INTERRUPT_RATE << 3)
496 				sg, nents, 0, GFP_KERNEL);
501 		retval = req->status;
503 		/* FIXME check resulting data pattern */
505 		/* FIXME if endpoint halted, clear halt (and log) */
508 	/* FIXME for unlink or fault handling tests, don't report
509 	 * failure if retval is as we expected ...
	 */
512 		ERROR(tdev, "perform_sglist failed, "
513 				"iterations left %d, status %d\n",
519 /*-------------------------------------------------------------------------*/
521 /* unqueued control message testing
523  * there's a nice set of device functional requirements in chapter 9 of the
524  * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
525  * special test firmware.
527  * we know the device is configured (or suspended) by the time it's visible
528  * through usbfs.  we can't change that, so we won't test enumeration (which
529  * worked 'well enough' to get here, this time), power management (ditto),
530  * or remote wakeup (which needs human interaction).
 */
/* realworld != 0 (the default) relaxes chapter-9 checks that many shipping
 * devices fail; clear it to demand strict spec compliance. */
533 static unsigned realworld = 1;
534 module_param(realworld, uint, 0);
535 MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
/* Issue GET_INTERFACE [USB 2.0 9.4.4] for this interface; the one-byte
 * reply (the current altsetting) lands in dev->buf. */
537 static int get_altsetting(struct usbtest_dev *dev)
539 	struct usb_interface	*iface = dev->intf;
540 	struct usb_device	*udev = interface_to_usbdev(iface);
543 	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
544 			USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
545 			0, iface->altsetting[0].desc.bInterfaceNumber,
546 			dev->buf, 1, USB_CTRL_GET_TIMEOUT);
/* Select an altsetting [USB 2.0 9.4.10] after range-checking the value
 * (bAlternateSetting is a single byte, hence the 0..255 bound). */
558 static int set_altsetting(struct usbtest_dev *dev, int alternate)
560 	struct usb_interface		*iface = dev->intf;
561 	struct usb_device		*udev;
563 	if (alternate < 0 || alternate >= 256)
566 	udev = interface_to_usbdev(iface);
567 	return usb_set_interface(udev,
568 			iface->altsetting[0].desc.bInterfaceNumber,
/* Validate a (possibly partial) config descriptor read of "len" bytes in
 * tdev->buf: sane bLength, required/reserved bmAttributes bits, and a
 * wTotalLength consistent with how much was read. */
572 static int is_good_config(struct usbtest_dev *tdev, int len)
574 	struct usb_config_descriptor	*config;
576 	if (len < sizeof(*config))
578 	config = (struct usb_config_descriptor *) tdev->buf;
580 	switch (config->bDescriptorType) {
582 	case USB_DT_OTHER_SPEED_CONFIG:
583 		if (config->bLength != 9) {
584 			ERROR(tdev, "bogus config descriptor length\n");
587 		/* this bit 'must be 1' but often isn't */
588 		if (!realworld && !(config->bmAttributes & 0x80)) {
589 			ERROR(tdev, "high bit of config attributes not set\n");
592 		if (config->bmAttributes & 0x1f) {	/* reserved == 0 */
593 			ERROR(tdev, "reserved config bits set\n");
601 	if (le16_to_cpu(config->wTotalLength) == len)	/* read it all */
603 	if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE)	/* max partial read */
605 	ERROR(tdev, "bogus config descriptor read size\n");
/* Validate a USB 2.0 Extension capability descriptor: exact length and
 * no reserved bmAttributes bits set. */
609 static int is_good_ext(struct usbtest_dev *tdev, u8 *buf)
611 	struct usb_ext_cap_descriptor *ext;
614 	ext = (struct usb_ext_cap_descriptor *) buf;
616 	if (ext->bLength != USB_DT_USB_EXT_CAP_SIZE) {
617 		ERROR(tdev, "bogus usb 2.0 extension descriptor length\n");
621 	attr = le32_to_cpu(ext->bmAttributes);
622 	/* bits[1:4] is used and others are reserved */
623 	if (attr & ~0x1e) {	/* reserved == 0 */
624 		ERROR(tdev, "reserved bits set\n");
/* Validate a SuperSpeed device capability descriptor: exact length and no
 * reserved bits in bmAttributes or wSpeedSupported. */
631 static int is_good_ss_cap(struct usbtest_dev *tdev, u8 *buf)
633 	struct usb_ss_cap_descriptor *ss;
635 	ss = (struct usb_ss_cap_descriptor *) buf;
637 	if (ss->bLength != USB_DT_USB_SS_CAP_SIZE) {
638 		ERROR(tdev, "bogus superspeed device capability descriptor length\n");
643 	 * only bit[1] of bmAttributes is used for LTM and others are
646 	if (ss->bmAttributes & ~0x02) {	/* reserved == 0 */
647 		ERROR(tdev, "reserved bits set in bmAttributes\n");
651 	/* bits[0:3] of wSpeedSupported is used and others are reserved */
652 	if (le16_to_cpu(ss->wSpeedSupported) & ~0x0f) {	/* reserved == 0 */
653 		ERROR(tdev, "reserved bits set in wSpeedSupported\n");
/* Validate a Container ID capability descriptor: exact length and zero
 * bReserved field. */
660 static int is_good_con_id(struct usbtest_dev *tdev, u8 *buf)
662 	struct usb_ss_container_id_descriptor *con_id;
664 	con_id = (struct usb_ss_container_id_descriptor *) buf;
666 	if (con_id->bLength != USB_DT_USB_SS_CONTN_ID_SIZE) {
667 		ERROR(tdev, "bogus container id descriptor length\n");
671 	if (con_id->bReserved) {	/* reserved == 0 */
672 		ERROR(tdev, "reserved bits set\n");
679 /* sanity test for standard requests working with usb_control_mesg() and some
680  * of the utility functions which use it.
682  * this doesn't test how endpoint halts behave or data toggles get set, since
683  * we won't do I/O to bulk/interrupt endpoints here (which is how to change
684  * halt or toggle).  toggle testing is impractical without support from hcds.
686  * this avoids failing devices linux would normally work with, by not testing
687  * config/altsetting operations for devices that only support their defaults.
688  * such devices rarely support those needless operations.
690  * NOTE that since this is a sanity test, it's not examining boundary cases
691  * to see if usbcore, hcd, and device all behave right.  such testing would
692  * involve varied read sizes and other operation sequences.
 */
/* Chapter-9 sanity test entry point.  Returns 0 on success, a negative
 * errno (or -EDOM for wrong-but-nonnegative results) on failure.
 * NOTE(review): listing is decimated — locals, braces, and some error
 * paths are missing from this view. */
694 static int ch9_postconfig(struct usbtest_dev *dev)
696 	struct usb_interface	*iface = dev->intf;
697 	struct usb_device	*udev = interface_to_usbdev(iface);
700 	/* [9.2.3] if there's more than one altsetting, we need to be able to
701 	 * set and get each one.  mostly trusts the descriptors from usbcore.
	 */
703 	for (i = 0; i < iface->num_altsetting; i++) {
705 		/* 9.2.3 constrains the range here */
706 		alt = iface->altsetting[i].desc.bAlternateSetting;
707 		if (alt < 0 || alt >= iface->num_altsetting) {
709 				"invalid alt [%d].bAltSetting = %d\n",
713 		/* [real world] get/set unimplemented if there's only one */
714 		if (realworld && iface->num_altsetting == 1)
717 		/* [9.4.10] set_interface */
718 		retval = set_altsetting(dev, alt);
720 			dev_err(&iface->dev, "can't set_interface = %d, %d\n",
725 		/* [9.4.4] get_interface always works */
726 		retval = get_altsetting(dev);
728 			dev_err(&iface->dev, "get alt should be %d, was %d\n",
730 			return (retval < 0) ? retval : -EDOM;
735 	/* [real world] get_config unimplemented if there's only one */
736 	if (!realworld || udev->descriptor.bNumConfigurations != 1) {
737 		int	expected = udev->actconfig->desc.bConfigurationValue;
739 		/* [9.4.2] get_configuration always works
740 		 * ... although some cheap devices (like one TI Hub I've got)
741 		 * won't return config descriptors except before set_config.
		 */
743 		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
744 				USB_REQ_GET_CONFIGURATION,
745 				USB_DIR_IN | USB_RECIP_DEVICE,
746 				0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
747 		if (retval != 1 || dev->buf[0] != expected) {
748 			dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
749 				retval, dev->buf[0], expected);
750 			return (retval < 0) ? retval : -EDOM;
754 	/* there's always [9.4.3] a device descriptor [9.6.1] */
755 	retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
756 			dev->buf, sizeof(udev->descriptor));
757 	if (retval != sizeof(udev->descriptor)) {
758 		dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
759 		return (retval < 0) ? retval : -EDOM;
/*
763 	 * there's always [9.4.3] a bos device descriptor [9.6.2] in USB
 */
766 	if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0300) {
767 		struct usb_bos_descriptor *bos = NULL;
768 		struct usb_dev_cap_header *header = NULL;
769 		unsigned total, num, length;
/* first read just the BOS header to learn the full set size */
772 		retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
773 				sizeof(*udev->bos->desc));
774 		if (retval != sizeof(*udev->bos->desc)) {
775 			dev_err(&iface->dev, "bos descriptor --> %d\n", retval);
776 			return (retval < 0) ? retval : -EDOM;
779 		bos = (struct usb_bos_descriptor *)dev->buf;
780 		total = le16_to_cpu(bos->wTotalLength);
781 		num = bos->bNumDeviceCaps;
783 		if (total > TBUF_SIZE)
/*
787 		 * get generic device-level capability descriptors [9.6.2]
 */
790 		retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
792 		if (retval != total) {
793 			dev_err(&iface->dev, "bos descriptor set --> %d\n",
795 			return (retval < 0) ? retval : -EDOM;
798 		length = sizeof(*udev->bos->desc);
/* walk each capability descriptor, bounds-checking before each access */
800 		for (i = 0; i < num; i++) {
802 			if (buf + sizeof(struct usb_dev_cap_header) >
806 			header = (struct usb_dev_cap_header *)buf;
807 			length = header->bLength;
809 			if (header->bDescriptorType !=
810 					USB_DT_DEVICE_CAPABILITY) {
811 				dev_warn(&udev->dev, "not device capability descriptor, skip\n");
815 			switch (header->bDevCapabilityType) {
816 			case USB_CAP_TYPE_EXT:
817 				if (buf + USB_DT_USB_EXT_CAP_SIZE >
819 						!is_good_ext(dev, buf)) {
820 					dev_err(&iface->dev, "bogus usb 2.0 extension descriptor\n");
824 			case USB_SS_CAP_TYPE:
825 				if (buf + USB_DT_USB_SS_CAP_SIZE >
827 						!is_good_ss_cap(dev, buf)) {
828 					dev_err(&iface->dev, "bogus superspeed device capability descriptor\n");
832 			case CONTAINER_ID_TYPE:
833 				if (buf + USB_DT_USB_SS_CONTN_ID_SIZE >
835 						!is_good_con_id(dev, buf)) {
836 					dev_err(&iface->dev, "bogus container id descriptor\n");
846 	/* there's always [9.4.3] at least one config descriptor [9.6.3] */
847 	for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
848 		retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
849 				dev->buf, TBUF_SIZE);
850 		if (!is_good_config(dev, retval)) {
852 				"config [%d] descriptor --> %d\n",
854 			return (retval < 0) ? retval : -EDOM;
857 		/* FIXME cross-checking udev->config[i] to make sure usbcore
858 		 * parsed it right (etc) would be good testing paranoia
		 */
862 	/* and sometimes [9.2.6.6] speed dependent descriptors */
863 	if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
864 		struct usb_qualifier_descriptor *d = NULL;
866 		/* device qualifier [9.6.2] */
867 		retval = usb_get_descriptor(udev,
868 				USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
869 				sizeof(struct usb_qualifier_descriptor));
870 		if (retval == -EPIPE) {
871 			if (udev->speed == USB_SPEED_HIGH) {
873 					"hs dev qualifier --> %d\n",
875 				return (retval < 0) ? retval : -EDOM;
877 			/* usb2.0 but not high-speed capable; fine */
878 		} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
879 			dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
880 			return (retval < 0) ? retval : -EDOM;
882 			d = (struct usb_qualifier_descriptor *) dev->buf;
884 			/* might not have [9.6.2] any other-speed configs [9.6.4] */
886 				unsigned max = d->bNumConfigurations;
887 				for (i = 0; i < max; i++) {
888 					retval = usb_get_descriptor(udev,
889 						USB_DT_OTHER_SPEED_CONFIG, i,
890 						dev->buf, TBUF_SIZE);
891 					if (!is_good_config(dev, retval)) {
893 						"other speed config --> %d\n",
895 						return (retval < 0) ? retval : -EDOM;
900 	/* FIXME fetch strings from at least the device descriptor */
902 	/* [9.4.5] get_status always works */
903 	retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
905 		dev_err(&iface->dev, "get dev status --> %d\n", retval);
909 	/* FIXME configuration.bmAttributes says if we could try to set/clear
910 	 * the device's remote wakeup feature ... if we can, test that here
	 */
913 	retval = usb_get_status(udev, USB_RECIP_INTERFACE,
914 			iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
916 		dev_err(&iface->dev, "get interface status --> %d\n", retval);
919 	/* FIXME get status for each endpoint in the interface */
924 /*-------------------------------------------------------------------------*/
926 /* use ch9 requests to test whether:
927  *   (a) queues work for control, keeping N subtests queued and
928  *       active (auto-resubmit) for M loops through the queue.
929  *   (b) protocol stalls (control-only) will autorecover.
930  *       it's not like bulk/intr; no halt clearing.
931  *   (c) short control reads are reported and handled.
932  *   (d) queues are always processed in-order
 */
/* NOTE(review): fields of struct ctrl_ctx (shared between test_ctrl_queue
 * and ctrl_complete); the struct declaration lines are missing here. */
937 	struct usbtest_dev	*dev;
938 	struct completion	complete;
943 	struct usbtest_param	*param;
947 #define NUM_SUBCASES	15		/* how many test subcases here? */
/* per-urb bookkeeping; setup must be embedded so ctrl_complete() can
 * recover the subcase via container_of() on the setup packet. */
950 	struct usb_ctrlrequest	setup;
/* Completion handler for the queued control test.  Verifies FIFO
 * completion order and expected per-subcase status, resubmits until
 * ctx->count completions have been collected, and on error unlinks the
 * rest of the queue.  Runs in completion context under ctx->lock.
 * NOTE(review): listing is decimated — some branches and statements are
 * missing from this view. */
955 static void ctrl_complete(struct urb *urb)
957 	struct ctrl_ctx		*ctx = urb->context;
958 	struct usb_ctrlrequest	*reqp;
959 	struct subcase		*subcase;
960 	int			status = urb->status;
962 	reqp = (struct usb_ctrlrequest *)urb->setup_packet;
963 	subcase = container_of(reqp, struct subcase, setup);
965 	spin_lock(&ctx->lock);
969 	/* queue must transfer and complete in fifo order, unless
970 	 * usb_unlink_urb() is used to unlink something not at the
971 	 * physical queue head (not tested).
	 */
973 	if (subcase->number > 0) {
974 		if ((subcase->number - ctx->last) != 1) {
976 				"subcase %d completed out of order, last %d\n",
977 				subcase->number, ctx->last);
979 			ctx->last = subcase->number;
983 	ctx->last = subcase->number;
985 	/* succeed or fault in only one way? */
986 	if (status == subcase->expected)
989 	/* async unlink for cleanup? */
990 	else if (status != -ECONNRESET) {
992 		/* some faults are allowed, not required */
993 		if (subcase->expected > 0 && (
994 			  ((status == -subcase->expected	/* happened */
995 			   || status == 0))))			/* didn't */
997 		/* sometimes more than one fault is allowed */
998 		else if (subcase->number == 12 && status == -EPIPE)
1001 			ERROR(ctx->dev, "subtest %d error, status %d\n",
1002 					subcase->number, status);
1005 	/* unexpected status codes mean errors; ideally, in hardware */
1008 		if (ctx->status == 0) {
1011 			ctx->status = status;
1012 			ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
1013 					"%d left, subcase %d, len %d/%d\n",
1014 					reqp->bRequestType, reqp->bRequest,
1015 					status, ctx->count, subcase->number,
1017 					urb->transfer_buffer_length);
1019 			/* FIXME this "unlink everything" exit route should
1020 			 * be a separate test case.
			 */
1023 			/* unlink whatever's still pending */
1024 			for (i = 1; i < ctx->param->sglen; i++) {
1025 				struct urb *u = ctx->urb[
1026 							(i + subcase->number)
1027 							% ctx->param->sglen];
1029 				if (u == urb || !u->dev)
/* drop the lock across usb_unlink_urb(): it may invoke completions */
1031 				spin_unlock(&ctx->lock);
1032 				status = usb_unlink_urb(u);
1033 				spin_lock(&ctx->lock);
1040 					ERROR(ctx->dev, "urb unlink --> %d\n",
1044 		status = ctx->status;
1048 	/* resubmit if we need to, else mark this as done */
1049 	if ((status == 0) && (ctx->pending < ctx->count)) {
1050 		status = usb_submit_urb(urb, GFP_ATOMIC);
1053 				"can't resubmit ctrl %02x.%02x, err %d\n",
1054 				reqp->bRequestType, reqp->bRequest, status);
1061 	/* signal completion when nothing's queued */
1062 	if (ctx->pending == 0)
1063 		complete(&ctx->complete);
1064 	spin_unlock(&ctx->lock);
/* Build a queue of sglen control urbs cycling through NUM_SUBCASES
 * chapter-9 subcases, submit them all, and let ctrl_complete() keep the
 * queue full until sglen * iterations requests have completed.
 * NOTE(review): listing is decimated — locals, braces, cleanup paths and
 * some case bodies are missing from this view. */
1068 test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
1070 	struct usb_device	*udev = testdev_to_usbdev(dev);
1072 	struct ctrl_ctx		context;
/* guard against multiplication overflow in count below */
1075 	if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
1078 	spin_lock_init(&context.lock);
1080 	init_completion(&context.complete);
1081 	context.count = param->sglen * param->iterations;
1082 	context.pending = 0;
1083 	context.status = -ENOMEM;
1084 	context.param = param;
1087 	/* allocate and init the urbs we'll queue.
1088 	 * as with bulk/intr sglists, sglen is the queue depth; it also
1089 	 * controls which subtests run (more tests than sglen) or rerun.
	 */
1091 	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
1094 	for (i = 0; i < param->sglen; i++) {
1095 		int			pipe = usb_rcvctrlpipe(udev, 0);
1098 		struct usb_ctrlrequest	req;
1099 		struct subcase		*reqp;
1101 		/* sign of this variable means:
1102 		 *  -: tested code must return this (negative) error code
1103 		 *  +: tested code may return this (negative too) error code
		 */
1107 		/* requests here are mostly expected to succeed on any
1108 		 * device, but some are chosen to trigger protocol stalls
		 */
1111 		memset(&req, 0, sizeof(req));
1112 		req.bRequest = USB_REQ_GET_DESCRIPTOR;
1113 		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
1115 		switch (i % NUM_SUBCASES) {
1116 		case 0:		/* get device descriptor */
1117 			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
1118 			len = sizeof(struct usb_device_descriptor);
1120 		case 1:		/* get first config descriptor (only) */
1121 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1122 			len = sizeof(struct usb_config_descriptor);
1124 		case 2:		/* get altsetting (OFTEN STALLS) */
1125 			req.bRequest = USB_REQ_GET_INTERFACE;
1126 			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
1127 			/* index = 0 means first interface */
1131 		case 3:		/* get interface status */
1132 			req.bRequest = USB_REQ_GET_STATUS;
1133 			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
1137 		case 4:		/* get device status */
1138 			req.bRequest = USB_REQ_GET_STATUS;
1139 			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
1142 		case 5:		/* get device qualifier (MAY STALL) */
1143 			req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
1144 			len = sizeof(struct usb_qualifier_descriptor);
1145 			if (udev->speed != USB_SPEED_HIGH)
1148 		case 6:		/* get first config descriptor, plus interface */
1149 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1150 			len = sizeof(struct usb_config_descriptor);
1151 			len += sizeof(struct usb_interface_descriptor);
1153 		case 7:		/* get interface descriptor (ALWAYS STALLS) */
1154 			req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
1155 			/* interface == 0 */
1156 			len = sizeof(struct usb_interface_descriptor);
1159 		/* NOTE: two consecutive stalls in the queue here.
1160 		 * that tests fault recovery a bit more aggressively. */
1161 		case 8:		/* clear endpoint halt (MAY STALL) */
1162 			req.bRequest = USB_REQ_CLEAR_FEATURE;
1163 			req.bRequestType = USB_RECIP_ENDPOINT;
1164 			/* wValue 0 == ep halt */
1165 			/* wIndex 0 == ep0 (shouldn't halt!) */
1167 			pipe = usb_sndctrlpipe(udev, 0);
1170 		case 9:		/* get endpoint status */
1171 			req.bRequest = USB_REQ_GET_STATUS;
1172 			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
1176 		case 10:	/* trigger short read (EREMOTEIO) */
1177 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1179 			expected = -EREMOTEIO;
1181 		/* NOTE: two consecutive _different_ faults in the queue. */
1182 		case 11:	/* get endpoint descriptor (ALWAYS STALLS) */
1183 			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
1185 			len = sizeof(struct usb_interface_descriptor);
1188 		/* NOTE: sometimes even a third fault in the queue! */
1189 		case 12:	/* get string 0 descriptor (MAY STALL) */
1190 			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
1191 			/* string == 0, for language IDs */
1192 			len = sizeof(struct usb_interface_descriptor);
1193 			/* may succeed when > 4 languages */
1194 			expected = EREMOTEIO;	/* or EPIPE, if no strings */
1196 		case 13:	/* short read, resembling case 10 */
1197 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1198 			/* last data packet "should" be DATA1, not DATA0 */
1199 			if (udev->speed == USB_SPEED_SUPER)
1202 				len = 1024 - udev->descriptor.bMaxPacketSize0;
1203 			expected = -EREMOTEIO;
1205 		case 14:	/* short read; try to fill the last packet */
1206 			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
1207 			/* device descriptor size == 18 bytes */
1208 			len = udev->descriptor.bMaxPacketSize0;
1209 			if (udev->speed == USB_SPEED_SUPER)
1219 			expected = -EREMOTEIO;
1222 			ERROR(dev, "bogus number of ctrl queue testcases!\n");
1223 			context.status = -EINVAL;
1226 		req.wLength = cpu_to_le16(len);
1227 		urb[i] = u = simple_alloc_urb(udev, pipe, len);
1231 		reqp = kmalloc(sizeof(*reqp), GFP_KERNEL);
1235 		reqp->number = i % NUM_SUBCASES;
1236 		reqp->expected = expected;
1237 		u->setup_packet = (char *) &reqp->setup;
1239 		u->context = &context;
1240 		u->complete = ctrl_complete;
1243 	/* queue the urbs */
1245 	spin_lock_irq(&context.lock);
1246 	for (i = 0; i < param->sglen; i++) {
1247 		context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
1248 		if (context.status != 0) {
1249 			ERROR(dev, "can't submit urb[%d], status %d\n",
/* stop waiting for urbs that never got submitted */
1251 			context.count = context.pending;
1256 	spin_unlock_irq(&context.lock);
1258 	/* FIXME set timer and time out; provide a disconnect hook */
1260 	/* wait for the last one to complete */
1261 	if (context.pending > 0)
1262 		wait_for_completion(&context.complete);
/* cleanup: each urb owns a kmalloc'd subcase via its setup_packet */
1265 	for (i = 0; i < param->sglen; i++) {
1269 		kfree(urb[i]->setup_packet);
1270 		simple_free_urb(urb[i]);
1273 	return context.status;
1278 /*-------------------------------------------------------------------------*/
/* Completion handler for unlink1(): keeps resubmitting so the urb stays
 * queued until unlinked; on terminal status, wakes the waiter.
 * NOTE(review): the branch structure around the resubmit is partly
 * missing from this listing. */
1280 static void unlink1_callback(struct urb *urb)
1282 	int	status = urb->status;
1284 	/* we "know" -EPIPE (stall) never happens */
1286 		status = usb_submit_urb(urb, GFP_ATOMIC);
1288 		urb->status = status;
1289 		complete(urb->context);
/* Submit one urb, then unlink it after a pseudo-random delay, testing
 * both the async (usb_unlink_urb) and sync paths.  Returns 0 when the
 * urb reports the status expected for that unlink style.
 * NOTE(review): listing is decimated — the async/sync branch and retry
 * loop bodies are partly missing from this view. */
1293 static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
1296 	struct completion	completion;
1299 	init_completion(&completion);
1300 	urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
1303 	urb->context = &completion;
1304 	urb->complete = unlink1_callback;
1306 	/* keep the endpoint busy.  there are lots of hc/hcd-internal
1307 	 * states, and testing should get to all of them over time.
1309 	 * FIXME want additional tests for when endpoint is STALLing
1310 	 * due to errors, or is just NAKing requests.
	 */
1312 	retval = usb_submit_urb(urb, GFP_KERNEL);
1314 		dev_err(&dev->intf->dev, "submit fail %d\n", retval);
1318 	/* unlinking that should always work.  variable delay tests more
1319 	 * hcd states and code paths, even with little other system load.
	 */
1321 	msleep(jiffies % (2 * INTERRUPT_RATE));
1323 		while (!completion_done(&completion)) {
1324 			retval = usb_unlink_urb(urb);
1329 				/* we can't unlink urbs while they're completing
1330 				 * or if they've completed, and we haven't
1331 				 * resubmitted. "normal" drivers would prevent
1332 				 * resubmission, but since we're testing unlink
				 */
1335 				ERROR(dev, "unlink retry\n");
1342 				dev_err(&dev->intf->dev,
1343 					"unlink fail %d\n", retval);
1352 	wait_for_completion(&completion);
1353 	retval = urb->status;
1354 	simple_free_urb(urb);
/* async unlink reports -ECONNRESET; sync unlink -ENOENT/-EPERM */
1357 		return (retval == -ECONNRESET) ? 0 : retval - 1000;
1359 		return (retval == -ENOENT || retval == -EPERM) ?
/* Run unlink1() once for the async path, then (on success) the sync path. */
1363 static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
1367 	/* test sync and async paths */
1368 	retval = unlink1(dev, pipe, len, 1);
1370 		retval = unlink1(dev, pipe, len, 0);
1374 /*-------------------------------------------------------------------------*/
/* NOTE(review): fields of struct queued_ctx; declaration lines are
 * missing from this listing. */
1377 	struct completion	complete;
/* Completion handler for unlink_queued(): the two urbs chosen for
 * unlinking (num-4 and num-2) must complete with -ECONNRESET; any other
 * outcome is recorded in ctx->status.  The last completion signals the
 * waiter. */
1384 static void unlink_queued_callback(struct urb *urb)
1386 	int			status = urb->status;
1387 	struct queued_ctx	*ctx = urb->context;
1391 	if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
1392 		if (status == -ECONNRESET)
1394 		/* What error should we report if the URB completed normally? */
1397 		ctx->status = status;
1400 	if (atomic_dec_and_test(&ctx->pending))
1401 		complete(&ctx->complete);
/* Queue num identical bulk urbs sharing one coherent buffer, unlink two
 * of them mid-queue, and verify via unlink_queued_callback() that exactly
 * those two report -ECONNRESET.  Returns the first unexpected status.
 * NOTE(review): listing is decimated — error-path braces and some
 * cleanup statements are missing from this view. */
1404 static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
1407 	struct queued_ctx	ctx;
1408 	struct usb_device	*udev = testdev_to_usbdev(dev);
1412 	int			retval = -ENOMEM;
1414 	init_completion(&ctx.complete);
1415 	atomic_set(&ctx.pending, 1);	/* One more than the actual value */
1419 	buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
1422 	memset(buf, 0, size);
1424 	/* Allocate and init the urbs we'll queue */
1425 	ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
1428 	for (i = 0; i < num; i++) {
1429 		ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
1432 		usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
1433 				unlink_queued_callback, &ctx);
1434 		ctx.urbs[i]->transfer_dma = buf_dma;
1435 		ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1438 	/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
1439 	for (i = 0; i < num; i++) {
1440 		atomic_inc(&ctx.pending);
1441 		retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
1443 			dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
1445 			atomic_dec(&ctx.pending);
1446 			ctx.status = retval;
1451 		usb_unlink_urb(ctx.urbs[num - 4]);
1452 		usb_unlink_urb(ctx.urbs[num - 2]);
1455 			usb_unlink_urb(ctx.urbs[i]);
1458 	if (atomic_dec_and_test(&ctx.pending))		/* The extra count */
1459 		complete(&ctx.complete);
1460 	wait_for_completion(&ctx.complete);
1461 	retval = ctx.status;
1464 	for (i = 0; i < num; i++)
1465 		usb_free_urb(ctx.urbs[i]);
1468 	usb_free_coherent(udev, size, buf, buf_dma);
1472 /*-------------------------------------------------------------------------*/
/*
 * Confirm endpoint @ep is NOT halted: GET_STATUS must report 0 and a
 * one-shot simple_io() transfer on @urb must succeed (expected status 0).
 * Returns 0 when both checks pass; a nonzero error otherwise (the exact
 * error plumbing sits in lines not visible in this view).
 */
1474 static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1479 /* shouldn't look or act halted */
1480 retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1482 ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
1487 ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
     /* must also *act* un-halted: one transfer, expect success */
1490 retval = simple_io(tdev, urb, 1, 0, 0, __func__);
/*
 * Confirm endpoint @ep IS halted: GET_STATUS must report the halt bit
 * set, and transfers must fail with -EPIPE — twice, to confirm the halt
 * persists rather than being cleared by the first failed transfer.
 */
1496 static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1501 /* should look and act halted */
1502 retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1504 ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
1509 ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
     /* a transfer on a halted endpoint must fail with -EPIPE */
1512 retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
1513 if (retval != -EPIPE)
     /* ... and must STILL be halted after that failed attempt */
1515 retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
1516 if (retval != -EPIPE)
/*
 * Full halt round-trip on endpoint @ep using @urb:
 *   not-halted -> SET_FEATURE(ENDPOINT_HALT) -> halted ->
 *   usb_clear_halt() -> not-halted.
 * Exercises both the wire protocol (control requests) and the
 * usb_clear_halt() API.  Returns 0 if every stage verifies.
 */
1521 static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1525 /* shouldn't look or act halted now */
1526 retval = verify_not_halted(tdev, ep, urb);
1530 /* set halt (protocol test only), verify it worked */
1531 retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
1532 USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
1533 USB_ENDPOINT_HALT, ep,
1534 NULL, 0, USB_CTRL_SET_TIMEOUT);
1536 ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
1539 retval = verify_halted(tdev, ep, urb);
1543 /* clear halt (tests API + protocol), verify it worked */
1544 retval = usb_clear_halt(urb->dev, urb->pipe);
1546 ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1549 retval = verify_not_halted(tdev, ep, urb);
1553 /* NOTE: could also verify SET_INTERFACE clear halts ... */
/*
 * TEST 13 worker: run test_halt() on the device's IN pipe (if any, per
 * the lines hidden from this view) and then its OUT pipe.  The scratch
 * URB is sized to the bulk maxpacket for the speed: 1024 bytes for
 * SuperSpeed, 512 otherwise.
 */
1558 static int halt_simple(struct usbtest_dev *dev)
1563 struct usb_device *udev = testdev_to_usbdev(dev);
1565 if (udev->speed == USB_SPEED_SUPER)
1566 urb = simple_alloc_urb(udev, 0, 1024);
1568 urb = simple_alloc_urb(udev, 0, 512);
     /* endpoint address for GET_STATUS: number plus direction bit */
1573 ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
1574 urb->pipe = dev->in_pipe;
1575 retval = test_halt(dev, ep, urb);
1580 if (dev->out_pipe) {
1581 ep = usb_pipeendpoint(dev->out_pipe);
1582 urb->pipe = dev->out_pipe;
1583 retval = test_halt(dev, ep, urb);
1586 simple_free_urb(urb);
1590 /*-------------------------------------------------------------------------*/
1592 /* Control OUT tests use the vendor control requests from Intel's
1593 * USB 2.0 compliance test device: write a buffer, read it back.
1595 * Intel's spec only _requires_ that it work for one packet, which
1596 * is pretty weak. Some HCDs place limits here; most devices will
1597 * need to be able to handle more than one OUT data packet. We'll
1598 * try whatever we're told to try.
/*
 * TESTs 14/21 worker: @count iterations of "write a patterned buffer via
 * vendor request 0x5b, read it back via vendor request 0x5c, verify".
 * @length is the starting transfer size, @vary adjusts it per iteration
 * (logic not visible in this view), and @offset misaligns the buffer
 * (0 for TEST 14, 1 for TEST 21).
 */
1600 static int ctrl_out(struct usbtest_dev *dev,
1601 unsigned count, unsigned length, unsigned vary, unsigned offset)
1607 struct usb_device *udev;
1609 if (length < 1 || length > 0xffff || vary >= length)
1612 buf = kmalloc(length + offset, GFP_KERNEL);
1617 udev = testdev_to_usbdev(dev);
1621 /* NOTE: hardware might well act differently if we pushed it
1622 * with lots back-to-back queued requests.
1624 for (i = 0; i < count; i++) {
1625 /* write patterned data */
1626 for (j = 0; j < len; j++)
1628 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1629 0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
1630 0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
1631 if (retval != len) {
1634 ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
1641 /* read it back -- assuming nothing intervened!! */
1642 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1643 0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
1644 0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
1645 if (retval != len) {
1648 ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
1655 /* fail if we can't verify */
1656 for (j = 0; j < len; j++) {
1657 if (buf[j] != (u8) (i + j)) {
     /* NOTE(review): "(u8) i + j" binds as ((u8)i) + j due to cast
      * precedence, so the "not %d" value printed can differ from the
      * (u8)(i + j) actually compared above — message-only bug,
      * candidate fix: (u8)(i + j). */
1658 ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
1659 j, buf[j], (u8) i + j);
1671 /* [real world] the "zero bytes IN" case isn't really used.
1672 * hardware can easily trip up in this weird case, since its
1673 * status stage is IN, not OUT like other ep0in transfers.
1676 len = realworld ? 1 : 0;
1680 ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
     /* buf was advanced by @offset after allocation (in lines not
      * visible here); free the original allocation base */
1683 kfree(buf - offset);
1687 /*-------------------------------------------------------------------------*/
1689 /* ISO tests ... mimics common usage
1690 * - buffer length is split into N packets (mostly maxpacket sized)
1691 * - multi-buffers according to sglen
/* Shared state for one test_iso_queue() run; protected by a spinlock
 * declared in the fields hidden from this view.  iso_callback() updates
 * the counters and signals @done when the last URB retires. */
1694 struct iso_context {
1698 struct completion done; /* signalled when pending drops to 0 */
1700 unsigned long errors; /* failed-packet tally */
1701 unsigned long packet_count; /* total iso packets completed */
1702 struct usbtest_dev *dev; /* for error logging */
/*
 * Iso URB completion handler (runs in interrupt context, hence the plain
 * spin_lock and GFP_ATOMIC resubmission).  Tallies per-packet errors,
 * counts short transfers and guard-byte corruption as errors, resubmits
 * the URB while more iterations remain, and completes ctx->done when the
 * last pending URB retires.
 */
1705 static void iso_callback(struct urb *urb)
1707 struct iso_context *ctx = urb->context;
1709 spin_lock(&ctx->lock);
1712 ctx->packet_count += urb->number_of_packets;
1713 if (urb->error_count > 0)
1714 ctx->errors += urb->error_count;
1715 else if (urb->status != 0)
     /* whole-URB failure: charge every packet in it */
1716 ctx->errors += urb->number_of_packets;
1717 else if (urb->actual_length != urb->transfer_buffer_length)
1719 else if (check_guard_bytes(ctx->dev, urb) != 0)
     /* keep the ring full while iterations remain and nothing failed */
1722 if (urb->status == 0 && ctx->count > (ctx->pending - 1)
1723 && !ctx->submit_error) {
1724 int status = usb_submit_urb(urb, GFP_ATOMIC);
1729 dev_err(&ctx->dev->intf->dev,
1730 "iso resubmit err %d\n",
1733 case -ENODEV: /* disconnected */
1734 case -ESHUTDOWN: /* endpoint disabled */
     /* fatal: stop all further resubmission attempts */
1735 ctx->submit_error = 1;
1741 if (ctx->pending == 0) {
1743 dev_err(&ctx->dev->intf->dev,
1744 "iso test, %lu errors out of %lu\n",
1745 ctx->errors, ctx->packet_count);
1746 complete(&ctx->done);
1749 spin_unlock(&ctx->lock);
/*
 * Allocate and initialize one isochronous URB carrying @bytes split into
 * maxpacket-sized frames, with a DMA-coherent transfer buffer that may
 * be misaligned by @offset (the region before the payload is filled with
 * GUARD_BYTE so overwrites are detectable).  Returns NULL on bad args or
 * allocation failure.  urb->context is left for the caller to set.
 */
1752 static struct urb *iso_alloc_urb(
1753 struct usb_device *udev,
1755 struct usb_endpoint_descriptor *desc,
1761 unsigned i, maxp, packets;
1763 if (bytes < 0 || !desc)
     /* wMaxPacketSize: bits 10:0 are the packet size, bits 12:11 give
      * (additional transactions per microframe - 1) for high bandwidth */
1765 maxp = 0x7ff & usb_endpoint_maxp(desc);
1766 maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
1767 packets = DIV_ROUND_UP(bytes, maxp);
1769 urb = usb_alloc_urb(packets, GFP_KERNEL);
1775 urb->number_of_packets = packets;
1776 urb->transfer_buffer_length = bytes;
1777 urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
1779 &urb->transfer_dma);
1780 if (!urb->transfer_buffer) {
     /* guard region before the (possibly unaligned) payload */
1785 memset(urb->transfer_buffer, GUARD_BYTE, offset);
1786 urb->transfer_buffer += offset;
1787 urb->transfer_dma += offset;
1789 /* For inbound transfers use guard byte so that test fails if
1790 data not correctly copied */
1791 memset(urb->transfer_buffer,
1792 usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
1795 for (i = 0; i < packets; i++) {
1796 /* here, only the last packet will be short */
1797 urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
1798 bytes -= urb->iso_frame_desc[i].length;
1800 urb->iso_frame_desc[i].offset = maxp * i;
1803 urb->complete = iso_callback;
1804 /* urb->context = SET BY CALLER */
1805 urb->interval = 1 << (desc->bInterval - 1);
1806 urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
/*
 * TESTs 15/16/22/23 worker: keep a ring of up to param->sglen (max 10)
 * iso URBs in flight on @pipe for param->iterations rounds; iso_callback()
 * does the resubmission and bookkeeping.  @offset selects the unaligned
 * variant.  A run "fails" only on submit errors or an error rate above
 * 10%, since occasional iso packet loss is expected.
 */
1811 test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
1812 int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
1814 struct iso_context context;
1815 struct usb_device *udev;
1817 unsigned long packets = 0;
1819 struct urb *urbs[10]; /* FIXME no limit */
1821 if (param->sglen > 10)
1824 memset(&context, 0, sizeof(context));
1825 context.count = param->iterations * param->sglen;
1827 init_completion(&context.done);
1828 spin_lock_init(&context.lock);
1830 memset(urbs, 0, sizeof(urbs));
1831 udev = testdev_to_usbdev(dev);
1832 dev_info(&dev->intf->dev,
1833 "... iso period %d %sframes, wMaxPacket %04x\n",
1834 1 << (desc->bInterval - 1),
1835 (udev->speed == USB_SPEED_HIGH) ? "micro" : "",
1836 usb_endpoint_maxp(desc));
1838 for (i = 0; i < param->sglen; i++) {
1839 urbs[i] = iso_alloc_urb(udev, pipe, desc,
1840 param->length, offset);
1845 packets += urbs[i]->number_of_packets;
1846 urbs[i]->context = &context;
1848 packets *= param->iterations;
1849 dev_info(&dev->intf->dev,
1850 "... total %lu msec (%lu packets)\n",
     /* high-speed intervals are in 125us microframes, hence /8 */
1851 (packets * (1 << (desc->bInterval - 1)))
1852 / ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
     /* initial submissions under the lock so iso_callback() sees a
      * consistent pending count */
1855 spin_lock_irq(&context.lock);
1856 for (i = 0; i < param->sglen; i++) {
1858 status = usb_submit_urb(urbs[i], GFP_ATOMIC);
1860 ERROR(dev, "submit iso[%d], error %d\n", i, status);
1862 spin_unlock_irq(&context.lock);
     /* free the URBs that never got submitted */
1866 simple_free_urb(urbs[i]);
1869 context.submit_error = 1;
1873 spin_unlock_irq(&context.lock);
1875 wait_for_completion(&context.done);
1877 for (i = 0; i < param->sglen; i++) {
1879 simple_free_urb(urbs[i]);
1882 * Isochronous transfers are expected to fail sometimes. As an
1883 * arbitrary limit, we will report an error if any submissions
1884 * fail or if the transfer failure rate is > 10%.
1888 else if (context.submit_error)
1890 else if (context.errors > context.packet_count / 10)
1895 for (i = 0; i < param->sglen; i++) {
1897 simple_free_urb(urbs[i]);
/*
 * TESTs 17-20 worker: run simple_io() @iterations times on @pipe using a
 * deliberately misaligned transfer buffer (the trailing '1' argument to
 * usbtest_alloc_urb() requests the odd-address variant), with the given
 * @transfer_flags selecting core-mapped vs. premapped DMA.
 */
1902 static int test_unaligned_bulk(
1903 struct usbtest_dev *tdev,
1907 unsigned transfer_flags,
1911 struct urb *urb = usbtest_alloc_urb(
1912 testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);
1917 retval = simple_io(tdev, urb, iterations, 0, 0, label);
1918 simple_free_urb(urb);
1922 /*-------------------------------------------------------------------------*/
1924 /* We only have this one interface to user space, through usbfs.
1925 * User mode code can scan usbfs to find N different devices (maybe on
1926 * different busses) to use when testing, and allocate one thread per
1927 * test. So discovery is simplified, and we have no device naming issues.
1929 * Don't use these only as stress/load tests. Use them along with
1930 * other USB bus activity: plugging, unplugging, mousing, mp3 playback,
1931 * video capture, and so on. Run different tests at different times, in
1932 * different sequences. Nothing here should interact with other devices,
1933 * except indirectly by consuming USB bandwidth and CPU resources for test
1934 * threads and request completion. But the only way to know that for sure
1935 * is to test when HC queues are in use by many devices.
1937 * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
1938 * it locks out usbcore in certain code paths. Notably, if you disconnect
1939 * the device-under-test, khubd will block forever waiting for the
1940 * ioctl to complete ... so that usb_disconnect() can abort the pending
1941 * urbs and then call usbtest_disconnect(). To abort a test, you're best
1942 * off just killing the userspace task and waiting for it to exit.
/*
 * The driver's single userspace entry point, reached through usbfs
 * ioctl(USBTEST_REQUEST).  Validates the request, serializes tests with
 * dev->lock, forces the configured altsetting, dispatches on
 * param->test_num (TEST 0..24 below), and reports the wall-clock
 * duration back through param->duration.
 *
 * Fix in this revision: the do_gettimeofday() call at the end had its
 * "&param" argument corrupted into the mojibake "¶m" (an HTML
 * "&para;"-style mangling); restored to &param->duration.
 *
 * NOTE(review): this view of the file is missing many lines (case
 * labels, break statements, error paths), so the comments below mark
 * only what the visible lines establish.
 */
1946 usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
1948 struct usbtest_dev *dev = usb_get_intfdata(intf);
1949 struct usb_device *udev = testdev_to_usbdev(dev);
1950 struct usbtest_param *param = buf;
1951 int retval = -EOPNOTSUPP;
1953 struct scatterlist *sg;
1954 struct usb_sg_request req;
1955 struct timeval start;
1958 /* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
1960 pattern = mod_pattern;
1962 if (code != USBTEST_REQUEST)
1965 if (param->iterations <= 0)
     /* one test at a time per device; interruptible so userspace can
      * bail out with a signal */
1968 if (mutex_lock_interruptible(&dev->lock))
1969 return -ERESTARTSYS;
1971 /* FIXME: What if a system sleep starts while a test is running? */
1973 /* some devices, like ez-usb default devices, need a non-default
1974 * altsetting to have any active endpoints. some tests change
1975 * altsettings; force a default so most tests don't need to check.
1977 if (dev->info->alt >= 0) {
1980 if (intf->altsetting->desc.bInterfaceNumber) {
1981 mutex_unlock(&dev->lock);
1984 res = set_altsetting(dev, dev->info->alt);
1987 "set altsetting to %d failed, %d\n",
1988 dev->info->alt, res);
1989 mutex_unlock(&dev->lock);
1995 * Just a bunch of test cases that every HCD is expected to handle.
1997 * Some may need specific firmware, though it'd be good to have
1998 * one firmware image to handle all the test cases.
2000 * FIXME add more tests! cancel requests, verify the data, control
2001 * queueing, concurrent read+write threads, and so on.
2003 do_gettimeofday(&start);
2004 switch (param->test_num) {
2007 dev_info(&intf->dev, "TEST 0: NOP\n");
2011 /* Simple non-queued bulk I/O tests */
2013 if (dev->out_pipe == 0)
2015 dev_info(&intf->dev,
2016 "TEST 1: write %d bytes %u times\n",
2017 param->length, param->iterations);
2018 urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
2023 /* FIRMWARE: bulk sink (maybe accepts short writes) */
2024 retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
2025 simple_free_urb(urb);
2028 if (dev->in_pipe == 0)
2030 dev_info(&intf->dev,
2031 "TEST 2: read %d bytes %u times\n",
2032 param->length, param->iterations);
2033 urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
2038 /* FIRMWARE: bulk source (maybe generates short writes) */
2039 retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
2040 simple_free_urb(urb);
2043 if (dev->out_pipe == 0 || param->vary == 0)
2045 dev_info(&intf->dev,
2046 "TEST 3: write/%d 0..%d bytes %u times\n",
2047 param->vary, param->length, param->iterations);
2048 urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
2053 /* FIRMWARE: bulk sink (maybe accepts short writes) */
2054 retval = simple_io(dev, urb, param->iterations, param->vary,
2056 simple_free_urb(urb);
2059 if (dev->in_pipe == 0 || param->vary == 0)
2061 dev_info(&intf->dev,
2062 "TEST 4: read/%d 0..%d bytes %u times\n",
2063 param->vary, param->length, param->iterations);
2064 urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
2069 /* FIRMWARE: bulk source (maybe generates short writes) */
2070 retval = simple_io(dev, urb, param->iterations, param->vary,
2072 simple_free_urb(urb);
2075 /* Queued bulk I/O tests */
2077 if (dev->out_pipe == 0 || param->sglen == 0)
2079 dev_info(&intf->dev,
2080 "TEST 5: write %d sglists %d entries of %d bytes\n",
2082 param->sglen, param->length);
2083 sg = alloc_sglist(param->sglen, param->length, 0);
2088 /* FIRMWARE: bulk sink (maybe accepts short writes) */
2089 retval = perform_sglist(dev, param->iterations, dev->out_pipe,
2090 &req, sg, param->sglen);
2091 free_sglist(sg, param->sglen);
2095 if (dev->in_pipe == 0 || param->sglen == 0)
2097 dev_info(&intf->dev,
2098 "TEST 6: read %d sglists %d entries of %d bytes\n",
2100 param->sglen, param->length);
2101 sg = alloc_sglist(param->sglen, param->length, 0);
2106 /* FIRMWARE: bulk source (maybe generates short writes) */
2107 retval = perform_sglist(dev, param->iterations, dev->in_pipe,
2108 &req, sg, param->sglen);
2109 free_sglist(sg, param->sglen);
2112 if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
2114 dev_info(&intf->dev,
2115 "TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
2116 param->vary, param->iterations,
2117 param->sglen, param->length);
2118 sg = alloc_sglist(param->sglen, param->length, param->vary);
2123 /* FIRMWARE: bulk sink (maybe accepts short writes) */
2124 retval = perform_sglist(dev, param->iterations, dev->out_pipe,
2125 &req, sg, param->sglen);
2126 free_sglist(sg, param->sglen);
2129 if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
2131 dev_info(&intf->dev,
2132 "TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
2133 param->vary, param->iterations,
2134 param->sglen, param->length);
2135 sg = alloc_sglist(param->sglen, param->length, param->vary);
2140 /* FIRMWARE: bulk source (maybe generates short writes) */
2141 retval = perform_sglist(dev, param->iterations, dev->in_pipe,
2142 &req, sg, param->sglen);
2143 free_sglist(sg, param->sglen);
2146 /* non-queued sanity tests for control (chapter 9 subset) */
2149 dev_info(&intf->dev,
2150 "TEST 9: ch9 (subset) control tests, %d times\n",
2152 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2153 retval = ch9_postconfig(dev);
2155 dev_err(&intf->dev, "ch9 subset failed, "
2156 "iterations left %d\n", i);
2159 /* queued control messaging */
2162 dev_info(&intf->dev,
2163 "TEST 10: queue %d control calls, %d times\n",
2166 retval = test_ctrl_queue(dev, param);
2169 /* simple non-queued unlinks (ring with one urb) */
2171 if (dev->in_pipe == 0 || !param->length)
2174 dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
2175 param->iterations, param->length);
2176 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2177 retval = unlink_simple(dev, dev->in_pipe,
2180 dev_err(&intf->dev, "unlink reads failed %d, "
2181 "iterations left %d\n", retval, i);
2184 if (dev->out_pipe == 0 || !param->length)
2187 dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
2188 param->iterations, param->length);
2189 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2190 retval = unlink_simple(dev, dev->out_pipe,
2193 dev_err(&intf->dev, "unlink writes failed %d, "
2194 "iterations left %d\n", retval, i);
2199 if (dev->out_pipe == 0 && dev->in_pipe == 0)
2202 dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
2204 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2205 retval = halt_simple(dev);
2208 ERROR(dev, "halts failed, iterations left %d\n", i);
2211 /* control write tests */
2213 if (!dev->info->ctrl_out)
2215 dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
2217 realworld ? 1 : 0, param->length,
2219 retval = ctrl_out(dev, param->iterations,
2220 param->length, param->vary, 0);
2223 /* iso write tests */
2225 if (dev->out_iso_pipe == 0 || param->sglen == 0)
2227 dev_info(&intf->dev,
2228 "TEST 15: write %d iso, %d entries of %d bytes\n",
2230 param->sglen, param->length);
2231 /* FIRMWARE: iso sink */
2232 retval = test_iso_queue(dev, param,
2233 dev->out_iso_pipe, dev->iso_out, 0);
2236 /* iso read tests */
2238 if (dev->in_iso_pipe == 0 || param->sglen == 0)
2240 dev_info(&intf->dev,
2241 "TEST 16: read %d iso, %d entries of %d bytes\n",
2243 param->sglen, param->length);
2244 /* FIRMWARE: iso source */
2245 retval = test_iso_queue(dev, param,
2246 dev->in_iso_pipe, dev->iso_in, 0);
2249 /* FIXME scatterlist cancel (needs helper thread) */
2251 /* Tests for bulk I/O using DMA mapping by core and odd address */
2253 if (dev->out_pipe == 0)
2255 dev_info(&intf->dev,
2256 "TEST 17: write odd addr %d bytes %u times core map\n",
2257 param->length, param->iterations);
2259 retval = test_unaligned_bulk(
2261 param->length, param->iterations,
2266 if (dev->in_pipe == 0)
2268 dev_info(&intf->dev,
2269 "TEST 18: read odd addr %d bytes %u times core map\n",
2270 param->length, param->iterations);
2272 retval = test_unaligned_bulk(
2274 param->length, param->iterations,
2278 /* Tests for bulk I/O using premapped coherent buffer and odd address */
2280 if (dev->out_pipe == 0)
2282 dev_info(&intf->dev,
2283 "TEST 19: write odd addr %d bytes %u times premapped\n",
2284 param->length, param->iterations);
2286 retval = test_unaligned_bulk(
2288 param->length, param->iterations,
2289 URB_NO_TRANSFER_DMA_MAP, "test19");
2293 if (dev->in_pipe == 0)
2295 dev_info(&intf->dev,
2296 "TEST 20: read odd addr %d bytes %u times premapped\n",
2297 param->length, param->iterations);
2299 retval = test_unaligned_bulk(
2301 param->length, param->iterations,
2302 URB_NO_TRANSFER_DMA_MAP, "test20");
2305 /* control write tests with unaligned buffer */
2307 if (!dev->info->ctrl_out)
2309 dev_info(&intf->dev,
2310 "TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
2312 realworld ? 1 : 0, param->length,
2314 retval = ctrl_out(dev, param->iterations,
2315 param->length, param->vary, 1);
2318 /* unaligned iso tests */
2320 if (dev->out_iso_pipe == 0 || param->sglen == 0)
2322 dev_info(&intf->dev,
2323 "TEST 22: write %d iso odd, %d entries of %d bytes\n",
2325 param->sglen, param->length);
2326 retval = test_iso_queue(dev, param,
2327 dev->out_iso_pipe, dev->iso_out, 1);
2331 if (dev->in_iso_pipe == 0 || param->sglen == 0)
2333 dev_info(&intf->dev,
2334 "TEST 23: read %d iso odd, %d entries of %d bytes\n",
2336 param->sglen, param->length);
2337 retval = test_iso_queue(dev, param,
2338 dev->in_iso_pipe, dev->iso_in, 1);
2341 /* unlink URBs from a bulk-OUT queue */
2343 if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
2346 dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
2347 "%d %d-byte writes\n",
2348 param->iterations, param->sglen, param->length);
2349 for (i = param->iterations; retval == 0 && i > 0; --i) {
2350 retval = unlink_queued(dev, dev->out_pipe,
2351 param->sglen, param->length);
2354 "unlink queued writes failed %d, "
2355 "iterations left %d\n", retval, i);
     /* report elapsed wall-clock time back to userspace, normalizing
      * a negative usec remainder after the subtraction */
2362 do_gettimeofday(&param->duration);
2363 param->duration.tv_sec -= start.tv_sec;
2364 param->duration.tv_usec -= start.tv_usec;
2365 if (param->duration.tv_usec < 0) {
2366 param->duration.tv_usec += 1000 * 1000;
2367 param->duration.tv_sec -= 1;
2369 mutex_unlock(&dev->lock);
2373 /*-------------------------------------------------------------------------*/
/* Module parameters for device selection and transfer-type override.
 * force_interrupt makes probe() use interrupt pipes instead of bulk;
 * vendor/product let the GENERIC binding match an arbitrary device. */
2375 static unsigned force_interrupt;
2376 module_param(force_interrupt, uint, 0);
2377 MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
2380 static unsigned short vendor;
2381 module_param(vendor, ushort, 0);
2382 MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
2384 static unsigned short product;
2385 module_param(product, ushort, 0);
2386 MODULE_PARM_DESC(product, "product code (from vendor)");
/*
 * Bind to a matched interface: honor the vendor/product module-param
 * match for the GENERIC entry, allocate the per-device state and scratch
 * buffer, pick interrupt vs. bulk/iso pipes, and log what this device
 * can test.  NOTE(review): error-unwind paths and several assignments
 * fall in lines not visible in this view.
 */
2390 usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
2392 struct usb_device *udev;
2393 struct usbtest_dev *dev;
2394 struct usbtest_info *info;
2395 char *rtest, *wtest;
2396 char *irtest, *iwtest;
2398 udev = interface_to_usbdev(intf);
2401 /* specify devices by module parameters? */
2402 if (id->match_flags == 0) {
2403 /* vendor match required, product match optional */
2404 if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
2406 if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
2408 dev_info(&intf->dev, "matched module params, "
2409 "vend=0x%04x prod=0x%04x\n",
2410 le16_to_cpu(udev->descriptor.idVendor),
2411 le16_to_cpu(udev->descriptor.idProduct));
2415 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2418 info = (struct usbtest_info *) id->driver_info;
2420 mutex_init(&dev->lock);
2424 /* cacheline-aligned scratch for i/o */
2425 dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
2426 if (dev->buf == NULL) {
2431 /* NOTE this doesn't yet test the handful of differences that are
2432 * visible with high speed interrupts: bigger maxpacket (1K) and
2433 * "high bandwidth" modes (up to 3 packets/uframe).
2436 irtest = iwtest = "";
     /* low-speed devices have no bulk endpoints; use interrupt pipes */
2437 if (force_interrupt || udev->speed == USB_SPEED_LOW) {
2439 dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
2443 dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
2444 wtest = " intr-out";
2447 if (override_alt >= 0 || info->autoconf) {
2450 status = get_endpoints(dev, intf);
2452 WARNING(dev, "couldn't get endpoints, %d\n",
2458 /* may find bulk or ISO pipes */
2461 dev->in_pipe = usb_rcvbulkpipe(udev,
2464 dev->out_pipe = usb_sndbulkpipe(udev,
2470 wtest = " bulk-out";
2471 if (dev->in_iso_pipe)
2473 if (dev->out_iso_pipe)
2474 iwtest = " iso-out";
2477 usb_set_intfdata(intf, dev);
2478 dev_info(&intf->dev, "%s\n", info->name);
2479 dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
2480 usb_speed_string(udev->speed),
2481 info->ctrl_out ? " in/out" : "",
2484 info->alt >= 0 ? " (+alt)" : "");
/* PM hooks: bodies not visible in this view; presumably trivial
 * success returns so suspend/resume don't block on the test driver
 * — TODO confirm against the full file. */
2488 static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
2493 static int usbtest_resume(struct usb_interface *intf)
/* Unbind: detach the per-device state from the interface.  The kfree of
 * dev and dev->buf falls in lines not visible here — TODO confirm. */
2499 static void usbtest_disconnect(struct usb_interface *intf)
2501 struct usbtest_dev *dev = usb_get_intfdata(intf);
2503 usb_set_intfdata(intf, NULL);
2504 dev_dbg(&intf->dev, "disconnect\n");
2508 /* Basic testing only needs a device that can source or sink bulk traffic.
2509 * Any device can test control transfers (default with GENERIC binding).
2511 * Several entries work with the default EP0 implementation that's built
2512 * into EZ-USB chips. There's a default vendor ID which can be overridden
2513 * by (very) small config EEPROMS, but otherwise all these devices act
2514 * identically until firmware is loaded: only EP0 works. It turns out
2515 * to be easy to make other endpoints work, without modifying that EP0
2516 * behavior. For now, we expect that kind of firmware.
/* Per-device capability tables referenced by id_table[].driver_info.
 * Most field initializers (alt, ep_in/ep_out, ctrl_out) fall in lines
 * not visible in this view. */
2519 /* an21xx or fx versions of ez-usb */
2520 static struct usbtest_info ez1_info = {
2521 .name = "EZ-USB device",
2527 /* fx2 version of ez-usb */
2528 static struct usbtest_info ez2_info = {
2529 .name = "FX2 device",
2535 /* ezusb family device with dedicated usb test firmware,
2537 static struct usbtest_info fw_info = {
2538 .name = "usb test device",
2542 .autoconf = 1, /* iso and ctrl_out need autoconf */
2544 .iso = 1, /* iso_ep's are #8 in/out */
2547 /* peripheral running Linux and 'zero.c' test firmware, or
2548 * its user-mode cousin. different versions of this use
2549 * different hardware with the same vendor/product codes.
2550 * host side MUST rely on the endpoint descriptors.
2552 static struct usbtest_info gz_info = {
2553 .name = "Linux gadget zero",
2560 static struct usbtest_info um_info = {
2561 .name = "Linux user mode test driver",
2566 static struct usbtest_info um2_info = {
2567 .name = "Linux user mode ISO test driver",
2574 /* this is a nice source of high speed bulk data;
2575 * uses an FX2, with firmware provided in the device
2577 static struct usbtest_info ibot2_info = {
2578 .name = "iBOT2 webcam",
2585 /* we can use any device to test control traffic */
2586 static struct usbtest_info generic_info = {
2587 .name = "Generic USB device",
/* Match table: each entry points at the usbtest_info describing what the
 * device can test.  Entries guarded by IBOT2/KEYSPAN_19Qi/GENERIC ifdefs
 * (the #ifdef lines themselves are not visible in this view). */
2593 static const struct usb_device_id id_table[] = {
2595 /*-------------------------------------------------------------*/
2597 /* EZ-USB devices which download firmware to replace (or in our
2598 * case augment) the default device implementation.
2601 /* generic EZ-USB FX controller */
2602 { USB_DEVICE(0x0547, 0x2235),
2603 .driver_info = (unsigned long) &ez1_info,
2606 /* CY3671 development board with EZ-USB FX */
2607 { USB_DEVICE(0x0547, 0x0080),
2608 .driver_info = (unsigned long) &ez1_info,
2611 /* generic EZ-USB FX2 controller (or development board) */
2612 { USB_DEVICE(0x04b4, 0x8613),
2613 .driver_info = (unsigned long) &ez2_info,
2616 /* re-enumerated usb test device firmware */
2617 { USB_DEVICE(0xfff0, 0xfff0),
2618 .driver_info = (unsigned long) &fw_info,
2621 /* "Gadget Zero" firmware runs under Linux */
2622 { USB_DEVICE(0x0525, 0xa4a0),
2623 .driver_info = (unsigned long) &gz_info,
2626 /* so does a user-mode variant */
2627 { USB_DEVICE(0x0525, 0xa4a4),
2628 .driver_info = (unsigned long) &um_info,
2631 /* ... and a user-mode variant that talks iso */
2632 { USB_DEVICE(0x0525, 0xa4a3),
2633 .driver_info = (unsigned long) &um2_info,
2637 /* Keyspan 19qi uses an21xx (original EZ-USB) */
2638 /* this does not coexist with the real Keyspan 19qi driver! */
2639 { USB_DEVICE(0x06cd, 0x010b),
2640 .driver_info = (unsigned long) &ez1_info,
2644 /*-------------------------------------------------------------*/
2647 /* iBOT2 makes a nice source of high speed bulk-in data */
2648 /* this does not coexist with a real iBOT2 driver! */
2649 { USB_DEVICE(0x0b62, 0x0059),
2650 .driver_info = (unsigned long) &ibot2_info,
2654 /*-------------------------------------------------------------*/
2657 /* module params can specify devices to use for control tests */
2658 { .driver_info = (unsigned long) &generic_info, },
2661 /*-------------------------------------------------------------*/
2665 MODULE_DEVICE_TABLE(usb, id_table);
/* Driver registration: note .unlocked_ioctl is the usbfs hook that
 * routes USBTEST_REQUEST ioctls into usbtest_ioctl(). */
2667 static struct usb_driver usbtest_driver = {
2669 .id_table = id_table,
2670 .probe = usbtest_probe,
2671 .unlocked_ioctl = usbtest_ioctl,
2672 .disconnect = usbtest_disconnect,
2673 .suspend = usbtest_suspend,
2674 .resume = usbtest_resume,
2677 /*-------------------------------------------------------------------------*/
/* Module init: log the module-param match values and register the
 * driver with usbcore. */
2679 static int __init usbtest_init(void)
2683 pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
2685 return usb_register(&usbtest_driver);
2687 module_init(usbtest_init);
/* Module exit: unregister from usbcore (disconnect runs per bound
 * interface as a consequence). */
2689 static void __exit usbtest_exit(void)
2691 usb_deregister(&usbtest_driver);
2693 module_exit(usbtest_exit);
2695 MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
2696 MODULE_LICENSE("GPL");