1 #include <linux/kernel.h>
2 #include <linux/errno.h>
3 #include <linux/init.h>
4 #include <linux/slab.h>
5 #include <linux/mm.h>
6 #include <linux/module.h>
7 #include <linux/moduleparam.h>
8 #include <linux/scatterlist.h>
9 #include <linux/mutex.h>
10 #include <linux/timer.h>
11 #include <linux/usb.h>
12 
13 #define SIMPLE_IO_TIMEOUT	10000	/* in milliseconds */
14 
15 /*-------------------------------------------------------------------------*/
16 
17 /* FIXME make these public somewhere; usbdevfs.h? */
18 struct usbtest_param {
19 	/* inputs */
20 	unsigned		test_num;	/* 0..(TEST_CASES-1) */
21 	unsigned		iterations;
22 	unsigned		length;
23 	unsigned		vary;
24 	unsigned		sglen;
25 
26 	/* outputs */
27 	struct timeval		duration;
28 };
29 #define USBTEST_REQUEST	_IOWR('U', 100, struct usbtest_param)
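/* Userspace reaches this ioctl through usbfs.  A minimal sketch, assuming
 * "usbfs_fd" is the already-opened usbfs file for the device and the test
 * interface has been claimed (the in-tree testusb tool works this way):
 *
 *	struct usbtest_param param = {
 *		.test_num = 1, .iterations = 1000, .length = 512, .sglen = 32,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno = 0, .ioctl_code = USBTEST_REQUEST, .data = &param,
 *	};
 *	ioctl(usbfs_fd, USBDEVFS_IOCTL, &wrapper);
 */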
30 
31 /*-------------------------------------------------------------------------*/
32 
33 #define	GENERIC		/* let probe() bind using module params */
34 
35 /* Some devices that can be used for testing will have "real" drivers.
36  * Entries for those need to be enabled here by hand, after disabling
37  * that "real" driver.
38  */
39 //#define	IBOT2		/* grab iBOT2 webcams */
40 //#define	KEYSPAN_19Qi	/* grab un-renumerated serial adapter */
41 
42 /*-------------------------------------------------------------------------*/
43 
44 struct usbtest_info {
45 	const char		*name;
46 	u8			ep_in;		/* bulk/intr source */
47 	u8			ep_out;		/* bulk/intr sink */
48 	unsigned		autoconf:1;
49 	unsigned		ctrl_out:1;
50 	unsigned		iso:1;		/* try iso in/out */
51 	int			alt;
52 };
53 
54 /* this is accessed only through usbfs ioctl calls.
55  * one ioctl to issue a test ... one lock per device.
56  * tests create other threads if they need them.
57  * urbs and buffers are allocated dynamically,
58  * and data generated deterministically.
59  */
60 struct usbtest_dev {
61 	struct usb_interface	*intf;
62 	struct usbtest_info	*info;
63 	int			in_pipe;
64 	int			out_pipe;
65 	int			in_iso_pipe;
66 	int			out_iso_pipe;
67 	struct usb_endpoint_descriptor	*iso_in, *iso_out;
68 	struct mutex		lock;
69 
70 #define TBUF_SIZE	256
71 	u8			*buf;
72 };
73 
74 static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
75 {
76 	return interface_to_usbdev(test->intf);
77 }
78 
79 /* set up all urbs so they can be used with either bulk or interrupt */
80 #define	INTERRUPT_RATE		1	/* msec/transfer */
81 
82 #define ERROR(tdev, fmt, args...) \
83 	dev_err(&(tdev)->intf->dev , fmt , ## args)
84 #define WARNING(tdev, fmt, args...) \
85 	dev_warn(&(tdev)->intf->dev , fmt , ## args)
86 
87 #define GUARD_BYTE	0xA5
88 
89 /*-------------------------------------------------------------------------*/
90 
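/* Scan the interface's altsettings for the first one offering a bulk
 * IN/OUT pair (or iso endpoints, when the device info asks for iso),
 * select it if needed, and record the matching pipes in "dev".
 */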
91 static int
92 get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
93 {
94 	int				tmp;
95 	struct usb_host_interface	*alt;
96 	struct usb_host_endpoint	*in, *out;
97 	struct usb_host_endpoint	*iso_in, *iso_out;
98 	struct usb_device		*udev;
99 
100 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
101 		unsigned	ep;
102 
103 		in = out = NULL;
104 		iso_in = iso_out = NULL;
105 		alt = intf->altsetting + tmp;
106 
107 		/* take the first altsetting with in-bulk + out-bulk;
108 		 * ignore other endpoints and altsettings.
109 		 */
110 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
111 			struct usb_host_endpoint	*e;
112 
113 			e = alt->endpoint + ep;
114 			switch (e->desc.bmAttributes) {
115 			case USB_ENDPOINT_XFER_BULK:
116 				break;
117 			case USB_ENDPOINT_XFER_ISOC:
118 				if (dev->info->iso)
119 					goto try_iso;
120 				/* FALLTHROUGH */
121 			default:
122 				continue;
123 			}
124 			if (usb_endpoint_dir_in(&e->desc)) {
125 				if (!in)
126 					in = e;
127 			} else {
128 				if (!out)
129 					out = e;
130 			}
131 			continue;
132 try_iso:
133 			if (usb_endpoint_dir_in(&e->desc)) {
134 				if (!iso_in)
135 					iso_in = e;
136 			} else {
137 				if (!iso_out)
138 					iso_out = e;
139 			}
140 		}
141 		if ((in && out)  ||  iso_in || iso_out)
142 			goto found;
143 	}
144 	return -EINVAL;
145 
146 found:
147 	udev = testdev_to_usbdev(dev);
148 	if (alt->desc.bAlternateSetting != 0) {
149 		tmp = usb_set_interface(udev,
150 				alt->desc.bInterfaceNumber,
151 				alt->desc.bAlternateSetting);
152 		if (tmp < 0)
153 			return tmp;
154 	}
155 
156 	if (in) {
157 		dev->in_pipe = usb_rcvbulkpipe(udev,
158 			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
159 		dev->out_pipe = usb_sndbulkpipe(udev,
160 			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
161 	}
162 	if (iso_in) {
163 		dev->iso_in = &iso_in->desc;
164 		dev->in_iso_pipe = usb_rcvisocpipe(udev,
165 				iso_in->desc.bEndpointAddress
166 					& USB_ENDPOINT_NUMBER_MASK);
167 	}
168 
169 	if (iso_out) {
170 		dev->iso_out = &iso_out->desc;
171 		dev->out_iso_pipe = usb_sndisocpipe(udev,
172 				iso_out->desc.bEndpointAddress
173 					& USB_ENDPOINT_NUMBER_MASK);
174 	}
175 	return 0;
176 }
177 
178 /*-------------------------------------------------------------------------*/
179 
180 /* Support for testing basic non-queued I/O streams.
181  *
182  * These just package urbs as requests that can be easily canceled.
183  * Each urb's data buffer is dynamically allocated; callers can fill
184  * them with non-zero test data (or test for it) when appropriate.
185  */
186 
187 static void simple_callback(struct urb *urb)
188 {
189 	complete(urb->context);
190 }
191 
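/* Allocate a bulk/interrupt urb and its transfer buffer.  A nonzero
 * "offset" deliberately misaligns the buffer and fills the skipped bytes
 * with GUARD_BYTE, so unaligned-transfer tests can detect out-of-bounds
 * writes.
 */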
192 static struct urb *usbtest_alloc_urb(
193 	struct usb_device	*udev,
194 	int			pipe,
195 	unsigned long		bytes,
196 	unsigned		transfer_flags,
197 	unsigned		offset)
198 {
199 	struct urb		*urb;
200 
201 	urb = usb_alloc_urb(0, GFP_KERNEL);
202 	if (!urb)
203 		return urb;
204 	usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
205 	urb->interval = (udev->speed == USB_SPEED_HIGH)
206 			? (INTERRUPT_RATE << 3)
207 			: INTERRUPT_RATE;
208 	urb->transfer_flags = transfer_flags;
209 	if (usb_pipein(pipe))
210 		urb->transfer_flags |= URB_SHORT_NOT_OK;
211 
212 	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
213 		urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
214 			GFP_KERNEL, &urb->transfer_dma);
215 	else
216 		urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
217 
218 	if (!urb->transfer_buffer) {
219 		usb_free_urb(urb);
220 		return NULL;
221 	}
222 
223 	/* To test unaligned transfers add an offset and fill the
224 		unused memory with a guard value */
225 	if (offset) {
226 		memset(urb->transfer_buffer, GUARD_BYTE, offset);
227 		urb->transfer_buffer += offset;
228 		if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
229 			urb->transfer_dma += offset;
230 	}
231 
232 	/* For inbound transfers use a guard byte so that the test fails if
233 		data is not correctly copied */
234 	memset(urb->transfer_buffer,
235 			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
236 			bytes);
237 	return urb;
238 }
239 
240 static struct urb *simple_alloc_urb(
241 	struct usb_device	*udev,
242 	int			pipe,
243 	unsigned long		bytes)
244 {
245 	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
246 }
247 
248 static unsigned pattern;
249 static unsigned mod_pattern;
250 module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
251 MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
252 
253 static inline void simple_fill_buf(struct urb *urb)
254 {
255 	unsigned	i;
256 	u8		*buf = urb->transfer_buffer;
257 	unsigned	len = urb->transfer_buffer_length;
258 
259 	switch (pattern) {
260 	default:
261 		/* FALLTHROUGH */
262 	case 0:
263 		memset(buf, 0, len);
264 		break;
265 	case 1:			/* mod63 */
266 		for (i = 0; i < len; i++)
267 			*buf++ = (u8) (i % 63);
268 		break;
269 	}
270 }
271 
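/* The misalignment applied above is always smaller than
 * ARCH_KMALLOC_MINALIGN, so it can be recovered from the low bits of the
 * buffer pointer when checking guard bytes or freeing the buffer.
 */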
272 static inline unsigned long buffer_offset(void *buf)
273 {
274 	return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
275 }
276 
277 static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
278 {
279 	u8 *buf = urb->transfer_buffer;
280 	u8 *guard = buf - buffer_offset(buf);
281 	unsigned i;
282 
283 	for (i = 0; guard < buf; i++, guard++) {
284 		if (*guard != GUARD_BYTE) {
285 			ERROR(tdev, "guard byte[%d] %d (not %d)\n",
286 				i, *guard, GUARD_BYTE);
287 			return -EINVAL;
288 		}
289 	}
290 	return 0;
291 }
292 
293 static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
294 {
295 	unsigned	i;
296 	u8		expected;
297 	u8		*buf = urb->transfer_buffer;
298 	unsigned	len = urb->actual_length;
299 
300 	int ret = check_guard_bytes(tdev, urb);
301 	if (ret)
302 		return ret;
303 
304 	for (i = 0; i < len; i++, buf++) {
305 		switch (pattern) {
306 		/* all-zeroes has no synchronization issues */
307 		case 0:
308 			expected = 0;
309 			break;
310 		/* mod63 stays in sync with short-terminated transfers,
311 		 * or otherwise when host and gadget agree on how large
312 		 * each usb transfer request should be.  resync is done
313 		 * with set_interface or set_config.
314 		 */
315 		case 1:			/* mod63 */
316 			expected = i % 63;
317 			break;
318 		/* always fail unsupported patterns */
319 		default:
320 			expected = !*buf;
321 			break;
322 		}
323 		if (*buf == expected)
324 			continue;
325 		ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
326 		return -EINVAL;
327 	}
328 	return 0;
329 }
330 
331 static void simple_free_urb(struct urb *urb)
332 {
333 	unsigned long offset = buffer_offset(urb->transfer_buffer);
334 
335 	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
336 		usb_free_coherent(
337 			urb->dev,
338 			urb->transfer_buffer_length + offset,
339 			urb->transfer_buffer - offset,
340 			urb->transfer_dma - offset);
341 	else
342 		kfree(urb->transfer_buffer - offset);
343 	usb_free_urb(urb);
344 }
345 
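/* Synchronously run one urb "iterations" times, checking IN data against
 * the current pattern and optionally growing the transfer length by
 * "vary" bytes (modulo the original length) on each pass.
 */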
346 static int simple_io(
347 	struct usbtest_dev	*tdev,
348 	struct urb		*urb,
349 	int			iterations,
350 	int			vary,
351 	int			expected,
352 	const char		*label
353 )
354 {
355 	struct usb_device	*udev = urb->dev;
356 	int			max = urb->transfer_buffer_length;
357 	struct completion	completion;
358 	int			retval = 0;
359 	unsigned long		expire;
360 
361 	urb->context = &completion;
362 	while (retval == 0 && iterations-- > 0) {
363 		init_completion(&completion);
364 		if (usb_pipeout(urb->pipe)) {
365 			simple_fill_buf(urb);
366 			urb->transfer_flags |= URB_ZERO_PACKET;
367 		}
368 		retval = usb_submit_urb(urb, GFP_KERNEL);
369 		if (retval != 0)
370 			break;
371 
372 		expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT);
373 		if (!wait_for_completion_timeout(&completion, expire)) {
374 			usb_kill_urb(urb);
375 			retval = (urb->status == -ENOENT ?
376 				  -ETIMEDOUT : urb->status);
377 		} else {
378 			retval = urb->status;
379 		}
380 
381 		urb->dev = udev;
382 		if (retval == 0 && usb_pipein(urb->pipe))
383 			retval = simple_check_buf(tdev, urb);
384 
385 		if (vary) {
386 			int	len = urb->transfer_buffer_length;
387 
388 			len += vary;
389 			len %= max;
390 			if (len == 0)
391 				len = (vary < max) ? vary : max;
392 			urb->transfer_buffer_length = len;
393 		}
394 
395 		/* FIXME if endpoint halted, clear halt (and log) */
396 	}
397 	urb->transfer_buffer_length = max;
398 
399 	if (expected != retval)
400 		dev_err(&udev->dev,
401 			"%s failed, iterations left %d, status %d (not %d)\n",
402 				label, iterations, retval, expected);
403 	return retval;
404 }
405 
406 
407 /*-------------------------------------------------------------------------*/
408 
409 /* We use scatterlist primitives to test queued I/O.
410  * Yes, this also tests the scatterlist primitives.
411  */
412 
413 static void free_sglist(struct scatterlist *sg, int nents)
414 {
415 	unsigned		i;
416 
417 	if (!sg)
418 		return;
419 	for (i = 0; i < nents; i++) {
420 		if (!sg_page(&sg[i]))
421 			continue;
422 		kfree(sg_virt(&sg[i]));
423 	}
424 	kfree(sg);
425 }
426 
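/* Build an nents-entry scatterlist of kmalloc'd buffers of at most "max"
 * bytes each, pre-filled with the selected pattern; "vary" cycles the
 * entry sizes the same way simple_io varies urb lengths.
 */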
427 static struct scatterlist *
428 alloc_sglist(int nents, int max, int vary)
429 {
430 	struct scatterlist	*sg;
431 	unsigned		i;
432 	unsigned		size = max;
433 
434 	sg = kmalloc_array(nents, sizeof *sg, GFP_KERNEL);
435 	if (!sg)
436 		return NULL;
437 	sg_init_table(sg, nents);
438 
439 	for (i = 0; i < nents; i++) {
440 		char		*buf;
441 		unsigned	j;
442 
443 		buf = kzalloc(size, GFP_KERNEL);
444 		if (!buf) {
445 			free_sglist(sg, i);
446 			return NULL;
447 		}
448 
449 		/* kmalloc pages are always physically contiguous! */
450 		sg_set_buf(&sg[i], buf, size);
451 
452 		switch (pattern) {
453 		case 0:
454 			/* already zeroed */
455 			break;
456 		case 1:
457 			for (j = 0; j < size; j++)
458 				*buf++ = (u8) (j % 63);
459 			break;
460 		}
461 
462 		if (vary) {
463 			size += vary;
464 			size %= max;
465 			if (size == 0)
466 				size = (vary < max) ? vary : max;
467 		}
468 	}
469 
470 	return sg;
471 }
472 
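/* Timer callback: a scatterlist request still running after
 * SIMPLE_IO_TIMEOUT is marked as timed out and cancelled.
 */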
473 static void sg_timeout(unsigned long _req)
474 {
475 	struct usb_sg_request	*req = (struct usb_sg_request *) _req;
476 
477 	req->status = -ETIMEDOUT;
478 	usb_sg_cancel(req);
479 }
480 
481 static int perform_sglist(
482 	struct usbtest_dev	*tdev,
483 	unsigned		iterations,
484 	int			pipe,
485 	struct usb_sg_request	*req,
486 	struct scatterlist	*sg,
487 	int			nents
488 )
489 {
490 	struct usb_device	*udev = testdev_to_usbdev(tdev);
491 	int			retval = 0;
492 	struct timer_list	sg_timer;
493 
494 	setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
495 
496 	while (retval == 0 && iterations-- > 0) {
497 		retval = usb_sg_init(req, udev, pipe,
498 				(udev->speed == USB_SPEED_HIGH)
499 					? (INTERRUPT_RATE << 3)
500 					: INTERRUPT_RATE,
501 				sg, nents, 0, GFP_KERNEL);
502 
503 		if (retval)
504 			break;
505 		mod_timer(&sg_timer, jiffies +
506 				msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
507 		usb_sg_wait(req);
508 		del_timer_sync(&sg_timer);
509 		retval = req->status;
510 
511 		/* FIXME check resulting data pattern */
512 
513 		/* FIXME if endpoint halted, clear halt (and log) */
514 	}
515 
516 	/* FIXME for unlink or fault handling tests, don't report
517 	 * failure if retval is as we expected ...
518 	 */
519 	if (retval)
520 		ERROR(tdev, "perform_sglist failed, "
521 				"iterations left %d, status %d\n",
522 				iterations, retval);
523 	return retval;
524 }
525 
526 
527 /*-------------------------------------------------------------------------*/
528 
529 /* unqueued control message testing
530  *
531  * there's a nice set of device functional requirements in chapter 9 of the
532  * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
533  * special test firmware.
534  *
535  * we know the device is configured (or suspended) by the time it's visible
536  * through usbfs.  we can't change that, so we won't test enumeration (which
537  * worked 'well enough' to get here, this time), power management (ditto),
538  * or remote wakeup (which needs human interaction).
539  */
540 
541 static unsigned realworld = 1;
542 module_param(realworld, uint, 0);
543 MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
544 
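/* GET_INTERFACE: ask the device itself (not usbcore's cached state) which
 * altsetting is active on the interface under test.
 */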
545 static int get_altsetting(struct usbtest_dev *dev)
546 {
547 	struct usb_interface	*iface = dev->intf;
548 	struct usb_device	*udev = interface_to_usbdev(iface);
549 	int			retval;
550 
551 	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
552 			USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
553 			0, iface->altsetting[0].desc.bInterfaceNumber,
554 			dev->buf, 1, USB_CTRL_GET_TIMEOUT);
555 	switch (retval) {
556 	case 1:
557 		return dev->buf[0];
558 	case 0:
559 		retval = -ERANGE;
560 		/* FALLTHROUGH */
561 	default:
562 		return retval;
563 	}
564 }
565 
566 static int set_altsetting(struct usbtest_dev *dev, int alternate)
567 {
568 	struct usb_interface		*iface = dev->intf;
569 	struct usb_device		*udev;
570 
571 	if (alternate < 0 || alternate >= 256)
572 		return -EINVAL;
573 
574 	udev = interface_to_usbdev(iface);
575 	return usb_set_interface(udev,
576 			iface->altsetting[0].desc.bInterfaceNumber,
577 			alternate);
578 }
579 
580 static int is_good_config(struct usbtest_dev *tdev, int len)
581 {
582 	struct usb_config_descriptor	*config;
583 
584 	if (len < sizeof *config)
585 		return 0;
586 	config = (struct usb_config_descriptor *) tdev->buf;
587 
588 	switch (config->bDescriptorType) {
589 	case USB_DT_CONFIG:
590 	case USB_DT_OTHER_SPEED_CONFIG:
591 		if (config->bLength != 9) {
592 			ERROR(tdev, "bogus config descriptor length\n");
593 			return 0;
594 		}
595 		/* this bit 'must be 1' but often isn't */
596 		if (!realworld && !(config->bmAttributes & 0x80)) {
597 			ERROR(tdev, "high bit of config attributes not set\n");
598 			return 0;
599 		}
600 		if (config->bmAttributes & 0x1f) {	/* reserved == 0 */
601 			ERROR(tdev, "reserved config bits set\n");
602 			return 0;
603 		}
604 		break;
605 	default:
606 		return 0;
607 	}
608 
609 	if (le16_to_cpu(config->wTotalLength) == len)	/* read it all */
610 		return 1;
611 	if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE)	/* max partial read */
612 		return 1;
613 	ERROR(tdev, "bogus config descriptor read size\n");
614 	return 0;
615 }
616 
617 /* sanity test for standard requests working with usb_control_msg() and some
618  * of the utility functions which use it.
619  *
620  * this doesn't test how endpoint halts behave or data toggles get set, since
621  * we won't do I/O to bulk/interrupt endpoints here (which is how to change
622  * halt or toggle).  toggle testing is impractical without support from hcds.
623  *
624  * this avoids failing devices linux would normally work with, by not testing
625  * config/altsetting operations for devices that only support their defaults.
626  * such devices rarely support those needless operations.
627  *
628  * NOTE that since this is a sanity test, it's not examining boundary cases
629  * to see if usbcore, hcd, and device all behave right.  such testing would
630  * involve varied read sizes and other operation sequences.
631  */
632 static int ch9_postconfig(struct usbtest_dev *dev)
633 {
634 	struct usb_interface	*iface = dev->intf;
635 	struct usb_device	*udev = interface_to_usbdev(iface);
636 	int			i, alt, retval;
637 
638 	/* [9.2.3] if there's more than one altsetting, we need to be able to
639 	 * set and get each one.  mostly trusts the descriptors from usbcore.
640 	 */
641 	for (i = 0; i < iface->num_altsetting; i++) {
642 
643 		/* 9.2.3 constrains the range here */
644 		alt = iface->altsetting[i].desc.bAlternateSetting;
645 		if (alt < 0 || alt >= iface->num_altsetting) {
646 			dev_err(&iface->dev,
647 					"invalid alt [%d].bAltSetting = %d\n",
648 					i, alt);
649 		}
650 
651 		/* [real world] get/set unimplemented if there's only one */
652 		if (realworld && iface->num_altsetting == 1)
653 			continue;
654 
655 		/* [9.4.10] set_interface */
656 		retval = set_altsetting(dev, alt);
657 		if (retval) {
658 			dev_err(&iface->dev, "can't set_interface = %d, %d\n",
659 					alt, retval);
660 			return retval;
661 		}
662 
663 		/* [9.4.4] get_interface always works */
664 		retval = get_altsetting(dev);
665 		if (retval != alt) {
666 			dev_err(&iface->dev, "get alt should be %d, was %d\n",
667 					alt, retval);
668 			return (retval < 0) ? retval : -EDOM;
669 		}
670 
671 	}
672 
673 	/* [real world] get_config unimplemented if there's only one */
674 	if (!realworld || udev->descriptor.bNumConfigurations != 1) {
675 		int	expected = udev->actconfig->desc.bConfigurationValue;
676 
677 		/* [9.4.2] get_configuration always works
678 		 * ... although some cheap devices (like one TI Hub I've got)
679 		 * won't return config descriptors except before set_config.
680 		 */
681 		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
682 				USB_REQ_GET_CONFIGURATION,
683 				USB_DIR_IN | USB_RECIP_DEVICE,
684 				0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
685 		if (retval != 1 || dev->buf[0] != expected) {
686 			dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
687 				retval, dev->buf[0], expected);
688 			return (retval < 0) ? retval : -EDOM;
689 		}
690 	}
691 
692 	/* there's always [9.4.3] a device descriptor [9.6.1] */
693 	retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
694 			dev->buf, sizeof udev->descriptor);
695 	if (retval != sizeof udev->descriptor) {
696 		dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
697 		return (retval < 0) ? retval : -EDOM;
698 	}
699 
700 	/* there's always [9.4.3] at least one config descriptor [9.6.3] */
701 	for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
702 		retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
703 				dev->buf, TBUF_SIZE);
704 		if (!is_good_config(dev, retval)) {
705 			dev_err(&iface->dev,
706 					"config [%d] descriptor --> %d\n",
707 					i, retval);
708 			return (retval < 0) ? retval : -EDOM;
709 		}
710 
711 		/* FIXME cross-checking udev->config[i] to make sure usbcore
712 		 * parsed it right (etc) would be good testing paranoia
713 		 */
714 	}
715 
716 	/* and sometimes [9.2.6.6] speed dependent descriptors */
717 	if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
718 		struct usb_qualifier_descriptor *d = NULL;
719 
720 		/* device qualifier [9.6.2] */
721 		retval = usb_get_descriptor(udev,
722 				USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
723 				sizeof(struct usb_qualifier_descriptor));
724 		if (retval == -EPIPE) {
725 			if (udev->speed == USB_SPEED_HIGH) {
726 				dev_err(&iface->dev,
727 						"hs dev qualifier --> %d\n",
728 						retval);
729 				return (retval < 0) ? retval : -EDOM;
730 			}
731 			/* usb2.0 but not high-speed capable; fine */
732 		} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
733 			dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
734 			return (retval < 0) ? retval : -EDOM;
735 		} else
736 			d = (struct usb_qualifier_descriptor *) dev->buf;
737 
738 		/* might not have [9.6.2] any other-speed configs [9.6.4] */
739 		if (d) {
740 			unsigned max = d->bNumConfigurations;
741 			for (i = 0; i < max; i++) {
742 				retval = usb_get_descriptor(udev,
743 					USB_DT_OTHER_SPEED_CONFIG, i,
744 					dev->buf, TBUF_SIZE);
745 				if (!is_good_config(dev, retval)) {
746 					dev_err(&iface->dev,
747 						"other speed config --> %d\n",
748 						retval);
749 					return (retval < 0) ? retval : -EDOM;
750 				}
751 			}
752 		}
753 	}
754 	/* FIXME fetch strings from at least the device descriptor */
755 
756 	/* [9.4.5] get_status always works */
757 	retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
758 	if (retval != 2) {
759 		dev_err(&iface->dev, "get dev status --> %d\n", retval);
760 		return (retval < 0) ? retval : -EDOM;
761 	}
762 
763 	/* FIXME configuration.bmAttributes says if we could try to set/clear
764 	 * the device's remote wakeup feature ... if we can, test that here
765 	 */
766 
767 	retval = usb_get_status(udev, USB_RECIP_INTERFACE,
768 			iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
769 	if (retval != 2) {
770 		dev_err(&iface->dev, "get interface status --> %d\n", retval);
771 		return (retval < 0) ? retval : -EDOM;
772 	}
773 	/* FIXME get status for each endpoint in the interface */
774 
775 	return 0;
776 }
777 
778 /*-------------------------------------------------------------------------*/
779 
780 /* use ch9 requests to test whether:
781  *   (a) queues work for control, keeping N subtests queued and
782  *       active (auto-resubmit) for M loops through the queue.
783  *   (b) protocol stalls (control-only) will autorecover.
784  *       it's not like bulk/intr; no halt clearing.
785  *   (c) short control reads are reported and handled.
786  *   (d) queues are always processed in-order
787  */
788 
789 struct ctrl_ctx {
790 	spinlock_t		lock;
791 	struct usbtest_dev	*dev;
792 	struct completion	complete;
793 	unsigned		count;
794 	unsigned		pending;
795 	int			status;
796 	struct urb		**urb;
797 	struct usbtest_param	*param;
798 	int			last;
799 };
800 
801 #define NUM_SUBCASES	15		/* how many test subcases here? */
802 
803 struct subcase {
804 	struct usb_ctrlrequest	setup;
805 	int			number;
806 	int			expected;
807 };
808 
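/* Completion handler for the queued control test: verify fifo completion
 * order, compare the status against the subcase's expectation, unlink
 * whatever is still queued on an unexpected error, and otherwise resubmit
 * until the requested count has been transferred.
 */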
809 static void ctrl_complete(struct urb *urb)
810 {
811 	struct ctrl_ctx		*ctx = urb->context;
812 	struct usb_ctrlrequest	*reqp;
813 	struct subcase		*subcase;
814 	int			status = urb->status;
815 
816 	reqp = (struct usb_ctrlrequest *)urb->setup_packet;
817 	subcase = container_of(reqp, struct subcase, setup);
818 
819 	spin_lock(&ctx->lock);
820 	ctx->count--;
821 	ctx->pending--;
822 
823 	/* queue must transfer and complete in fifo order, unless
824 	 * usb_unlink_urb() is used to unlink something not at the
825 	 * physical queue head (not tested).
826 	 */
827 	if (subcase->number > 0) {
828 		if ((subcase->number - ctx->last) != 1) {
829 			ERROR(ctx->dev,
830 				"subcase %d completed out of order, last %d\n",
831 				subcase->number, ctx->last);
832 			status = -EDOM;
833 			ctx->last = subcase->number;
834 			goto error;
835 		}
836 	}
837 	ctx->last = subcase->number;
838 
839 	/* succeed or fault in only one way? */
840 	if (status == subcase->expected)
841 		status = 0;
842 
843 	/* async unlink for cleanup? */
844 	else if (status != -ECONNRESET) {
845 
846 		/* some faults are allowed, not required */
847 		if (subcase->expected > 0 && (
848 			  ((status == -subcase->expected	/* happened */
849 			   || status == 0))))			/* didn't */
850 			status = 0;
851 		/* sometimes more than one fault is allowed */
852 		else if (subcase->number == 12 && status == -EPIPE)
853 			status = 0;
854 		else
855 			ERROR(ctx->dev, "subtest %d error, status %d\n",
856 					subcase->number, status);
857 	}
858 
859 	/* unexpected status codes mean errors; ideally, in hardware */
860 	if (status) {
861 error:
862 		if (ctx->status == 0) {
863 			int		i;
864 
865 			ctx->status = status;
866 			ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
867 					"%d left, subcase %d, len %d/%d\n",
868 					reqp->bRequestType, reqp->bRequest,
869 					status, ctx->count, subcase->number,
870 					urb->actual_length,
871 					urb->transfer_buffer_length);
872 
873 			/* FIXME this "unlink everything" exit route should
874 			 * be a separate test case.
875 			 */
876 
877 			/* unlink whatever's still pending */
878 			for (i = 1; i < ctx->param->sglen; i++) {
879 				struct urb *u = ctx->urb[
880 							(i + subcase->number)
881 							% ctx->param->sglen];
882 
883 				if (u == urb || !u->dev)
884 					continue;
885 				spin_unlock(&ctx->lock);
886 				status = usb_unlink_urb(u);
887 				spin_lock(&ctx->lock);
888 				switch (status) {
889 				case -EINPROGRESS:
890 				case -EBUSY:
891 				case -EIDRM:
892 					continue;
893 				default:
894 					ERROR(ctx->dev, "urb unlink --> %d\n",
895 							status);
896 				}
897 			}
898 			status = ctx->status;
899 		}
900 	}
901 
902 	/* resubmit if we need to, else mark this as done */
903 	if ((status == 0) && (ctx->pending < ctx->count)) {
904 		status = usb_submit_urb(urb, GFP_ATOMIC);
905 		if (status != 0) {
906 			ERROR(ctx->dev,
907 				"can't resubmit ctrl %02x.%02x, err %d\n",
908 				reqp->bRequestType, reqp->bRequest, status);
909 			urb->dev = NULL;
910 		} else
911 			ctx->pending++;
912 	} else
913 		urb->dev = NULL;
914 
915 	/* signal completion when nothing's queued */
916 	if (ctx->pending == 0)
917 		complete(&ctx->complete);
918 	spin_unlock(&ctx->lock);
919 }
920 
921 static int
922 test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
923 {
924 	struct usb_device	*udev = testdev_to_usbdev(dev);
925 	struct urb		**urb;
926 	struct ctrl_ctx		context;
927 	int			i;
928 
929 	if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
930 		return -EOPNOTSUPP;
931 
932 	spin_lock_init(&context.lock);
933 	context.dev = dev;
934 	init_completion(&context.complete);
935 	context.count = param->sglen * param->iterations;
936 	context.pending = 0;
937 	context.status = -ENOMEM;
938 	context.param = param;
939 	context.last = -1;
940 
941 	/* allocate and init the urbs we'll queue.
942 	 * as with bulk/intr sglists, sglen is the queue depth; it also
943 	 * controls which subtests run (more tests than sglen) or rerun.
944 	 */
945 	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
946 	if (!urb)
947 		return -ENOMEM;
948 	for (i = 0; i < param->sglen; i++) {
949 		int			pipe = usb_rcvctrlpipe(udev, 0);
950 		unsigned		len;
951 		struct urb		*u;
952 		struct usb_ctrlrequest	req;
953 		struct subcase		*reqp;
954 
955 		/* sign of this variable means:
956 		 *  -: tested code must return this (negative) error code
957 		 *  +: tested code may return this (negative too) error code
958 		 */
959 		int			expected = 0;
960 
961 		/* requests here are mostly expected to succeed on any
962 		 * device, but some are chosen to trigger protocol stalls
963 		 * or short reads.
964 		 */
965 		memset(&req, 0, sizeof req);
966 		req.bRequest = USB_REQ_GET_DESCRIPTOR;
967 		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
968 
969 		switch (i % NUM_SUBCASES) {
970 		case 0:		/* get device descriptor */
971 			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
972 			len = sizeof(struct usb_device_descriptor);
973 			break;
974 		case 1:		/* get first config descriptor (only) */
975 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
976 			len = sizeof(struct usb_config_descriptor);
977 			break;
978 		case 2:		/* get altsetting (OFTEN STALLS) */
979 			req.bRequest = USB_REQ_GET_INTERFACE;
980 			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
981 			/* index = 0 means first interface */
982 			len = 1;
983 			expected = EPIPE;
984 			break;
985 		case 3:		/* get interface status */
986 			req.bRequest = USB_REQ_GET_STATUS;
987 			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
988 			/* interface 0 */
989 			len = 2;
990 			break;
991 		case 4:		/* get device status */
992 			req.bRequest = USB_REQ_GET_STATUS;
993 			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
994 			len = 2;
995 			break;
996 		case 5:		/* get device qualifier (MAY STALL) */
997 			req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
998 			len = sizeof(struct usb_qualifier_descriptor);
999 			if (udev->speed != USB_SPEED_HIGH)
1000 				expected = EPIPE;
1001 			break;
1002 		case 6:		/* get first config descriptor, plus interface */
1003 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1004 			len = sizeof(struct usb_config_descriptor);
1005 			len += sizeof(struct usb_interface_descriptor);
1006 			break;
1007 		case 7:		/* get interface descriptor (ALWAYS STALLS) */
1008 			req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
1009 			/* interface == 0 */
1010 			len = sizeof(struct usb_interface_descriptor);
1011 			expected = -EPIPE;
1012 			break;
1013 		/* NOTE: two consecutive stalls in the queue here.
1014 		 *  that tests fault recovery a bit more aggressively. */
1015 		case 8:		/* clear endpoint halt (MAY STALL) */
1016 			req.bRequest = USB_REQ_CLEAR_FEATURE;
1017 			req.bRequestType = USB_RECIP_ENDPOINT;
1018 			/* wValue 0 == ep halt */
1019 			/* wIndex 0 == ep0 (shouldn't halt!) */
1020 			len = 0;
1021 			pipe = usb_sndctrlpipe(udev, 0);
1022 			expected = EPIPE;
1023 			break;
1024 		case 9:		/* get endpoint status */
1025 			req.bRequest = USB_REQ_GET_STATUS;
1026 			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
1027 			/* endpoint 0 */
1028 			len = 2;
1029 			break;
1030 		case 10:	/* trigger short read (EREMOTEIO) */
1031 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1032 			len = 1024;
1033 			expected = -EREMOTEIO;
1034 			break;
1035 		/* NOTE: two consecutive _different_ faults in the queue. */
1036 		case 11:	/* get endpoint descriptor (ALWAYS STALLS) */
1037 			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
1038 			/* endpoint == 0 */
1039 			len = sizeof(struct usb_interface_descriptor);
1040 			expected = EPIPE;
1041 			break;
1042 		/* NOTE: sometimes even a third fault in the queue! */
1043 		case 12:	/* get string 0 descriptor (MAY STALL) */
1044 			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
1045 			/* string == 0, for language IDs */
1046 			len = sizeof(struct usb_interface_descriptor);
1047 			/* may succeed when > 4 languages */
1048 			expected = EREMOTEIO;	/* or EPIPE, if no strings */
1049 			break;
1050 		case 13:	/* short read, resembling case 10 */
1051 			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1052 			/* last data packet "should" be DATA1, not DATA0 */
1053 			if (udev->speed == USB_SPEED_SUPER)
1054 				len = 1024 - 512;
1055 			else
1056 				len = 1024 - udev->descriptor.bMaxPacketSize0;
1057 			expected = -EREMOTEIO;
1058 			break;
1059 		case 14:	/* short read; try to fill the last packet */
1060 			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
1061 			/* device descriptor size == 18 bytes */
1062 			len = udev->descriptor.bMaxPacketSize0;
1063 			if (udev->speed == USB_SPEED_SUPER)
1064 				len = 512;
1065 			switch (len) {
1066 			case 8:
1067 				len = 24;
1068 				break;
1069 			case 16:
1070 				len = 32;
1071 				break;
1072 			}
1073 			expected = -EREMOTEIO;
1074 			break;
1075 		default:
1076 			ERROR(dev, "bogus number of ctrl queue testcases!\n");
1077 			context.status = -EINVAL;
1078 			goto cleanup;
1079 		}
1080 		req.wLength = cpu_to_le16(len);
1081 		urb[i] = u = simple_alloc_urb(udev, pipe, len);
1082 		if (!u)
1083 			goto cleanup;
1084 
1085 		reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
1086 		if (!reqp)
1087 			goto cleanup;
1088 		reqp->setup = req;
1089 		reqp->number = i % NUM_SUBCASES;
1090 		reqp->expected = expected;
1091 		u->setup_packet = (char *) &reqp->setup;
1092 
1093 		u->context = &context;
1094 		u->complete = ctrl_complete;
1095 	}
1096 
1097 	/* queue the urbs */
1098 	context.urb = urb;
1099 	spin_lock_irq(&context.lock);
1100 	for (i = 0; i < param->sglen; i++) {
1101 		context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
1102 		if (context.status != 0) {
1103 			ERROR(dev, "can't submit urb[%d], status %d\n",
1104 					i, context.status);
1105 			context.count = context.pending;
1106 			break;
1107 		}
1108 		context.pending++;
1109 	}
1110 	spin_unlock_irq(&context.lock);
1111 
1112 	/* FIXME  set timer and time out; provide a disconnect hook */
1113 
1114 	/* wait for the last one to complete */
1115 	if (context.pending > 0)
1116 		wait_for_completion(&context.complete);
1117 
1118 cleanup:
1119 	for (i = 0; i < param->sglen; i++) {
1120 		if (!urb[i])
1121 			continue;
1122 		urb[i]->dev = udev;
1123 		kfree(urb[i]->setup_packet);
1124 		simple_free_urb(urb[i]);
1125 	}
1126 	kfree(urb);
1127 	return context.status;
1128 }
1129 #undef NUM_SUBCASES
1130 
1131 
1132 /*-------------------------------------------------------------------------*/
1133 
1134 static void unlink1_callback(struct urb *urb)
1135 {
1136 	int	status = urb->status;
1137 
1138 	/* we "know" -EPIPE (stall) never happens */
1139 	if (!status)
1140 		status = usb_submit_urb(urb, GFP_ATOMIC);
1141 	if (status) {
1142 		urb->status = status;
1143 		complete(urb->context);
1144 	}
1145 }
1146 
1147 static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
1148 {
1149 	struct urb		*urb;
1150 	struct completion	completion;
1151 	int			retval = 0;
1152 
1153 	init_completion(&completion);
1154 	urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
1155 	if (!urb)
1156 		return -ENOMEM;
1157 	urb->context = &completion;
1158 	urb->complete = unlink1_callback;
1159 
1160 	if (usb_pipeout(urb->pipe)) {
1161 		simple_fill_buf(urb);
1162 		urb->transfer_flags |= URB_ZERO_PACKET;
1163 	}
1164 
1165 	/* keep the endpoint busy.  there are lots of hc/hcd-internal
1166 	 * states, and testing should get to all of them over time.
1167 	 *
1168 	 * FIXME want additional tests for when endpoint is STALLing
1169 	 * due to errors, or is just NAKing requests.
1170 	 */
1171 	retval = usb_submit_urb(urb, GFP_KERNEL);
1172 	if (retval != 0) {
1173 		dev_err(&dev->intf->dev, "submit fail %d\n", retval);
1174 		return retval;
1175 	}
1176 
1177 	/* unlinking that should always work.  variable delay tests more
1178 	 * hcd states and code paths, even with little other system load.
1179 	 */
1180 	msleep(jiffies % (2 * INTERRUPT_RATE));
1181 	if (async) {
1182 		while (!completion_done(&completion)) {
1183 			retval = usb_unlink_urb(urb);
1184 
1185 			switch (retval) {
1186 			case -EBUSY:
1187 			case -EIDRM:
1188 				/* we can't unlink urbs while they're completing
1189 				 * or if they've completed, and we haven't
1190 				 * resubmitted. "normal" drivers would prevent
1191 				 * resubmission, but since we're testing unlink
1192 				 * paths, we can't.
1193 				 */
1194 				ERROR(dev, "unlink retry\n");
1195 				continue;
1196 			case 0:
1197 			case -EINPROGRESS:
1198 				break;
1199 
1200 			default:
1201 				dev_err(&dev->intf->dev,
1202 					"unlink fail %d\n", retval);
1203 				return retval;
1204 			}
1205 
1206 			break;
1207 		}
1208 	} else
1209 		usb_kill_urb(urb);
1210 
1211 	wait_for_completion(&completion);
1212 	retval = urb->status;
1213 	simple_free_urb(urb);
1214 
1215 	if (async)
1216 		return (retval == -ECONNRESET) ? 0 : retval - 1000;
1217 	else
1218 		return (retval == -ENOENT || retval == -EPERM) ?
1219 				0 : retval - 2000;
1220 }
1221 
1222 static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
1223 {
1224 	int			retval = 0;
1225 
1226 	/* test sync and async paths */
1227 	retval = unlink1(dev, pipe, len, 1);
1228 	if (!retval)
1229 		retval = unlink1(dev, pipe, len, 0);
1230 	return retval;
1231 }
1232 
1233 /*-------------------------------------------------------------------------*/
1234 
1235 struct queued_ctx {
1236 	struct completion	complete;
1237 	atomic_t		pending;
1238 	unsigned		num;
1239 	int			status;
1240 	struct urb		**urbs;
1241 };
1242 
1243 static void unlink_queued_callback(struct urb *urb)
1244 {
1245 	int			status = urb->status;
1246 	struct queued_ctx	*ctx = urb->context;
1247 
1248 	if (ctx->status)
1249 		goto done;
1250 	if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
1251 		if (status == -ECONNRESET)
1252 			goto done;
1253 		/* What error should we report if the URB completed normally? */
1254 	}
1255 	if (status != 0)
1256 		ctx->status = status;
1257 
1258  done:
1259 	if (atomic_dec_and_test(&ctx->pending))
1260 		complete(&ctx->complete);
1261 }
1262 
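/* Queue "num" identical bulk urbs sharing one coherent buffer, then unlink
 * urbs num - 4 and num - 2 while the queue is live.  ctx.pending starts at
 * one more than the submitted count so the completion can't fire before
 * all submissions (and the unlinks) have been issued.
 */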
1263 static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
1264 		unsigned size)
1265 {
1266 	struct queued_ctx	ctx;
1267 	struct usb_device	*udev = testdev_to_usbdev(dev);
1268 	void			*buf;
1269 	dma_addr_t		buf_dma;
1270 	int			i;
1271 	int			retval = -ENOMEM;
1272 
1273 	init_completion(&ctx.complete);
1274 	atomic_set(&ctx.pending, 1);	/* One more than the actual value */
1275 	ctx.num = num;
1276 	ctx.status = 0;
1277 
1278 	buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
1279 	if (!buf)
1280 		return retval;
1281 	memset(buf, 0, size);
1282 
1283 	/* Allocate and init the urbs we'll queue */
1284 	ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
1285 	if (!ctx.urbs)
1286 		goto free_buf;
1287 	for (i = 0; i < num; i++) {
1288 		ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
1289 		if (!ctx.urbs[i])
1290 			goto free_urbs;
1291 		usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
1292 				unlink_queued_callback, &ctx);
1293 		ctx.urbs[i]->transfer_dma = buf_dma;
1294 		ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1295 
1296 		if (usb_pipeout(ctx.urbs[i]->pipe)) {
1297 			simple_fill_buf(ctx.urbs[i]);
1298 			ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
1299 		}
1300 	}
1301 
1302 	/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
1303 	for (i = 0; i < num; i++) {
1304 		atomic_inc(&ctx.pending);
1305 		retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
1306 		if (retval != 0) {
1307 			dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
1308 					i, retval);
1309 			atomic_dec(&ctx.pending);
1310 			ctx.status = retval;
1311 			break;
1312 		}
1313 	}
1314 	if (i == num) {
1315 		usb_unlink_urb(ctx.urbs[num - 4]);
1316 		usb_unlink_urb(ctx.urbs[num - 2]);
1317 	} else {
1318 		while (--i >= 0)
1319 			usb_unlink_urb(ctx.urbs[i]);
1320 	}
1321 
1322 	if (atomic_dec_and_test(&ctx.pending))		/* The extra count */
1323 		complete(&ctx.complete);
1324 	wait_for_completion(&ctx.complete);
1325 	retval = ctx.status;
1326 
1327  free_urbs:
1328 	for (i = 0; i < num; i++)
1329 		usb_free_urb(ctx.urbs[i]);
1330 	kfree(ctx.urbs);
1331  free_buf:
1332 	usb_free_coherent(udev, size, buf, buf_dma);
1333 	return retval;
1334 }
1335 
1336 /*-------------------------------------------------------------------------*/
1337 
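/* Endpoint halt tests: check both GET_STATUS and real I/O behaviour before
 * a halt is set, while it is set, and after it is cleared.
 */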
1338 static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1339 {
1340 	int	retval;
1341 	u16	status;
1342 
1343 	/* shouldn't look or act halted */
1344 	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1345 	if (retval < 0) {
1346 		ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
1347 				ep, retval);
1348 		return retval;
1349 	}
1350 	if (status != 0) {
1351 		ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
1352 		return -EINVAL;
1353 	}
1354 	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1355 	if (retval != 0)
1356 		return -EINVAL;
1357 	return 0;
1358 }
1359 
1360 static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1361 {
1362 	int	retval;
1363 	u16	status;
1364 
1365 	/* should look and act halted */
1366 	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1367 	if (retval < 0) {
1368 		ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
1369 				ep, retval);
1370 		return retval;
1371 	}
1372 	le16_to_cpus(&status);
1373 	if (status != 1) {
1374 		ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
1375 		return -EINVAL;
1376 	}
1377 	retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
1378 	if (retval != -EPIPE)
1379 		return -EINVAL;
1380 	retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
1381 	if (retval != -EPIPE)
1382 		return -EINVAL;
1383 	return 0;
1384 }
1385 
1386 static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1387 {
1388 	int	retval;
1389 
1390 	/* shouldn't look or act halted now */
1391 	retval = verify_not_halted(tdev, ep, urb);
1392 	if (retval < 0)
1393 		return retval;
1394 
1395 	/* set halt (protocol test only), verify it worked */
1396 	retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
1397 			USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
1398 			USB_ENDPOINT_HALT, ep,
1399 			NULL, 0, USB_CTRL_SET_TIMEOUT);
1400 	if (retval < 0) {
1401 		ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
1402 		return retval;
1403 	}
1404 	retval = verify_halted(tdev, ep, urb);
1405 	if (retval < 0)
1406 		return retval;
1407 
1408 	/* clear halt (tests API + protocol), verify it worked */
1409 	retval = usb_clear_halt(urb->dev, urb->pipe);
1410 	if (retval < 0) {
1411 		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1412 		return retval;
1413 	}
1414 	retval = verify_not_halted(tdev, ep, urb);
1415 	if (retval < 0)
1416 		return retval;
1417 
1418 	/* NOTE:  could also verify SET_INTERFACE clear halts ... */
1419 
1420 	return 0;
1421 }
1422 
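/* Run the set-halt/verify/clear-halt sequence on the bulk IN and OUT
 * endpoints, using a 512-byte urb (1024 for SuperSpeed) for the I/O
 * verification.
 */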
1423 static int halt_simple(struct usbtest_dev *dev)
1424 {
1425 	int			ep;
1426 	int			retval = 0;
1427 	struct urb		*urb;
1428 	struct usb_device	*udev = testdev_to_usbdev(dev);
1429 
1430 	if (udev->speed == USB_SPEED_SUPER)
1431 		urb = simple_alloc_urb(udev, 0, 1024);
1432 	else
1433 		urb = simple_alloc_urb(udev, 0, 512);
1434 	if (urb == NULL)
1435 		return -ENOMEM;
1436 
1437 	if (dev->in_pipe) {
1438 		ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
1439 		urb->pipe = dev->in_pipe;
1440 		retval = test_halt(dev, ep, urb);
1441 		if (retval < 0)
1442 			goto done;
1443 	}
1444 
1445 	if (dev->out_pipe) {
1446 		ep = usb_pipeendpoint(dev->out_pipe);
1447 		urb->pipe = dev->out_pipe;
1448 		retval = test_halt(dev, ep, urb);
1449 	}
1450 done:
1451 	simple_free_urb(urb);
1452 	return retval;
1453 }
1454 
1455 /*-------------------------------------------------------------------------*/
1456 
1457 /* Control OUT tests use the vendor control requests from Intel's
1458  * USB 2.0 compliance test device:  write a buffer, read it back.
1459  *
1460  * Intel's spec only _requires_ that it work for one packet, which
1461  * is pretty weak.   Some HCDs place limits here; most devices will
1462  * need to be able to handle more than one OUT data packet.  We'll
1463  * try whatever we're told to try.
1464  */
1465 static int ctrl_out(struct usbtest_dev *dev,
1466 		unsigned count, unsigned length, unsigned vary, unsigned offset)
1467 {
1468 	unsigned		i, j, len;
1469 	int			retval;
1470 	u8			*buf;
1471 	char			*what = "?";
1472 	struct usb_device	*udev;
1473 
1474 	if (length < 1 || length > 0xffff || vary >= length)
1475 		return -EINVAL;
1476 
1477 	buf = kmalloc(length + offset, GFP_KERNEL);
1478 	if (!buf)
1479 		return -ENOMEM;
1480 
1481 	buf += offset;
1482 	udev = testdev_to_usbdev(dev);
1483 	len = length;
1484 	retval = 0;
1485 
1486 	/* NOTE:  hardware might well act differently if we pushed it
1487 	 * with lots back-to-back queued requests.
1488 	 */
1489 	for (i = 0; i < count; i++) {
1490 		/* write patterned data */
1491 		for (j = 0; j < len; j++)
1492 			buf[j] = i + j;
1493 		retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1494 				0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
1495 				0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
1496 		if (retval != len) {
1497 			what = "write";
1498 			if (retval >= 0) {
1499 				ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
1500 						retval, len);
1501 				retval = -EBADMSG;
1502 			}
1503 			break;
1504 		}
1505 
1506 		/* read it back -- assuming nothing intervened!!  */
1507 		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1508 				0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
1509 				0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
1510 		if (retval != len) {
1511 			what = "read";
1512 			if (retval >= 0) {
1513 				ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
1514 						retval, len);
1515 				retval = -EBADMSG;
1516 			}
1517 			break;
1518 		}
1519 
1520 		/* fail if we can't verify */
1521 		for (j = 0; j < len; j++) {
1522 			if (buf[j] != (u8) (i + j)) {
1523 				ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
1524 					j, buf[j], (u8) i + j);
1525 				retval = -EBADMSG;
1526 				break;
1527 			}
1528 		}
1529 		if (retval < 0) {
1530 			what = "verify";
1531 			break;
1532 		}
1533 
1534 		len += vary;
1535 
1536 		/* [real world] the "zero bytes IN" case isn't really used.
1537 		 * hardware can easily trip up in this weird case, since its
1538 		 * status stage is IN, not OUT like other ep0in transfers.
1539 		 */
1540 		if (len > length)
1541 			len = realworld ? 1 : 0;
1542 	}
1543 
1544 	if (retval < 0)
1545 		ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
1546 			what, retval, i);
1547 
1548 	kfree(buf - offset);
1549 	return retval;
1550 }
1551 
1552 /*-------------------------------------------------------------------------*/
1553 
1554 /* ISO tests ... mimics common usage
1555  *  - buffer length is split into N packets (mostly maxpacket sized)
1556  *  - multi-buffers according to sglen
1557  */
1558 
1559 struct iso_context {
1560 	unsigned		count;
1561 	unsigned		pending;
1562 	spinlock_t		lock;
1563 	struct completion	done;
1564 	int			submit_error;
1565 	unsigned long		errors;
1566 	unsigned long		packet_count;
1567 	struct usbtest_dev	*dev;
1568 };
1569 
1570 static void iso_callback(struct urb *urb)
1571 {
1572 	struct iso_context	*ctx = urb->context;
1573 
1574 	spin_lock(&ctx->lock);
1575 	ctx->count--;
1576 
1577 	ctx->packet_count += urb->number_of_packets;
1578 	if (urb->error_count > 0)
1579 		ctx->errors += urb->error_count;
1580 	else if (urb->status != 0)
1581 		ctx->errors += urb->number_of_packets;
1582 	else if (urb->actual_length != urb->transfer_buffer_length)
1583 		ctx->errors++;
1584 	else if (check_guard_bytes(ctx->dev, urb) != 0)
1585 		ctx->errors++;
1586 
1587 	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
1588 			&& !ctx->submit_error) {
1589 		int status = usb_submit_urb(urb, GFP_ATOMIC);
1590 		switch (status) {
1591 		case 0:
1592 			goto done;
1593 		default:
1594 			dev_err(&ctx->dev->intf->dev,
1595 					"iso resubmit err %d\n",
1596 					status);
1597 			/* FALLTHROUGH */
1598 		case -ENODEV:			/* disconnected */
1599 		case -ESHUTDOWN:		/* endpoint disabled */
1600 			ctx->submit_error = 1;
1601 			break;
1602 		}
1603 	}
1604 
1605 	ctx->pending--;
1606 	if (ctx->pending == 0) {
1607 		if (ctx->errors)
1608 			dev_err(&ctx->dev->intf->dev,
1609 				"iso test, %lu errors out of %lu\n",
1610 				ctx->errors, ctx->packet_count);
1611 		complete(&ctx->done);
1612 	}
1613 done:
1614 	spin_unlock(&ctx->lock);
1615 }
1616 
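/* Allocate an isochronous urb whose buffer is split into (high-bandwidth
 * aware) maxpacket-sized frames; as with usbtest_alloc_urb, a nonzero
 * offset misaligns the buffer behind GUARD_BYTE padding.
 */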
1617 static struct urb *iso_alloc_urb(
1618 	struct usb_device	*udev,
1619 	int			pipe,
1620 	struct usb_endpoint_descriptor	*desc,
1621 	long			bytes,
1622 	unsigned offset
1623 )
1624 {
1625 	struct urb		*urb;
1626 	unsigned		i, maxp, packets;
1627 
1628 	if (bytes < 0 || !desc)
1629 		return NULL;
1630 	maxp = 0x7ff & usb_endpoint_maxp(desc);
1631 	maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
1632 	packets = DIV_ROUND_UP(bytes, maxp);
1633 
1634 	urb = usb_alloc_urb(packets, GFP_KERNEL);
1635 	if (!urb)
1636 		return urb;
1637 	urb->dev = udev;
1638 	urb->pipe = pipe;
1639 
1640 	urb->number_of_packets = packets;
1641 	urb->transfer_buffer_length = bytes;
1642 	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
1643 							GFP_KERNEL,
1644 							&urb->transfer_dma);
1645 	if (!urb->transfer_buffer) {
1646 		usb_free_urb(urb);
1647 		return NULL;
1648 	}
1649 	if (offset) {
1650 		memset(urb->transfer_buffer, GUARD_BYTE, offset);
1651 		urb->transfer_buffer += offset;
1652 		urb->transfer_dma += offset;
1653 	}
1654 	/* For inbound transfers use a guard byte so that the test fails if
1655 		data is not correctly copied */
1656 	memset(urb->transfer_buffer,
1657 			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
1658 			bytes);
1659 
1660 	for (i = 0; i < packets; i++) {
1661 		/* here, only the last packet will be short */
1662 		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
1663 		bytes -= urb->iso_frame_desc[i].length;
1664 
1665 		urb->iso_frame_desc[i].offset = maxp * i;
1666 	}
1667 
1668 	urb->complete = iso_callback;
1669 	/* urb->context = SET BY CALLER */
1670 	urb->interval = 1 << (desc->bInterval - 1);
1671 	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
1672 	return urb;
1673 }
1674 
1675 static int
1676 test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
1677 		int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
1678 {
1679 	struct iso_context	context;
1680 	struct usb_device	*udev;
1681 	unsigned		i;
1682 	unsigned long		packets = 0;
1683 	int			status = 0;
1684 	struct urb		*urbs[10];	/* FIXME no limit */
1685 
1686 	if (param->sglen > 10)
1687 		return -EDOM;
1688 
1689 	memset(&context, 0, sizeof context);
1690 	context.count = param->iterations * param->sglen;
1691 	context.dev = dev;
1692 	init_completion(&context.done);
1693 	spin_lock_init(&context.lock);
1694 
1695 	memset(urbs, 0, sizeof urbs);
1696 	udev = testdev_to_usbdev(dev);
1697 	dev_info(&dev->intf->dev,
1698 		"... iso period %d %sframes, wMaxPacket %04x\n",
1699 		1 << (desc->bInterval - 1),
1700 		(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
1701 		usb_endpoint_maxp(desc));
1702 
1703 	for (i = 0; i < param->sglen; i++) {
1704 		urbs[i] = iso_alloc_urb(udev, pipe, desc,
1705 					param->length, offset);
1706 		if (!urbs[i]) {
1707 			status = -ENOMEM;
1708 			goto fail;
1709 		}
1710 		packets += urbs[i]->number_of_packets;
1711 		urbs[i]->context = &context;
1712 	}
1713 	packets *= param->iterations;
1714 	dev_info(&dev->intf->dev,
1715 		"... total %lu msec (%lu packets)\n",
1716 		(packets * (1 << (desc->bInterval - 1)))
1717 			/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
1718 		packets);
1719 
1720 	spin_lock_irq(&context.lock);
1721 	for (i = 0; i < param->sglen; i++) {
1722 		++context.pending;
1723 		status = usb_submit_urb(urbs[i], GFP_ATOMIC);
1724 		if (status < 0) {
1725 			ERROR(dev, "submit iso[%d], error %d\n", i, status);
1726 			if (i == 0) {
1727 				spin_unlock_irq(&context.lock);
1728 				goto fail;
1729 			}
1730 
1731 			simple_free_urb(urbs[i]);
1732 			urbs[i] = NULL;
1733 			context.pending--;
1734 			context.submit_error = 1;
1735 			break;
1736 		}
1737 	}
1738 	spin_unlock_irq(&context.lock);
1739 
1740 	wait_for_completion(&context.done);
1741 
1742 	for (i = 0; i < param->sglen; i++) {
1743 		if (urbs[i])
1744 			simple_free_urb(urbs[i]);
1745 	}
1746 	/*
1747 	 * Isochronous transfers are expected to fail sometimes.  As an
1748 	 * arbitrary limit, we will report an error if any submissions
1749 	 * fail or if the transfer failure rate is > 10%.
1750 	 */
1751 	if (status != 0)
1752 		;
1753 	else if (context.submit_error)
1754 		status = -EACCES;
1755 	else if (context.errors > context.packet_count / 10)
1756 		status = -EIO;
1757 	return status;
1758 
1759 fail:
1760 	for (i = 0; i < param->sglen; i++) {
1761 		if (urbs[i])
1762 			simple_free_urb(urbs[i]);
1763 	}
1764 	return status;
1765 }
1766 
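/* Wrapper around simple_io using a buffer that is deliberately offset by
 * one byte, to exercise HCD alignment/bounce-buffer handling.
 */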
1767 static int test_unaligned_bulk(
1768 	struct usbtest_dev *tdev,
1769 	int pipe,
1770 	unsigned length,
1771 	int iterations,
1772 	unsigned transfer_flags,
1773 	const char *label)
1774 {
1775 	int retval;
1776 	struct urb *urb = usbtest_alloc_urb(
1777 		testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);
1778 
1779 	if (!urb)
1780 		return -ENOMEM;
1781 
1782 	retval = simple_io(tdev, urb, iterations, 0, 0, label);
1783 	simple_free_urb(urb);
1784 	return retval;
1785 }
1786 
1787 /*-------------------------------------------------------------------------*/
1788 
1789 /* We only have this one interface to user space, through usbfs.
1790  * User mode code can scan usbfs to find N different devices (maybe on
1791  * different busses) to use when testing, and allocate one thread per
1792  * test.  So discovery is simplified, and we have no device naming issues.
1793  *
1794  * Don't use these only as stress/load tests.  Use them along with
1795  * other USB bus activity:  plugging, unplugging, mousing, mp3 playback,
1796  * video capture, and so on.  Run different tests at different times, in
1797  * different sequences.  Nothing here should interact with other devices,
1798  * except indirectly by consuming USB bandwidth and CPU resources for test
1799  * threads and request completion.  But the only way to know that for sure
1800  * is to test when HC queues are in use by many devices.
1801  *
1802  * WARNING:  Because usbfs grabs udev->dev.sem before calling this ioctl(),
1803  * it locks out usbcore in certain code paths.  Notably, if you disconnect
1804  * the device-under-test, khubd will block forever waiting for the
1805  * ioctl to complete ... so that usb_disconnect() can abort the pending
1806  * urbs and then call usbtest_disconnect().  To abort a test, you're best
1807  * off just killing the userspace task and waiting for it to exit.
1808  */
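/* A minimal user-space sketch (hypothetical; not shipped with this driver):
 * test harnesses normally open the usbfs node of the device under test and
 * forward USBTEST_REQUEST through the USBDEVFS_IOCTL wrapper.  The device
 * path, interface number, and test parameters below are placeholders, and
 * user code must carry its own copy of struct usbtest_param, since it is
 * not in a public header.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/usbdevice_fs.h>
 *
 *	struct usbtest_param param = {
 *		.test_num   = 1,	// simple bulk-OUT writes
 *		.iterations = 1000,
 *		.length     = 512,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno       = 0,	// interface bound to usbtest
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data       = &param,
 *	};
 *	int fd = open("/dev/bus/usb/001/002", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, USBDEVFS_IOCTL, &wrapper) == 0)
 *		printf("test 1: %ld.%06ld s\n",
 *			param.duration.tv_sec, param.duration.tv_usec);
 */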
1809 
1810 static int
1811 usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
1812 {
1813 	struct usbtest_dev	*dev = usb_get_intfdata(intf);
1814 	struct usb_device	*udev = testdev_to_usbdev(dev);
1815 	struct usbtest_param	*param = buf;
1816 	int			retval = -EOPNOTSUPP;
1817 	struct urb		*urb;
1818 	struct scatterlist	*sg;
1819 	struct usb_sg_request	req;
1820 	struct timeval		start;
1821 	unsigned		i;
1822 
1823 	/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
1824 
1825 	pattern = mod_pattern;
1826 
1827 	if (code != USBTEST_REQUEST)
1828 		return -EOPNOTSUPP;
1829 
1830 	if (param->iterations <= 0)
1831 		return -EINVAL;
1832 
1833 	if (mutex_lock_interruptible(&dev->lock))
1834 		return -ERESTARTSYS;
1835 
1836 	/* FIXME: What if a system sleep starts while a test is running? */
1837 
1838 	/* some devices, like ez-usb default devices, need a non-default
1839 	 * altsetting to have any active endpoints.  some tests change
1840 	 * altsettings; force a default so most tests don't need to check.
1841 	 */
1842 	if (dev->info->alt >= 0) {
1843 		int	res;
1844 
1845 		if (intf->altsetting->desc.bInterfaceNumber) {
1846 			mutex_unlock(&dev->lock);
1847 			return -ENODEV;
1848 		}
1849 		res = set_altsetting(dev, dev->info->alt);
1850 		if (res) {
1851 			dev_err(&intf->dev,
1852 					"set altsetting to %d failed, %d\n",
1853 					dev->info->alt, res);
1854 			mutex_unlock(&dev->lock);
1855 			return res;
1856 		}
1857 	}
1858 
1859 	/*
1860 	 * Just a bunch of test cases that every HCD is expected to handle.
1861 	 *
1862 	 * Some may need specific firmware, though it'd be good to have
1863 	 * one firmware image to handle all the test cases.
1864 	 *
1865 	 * FIXME add more tests!  cancel requests, verify the data, control
1866 	 * queueing, concurrent read+write threads, and so on.
1867 	 */
1868 	do_gettimeofday(&start);
1869 	switch (param->test_num) {
1870 
1871 	case 0:
1872 		dev_info(&intf->dev, "TEST 0:  NOP\n");
1873 		retval = 0;
1874 		break;
1875 
1876 	/* Simple non-queued bulk I/O tests */
1877 	case 1:
1878 		if (dev->out_pipe == 0)
1879 			break;
1880 		dev_info(&intf->dev,
1881 				"TEST 1:  write %d bytes %u times\n",
1882 				param->length, param->iterations);
1883 		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1884 		if (!urb) {
1885 			retval = -ENOMEM;
1886 			break;
1887 		}
1888 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1889 		retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
1890 		simple_free_urb(urb);
1891 		break;
1892 	case 2:
1893 		if (dev->in_pipe == 0)
1894 			break;
1895 		dev_info(&intf->dev,
1896 				"TEST 2:  read %d bytes %u times\n",
1897 				param->length, param->iterations);
1898 		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
1899 		if (!urb) {
1900 			retval = -ENOMEM;
1901 			break;
1902 		}
1903 		/* FIRMWARE:  bulk source (maybe generates short writes) */
1904 		retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
1905 		simple_free_urb(urb);
1906 		break;
1907 	case 3:
1908 		if (dev->out_pipe == 0 || param->vary == 0)
1909 			break;
1910 		dev_info(&intf->dev,
1911 				"TEST 3:  write/%d 0..%d bytes %u times\n",
1912 				param->vary, param->length, param->iterations);
1913 		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1914 		if (!urb) {
1915 			retval = -ENOMEM;
1916 			break;
1917 		}
1918 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1919 		retval = simple_io(dev, urb, param->iterations, param->vary,
1920 					0, "test3");
1921 		simple_free_urb(urb);
1922 		break;
1923 	case 4:
1924 		if (dev->in_pipe == 0 || param->vary == 0)
1925 			break;
1926 		dev_info(&intf->dev,
1927 				"TEST 4:  read/%d 0..%d bytes %u times\n",
1928 				param->vary, param->length, param->iterations);
1929 		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
1930 		if (!urb) {
1931 			retval = -ENOMEM;
1932 			break;
1933 		}
1934 		/* FIRMWARE:  bulk source (maybe generates short writes) */
1935 		retval = simple_io(dev, urb, param->iterations, param->vary,
1936 					0, "test4");
1937 		simple_free_urb(urb);
1938 		break;
1939 
1940 	/* Queued bulk I/O tests */
1941 	case 5:
1942 		if (dev->out_pipe == 0 || param->sglen == 0)
1943 			break;
1944 		dev_info(&intf->dev,
1945 			"TEST 5:  write %d sglists %d entries of %d bytes\n",
1946 				param->iterations,
1947 				param->sglen, param->length);
1948 		sg = alloc_sglist(param->sglen, param->length, 0);
1949 		if (!sg) {
1950 			retval = -ENOMEM;
1951 			break;
1952 		}
1953 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1954 		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1955 				&req, sg, param->sglen);
1956 		free_sglist(sg, param->sglen);
1957 		break;
1958 
1959 	case 6:
1960 		if (dev->in_pipe == 0 || param->sglen == 0)
1961 			break;
1962 		dev_info(&intf->dev,
1963 			"TEST 6:  read %d sglists %d entries of %d bytes\n",
1964 				param->iterations,
1965 				param->sglen, param->length);
1966 		sg = alloc_sglist(param->sglen, param->length, 0);
1967 		if (!sg) {
1968 			retval = -ENOMEM;
1969 			break;
1970 		}
1971 		/* FIRMWARE:  bulk source (maybe generates short writes) */
1972 		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1973 				&req, sg, param->sglen);
1974 		free_sglist(sg, param->sglen);
1975 		break;
1976 	case 7:
1977 		if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
1978 			break;
1979 		dev_info(&intf->dev,
1980 			"TEST 7:  write/%d %d sglists %d entries 0..%d bytes\n",
1981 				param->vary, param->iterations,
1982 				param->sglen, param->length);
1983 		sg = alloc_sglist(param->sglen, param->length, param->vary);
1984 		if (!sg) {
1985 			retval = -ENOMEM;
1986 			break;
1987 		}
1988 		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1989 		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1990 				&req, sg, param->sglen);
1991 		free_sglist(sg, param->sglen);
1992 		break;
1993 	case 8:
1994 		if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
1995 			break;
1996 		dev_info(&intf->dev,
1997 			"TEST 8:  read/%d %d sglists %d entries 0..%d bytes\n",
1998 				param->vary, param->iterations,
1999 				param->sglen, param->length);
2000 		sg = alloc_sglist(param->sglen, param->length, param->vary);
2001 		if (!sg) {
2002 			retval = -ENOMEM;
2003 			break;
2004 		}
2005 		/* FIRMWARE:  bulk source (maybe generates short writes) */
2006 		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
2007 				&req, sg, param->sglen);
2008 		free_sglist(sg, param->sglen);
2009 		break;
2010 
2011 	/* non-queued sanity tests for control (chapter 9 subset) */
2012 	case 9:
2013 		retval = 0;
2014 		dev_info(&intf->dev,
2015 			"TEST 9:  ch9 (subset) control tests, %d times\n",
2016 				param->iterations);
2017 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2018 			retval = ch9_postconfig(dev);
2019 		if (retval)
2020 			dev_err(&intf->dev, "ch9 subset failed, "
2021 					"iterations left %d\n", i);
2022 		break;
2023 
2024 	/* queued control messaging */
2025 	case 10:
2026 		retval = 0;
2027 		dev_info(&intf->dev,
2028 				"TEST 10:  queue %d control calls, %d times\n",
2029 				param->sglen,
2030 				param->iterations);
2031 		retval = test_ctrl_queue(dev, param);
2032 		break;
2033 
2034 	/* simple non-queued unlinks (ring with one urb) */
2035 	case 11:
2036 		if (dev->in_pipe == 0 || !param->length)
2037 			break;
2038 		retval = 0;
2039 		dev_info(&intf->dev, "TEST 11:  unlink %d reads of %d\n",
2040 				param->iterations, param->length);
2041 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2042 			retval = unlink_simple(dev, dev->in_pipe,
2043 						param->length);
2044 		if (retval)
2045 			dev_err(&intf->dev, "unlink reads failed %d, "
2046 				"iterations left %d\n", retval, i);
2047 		break;
2048 	case 12:
2049 		if (dev->out_pipe == 0 || !param->length)
2050 			break;
2051 		retval = 0;
2052 		dev_info(&intf->dev, "TEST 12:  unlink %d writes of %d\n",
2053 				param->iterations, param->length);
2054 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2055 			retval = unlink_simple(dev, dev->out_pipe,
2056 						param->length);
2057 		if (retval)
2058 			dev_err(&intf->dev, "unlink writes failed %d, "
2059 				"iterations left %d\n", retval, i);
2060 		break;
2061 
2062 	/* ep halt tests */
2063 	case 13:
2064 		if (dev->out_pipe == 0 && dev->in_pipe == 0)
2065 			break;
2066 		retval = 0;
2067 		dev_info(&intf->dev, "TEST 13:  set/clear %d halts\n",
2068 				param->iterations);
2069 		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2070 			retval = halt_simple(dev);
2071 
2072 		if (retval)
2073 			ERROR(dev, "halts failed, iterations left %d\n", i);
2074 		break;
2075 
2076 	/* control write tests */
2077 	case 14:
2078 		if (!dev->info->ctrl_out)
2079 			break;
2080 		dev_info(&intf->dev, "TEST 14:  %d ep0out, %d..%d vary %d\n",
2081 				param->iterations,
2082 				realworld ? 1 : 0, param->length,
2083 				param->vary);
2084 		retval = ctrl_out(dev, param->iterations,
2085 				param->length, param->vary, 0);
2086 		break;
2087 
2088 	/* iso write tests */
2089 	case 15:
2090 		if (dev->out_iso_pipe == 0 || param->sglen == 0)
2091 			break;
2092 		dev_info(&intf->dev,
2093 			"TEST 15:  write %d iso, %d entries of %d bytes\n",
2094 				param->iterations,
2095 				param->sglen, param->length);
2096 		/* FIRMWARE:  iso sink */
2097 		retval = test_iso_queue(dev, param,
2098 				dev->out_iso_pipe, dev->iso_out, 0);
2099 		break;
2100 
2101 	/* iso read tests */
2102 	case 16:
2103 		if (dev->in_iso_pipe == 0 || param->sglen == 0)
2104 			break;
2105 		dev_info(&intf->dev,
2106 			"TEST 16:  read %d iso, %d entries of %d bytes\n",
2107 				param->iterations,
2108 				param->sglen, param->length);
2109 		/* FIRMWARE:  iso source */
2110 		retval = test_iso_queue(dev, param,
2111 				dev->in_iso_pipe, dev->iso_in, 0);
2112 		break;
2113 
2114 	/* FIXME scatterlist cancel (needs helper thread) */
2115 
2116 	/* Tests for bulk I/O using DMA mapping by core and odd address */
2117 	case 17:
2118 		if (dev->out_pipe == 0)
2119 			break;
2120 		dev_info(&intf->dev,
2121 			"TEST 17:  write odd addr %d bytes %u times core map\n",
2122 			param->length, param->iterations);
2123 
2124 		retval = test_unaligned_bulk(
2125 				dev, dev->out_pipe,
2126 				param->length, param->iterations,
2127 				0, "test17");
2128 		break;
2129 
2130 	case 18:
2131 		if (dev->in_pipe == 0)
2132 			break;
2133 		dev_info(&intf->dev,
2134 			"TEST 18:  read odd addr %d bytes %u times core map\n",
2135 			param->length, param->iterations);
2136 
2137 		retval = test_unaligned_bulk(
2138 				dev, dev->in_pipe,
2139 				param->length, param->iterations,
2140 				0, "test18");
2141 		break;
2142 
2143 	/* Tests for bulk I/O using premapped coherent buffer and odd address */
2144 	case 19:
2145 		if (dev->out_pipe == 0)
2146 			break;
2147 		dev_info(&intf->dev,
2148 			"TEST 19:  write odd addr %d bytes %u times premapped\n",
2149 			param->length, param->iterations);
2150 
2151 		retval = test_unaligned_bulk(
2152 				dev, dev->out_pipe,
2153 				param->length, param->iterations,
2154 				URB_NO_TRANSFER_DMA_MAP, "test19");
2155 		break;
2156 
2157 	case 20:
2158 		if (dev->in_pipe == 0)
2159 			break;
2160 		dev_info(&intf->dev,
2161 			"TEST 20:  read odd addr %d bytes %u times premapped\n",
2162 			param->length, param->iterations);
2163 
2164 		retval = test_unaligned_bulk(
2165 				dev, dev->in_pipe,
2166 				param->length, param->iterations,
2167 				URB_NO_TRANSFER_DMA_MAP, "test20");
2168 		break;
2169 
2170 	/* control write tests with unaligned buffer */
2171 	case 21:
2172 		if (!dev->info->ctrl_out)
2173 			break;
2174 		dev_info(&intf->dev,
2175 				"TEST 21:  %d ep0out odd addr, %d..%d vary %d\n",
2176 				param->iterations,
2177 				realworld ? 1 : 0, param->length,
2178 				param->vary);
2179 		retval = ctrl_out(dev, param->iterations,
2180 				param->length, param->vary, 1);
2181 		break;
2182 
2183 	/* unaligned iso tests */
2184 	case 22:
2185 		if (dev->out_iso_pipe == 0 || param->sglen == 0)
2186 			break;
2187 		dev_info(&intf->dev,
2188 			"TEST 22:  write %d iso odd, %d entries of %d bytes\n",
2189 				param->iterations,
2190 				param->sglen, param->length);
2191 		retval = test_iso_queue(dev, param,
2192 				dev->out_iso_pipe, dev->iso_out, 1);
2193 		break;
2194 
2195 	case 23:
2196 		if (dev->in_iso_pipe == 0 || param->sglen == 0)
2197 			break;
2198 		dev_info(&intf->dev,
2199 			"TEST 23:  read %d iso odd, %d entries of %d bytes\n",
2200 				param->iterations,
2201 				param->sglen, param->length);
2202 		retval = test_iso_queue(dev, param,
2203 				dev->in_iso_pipe, dev->iso_in, 1);
2204 		break;
2205 
2206 	/* unlink URBs from a bulk-OUT queue */
2207 	case 24:
2208 		if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
2209 			break;
2210 		retval = 0;
2211 		dev_info(&intf->dev, "TEST 24:  unlink from %d queues of "
2212 				"%d %d-byte writes\n",
2213 				param->iterations, param->sglen, param->length);
2214 		for (i = param->iterations; retval == 0 && i > 0; --i) {
2215 			retval = unlink_queued(dev, dev->out_pipe,
2216 						param->sglen, param->length);
2217 			if (retval) {
2218 				dev_err(&intf->dev,
2219 					"unlink queued writes failed %d, "
2220 					"iterations left %d\n", retval, i);
2221 				break;
2222 			}
2223 		}
2224 		break;
2225 
2226 	}
2227 	do_gettimeofday(&param->duration);
2228 	param->duration.tv_sec -= start.tv_sec;
2229 	param->duration.tv_usec -= start.tv_usec;
2230 	if (param->duration.tv_usec < 0) {
2231 		param->duration.tv_usec += 1000 * 1000;
2232 		param->duration.tv_sec -= 1;
2233 	}
2234 	mutex_unlock(&dev->lock);
2235 	return retval;
2236 }
2237 
2238 /*-------------------------------------------------------------------------*/
2239 
2240 static unsigned force_interrupt;
2241 module_param(force_interrupt, uint, 0);
2242 MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
2243 
2244 #ifdef	GENERIC
2245 static unsigned short vendor;
2246 module_param(vendor, ushort, 0);
2247 MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
2248 
2249 static unsigned short product;
2250 module_param(product, ushort, 0);
2251 MODULE_PARM_DESC(product, "product code (from vendor)");
2252 #endif
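/* Illustrative usage (values are placeholders only):
 *
 *	modprobe usbtest vendor=0x0547 product=0x2235
 *
 * lets the GENERIC table entry below bind that one device for control
 * tests; force_interrupt=1 makes probe() set up any named endpoints as
 * interrupt pipes rather than bulk.
 */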
2253 
2254 static int
2255 usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
2256 {
2257 	struct usb_device	*udev;
2258 	struct usbtest_dev	*dev;
2259 	struct usbtest_info	*info;
2260 	char			*rtest, *wtest;
2261 	char			*irtest, *iwtest;
2262 
2263 	udev = interface_to_usbdev(intf);
2264 
2265 #ifdef	GENERIC
2266 	/* specify devices by module parameters? */
2267 	if (id->match_flags == 0) {
2268 		/* vendor match required, product match optional */
2269 		if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
2270 			return -ENODEV;
2271 		if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
2272 			return -ENODEV;
2273 		dev_info(&intf->dev, "matched module params, "
2274 					"vend=0x%04x prod=0x%04x\n",
2275 				le16_to_cpu(udev->descriptor.idVendor),
2276 				le16_to_cpu(udev->descriptor.idProduct));
2277 	}
2278 #endif
2279 
2280 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2281 	if (!dev)
2282 		return -ENOMEM;
2283 	info = (struct usbtest_info *) id->driver_info;
2284 	dev->info = info;
2285 	mutex_init(&dev->lock);
2286 
2287 	dev->intf = intf;
2288 
2289 	/* cacheline-aligned scratch for i/o */
2290 	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
2291 	if (dev->buf == NULL) {
2292 		kfree(dev);
2293 		return -ENOMEM;
2294 	}
2295 
2296 	/* NOTE this doesn't yet test the handful of differences that are
2297 	 * visible with high speed interrupts:  bigger maxpacket (1K) and
2298 	 * "high bandwidth" modes (up to 3 packets/uframe).
2299 	 */
2300 	rtest = wtest = "";
2301 	irtest = iwtest = "";
2302 	if (force_interrupt || udev->speed == USB_SPEED_LOW) {
2303 		if (info->ep_in) {
2304 			dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
2305 			rtest = " intr-in";
2306 		}
2307 		if (info->ep_out) {
2308 			dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
2309 			wtest = " intr-out";
2310 		}
2311 	} else {
2312 		if (info->autoconf) {
2313 			int status;
2314 
2315 			status = get_endpoints(dev, intf);
2316 			if (status < 0) {
2317 				WARNING(dev, "couldn't get endpoints, %d\n",
2318 						status);
2319 				kfree(dev->buf);
2320 				kfree(dev);
2321 				return status;
2322 			}
2323 			/* may find bulk or ISO pipes */
2324 		} else {
2325 			if (info->ep_in)
2326 				dev->in_pipe = usb_rcvbulkpipe(udev,
2327 							info->ep_in);
2328 			if (info->ep_out)
2329 				dev->out_pipe = usb_sndbulkpipe(udev,
2330 							info->ep_out);
2331 		}
2332 		if (dev->in_pipe)
2333 			rtest = " bulk-in";
2334 		if (dev->out_pipe)
2335 			wtest = " bulk-out";
2336 		if (dev->in_iso_pipe)
2337 			irtest = " iso-in";
2338 		if (dev->out_iso_pipe)
2339 			iwtest = " iso-out";
2340 	}
2341 
2342 	usb_set_intfdata(intf, dev);
2343 	dev_info(&intf->dev, "%s\n", info->name);
2344 	dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
2345 			usb_speed_string(udev->speed),
2346 			info->ctrl_out ? " in/out" : "",
2347 			rtest, wtest,
2348 			irtest, iwtest,
2349 			info->alt >= 0 ? " (+alt)" : "");
2350 	return 0;
2351 }
2352 
2353 static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
2354 {
2355 	return 0;
2356 }
2357 
2358 static int usbtest_resume(struct usb_interface *intf)
2359 {
2360 	return 0;
2361 }
2362 
2363 
2364 static void usbtest_disconnect(struct usb_interface *intf)
2365 {
2366 	struct usbtest_dev	*dev = usb_get_intfdata(intf);
2367 
2368 	usb_set_intfdata(intf, NULL);
2369 	dev_dbg(&intf->dev, "disconnect\n");
2370 	kfree(dev);
2371 }
2372 
2373 /* Basic testing only needs a device that can source or sink bulk traffic.
2374  * Any device can test control transfers (default with GENERIC binding).
2375  *
2376  * Several entries work with the default EP0 implementation that's built
2377  * into EZ-USB chips.  There's a default vendor ID which can be overridden
2378  * by (very) small config EEPROMS, but otherwise all these devices act
2379  * identically until firmware is loaded:  only EP0 works.  It turns out
2380  * to be easy to make other endpoints work, without modifying that EP0
2381  * behavior.  For now, we expect that kind of firmware.
2382  */
2383 
2384 /* an21xx or fx versions of ez-usb */
2385 static struct usbtest_info ez1_info = {
2386 	.name		= "EZ-USB device",
2387 	.ep_in		= 2,
2388 	.ep_out		= 2,
2389 	.alt		= 1,
2390 };
2391 
2392 /* fx2 version of ez-usb */
2393 static struct usbtest_info ez2_info = {
2394 	.name		= "FX2 device",
2395 	.ep_in		= 6,
2396 	.ep_out		= 2,
2397 	.alt		= 1,
2398 };
2399 
2400 /* ezusb family device with dedicated usb test firmware
2401  */
2402 static struct usbtest_info fw_info = {
2403 	.name		= "usb test device",
2404 	.ep_in		= 2,
2405 	.ep_out		= 2,
2406 	.alt		= 1,
2407 	.autoconf	= 1,		/* iso and ctrl_out need autoconf */
2408 	.ctrl_out	= 1,
2409 	.iso		= 1,		/* iso_ep's are #8 in/out */
2410 };
2411 
2412 /* peripheral running Linux and 'zero.c' test firmware, or
2413  * its user-mode cousin. different versions of this use
2414  * different hardware with the same vendor/product codes.
2415  * host side MUST rely on the endpoint descriptors.
2416  */
2417 static struct usbtest_info gz_info = {
2418 	.name		= "Linux gadget zero",
2419 	.autoconf	= 1,
2420 	.ctrl_out	= 1,
2421 	.alt		= 0,
2422 };
2423 
2424 static struct usbtest_info um_info = {
2425 	.name		= "Linux user mode test driver",
2426 	.autoconf	= 1,
2427 	.alt		= -1,
2428 };
2429 
2430 static struct usbtest_info um2_info = {
2431 	.name		= "Linux user mode ISO test driver",
2432 	.autoconf	= 1,
2433 	.iso		= 1,
2434 	.alt		= -1,
2435 };
2436 
2437 #ifdef IBOT2
2438 /* this is a nice source of high speed bulk data;
2439  * uses an FX2, with firmware provided in the device
2440  */
2441 static struct usbtest_info ibot2_info = {
2442 	.name		= "iBOT2 webcam",
2443 	.ep_in		= 2,
2444 	.alt		= -1,
2445 };
2446 #endif
2447 
2448 #ifdef GENERIC
2449 /* we can use any device to test control traffic */
2450 static struct usbtest_info generic_info = {
2451 	.name		= "Generic USB device",
2452 	.alt		= -1,
2453 };
2454 #endif
2455 
2456 
2457 static const struct usb_device_id id_table[] = {
2458 
2459 	/*-------------------------------------------------------------*/
2460 
2461 	/* EZ-USB devices which download firmware to replace (or in our
2462 	 * case augment) the default device implementation.
2463 	 */
2464 
2465 	/* generic EZ-USB FX controller */
2466 	{ USB_DEVICE(0x0547, 0x2235),
2467 		.driver_info = (unsigned long) &ez1_info,
2468 	},
2469 
2470 	/* CY3671 development board with EZ-USB FX */
2471 	{ USB_DEVICE(0x0547, 0x0080),
2472 		.driver_info = (unsigned long) &ez1_info,
2473 	},
2474 
2475 	/* generic EZ-USB FX2 controller (or development board) */
2476 	{ USB_DEVICE(0x04b4, 0x8613),
2477 		.driver_info = (unsigned long) &ez2_info,
2478 	},
2479 
2480 	/* re-enumerated usb test device firmware */
2481 	{ USB_DEVICE(0xfff0, 0xfff0),
2482 		.driver_info = (unsigned long) &fw_info,
2483 	},
2484 
2485 	/* "Gadget Zero" firmware runs under Linux */
2486 	{ USB_DEVICE(0x0525, 0xa4a0),
2487 		.driver_info = (unsigned long) &gz_info,
2488 	},
2489 
2490 	/* so does a user-mode variant */
2491 	{ USB_DEVICE(0x0525, 0xa4a4),
2492 		.driver_info = (unsigned long) &um_info,
2493 	},
2494 
2495 	/* ... and a user-mode variant that talks iso */
2496 	{ USB_DEVICE(0x0525, 0xa4a3),
2497 		.driver_info = (unsigned long) &um2_info,
2498 	},
2499 
2500 #ifdef KEYSPAN_19Qi
2501 	/* Keyspan 19qi uses an21xx (original EZ-USB) */
2502 	/* this does not coexist with the real Keyspan 19qi driver! */
2503 	{ USB_DEVICE(0x06cd, 0x010b),
2504 		.driver_info = (unsigned long) &ez1_info,
2505 	},
2506 #endif
2507 
2508 	/*-------------------------------------------------------------*/
2509 
2510 #ifdef IBOT2
2511 	/* iBOT2 makes a nice source of high speed bulk-in data */
2512 	/* this does not coexist with a real iBOT2 driver! */
2513 	{ USB_DEVICE(0x0b62, 0x0059),
2514 		.driver_info = (unsigned long) &ibot2_info,
2515 	},
2516 #endif
2517 
2518 	/*-------------------------------------------------------------*/
2519 
2520 #ifdef GENERIC
2521 	/* module params can specify devices to use for control tests */
2522 	{ .driver_info = (unsigned long) &generic_info, },
2523 #endif
2524 
2525 	/*-------------------------------------------------------------*/
2526 
2527 	{ }
2528 };
2529 MODULE_DEVICE_TABLE(usb, id_table);
2530 
2531 static struct usb_driver usbtest_driver = {
2532 	.name =		"usbtest",
2533 	.id_table =	id_table,
2534 	.probe =	usbtest_probe,
2535 	.unlocked_ioctl = usbtest_ioctl,
2536 	.disconnect =	usbtest_disconnect,
2537 	.suspend =	usbtest_suspend,
2538 	.resume =	usbtest_resume,
2539 };
2540 
2541 /*-------------------------------------------------------------------------*/
2542 
2543 static int __init usbtest_init(void)
2544 {
2545 #ifdef GENERIC
2546 	if (vendor)
2547 		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
2548 #endif
2549 	return usb_register(&usbtest_driver);
2550 }
2551 module_init(usbtest_init);
2552 
2553 static void __exit usbtest_exit(void)
2554 {
2555 	usb_deregister(&usbtest_driver);
2556 }
2557 module_exit(usbtest_exit);
2558 
2559 MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
2560 MODULE_LICENSE("GPL");
2561 
2562