1 /* $FreeBSD$ */
2 /*-
3 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 *
5 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #ifdef USB_GLOBAL_INCLUDE_FILE
30 #include USB_GLOBAL_INCLUDE_FILE
31 #else
32 #include <sys/stdint.h>
33 #include <sys/stddef.h>
34 #include <sys/param.h>
35 #include <sys/queue.h>
36 #include <sys/types.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/bus.h>
40 #include <sys/module.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/condvar.h>
44 #include <sys/sysctl.h>
45 #include <sys/sx.h>
46 #include <sys/unistd.h>
47 #include <sys/callout.h>
48 #include <sys/malloc.h>
49 #include <sys/priv.h>
50
51 #include <dev/usb/usb.h>
52 #include <dev/usb/usbdi.h>
53 #include <dev/usb/usbdi_util.h>
54
55 #define USB_DEBUG_VAR usb_debug
56
57 #include <dev/usb/usb_core.h>
58 #include <dev/usb/usb_busdma.h>
59 #include <dev/usb/usb_process.h>
60 #include <dev/usb/usb_transfer.h>
61 #include <dev/usb/usb_device.h>
62 #include <dev/usb/usb_debug.h>
63 #include <dev/usb/usb_util.h>
64
65 #include <dev/usb/usb_controller.h>
66 #include <dev/usb/usb_bus.h>
67 #include <dev/usb/usb_pf.h>
68 #endif /* USB_GLOBAL_INCLUDE_FILE */
69
70 struct usb_std_packet_size {
71 struct {
72 uint16_t min; /* inclusive */
73 uint16_t max; /* inclusive */
74 } range;
75
76 uint16_t fixed[4];
77 };
78
79 static usb_callback_t usb_request_callback;
80
81 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
82 /* This transfer is used for generic control endpoint transfers */
83
84 [0] = {
85 .type = UE_CONTROL,
86 .endpoint = 0x00, /* Control endpoint */
87 .direction = UE_DIR_ANY,
88 .bufsize = USB_EP0_BUFSIZE, /* bytes */
89 .flags = {.proxy_buffer = 1,},
90 .callback = &usb_request_callback,
91 .usb_mode = USB_MODE_DUAL, /* both modes */
92 },
93
94 /* This transfer is used for generic clear stall only */
95
96 [1] = {
97 .type = UE_CONTROL,
98 .endpoint = 0x00, /* Control pipe */
99 .direction = UE_DIR_ANY,
100 .bufsize = sizeof(struct usb_device_request),
101 .callback = &usb_do_clear_stall_callback,
102 .timeout = 1000, /* 1 second */
103 .interval = 50, /* 50ms */
104 .usb_mode = USB_MODE_HOST,
105 },
106 };
107
108 static const struct usb_config usb_control_ep_quirk_cfg[USB_CTRL_XFER_MAX] = {
109 /* This transfer is used for generic control endpoint transfers */
110
111 [0] = {
112 .type = UE_CONTROL,
113 .endpoint = 0x00, /* Control endpoint */
114 .direction = UE_DIR_ANY,
115 .bufsize = 65535, /* bytes */
116 .callback = &usb_request_callback,
117 .usb_mode = USB_MODE_DUAL, /* both modes */
118 },
119
120 /* This transfer is used for generic clear stall only */
121
122 [1] = {
123 .type = UE_CONTROL,
124 .endpoint = 0x00, /* Control pipe */
125 .direction = UE_DIR_ANY,
126 .bufsize = sizeof(struct usb_device_request),
127 .callback = &usb_do_clear_stall_callback,
128 .timeout = 1000, /* 1 second */
129 .interval = 50, /* 50ms */
130 .usb_mode = USB_MODE_HOST,
131 },
132 };
133
134 /* function prototypes */
135
136 static void usbd_update_max_frame_size(struct usb_xfer *);
137 static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
138 static void usbd_control_transfer_init(struct usb_xfer *);
139 static int usbd_setup_ctrl_transfer(struct usb_xfer *);
140 static void usb_callback_proc(struct usb_proc_msg *);
141 static void usbd_callback_ss_done_defer(struct usb_xfer *);
142 static void usbd_callback_wrapper(struct usb_xfer_queue *);
143 static void usbd_transfer_start_cb(void *);
144 static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *);
145 static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
146 uint8_t type, enum usb_dev_speed speed);
147
148 /*------------------------------------------------------------------------*
149 * usb_request_callback
150 *------------------------------------------------------------------------*/
151 static void
152 usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
153 {
154 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
155 usb_handle_request_callback(xfer, error);
156 else
157 usbd_do_request_callback(xfer, error);
158 }
159
160 /*------------------------------------------------------------------------*
161 * usbd_update_max_frame_size
162 *
163 * This function updates the maximum frame size, because high speed USB
164 * can transfer multiple consecutive packets.
165 *------------------------------------------------------------------------*/
166 static void
167 usbd_update_max_frame_size(struct usb_xfer *xfer)
168 {
169 /* compute maximum frame size */
170 /* this computation should not overflow 16-bit */
171 /* max = 15 * 1024 */
172
173 xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
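/*
 * For example, a high-speed endpoint with max_packet_size = 1024
 * and max_packet_count = 3 gets a maximum frame size of 3072 bytes.
 */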
174 }
175
176 /*------------------------------------------------------------------------*
177 * usbd_get_dma_delay
178 *
179 * The following function is called when we need to
180 * synchronize with DMA hardware.
181 *
182 * Returns:
183 * 0: no DMA delay required
184 * Else: milliseconds of DMA delay
185 *------------------------------------------------------------------------*/
186 usb_timeout_t
187 usbd_get_dma_delay(struct usb_device *udev)
188 {
189 const struct usb_bus_methods *mtod;
190 uint32_t temp;
191
192 mtod = udev->bus->methods;
193 temp = 0;
194
195 if (mtod->get_dma_delay) {
196 (mtod->get_dma_delay) (udev, &temp);
197 /*
198 * Round up and convert to milliseconds. Note that we use
199 * 1024 milliseconds per second to save a division.
200 */
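/* For example, a returned value of 1..1024 becomes 1 and 1025..2048 becomes 2. */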
201 temp += 0x3FF;
202 temp /= 0x400;
203 }
204 return (temp);
205 }
206
207 /*------------------------------------------------------------------------*
208 * usbd_transfer_setup_sub_malloc
209 *
210 * This function will allocate one or more DMA'able memory chunks
211 * according to the "size", "align" and "count" arguments. "ppc" will
212 * point to a linear array of USB page caches afterwards.
213 *
214 * If the "align" argument is equal to "1" a non-contiguous allocation
215 * can happen. Otherwise, if the "align" argument is greater than "1", the
216 * allocation will always be contiguous in memory.
217 *
218 * Returns:
219 * 0: Success
220 * Else: Failure
221 *------------------------------------------------------------------------*/
222 #if USB_HAVE_BUSDMA
223 uint8_t
224 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
225 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
226 usb_size_t count)
227 {
228 struct usb_page_cache *pc;
229 struct usb_page *pg;
230 void *buf;
231 usb_size_t n_dma_pc;
232 usb_size_t n_dma_pg;
233 usb_size_t n_obj;
234 usb_size_t x;
235 usb_size_t y;
236 usb_size_t r;
237 usb_size_t z;
238
239 USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
240 align));
241 USB_ASSERT(size > 0, ("Invalid size = 0\n"));
242
243 if (count == 0) {
244 return (0); /* nothing to allocate */
245 }
246 /*
247 * Make sure that the size is aligned properly.
248 */
249 size = -((-size) & (-align));
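/*
 * For a power-of-two "align", the expression above rounds "size"
 * up to the next multiple of "align".
 */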
250
251 /*
252 * Try multi-allocation chunks to reduce the number of DMA
253 * allocations, because DMA allocations are slow.
254 */
255 if (align == 1) {
256 /* special case - non-cached multi page DMA memory */
257 n_dma_pc = count;
258 n_dma_pg = (2 + (size / USB_PAGE_SIZE));
259 n_obj = 1;
260 } else if (size >= USB_PAGE_SIZE) {
261 n_dma_pc = count;
262 n_dma_pg = 1;
263 n_obj = 1;
264 } else {
265 /* compute number of objects per page */
266 #ifdef USB_DMA_SINGLE_ALLOC
267 n_obj = 1;
268 #else
269 n_obj = (USB_PAGE_SIZE / size);
270 #endif
271 /*
272 * Compute number of DMA chunks, rounded up
273 * to nearest one:
274 */
275 n_dma_pc = howmany(count, n_obj);
276 n_dma_pg = 1;
277 }
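/*
 * Worked example, assuming USB_PAGE_SIZE is 4096 and
 * USB_DMA_SINGLE_ALLOC is not defined: size = 512, align = 512 and
 * count = 8 gives n_obj = 8, n_dma_pc = howmany(8, 8) = 1 and
 * n_dma_pg = 1.
 */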
278
279 /*
280 * DMA memory is allocated once, but mapped twice. That's why
281 * there is one list for auto-free and another list for
282 * non-auto-free which only holds the mapping and not the
283 * allocation.
284 */
285 if (parm->buf == NULL) {
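/*
 * First pass of usbd_transfer_setup(): "parm->buf" is still
 * NULL, so only advance the pointers; their final values are
 * used to size the real allocation before the second pass.
 */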
286 /* reserve memory (auto-free) */
287 parm->dma_page_ptr += n_dma_pc * n_dma_pg;
288 parm->dma_page_cache_ptr += n_dma_pc;
289
290 /* reserve memory (no-auto-free) */
291 parm->dma_page_ptr += count * n_dma_pg;
292 parm->xfer_page_cache_ptr += count;
293 return (0);
294 }
295 for (x = 0; x != n_dma_pc; x++) {
296 /* need to initialize the page cache */
297 parm->dma_page_cache_ptr[x].tag_parent =
298 &parm->curr_xfer->xroot->dma_parent_tag;
299 }
300 for (x = 0; x != count; x++) {
301 /* need to initialize the page cache */
302 parm->xfer_page_cache_ptr[x].tag_parent =
303 &parm->curr_xfer->xroot->dma_parent_tag;
304 }
305
306 if (ppc != NULL) {
307 if (n_obj != 1)
308 *ppc = parm->xfer_page_cache_ptr;
309 else
310 *ppc = parm->dma_page_cache_ptr;
311 }
312 r = count; /* set remainder count */
313 z = n_obj * size; /* set allocation size */
314 pc = parm->xfer_page_cache_ptr;
315 pg = parm->dma_page_ptr;
316
317 if (n_obj == 1) {
318 /*
319 * Avoid mapping memory twice if only a single object
320 * should be allocated per page cache:
321 */
322 for (x = 0; x != n_dma_pc; x++) {
323 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
324 pg, z, align)) {
325 return (1); /* failure */
326 }
327 /* Make room for one DMA page cache and "n_dma_pg" pages */
328 parm->dma_page_cache_ptr++;
329 pg += n_dma_pg;
330 }
331 } else {
332 for (x = 0; x != n_dma_pc; x++) {
333 if (r < n_obj) {
334 /* compute last remainder */
335 z = r * size;
336 n_obj = r;
337 }
338 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
339 pg, z, align)) {
340 return (1); /* failure */
341 }
342 /* Set beginning of current buffer */
343 buf = parm->dma_page_cache_ptr->buffer;
344 /* Make room for one DMA page cache and "n_dma_pg" pages */
345 parm->dma_page_cache_ptr++;
346 pg += n_dma_pg;
347
348 for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
349 /* Load sub-chunk into DMA */
350 if (usb_pc_dmamap_create(pc, size)) {
351 return (1); /* failure */
352 }
353 pc->buffer = USB_ADD_BYTES(buf, y * size);
354 pc->page_start = pg;
355
356 USB_MTX_LOCK(pc->tag_parent->mtx);
357 if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
358 USB_MTX_UNLOCK(pc->tag_parent->mtx);
359 return (1); /* failure */
360 }
361 USB_MTX_UNLOCK(pc->tag_parent->mtx);
362 }
363 }
364 }
365
366 parm->xfer_page_cache_ptr = pc;
367 parm->dma_page_ptr = pg;
368 return (0);
369 }
370 #endif
371
372 /*------------------------------------------------------------------------*
373 * usbd_get_max_frame_length
374 *
375 * This function returns the maximum single frame length as computed by
376 * usbd_transfer_setup(). It is useful when computing buffer sizes for
377 * devices having multiple alternate settings. The SuperSpeed endpoint
378 * companion pointer is allowed to be NULL.
379 *------------------------------------------------------------------------*/
380 uint32_t
381 usbd_get_max_frame_length(const struct usb_endpoint_descriptor *edesc,
382 const struct usb_endpoint_ss_comp_descriptor *ecomp,
383 enum usb_dev_speed speed)
384 {
385 uint32_t max_packet_size;
386 uint32_t max_packet_count;
387 uint8_t type;
388
389 max_packet_size = UGETW(edesc->wMaxPacketSize);
390 max_packet_count = 1;
391 type = (edesc->bmAttributes & UE_XFERTYPE);
392
393 switch (speed) {
394 case USB_SPEED_HIGH:
395 switch (type) {
396 case UE_ISOCHRONOUS:
397 case UE_INTERRUPT:
398 max_packet_count +=
399 (max_packet_size >> 11) & 3;
400
401 /* check for invalid max packet count */
402 if (max_packet_count > 3)
403 max_packet_count = 3;
404 break;
405 default:
406 break;
407 }
408 max_packet_size &= 0x7FF;
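/*
 * For example, a high-bandwidth isochronous endpoint reporting
 * wMaxPacketSize = 0x1400 yields max_packet_size = 0x400 (1024
 * bytes) and max_packet_count = 3, i.e. a 3072 byte frame.
 */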
409 break;
410 case USB_SPEED_SUPER:
411 max_packet_count += (max_packet_size >> 11) & 3;
412
413 if (ecomp != NULL)
414 max_packet_count += ecomp->bMaxBurst;
415
416 if ((max_packet_count == 0) ||
417 (max_packet_count > 16))
418 max_packet_count = 16;
419
420 switch (type) {
421 case UE_CONTROL:
422 max_packet_count = 1;
423 break;
424 case UE_ISOCHRONOUS:
425 if (ecomp != NULL) {
426 uint8_t mult;
427
428 mult = UE_GET_SS_ISO_MULT(
429 ecomp->bmAttributes) + 1;
430 if (mult > 3)
431 mult = 3;
432
433 max_packet_count *= mult;
434 }
435 break;
436 default:
437 break;
438 }
439 max_packet_size &= 0x7FF;
440 break;
441 default:
442 break;
443 }
444 return (max_packet_size * max_packet_count);
445 }
446
447 /*------------------------------------------------------------------------*
448 * usbd_transfer_setup_sub - transfer setup subroutine
449 *
450 * This function must be called from the "xfer_setup" callback of the
451 * USB Host or Device controller driver when setting up a USB
452 * transfer. This function will set up the correct packet sizes, buffer
453 * sizes, flags and more, which are stored in the "usb_xfer"
454 * structure.
455 *------------------------------------------------------------------------*/
456 void
457 usbd_transfer_setup_sub(struct usb_setup_params *parm)
458 {
459 enum {
460 REQ_SIZE = 8,
461 MIN_PKT = 8,
462 };
463 struct usb_xfer *xfer = parm->curr_xfer;
464 const struct usb_config *setup = parm->curr_setup;
465 struct usb_endpoint_ss_comp_descriptor *ecomp;
466 struct usb_endpoint_descriptor *edesc;
467 struct usb_std_packet_size std_size;
468 usb_frcount_t n_frlengths;
469 usb_frcount_t n_frbuffers;
470 usb_frcount_t x;
471 uint16_t maxp_old;
472 uint8_t type;
473 uint8_t zmps;
474
475 /*
476 * Sanity check. The following parameters must be initialized before
477 * calling this function.
478 */
479 if ((parm->hc_max_packet_size == 0) ||
480 (parm->hc_max_packet_count == 0) ||
481 (parm->hc_max_frame_size == 0)) {
482 parm->err = USB_ERR_INVAL;
483 goto done;
484 }
485 edesc = xfer->endpoint->edesc;
486 ecomp = xfer->endpoint->ecomp;
487
488 type = (edesc->bmAttributes & UE_XFERTYPE);
489
490 xfer->flags = setup->flags;
491 xfer->nframes = setup->frames;
492 xfer->timeout = setup->timeout;
493 xfer->callback = setup->callback;
494 xfer->interval = setup->interval;
495 xfer->endpointno = edesc->bEndpointAddress;
496 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
497 xfer->max_packet_count = 1;
498 /* make a shadow copy: */
499 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
500
501 parm->bufsize = setup->bufsize;
502
503 switch (parm->speed) {
504 case USB_SPEED_HIGH:
505 switch (type) {
506 case UE_ISOCHRONOUS:
507 case UE_INTERRUPT:
508 xfer->max_packet_count +=
509 (xfer->max_packet_size >> 11) & 3;
510
511 /* check for invalid max packet count */
512 if (xfer->max_packet_count > 3)
513 xfer->max_packet_count = 3;
514 break;
515 default:
516 break;
517 }
518 xfer->max_packet_size &= 0x7FF;
519 break;
520 case USB_SPEED_SUPER:
521 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
522
523 if (ecomp != NULL)
524 xfer->max_packet_count += ecomp->bMaxBurst;
525
526 if ((xfer->max_packet_count == 0) ||
527 (xfer->max_packet_count > 16))
528 xfer->max_packet_count = 16;
529
530 switch (type) {
531 case UE_CONTROL:
532 xfer->max_packet_count = 1;
533 break;
534 case UE_ISOCHRONOUS:
535 if (ecomp != NULL) {
536 uint8_t mult;
537
538 mult = UE_GET_SS_ISO_MULT(
539 ecomp->bmAttributes) + 1;
540 if (mult > 3)
541 mult = 3;
542
543 xfer->max_packet_count *= mult;
544 }
545 break;
546 default:
547 break;
548 }
549 xfer->max_packet_size &= 0x7FF;
550 break;
551 default:
552 break;
553 }
554 /* range check "max_packet_count" */
555
556 if (xfer->max_packet_count > parm->hc_max_packet_count) {
557 xfer->max_packet_count = parm->hc_max_packet_count;
558 }
559
560 /* store max packet size value before filtering */
561
562 maxp_old = xfer->max_packet_size;
563
564 /* filter "wMaxPacketSize" according to HC capabilities */
565
566 if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
567 (xfer->max_packet_size == 0)) {
568 xfer->max_packet_size = parm->hc_max_packet_size;
569 }
570 /* filter "wMaxPacketSize" according to standard sizes */
571
572 usbd_get_std_packet_size(&std_size, type, parm->speed);
573
574 if (std_size.range.min || std_size.range.max) {
575 if (xfer->max_packet_size < std_size.range.min) {
576 xfer->max_packet_size = std_size.range.min;
577 }
578 if (xfer->max_packet_size > std_size.range.max) {
579 xfer->max_packet_size = std_size.range.max;
580 }
581 } else {
582 if (xfer->max_packet_size >= std_size.fixed[3]) {
583 xfer->max_packet_size = std_size.fixed[3];
584 } else if (xfer->max_packet_size >= std_size.fixed[2]) {
585 xfer->max_packet_size = std_size.fixed[2];
586 } else if (xfer->max_packet_size >= std_size.fixed[1]) {
587 xfer->max_packet_size = std_size.fixed[1];
588 } else {
589 /* only one possibility left */
590 xfer->max_packet_size = std_size.fixed[0];
591 }
592 }
593
594 /*
595 * Check if the max packet size was outside its allowed range
596 * and clamped to a valid value:
597 */
598 if (maxp_old != xfer->max_packet_size)
599 xfer->flags_int.maxp_was_clamped = 1;
600
601 /* compute "max_frame_size" */
602
603 usbd_update_max_frame_size(xfer);
604
605 /* check interrupt interval and transfer pre-delay */
606
607 if (type == UE_ISOCHRONOUS) {
608 uint16_t frame_limit;
609
610 xfer->interval = 0; /* not used, must be zero */
611 xfer->flags_int.isochronous_xfr = 1; /* set flag */
612
613 if (xfer->timeout == 0) {
614 /*
615 * set a default timeout in
616 * case something goes wrong!
617 */
618 xfer->timeout = 1000 / 4;
619 }
620 switch (parm->speed) {
621 case USB_SPEED_LOW:
622 case USB_SPEED_FULL:
623 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
624 xfer->fps_shift = 0;
625 break;
626 default:
627 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
628 xfer->fps_shift = edesc->bInterval;
629 if (xfer->fps_shift > 0)
630 xfer->fps_shift--;
631 if (xfer->fps_shift > 3)
632 xfer->fps_shift = 3;
633 if (xfer->flags.pre_scale_frames != 0)
634 xfer->nframes <<= (3 - xfer->fps_shift);
635 break;
636 }
637
638 if (xfer->nframes > frame_limit) {
639 /*
640 * this is not going to work
641 * across all hardware
642 */
643 parm->err = USB_ERR_INVAL;
644 goto done;
645 }
646 if (xfer->nframes == 0) {
647 /*
648 * this is not a valid value
649 */
650 parm->err = USB_ERR_ZERO_NFRAMES;
651 goto done;
652 }
653 } else {
654 /*
655 * If a value is specified, use that; otherwise check the
656 * endpoint descriptor!
657 */
658 if (type == UE_INTERRUPT) {
659 uint32_t temp;
660
661 if (xfer->interval == 0) {
662 xfer->interval = edesc->bInterval;
663
664 switch (parm->speed) {
665 case USB_SPEED_LOW:
666 case USB_SPEED_FULL:
667 break;
668 default:
669 /* 125us -> 1ms */
670 if (xfer->interval < 4)
671 xfer->interval = 1;
672 else if (xfer->interval > 16)
673 xfer->interval = (1 << (16 - 4));
674 else
675 xfer->interval =
676 (1 << (xfer->interval - 4));
677 break;
678 }
679 }
680
681 if (xfer->interval == 0) {
682 /*
683 * One millisecond is the smallest
684 * interval we support:
685 */
686 xfer->interval = 1;
687 }
688
689 xfer->fps_shift = 0;
690 temp = 1;
691
692 while ((temp != 0) && (temp < xfer->interval)) {
693 xfer->fps_shift++;
694 temp *= 2;
695 }
696
697 switch (parm->speed) {
698 case USB_SPEED_LOW:
699 case USB_SPEED_FULL:
700 break;
701 default:
702 xfer->fps_shift += 3;
703 break;
704 }
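/*
 * Example (high speed): bInterval = 8 gives interval =
 * 1 << (8 - 4) = 16 ms and fps_shift = 4 + 3 = 7, since
 * 2^7 = 128 microframes of 125 us make up 16 ms.
 */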
705 }
706 }
707
708 /*
709 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
710 * to be equal to zero when setting up USB transfers, because
711 * allowing that would require a lot of extra code in the USB kernel.
712 */
713
714 if ((xfer->max_frame_size == 0) ||
715 (xfer->max_packet_size == 0)) {
716 zmps = 1;
717
718 if ((parm->bufsize <= MIN_PKT) &&
719 (type != UE_CONTROL) &&
720 (type != UE_BULK)) {
721 /* workaround */
722 xfer->max_packet_size = MIN_PKT;
723 xfer->max_packet_count = 1;
724 parm->bufsize = 0; /* automatic setup length */
725 usbd_update_max_frame_size(xfer);
726
727 } else {
728 parm->err = USB_ERR_ZERO_MAXP;
729 goto done;
730 }
731
732 } else {
733 zmps = 0;
734 }
735
736 /*
737 * check if we should setup a default
738 * length:
739 */
740
741 if (parm->bufsize == 0) {
742 parm->bufsize = xfer->max_frame_size;
743
744 if (type == UE_ISOCHRONOUS) {
745 parm->bufsize *= xfer->nframes;
746 }
747 }
748 /*
749 * check if we are about to setup a proxy
750 * type of buffer:
751 */
752
753 if (xfer->flags.proxy_buffer) {
754 /* round bufsize up */
755
756 parm->bufsize += (xfer->max_frame_size - 1);
757
758 if (parm->bufsize < xfer->max_frame_size) {
759 /* length wrapped around */
760 parm->err = USB_ERR_INVAL;
761 goto done;
762 }
763 /* subtract remainder */
764
765 parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
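/*
 * The two steps above round "bufsize" up to a whole number of
 * frames, e.g. bufsize = 1000 with max_frame_size = 512 becomes
 * 1024 bytes.
 */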
766
767 /* add length of USB device request structure, if any */
768
769 if (type == UE_CONTROL) {
770 parm->bufsize += REQ_SIZE; /* SETUP message */
771 }
772 }
773 xfer->max_data_length = parm->bufsize;
774
775 /* Setup "n_frlengths" and "n_frbuffers" */
776
777 if (type == UE_ISOCHRONOUS) {
778 n_frlengths = xfer->nframes;
779 n_frbuffers = 1;
780 } else {
781 if (type == UE_CONTROL) {
782 xfer->flags_int.control_xfr = 1;
783 if (xfer->nframes == 0) {
784 if (parm->bufsize <= REQ_SIZE) {
785 /*
786 * there will never be any data
787 * stage
788 */
789 xfer->nframes = 1;
790 } else {
791 xfer->nframes = 2;
792 }
793 }
794 } else {
795 if (xfer->nframes == 0) {
796 xfer->nframes = 1;
797 }
798 }
799
800 n_frlengths = xfer->nframes;
801 n_frbuffers = xfer->nframes;
802 }
803
804 /*
805 * check if we have room for the
806 * USB device request structure:
807 */
808
809 if (type == UE_CONTROL) {
810 if (xfer->max_data_length < REQ_SIZE) {
811 /* length wrapped around or too small bufsize */
812 parm->err = USB_ERR_INVAL;
813 goto done;
814 }
815 xfer->max_data_length -= REQ_SIZE;
816 }
817 /*
818 * Setup "frlengths" and shadow "frlengths" for keeping the
819 * initial frame lengths when a USB transfer is complete. This
820 * information is useful when computing isochronous offsets.
821 */
822 xfer->frlengths = parm->xfer_length_ptr;
823 parm->xfer_length_ptr += 2 * n_frlengths;
824
825 /* setup "frbuffers" */
826 xfer->frbuffers = parm->xfer_page_cache_ptr;
827 parm->xfer_page_cache_ptr += n_frbuffers;
828
829 /* initialize max frame count */
830 xfer->max_frame_count = xfer->nframes;
831
832 /*
833 * check if we need to setup
834 * a local buffer:
835 */
836
837 if (!xfer->flags.ext_buffer) {
838 #if USB_HAVE_BUSDMA
839 struct usb_page_search page_info;
840 struct usb_page_cache *pc;
841
842 if (usbd_transfer_setup_sub_malloc(parm,
843 &pc, parm->bufsize, 1, 1)) {
844 parm->err = USB_ERR_NOMEM;
845 } else if (parm->buf != NULL) {
846 usbd_get_page(pc, 0, &page_info);
847
848 xfer->local_buffer = page_info.buffer;
849
850 usbd_xfer_set_frame_offset(xfer, 0, 0);
851
852 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
853 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
854 }
855 }
856 #else
857 /* align data */
858 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
859
860 if (parm->buf != NULL) {
861 xfer->local_buffer =
862 USB_ADD_BYTES(parm->buf, parm->size[0]);
863
864 usbd_xfer_set_frame_offset(xfer, 0, 0);
865
866 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
867 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
868 }
869 }
870 parm->size[0] += parm->bufsize;
871
872 /* align data again */
873 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
874 #endif
875 }
876 /*
877 * Compute maximum buffer size
878 */
879
880 if (parm->bufsize_max < parm->bufsize) {
881 parm->bufsize_max = parm->bufsize;
882 }
883 #if USB_HAVE_BUSDMA
884 if (xfer->flags_int.bdma_enable) {
885 /*
886 * Setup "dma_page_ptr".
887 *
888 * Proof for formula below:
889 *
890 * Assume there are three USB frames having length "a", "b" and
891 * "c". These USB frames will at maximum need "z"
892 * "usb_page" structures. "z" is given by:
893 *
894 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
895 * ((c / USB_PAGE_SIZE) + 2);
896 *
897 * Constraining "a", "b" and "c" like this:
898 *
899 * (a + b + c) <= parm->bufsize
900 *
901 * We know that:
902 *
903 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
904 *
905 * Here is the general formula:
906 */
907 xfer->dma_page_ptr = parm->dma_page_ptr;
908 parm->dma_page_ptr += (2 * n_frbuffers);
909 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
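/*
 * Example, assuming USB_PAGE_SIZE is 4096: n_frbuffers = 2 and
 * bufsize = 16384 reserve 2 * 2 + 4 = 8 "usb_page" entries.
 */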
910 }
911 #endif
912 if (zmps) {
913 /* correct maximum data length */
914 xfer->max_data_length = 0;
915 }
916 /* subtract USB frame remainder from "hc_max_frame_size" */
917
918 xfer->max_hc_frame_size =
919 (parm->hc_max_frame_size -
920 (parm->hc_max_frame_size % xfer->max_frame_size));
921
922 if (xfer->max_hc_frame_size == 0) {
923 parm->err = USB_ERR_INVAL;
924 goto done;
925 }
926
927 /* initialize frame buffers */
928
929 if (parm->buf) {
930 for (x = 0; x != n_frbuffers; x++) {
931 xfer->frbuffers[x].tag_parent =
932 &xfer->xroot->dma_parent_tag;
933 #if USB_HAVE_BUSDMA
934 if (xfer->flags_int.bdma_enable &&
935 (parm->bufsize_max > 0)) {
936 if (usb_pc_dmamap_create(
937 xfer->frbuffers + x,
938 parm->bufsize_max)) {
939 parm->err = USB_ERR_NOMEM;
940 goto done;
941 }
942 }
943 #endif
944 }
945 }
946 done:
947 if (parm->err) {
948 /*
949 * Set some dummy values so that we avoid division by zero:
950 */
951 xfer->max_hc_frame_size = 1;
952 xfer->max_frame_size = 1;
953 xfer->max_packet_size = 1;
954 xfer->max_data_length = 0;
955 xfer->nframes = 0;
956 xfer->max_frame_count = 0;
957 }
958 }
959
960 static uint8_t
961 usbd_transfer_setup_has_bulk(const struct usb_config *setup_start,
962 uint16_t n_setup)
963 {
964 while (n_setup--) {
965 uint8_t type = setup_start[n_setup].type;
966 if (type == UE_BULK || type == UE_BULK_INTR ||
967 type == UE_TYPE_ANY)
968 return (1);
969 }
970 return (0);
971 }
972
973 /*------------------------------------------------------------------------*
974 * usbd_transfer_setup - setup an array of USB transfers
975 *
976 * NOTE: You must always call "usbd_transfer_unsetup" after calling
977 * "usbd_transfer_setup" if success was returned.
978 *
979 * The idea is that the USB device driver should pre-allocate all its
980 * transfers by one call to this function.
981 *
982 * Return values:
983 * 0: Success
984 * Else: Failure
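 *
 * Sketch of a typical call; the driver-side names below, like
 * "hypo_config", "hypo_read_callback", "sc" and "iface_index", are
 * hypothetical:
 *
 *	static const struct usb_config hypo_config[1] = {
 *		[0] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_IN,
 *			.bufsize = 512,
 *			.callback = &hypo_read_callback,
 *		},
 *	};
 *
 *	error = usbd_transfer_setup(udev, &iface_index, sc->sc_xfer,
 *	    hypo_config, 1, sc, &sc->sc_mtx);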
985 *------------------------------------------------------------------------*/
986 usb_error_t
987 usbd_transfer_setup(struct usb_device *udev,
988 const uint8_t *ifaces, struct usb_xfer **ppxfer,
989 const struct usb_config *setup_start, uint16_t n_setup,
990 void *priv_sc, struct mtx *xfer_mtx)
991 {
992 const struct usb_config *setup_end = setup_start + n_setup;
993 const struct usb_config *setup;
994 struct usb_setup_params *parm;
995 struct usb_endpoint *ep;
996 struct usb_xfer_root *info;
997 struct usb_xfer *xfer;
998 void *buf = NULL;
999 usb_error_t error = 0;
1000 uint16_t n;
1001 uint16_t refcount;
1002 uint8_t do_unlock;
1003
1004 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1005 "usbd_transfer_setup can sleep!");
1006
1007 /* do some checking first */
1008
1009 if (n_setup == 0) {
1010 DPRINTFN(6, "setup array has zero length!\n");
1011 return (USB_ERR_INVAL);
1012 }
1013 if (ifaces == NULL) {
1014 DPRINTFN(6, "ifaces array is NULL!\n");
1015 return (USB_ERR_INVAL);
1016 }
1017 if (xfer_mtx == NULL) {
1018 DPRINTFN(6, "using global lock\n");
1019 xfer_mtx = &Giant;
1020 }
1021
1022 /* more sanity checks */
1023
1024 for (setup = setup_start, n = 0;
1025 setup != setup_end; setup++, n++) {
1026 if (setup->bufsize == (usb_frlength_t)-1) {
1027 error = USB_ERR_BAD_BUFSIZE;
1028 DPRINTF("invalid bufsize\n");
1029 }
1030 if (setup->callback == NULL) {
1031 error = USB_ERR_NO_CALLBACK;
1032 DPRINTF("no callback\n");
1033 }
1034 ppxfer[n] = NULL;
1035 }
1036
1037 if (error)
1038 return (error);
1039
1040 /* Protect scratch area */
1041 do_unlock = usbd_ctrl_lock(udev);
1042
1043 refcount = 0;
1044 info = NULL;
1045
1046 parm = &udev->scratch.xfer_setup[0].parm;
1047 memset(parm, 0, sizeof(*parm));
1048
1049 parm->udev = udev;
1050 parm->speed = usbd_get_speed(udev);
1051 parm->hc_max_packet_count = 1;
1052
1053 if (parm->speed >= USB_SPEED_MAX) {
1054 parm->err = USB_ERR_INVAL;
1055 goto done;
1056 }
1057 /* setup all transfers */
1058
1059 while (1) {
1060 if (buf) {
1061 /*
1062 * Initialize the "usb_xfer_root" structure,
1063 * which is common for all our USB transfers.
1064 */
1065 info = USB_ADD_BYTES(buf, 0);
1066
1067 info->memory_base = buf;
1068 info->memory_size = parm->size[0];
1069
1070 #if USB_HAVE_BUSDMA
1071 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
1072 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
1073 #endif
1074 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
1075 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
1076
1077 cv_init(&info->cv_drain, "WDRAIN");
1078
1079 info->xfer_mtx = xfer_mtx;
1080 #if USB_HAVE_BUSDMA
1081 usb_dma_tag_setup(&info->dma_parent_tag,
1082 parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
1083 xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits,
1084 parm->dma_tag_max);
1085 #endif
1086
1087 info->bus = udev->bus;
1088 info->udev = udev;
1089
1090 TAILQ_INIT(&info->done_q.head);
1091 info->done_q.command = &usbd_callback_wrapper;
1092 #if USB_HAVE_BUSDMA
1093 TAILQ_INIT(&info->dma_q.head);
1094 info->dma_q.command = &usb_bdma_work_loop;
1095 #endif
1096 info->done_m[0].hdr.pm_callback = &usb_callback_proc;
1097 info->done_m[0].xroot = info;
1098 info->done_m[1].hdr.pm_callback = &usb_callback_proc;
1099 info->done_m[1].xroot = info;
1100
1101 /*
1102 * In device side mode control endpoint
1103 * requests need to run from a separate
1104 * context, else there is a chance of
1105 * deadlock!
1106 */
1107 if (setup_start == usb_control_ep_cfg ||
1108 setup_start == usb_control_ep_quirk_cfg)
1109 info->done_p =
1110 USB_BUS_CONTROL_XFER_PROC(udev->bus);
1111 else if (xfer_mtx == &Giant)
1112 info->done_p =
1113 USB_BUS_GIANT_PROC(udev->bus);
1114 else if (usbd_transfer_setup_has_bulk(setup_start, n_setup))
1115 info->done_p =
1116 USB_BUS_NON_GIANT_BULK_PROC(udev->bus);
1117 else
1118 info->done_p =
1119 USB_BUS_NON_GIANT_ISOC_PROC(udev->bus);
1120 }
1121 /* reset sizes */
1122
1123 parm->size[0] = 0;
1124 parm->buf = buf;
1125 parm->size[0] += sizeof(info[0]);
1126
1127 for (setup = setup_start, n = 0;
1128 setup != setup_end; setup++, n++) {
1129 /* skip USB transfers without callbacks: */
1130 if (setup->callback == NULL) {
1131 continue;
1132 }
1133 /* see if there is a matching endpoint */
1134 ep = usbd_get_endpoint(udev,
1135 ifaces[setup->if_index], setup);
1136
1137 /*
1138 * Check that the USB PIPE is valid and that
1139 * the endpoint mode is proper.
1140 *
1141 * Make sure we don't allocate a streams
1142 * transfer when such a combination is not
1143 * valid.
1144 */
1145 if ((ep == NULL) || (ep->methods == NULL) ||
1146 ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1147 (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1148 (setup->stream_id != 0 &&
1149 (setup->stream_id >= USB_MAX_EP_STREAMS ||
1150 (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1151 if (setup->flags.no_pipe_ok)
1152 continue;
1153 if ((setup->usb_mode != USB_MODE_DUAL) &&
1154 (setup->usb_mode != udev->flags.usb_mode))
1155 continue;
1156 parm->err = USB_ERR_NO_PIPE;
1157 goto done;
1158 }
1159
1160 /* align data properly */
1161 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1162
1163 /* store current setup pointer */
1164 parm->curr_setup = setup;
1165
1166 if (buf) {
1167 /*
1168 * Common initialization of the
1169 * "usb_xfer" structure.
1170 */
1171 xfer = USB_ADD_BYTES(buf, parm->size[0]);
1172 xfer->address = udev->address;
1173 xfer->priv_sc = priv_sc;
1174 xfer->xroot = info;
1175
1176 usb_callout_init_mtx(&xfer->timeout_handle,
1177 &udev->bus->bus_mtx, 0);
1178 } else {
1179 /*
1180 * Set up a dummy xfer, because we are
1181 * writing to the "usb_xfer"
1182 * structure pointed to by "xfer"
1183 * before we have allocated any
1184 * memory:
1185 */
1186 xfer = &udev->scratch.xfer_setup[0].dummy;
1187 memset(xfer, 0, sizeof(*xfer));
1188 refcount++;
1189 }
1190
1191 /* set transfer endpoint pointer */
1192 xfer->endpoint = ep;
1193
1194 /* set transfer stream ID */
1195 xfer->stream_id = setup->stream_id;
1196
1197 parm->size[0] += sizeof(xfer[0]);
1198 parm->methods = xfer->endpoint->methods;
1199 parm->curr_xfer = xfer;
1200
1201 /*
1202 * Call the Host or Device controller transfer
1203 * setup routine:
1204 */
1205 (udev->bus->methods->xfer_setup) (parm);
1206
1207 /* check for error */
1208 if (parm->err)
1209 goto done;
1210
1211 if (buf) {
1212 /*
1213 * Increment the endpoint refcount. This
1214 * basically prevents setting a new
1215 * configuration and alternate setting
1216 * when USB transfers are in use on
1217 * the given interface. Search the USB
1218 * code for "endpoint->refcount_alloc" if you
1219 * want more information.
1220 */
1221 USB_BUS_LOCK(info->bus);
1222 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1223 parm->err = USB_ERR_INVAL;
1224
1225 xfer->endpoint->refcount_alloc++;
1226
1227 if (xfer->endpoint->refcount_alloc == 0)
1228 panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1229 USB_BUS_UNLOCK(info->bus);
1230
1231 /*
1232 * Whenever we set ppxfer[] then we
1233 * also need to increment the
1234 * "setup_refcount":
1235 */
1236 info->setup_refcount++;
1237
1238 /*
1239 * Transfer is successfully setup and
1240 * can be used:
1241 */
1242 ppxfer[n] = xfer;
1243 }
1244
1245 /* check for error */
1246 if (parm->err)
1247 goto done;
1248 }
1249
1250 if (buf != NULL || parm->err != 0)
1251 goto done;
1252
1253 /* if no transfers, nothing to do */
1254 if (refcount == 0)
1255 goto done;
1256
1257 /* align data properly */
1258 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1259
1260 /* store offset temporarily */
1261 parm->size[1] = parm->size[0];
1262
1263 /*
1264 * The number of DMA tags required depends on
1265 * the number of endpoints. The current estimate
1266 * for maximum number of DMA tags per endpoint
1267 * is three:
1268 * 1) for loading memory
1269 * 2) for allocating memory
1270 * 3) for fixing memory [UHCI]
1271 */
1272 parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1273
1274 /*
1275 * DMA tags for QH, TD, Data and more.
1276 */
1277 parm->dma_tag_max += 8;
1278
1279 parm->dma_tag_p += parm->dma_tag_max;
1280
1281 parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1282 ((uint8_t *)0);
1283
1284 /* align data properly */
1285 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1286
1287 /* store offset temporarily */
1288 parm->size[3] = parm->size[0];
1289
1290 parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1291 ((uint8_t *)0);
1292
1293 /* align data properly */
1294 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1295
1296 /* store offset temporarily */
1297 parm->size[4] = parm->size[0];
1298
1299 parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1300 ((uint8_t *)0);
1301
1302 /* store end offset temporarily */
1303 parm->size[5] = parm->size[0];
1304
1305 parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1306 ((uint8_t *)0);
1307
1308 /* store end offset temporarily */
1309
1310 parm->size[2] = parm->size[0];
1311
1312 /* align data properly */
1313 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1314
1315 parm->size[6] = parm->size[0];
1316
1317 parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1318 ((uint8_t *)0);
1319
1320 /* align data properly */
1321 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1322
1323 /* allocate zeroed memory */
1324 buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1325 #if (USB_HAVE_MALLOC_WAITOK == 0)
1326 if (buf == NULL) {
1327 parm->err = USB_ERR_NOMEM;
1328 DPRINTFN(0, "cannot allocate memory block for "
1329 "configuration (%d bytes)\n",
1330 parm->size[0]);
1331 goto done;
1332 }
1333 #endif
1334 parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1335 parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1336 parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1337 parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1338 parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1339 }
1340
1341 done:
1342 if (buf) {
1343 if (info->setup_refcount == 0) {
1344 /*
1345 * "usbd_transfer_unsetup_sub" will unlock
1346 * the bus mutex before returning !
1347 */
1348 USB_BUS_LOCK(info->bus);
1349
1350 /* something went wrong */
1351 usbd_transfer_unsetup_sub(info, 0);
1352 }
1353 }
1354
1355 /* check if any errors happened */
1356 if (parm->err)
1357 usbd_transfer_unsetup(ppxfer, n_setup);
1358
1359 error = parm->err;
1360
1361 if (do_unlock)
1362 usbd_ctrl_unlock(udev);
1363
1364 return (error);
1365 }
1366
1367 /*------------------------------------------------------------------------*
1368 * usbd_transfer_unsetup_sub - factored out code
1369 *------------------------------------------------------------------------*/
1370 static void
1371 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1372 {
1373 #if USB_HAVE_BUSDMA
1374 struct usb_page_cache *pc;
1375 #endif
1376
1377 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1378
1379 /* wait for any outstanding DMA operations */
1380
1381 if (needs_delay) {
1382 usb_timeout_t temp;
1383 temp = usbd_get_dma_delay(info->udev);
1384 if (temp != 0) {
1385 usb_pause_mtx(&info->bus->bus_mtx,
1386 USB_MS_TO_TICKS(temp));
1387 }
1388 }
1389
1390 /* make sure that our done messages are not queued anywhere */
1391 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1392
1393 USB_BUS_UNLOCK(info->bus);
1394
1395 #if USB_HAVE_BUSDMA
1396 /* free DMA'able memory, if any */
1397 pc = info->dma_page_cache_start;
1398 while (pc != info->dma_page_cache_end) {
1399 usb_pc_free_mem(pc);
1400 pc++;
1401 }
1402
1403 /* free DMA maps in all "xfer->frbuffers" */
1404 pc = info->xfer_page_cache_start;
1405 while (pc != info->xfer_page_cache_end) {
1406 usb_pc_dmamap_destroy(pc);
1407 pc++;
1408 }
1409
1410 /* free all DMA tags */
1411 usb_dma_tag_unsetup(&info->dma_parent_tag);
1412 #endif
1413
1414 cv_destroy(&info->cv_drain);
1415
1416 /*
1417 * free the "memory_base" last, because the "info" structure is
1418 * contained within the "memory_base"!
1419 */
1420 free(info->memory_base, M_USB);
1421 }
1422
1423 /*------------------------------------------------------------------------*
1424 * usbd_transfer_unsetup - unsetup/free an array of USB transfers
1425 *
1426 * NOTE: All USB transfers in progress will get called back passing
1427 * the error code "USB_ERR_CANCELLED" before this function
1428 * returns.
1429 *------------------------------------------------------------------------*/
1430 void
1431 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1432 {
1433 struct usb_xfer *xfer;
1434 struct usb_xfer_root *info;
1435 uint8_t needs_delay = 0;
1436
1437 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1438 "usbd_transfer_unsetup can sleep!");
1439
1440 while (n_setup--) {
1441 xfer = pxfer[n_setup];
1442
1443 if (xfer == NULL)
1444 continue;
1445
1446 info = xfer->xroot;
1447
1448 USB_XFER_LOCK(xfer);
1449 USB_BUS_LOCK(info->bus);
1450
1451 /*
1452 * HINT: when you start/stop a transfer, it might be a
1453 * good idea to directly use the "pxfer[]" structure:
1454 *
1455 * usbd_transfer_start(sc->pxfer[0]);
1456 * usbd_transfer_stop(sc->pxfer[0]);
1457 *
1458 * That way, if your code has many parts that will not
1459 * stop running under the same lock, in other words
1460 * "xfer_mtx", the usbd_transfer_start and
1461 * usbd_transfer_stop functions will simply return
1462 * when they detect a NULL pointer argument.
1463 *
1464 * To avoid any races we clear the "pxfer[]" pointer
1465 * while holding the private mutex of the driver:
1466 */
1467 pxfer[n_setup] = NULL;
1468
1469 USB_BUS_UNLOCK(info->bus);
1470 USB_XFER_UNLOCK(xfer);
1471
1472 usbd_transfer_drain(xfer);
1473
1474 #if USB_HAVE_BUSDMA
1475 if (xfer->flags_int.bdma_enable)
1476 needs_delay = 1;
1477 #endif
1478 /*
1479 * NOTE: default endpoint does not have an
1480 * interface, even if endpoint->iface_index == 0
1481 */
1482 USB_BUS_LOCK(info->bus);
1483 xfer->endpoint->refcount_alloc--;
1484 USB_BUS_UNLOCK(info->bus);
1485
1486 usb_callout_drain(&xfer->timeout_handle);
1487
1488 USB_BUS_LOCK(info->bus);
1489
1490 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1491 "reference count\n"));
1492
1493 info->setup_refcount--;
1494
1495 if (info->setup_refcount == 0) {
1496 usbd_transfer_unsetup_sub(info,
1497 needs_delay);
1498 } else {
1499 USB_BUS_UNLOCK(info->bus);
1500 }
1501 }
1502 }
1503
1504 /*------------------------------------------------------------------------*
1505 * usbd_control_transfer_init - factored out code
1506 *
1507 * In USB Device Mode we have to wait for the SETUP packet which
1508 * contains the "struct usb_device_request" structure, before we can
1509 * transfer any data. In USB Host Mode we already have the SETUP
1510 * packet at the moment the USB transfer is started. This leads us to
1511 * having to set up the USB transfer at two different places in
1512 * time. This function just contains factored out control transfer
1513 * initialisation code, so that we don't duplicate the code.
1514 *------------------------------------------------------------------------*/
1515 static void
1516 usbd_control_transfer_init(struct usb_xfer *xfer)
1517 {
1518 struct usb_device_request req;
1519
1520 /* copy out the USB request header */
1521
1522 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1523
1524 /* setup remainder */
1525
1526 xfer->flags_int.control_rem = UGETW(req.wLength);
1527
1528 /* copy direction to endpoint variable */
1529
1530 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1531 xfer->endpointno |=
1532 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1533 }
1534
1535 /*------------------------------------------------------------------------*
1536 * usbd_control_transfer_did_data
1537 *
1538 * This function returns non-zero if a control endpoint has
1539 * transferred the first DATA packet after the SETUP packet.
1540 * Else it returns zero.
1541 *------------------------------------------------------------------------*/
1542 static uint8_t
1543 usbd_control_transfer_did_data(struct usb_xfer *xfer)
1544 {
1545 struct usb_device_request req;
1546
1547 /* SETUP packet is not yet sent */
1548 if (xfer->flags_int.control_hdr != 0)
1549 return (0);
1550
1551 /* copy out the USB request header */
1552 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1553
1554 /* compare remainder to the initial value */
1555 return (xfer->flags_int.control_rem != UGETW(req.wLength));
1556 }
1557
1558 /*------------------------------------------------------------------------*
1559 * usbd_setup_ctrl_transfer
1560 *
1561 * This function handles initialisation of control transfers. Control
1562 * transfers are special in that they can both transmit
1563 * and receive data.
1564 *
1565 * Return values:
1566 * 0: Success
1567 * Else: Failure
1568 *------------------------------------------------------------------------*/
1569 static int
1570 usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1571 {
1572 usb_frlength_t len;
1573
1574 /* Check for control endpoint stall */
1575 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1576 /* the control transfer is no longer active */
1577 xfer->flags_int.control_stall = 1;
1578 xfer->flags_int.control_act = 0;
1579 } else {
1580 /* don't stall control transfer by default */
1581 xfer->flags_int.control_stall = 0;
1582 }
1583
1584 /* Check for invalid number of frames */
1585 if (xfer->nframes > 2) {
1586 /*
1587 * If you need to split a control transfer, you
1588 * have to do one part at a time. Only with
1589 * non-control transfers can you do multiple
1590 * parts at a time.
1591 */
1592 DPRINTFN(0, "Too many frames: %u\n",
1593 (unsigned int)xfer->nframes);
1594 goto error;
1595 }
1596
1597 /*
1598 * Check if there is a control
1599 * transfer in progress:
1600 */
1601 if (xfer->flags_int.control_act) {
1602 if (xfer->flags_int.control_hdr) {
1603 /* clear send header flag */
1604
1605 xfer->flags_int.control_hdr = 0;
1606
1607 /* setup control transfer */
1608 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1609 usbd_control_transfer_init(xfer);
1610 }
1611 }
1612 /* get data length */
1613
1614 len = xfer->sumlen;
1615
1616 } else {
1617 /* the size of the SETUP structure is hardcoded ! */
1618
1619 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1620 DPRINTFN(0, "Wrong framelength %u != %zu\n",
1621 xfer->frlengths[0], sizeof(struct
1622 usb_device_request));
1623 goto error;
1624 }
1625 /* check USB mode */
1626 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1627 /* check number of frames */
1628 if (xfer->nframes != 1) {
1629 /*
1630 * We need to receive the setup
1631 * message first so that we know the
1632 * data direction!
1633 */
1634 DPRINTF("Misconfigured transfer\n");
1635 goto error;
1636 }
1637 /*
1638 * Set a dummy "control_rem" value. This
1639 * variable will be overwritten later by a
1640 * call to "usbd_control_transfer_init()" !
1641 */
1642 xfer->flags_int.control_rem = 0xFFFF;
1643 } else {
1644 /* setup "endpoint" and "control_rem" */
1645
1646 usbd_control_transfer_init(xfer);
1647 }
1648
1649 /* set transfer-header flag */
1650
1651 xfer->flags_int.control_hdr = 1;
1652
1653 /* get data length */
1654
1655 len = (xfer->sumlen - sizeof(struct usb_device_request));
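/* I.e. exclude the 8 byte SETUP header in frame 0 from the data length. */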
1656 }
1657
1658 /* update did data flag */
1659
1660 xfer->flags_int.control_did_data =
1661 usbd_control_transfer_did_data(xfer);
1662
1663 /* check if there is a length mismatch */
1664
1665 if (len > xfer->flags_int.control_rem) {
1666 DPRINTFN(0, "Length (%d) greater than "
1667 "remaining length (%d)\n", len,
1668 xfer->flags_int.control_rem);
1669 goto error;
1670 }
1671 /* check if we are doing a short transfer */
1672
1673 if (xfer->flags.force_short_xfer) {
1674 xfer->flags_int.control_rem = 0;
1675 } else {
1676 if ((len != xfer->max_data_length) &&
1677 (len != xfer->flags_int.control_rem) &&
1678 (xfer->nframes != 1)) {
1679 DPRINTFN(0, "Short control transfer without "
1680 "force_short_xfer set\n");
1681 goto error;
1682 }
1683 xfer->flags_int.control_rem -= len;
1684 }
1685
1686 /* the status part is executed when "control_act" is 0 */
1687
1688 if ((xfer->flags_int.control_rem > 0) ||
1689 (xfer->flags.manual_status)) {
1690 /* don't execute the STATUS stage yet */
1691 xfer->flags_int.control_act = 1;
1692
1693 /* sanity check */
1694 if ((!xfer->flags_int.control_hdr) &&
1695 (xfer->nframes == 1)) {
1696 /*
1697 * This is not a valid operation!
1698 */
1699 DPRINTFN(0, "Invalid parameter "
1700 "combination\n");
1701 goto error;
1702 }
1703 } else {
1704 /* time to execute the STATUS stage */
1705 xfer->flags_int.control_act = 0;
1706 }
1707 return (0); /* success */
1708
1709 error:
1710 return (1); /* failure */
1711 }
1712
1713 /*------------------------------------------------------------------------*
1714 * usbd_transfer_submit - start USB hardware for the given transfer
1715 *
1716 * This function should only be called from the USB callback.
1717 *------------------------------------------------------------------------*/
1718 void
1719 usbd_transfer_submit(struct usb_xfer *xfer)
1720 {
1721 struct usb_xfer_root *info;
1722 struct usb_bus *bus;
1723 usb_frcount_t x;
1724
1725 info = xfer->xroot;
1726 bus = info->bus;
1727
1728 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1729 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1730 "read" : "write");
1731
1732 #ifdef USB_DEBUG
1733 if (USB_DEBUG_VAR > 0) {
1734 USB_BUS_LOCK(bus);
1735
1736 usb_dump_endpoint(xfer->endpoint);
1737
1738 USB_BUS_UNLOCK(bus);
1739 }
1740 #endif
1741
1742 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1743 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1744
1745 /* Only open the USB transfer once! */
1746 if (!xfer->flags_int.open) {
1747 xfer->flags_int.open = 1;
1748
1749 DPRINTF("open\n");
1750
1751 USB_BUS_LOCK(bus);
1752 (xfer->endpoint->methods->open) (xfer);
1753 USB_BUS_UNLOCK(bus);
1754 }
1755 /* set "transferring" flag */
1756 xfer->flags_int.transferring = 1;
1757
1758 #if USB_HAVE_POWERD
1759 /* increment power reference */
1760 usbd_transfer_power_ref(xfer, 1);
1761 #endif
1762 /*
1763 * Check if the transfer is waiting on a queue, most
1764 * frequently the "done_q":
1765 */
1766 if (xfer->wait_queue) {
1767 USB_BUS_LOCK(bus);
1768 usbd_transfer_dequeue(xfer);
1769 USB_BUS_UNLOCK(bus);
1770 }
1771 /* clear "did_dma_delay" flag */
1772 xfer->flags_int.did_dma_delay = 0;
1773
1774 /* clear "did_close" flag */
1775 xfer->flags_int.did_close = 0;
1776
1777 #if USB_HAVE_BUSDMA
1778 /* clear "bdma_setup" flag */
1779 xfer->flags_int.bdma_setup = 0;
1780 #endif
1781 /* by default we cannot cancel any USB transfer immediately */
1782 xfer->flags_int.can_cancel_immed = 0;
1783
1784 /* clear lengths and frame counts by default */
1785 xfer->sumlen = 0;
1786 xfer->actlen = 0;
1787 xfer->aframes = 0;
1788
1789 /* clear any previous errors */
1790 xfer->error = 0;
1791
1792 /* Check if the device is still alive */
1793 if (info->udev->state < USB_STATE_POWERED) {
1794 USB_BUS_LOCK(bus);
1795 /*
1796 * Must return cancelled error code else
1797 * device drivers can hang.
1798 */
1799 usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1800 USB_BUS_UNLOCK(bus);
1801 return;
1802 }
1803
1804 /* sanity check */
1805 if (xfer->nframes == 0) {
1806 if (xfer->flags.stall_pipe) {
1807 /*
1808 * Special case - want to stall without transferring
1809 * any data:
1810 */
1811 DPRINTF("xfer=%p nframes=0: stall "
1812 "or clear stall!\n", xfer);
1813 USB_BUS_LOCK(bus);
1814 xfer->flags_int.can_cancel_immed = 1;
1815 /* start the transfer */
1816 usb_command_wrapper(&xfer->endpoint->
1817 endpoint_q[xfer->stream_id], xfer);
1818 USB_BUS_UNLOCK(bus);
1819 return;
1820 }
1821 USB_BUS_LOCK(bus);
1822 usbd_transfer_done(xfer, USB_ERR_INVAL);
1823 USB_BUS_UNLOCK(bus);
1824 return;
1825 }
1826 /* compute some variables */
1827
1828 for (x = 0; x != xfer->nframes; x++) {
1829 /* make a copy of the frlengths[] */
1830 xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1831 /* compute total transfer length */
1832 xfer->sumlen += xfer->frlengths[x];
1833 if (xfer->sumlen < xfer->frlengths[x]) {
1834 /* length wrapped around */
1835 USB_BUS_LOCK(bus);
1836 usbd_transfer_done(xfer, USB_ERR_INVAL);
1837 USB_BUS_UNLOCK(bus);
1838 return;
1839 }
1840 }
1841
1842 /* clear some internal flags */
1843
1844 xfer->flags_int.short_xfer_ok = 0;
1845 xfer->flags_int.short_frames_ok = 0;
1846
1847 /* check if this is a control transfer */
1848
1849 if (xfer->flags_int.control_xfr) {
1850 if (usbd_setup_ctrl_transfer(xfer)) {
1851 USB_BUS_LOCK(bus);
1852 usbd_transfer_done(xfer, USB_ERR_STALLED);
1853 USB_BUS_UNLOCK(bus);
1854 return;
1855 }
1856 }
1857 /*
1858 * Setup filtered version of some transfer flags,
1859 * in case of data read direction
1860 */
1861 if (USB_GET_DATA_ISREAD(xfer)) {
1862 if (xfer->flags.short_frames_ok) {
1863 xfer->flags_int.short_xfer_ok = 1;
1864 xfer->flags_int.short_frames_ok = 1;
1865 } else if (xfer->flags.short_xfer_ok) {
1866 xfer->flags_int.short_xfer_ok = 1;
1867
1868 /* check for control transfer */
1869 if (xfer->flags_int.control_xfr) {
1870 /*
1871 * 1) Control transfers do not support
1872 * reception of multiple short USB
1873 * frames in host mode and device side
1874 * mode, with the exception of:
1875 *
1876 * 2) Due to sometimes buggy device
1877 * side firmware we need to do a
1878 * STATUS stage in case of short
1879 * control transfers in USB host mode.
1880 * The STATUS stage then becomes the
1881 * "alt_next" to the DATA stage.
1882 */
1883 xfer->flags_int.short_frames_ok = 1;
1884 }
1885 }
1886 }
1887 /*
1888 * Check if BUS-DMA support is enabled and try to load virtual
1889 * buffers into DMA, if any:
1890 */
1891 #if USB_HAVE_BUSDMA
1892 if (xfer->flags_int.bdma_enable) {
1893 /* insert the USB transfer last in the BUS-DMA queue */
1894 usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1895 return;
1896 }
1897 #endif
1898 /*
1899 * Enter the USB transfer into the Host Controller or
1900 * Device Controller schedule:
1901 */
1902 usbd_pipe_enter(xfer);
1903 }
1904
1905 /*------------------------------------------------------------------------*
1906 * usbd_pipe_enter - factored out code
1907 *------------------------------------------------------------------------*/
1908 void
1909 usbd_pipe_enter(struct usb_xfer *xfer)
1910 {
1911 struct usb_endpoint *ep;
1912
1913 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1914
1915 USB_BUS_LOCK(xfer->xroot->bus);
1916
1917 ep = xfer->endpoint;
1918
1919 DPRINTF("enter\n");
1920
1921 /* the transfer can now be cancelled */
1922 xfer->flags_int.can_cancel_immed = 1;
1923
1924 /* enter the transfer */
1925 (ep->methods->enter) (xfer);
1926
1927 /* check for transfer error */
1928 if (xfer->error) {
1929 /* some error has happened */
1930 usbd_transfer_done(xfer, 0);
1931 USB_BUS_UNLOCK(xfer->xroot->bus);
1932 return;
1933 }
1934
1935 /* start the transfer */
1936 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1937 USB_BUS_UNLOCK(xfer->xroot->bus);
1938 }
1939
1940 /*------------------------------------------------------------------------*
1941 * usbd_transfer_start - start a USB transfer
1942 *
1943 * NOTE: Calling this function more than one time will only
1944 * result in a single transfer start, until the USB transfer
1945 * completes.
1946 *------------------------------------------------------------------------*/
1947 void
1948 usbd_transfer_start(struct usb_xfer *xfer)
1949 {
1950 if (xfer == NULL) {
1951 /* transfer is gone */
1952 return;
1953 }
1954 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1955
1956 /* mark the USB transfer started */
1957
1958 if (!xfer->flags_int.started) {
1959 /* lock the BUS lock to avoid races updating flags_int */
1960 USB_BUS_LOCK(xfer->xroot->bus);
1961 xfer->flags_int.started = 1;
1962 USB_BUS_UNLOCK(xfer->xroot->bus);
1963 }
1964 /* check if the USB transfer callback is already transferring */
1965
1966 if (xfer->flags_int.transferring) {
1967 return;
1968 }
1969 USB_BUS_LOCK(xfer->xroot->bus);
1970 /* call the USB transfer callback */
1971 usbd_callback_ss_done_defer(xfer);
1972 USB_BUS_UNLOCK(xfer->xroot->bus);
1973 }
1974
1975 /*------------------------------------------------------------------------*
1976 * usbd_transfer_stop - stop a USB transfer
1977 *
1978 * NOTE: Calling this function more than one time will only
1979 * result in a single transfer stop.
1980 * NOTE: When this function returns it is not safe to free nor
1981 * reuse any DMA buffers. See "usbd_transfer_drain()".
1982 *------------------------------------------------------------------------*/
1983 void
1984 usbd_transfer_stop(struct usb_xfer *xfer)
1985 {
1986 struct usb_endpoint *ep;
1987
1988 if (xfer == NULL) {
1989 /* transfer is gone */
1990 return;
1991 }
1992 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1993
1994 /* check if the USB transfer was ever opened */
1995
1996 if (!xfer->flags_int.open) {
1997 if (xfer->flags_int.started) {
1998 /* nothing to do except clearing the "started" flag */
1999 /* lock the BUS lock to avoid races updating flags_int */
2000 USB_BUS_LOCK(xfer->xroot->bus);
2001 xfer->flags_int.started = 0;
2002 USB_BUS_UNLOCK(xfer->xroot->bus);
2003 }
2004 return;
2005 }
2006 /* try to stop the current USB transfer */
2007
2008 USB_BUS_LOCK(xfer->xroot->bus);
2009 /* override any previous error */
2010 xfer->error = USB_ERR_CANCELLED;
2011
2012 /*
2013 * Clear "open" and "started" when both private and USB lock
2014 * is locked so that we don't get a race updating "flags_int"
2015 */
2016 xfer->flags_int.open = 0;
2017 xfer->flags_int.started = 0;
2018
2019 /*
2020 * Check if we can cancel the USB transfer immediately.
2021 */
2022 if (xfer->flags_int.transferring) {
2023 if (xfer->flags_int.can_cancel_immed &&
2024 (!xfer->flags_int.did_close)) {
2025 DPRINTF("close\n");
2026 /*
2027 * The following will lead to an USB_ERR_CANCELLED
2028 * error code being passed to the USB callback.
2029 */
2030 (xfer->endpoint->methods->close) (xfer);
2031 /* only close once */
2032 xfer->flags_int.did_close = 1;
2033 } else {
2034 /* need to wait for the next done callback */
2035 }
2036 } else {
2037 DPRINTF("close\n");
2038
2039 /* close here and now */
2040 (xfer->endpoint->methods->close) (xfer);
2041
2042 /*
2043 * Any additional DMA delay is done by
2044 * "usbd_transfer_unsetup()".
2045 */
2046
2047 /*
2048 * Special case. Check if we need to restart a blocked
2049 * endpoint.
2050 */
2051 ep = xfer->endpoint;
2052
2053 /*
2054 * If the current USB transfer is completing we need
2055 * to start the next one:
2056 */
2057 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
2058 usb_command_wrapper(
2059 &ep->endpoint_q[xfer->stream_id], NULL);
2060 }
2061 }
2062
2063 USB_BUS_UNLOCK(xfer->xroot->bus);
2064 }
2065
2066 /*------------------------------------------------------------------------*
2067 * usbd_transfer_pending
2068 *
2069 * This function will check if an USB transfer is pending, which is a
2070 * little bit complicated!
2071 * Return values:
2072 * 0: Not pending
2073 * 1: Pending: The USB transfer will receive a callback in the future.
2074 *------------------------------------------------------------------------*/
2075 uint8_t
2076 usbd_transfer_pending(struct usb_xfer *xfer)
2077 {
2078 struct usb_xfer_root *info;
2079 struct usb_xfer_queue *pq;
2080
2081 if (xfer == NULL) {
2082 /* transfer is gone */
2083 return (0);
2084 }
2085 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2086
2087 if (xfer->flags_int.transferring) {
2088 /* trivial case */
2089 return (1);
2090 }
2091 USB_BUS_LOCK(xfer->xroot->bus);
2092 if (xfer->wait_queue) {
2093 /* we are waiting on a queue somewhere */
2094 USB_BUS_UNLOCK(xfer->xroot->bus);
2095 return (1);
2096 }
2097 info = xfer->xroot;
2098 pq = &info->done_q;
2099
2100 if (pq->curr == xfer) {
2101 /* we are currently scheduled for callback */
2102 USB_BUS_UNLOCK(xfer->xroot->bus);
2103 return (1);
2104 }
2105 /* we are not pending */
2106 USB_BUS_UNLOCK(xfer->xroot->bus);
2107 return (0);
2108 }
2109
2110 /*------------------------------------------------------------------------*
2111 * usbd_transfer_drain
2112 *
2113 * This function will stop the USB transfer and wait for any
2114 * additional BUS-DMA and HW-DMA operations to complete. Buffers that
2115 * are loaded into DMA can safely be freed or reused after this
2116 * function has returned.
2117 *------------------------------------------------------------------------*/
2118 void
2119 usbd_transfer_drain(struct usb_xfer *xfer)
2120 {
2121 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2122 "usbd_transfer_drain can sleep!");
2123
2124 if (xfer == NULL) {
2125 /* transfer is gone */
2126 return;
2127 }
2128 if (xfer->xroot->xfer_mtx != &Giant) {
2129 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
2130 }
2131 USB_XFER_LOCK(xfer);
2132
2133 usbd_transfer_stop(xfer);
2134
2135 while (usbd_transfer_pending(xfer) ||
2136 xfer->flags_int.doing_callback) {
2137 /*
2138		 * The callback is allowed to drop its
2139 * transfer mutex. In that case checking only
2140 * "usbd_transfer_pending()" is not enough to tell if
2141 * the USB transfer is fully drained. We also need to
2142 * check the internal "doing_callback" flag.
2143 */
2144 xfer->flags_int.draining = 1;
2145
2146 /*
2147 * Wait until the current outstanding USB
2148 * transfer is complete !
2149 */
2150 cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
2151 }
2152 USB_XFER_UNLOCK(xfer);
2153 }
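/*
 * Illustrative usage sketch (the softc field "sc_bulk_xfer" is
 * hypothetical): draining a transfer before freeing its buffers,
 * for example from a detach routine.
 *
 *	// stops the transfer and sleeps until all DMA has completed;
 *	// must not be called with the private USB lock held
 *	usbd_transfer_drain(sc->sc_bulk_xfer);
 *
 *	// buffers loaded into DMA by this transfer may now be freed
 */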
2154
2155 struct usb_page_cache *
2156 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
2157 {
2158 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2159
2160 return (&xfer->frbuffers[frindex]);
2161 }
2162
2163 void *
2164 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2165 {
2166 struct usb_page_search page_info;
2167
2168 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2169
2170 usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2171 return (page_info.buffer);
2172 }
2173
2174 /*------------------------------------------------------------------------*
2175 * usbd_xfer_get_fps_shift
2176 *
2177 * The following function is only useful for isochronous transfers. It
2178 * returns how many times the frame execution rate has been shifted
2179 * down.
2180 *
2181 * Return value:
2182 * Success: 0..3
2183 * Failure: 0
2184 *------------------------------------------------------------------------*/
2185 uint8_t
2186 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2187 {
2188 return (xfer->fps_shift);
2189 }
2190
2191 usb_frlength_t
2192 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2193 {
2194 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2195
2196 return (xfer->frlengths[frindex]);
2197 }
2198
2199 /*------------------------------------------------------------------------*
2200 * usbd_xfer_set_frame_data
2201 *
2202 * This function sets the pointer of the buffer that should
2203 * be loaded directly into DMA for the given USB frame. Passing "ptr"
2204 * equal to NULL while the corresponding "frlength" is greater
2205 * than zero gives undefined results!
2206 *------------------------------------------------------------------------*/
2207 void
2208 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2209 void *ptr, usb_frlength_t len)
2210 {
2211 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2212
2213 /* set virtual address to load and length */
2214 xfer->frbuffers[frindex].buffer = ptr;
2215 usbd_xfer_set_frame_len(xfer, frindex, len);
2216 }
2217
2218 void
2219 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2220 void **ptr, int *len)
2221 {
2222 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2223
2224 if (ptr != NULL)
2225 *ptr = xfer->frbuffers[frindex].buffer;
2226 if (len != NULL)
2227 *len = xfer->frlengths[frindex];
2228 }
2229
2230 /*------------------------------------------------------------------------*
2231 * usbd_xfer_old_frame_length
2232 *
2233 * This function returns the framelength of the given frame at the
2234 * time the transfer was submitted. This function can be used to
2235 * compute the starting data pointer of the next isochronous frame
2236 * when an isochronous transfer has completed.
2237 *------------------------------------------------------------------------*/
2238 usb_frlength_t
2239 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2240 {
2241 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2242
2243 return (xfer->frlengths[frindex + xfer->max_frame_count]);
2244 }
2245
2246 void
2247 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2248 int *nframes)
2249 {
2250 if (actlen != NULL)
2251 *actlen = xfer->actlen;
2252 if (sumlen != NULL)
2253 *sumlen = xfer->sumlen;
2254 if (aframes != NULL)
2255 *aframes = xfer->aframes;
2256 if (nframes != NULL)
2257 *nframes = xfer->nframes;
2258 }
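/*
 * Illustrative sketch of how a transfer callback typically reads the
 * completion status and copies out received data.  The callback name
 * and buffer are hypothetical; error handling is omitted.
 *
 *	static void
 *	my_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct usb_page_cache *pc;
 *		uint8_t buf[64];
 *		int actlen;
 *
 *		usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *
 *		switch (USB_GET_STATE(xfer)) {
 *		case USB_ST_TRANSFERRED:
 *			pc = usbd_xfer_get_frame(xfer, 0);
 *			if (actlen > (int)sizeof(buf))
 *				actlen = sizeof(buf);
 *			usbd_copy_out(pc, 0, buf, actlen);
 *			// FALLTHROUGH
 *		case USB_ST_SETUP:
 *			usbd_xfer_set_frame_len(xfer, 0,
 *			    usbd_xfer_max_len(xfer));
 *			usbd_transfer_submit(xfer);
 *			break;
 *		default:	// error
 *			break;
 *		}
 *	}
 */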
2259
2260 /*------------------------------------------------------------------------*
2261 * usbd_xfer_set_frame_offset
2262 *
2263 * This function sets the frame data buffer offset relative to the beginning
2264 * of the USB DMA buffer allocated for this USB transfer.
2265 *------------------------------------------------------------------------*/
2266 void
2267 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2268 usb_frcount_t frindex)
2269 {
2270 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2271 "when the USB buffer is external\n"));
2272 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2273
2274 /* set virtual address to load */
2275 xfer->frbuffers[frindex].buffer =
2276 USB_ADD_BYTES(xfer->local_buffer, offset);
2277 }
2278
2279 void
2280 usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2281 {
2282 xfer->interval = i;
2283 }
2284
2285 void
2286 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2287 {
2288 xfer->timeout = t;
2289 }
2290
2291 void
2292 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2293 {
2294 xfer->nframes = n;
2295 }
2296
2297 usb_frcount_t
2298 usbd_xfer_max_frames(struct usb_xfer *xfer)
2299 {
2300 return (xfer->max_frame_count);
2301 }
2302
2303 usb_frlength_t
2304 usbd_xfer_max_len(struct usb_xfer *xfer)
2305 {
2306 return (xfer->max_data_length);
2307 }
2308
2309 usb_frlength_t
2310 usbd_xfer_max_framelen(struct usb_xfer *xfer)
2311 {
2312 return (xfer->max_frame_size);
2313 }
2314
2315 void
2316 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2317 usb_frlength_t len)
2318 {
2319 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2320
2321 xfer->frlengths[frindex] = len;
2322 }
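/*
 * Illustrative sketch: loading an externally allocated buffer into a
 * transfer frame before submitting it.  This assumes the transfer was
 * configured with the "ext_buffer" flag; "sc_tx_buf" and "sc_tx_len"
 * are hypothetical softc fields.
 *
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_frame_data(xfer, 0,
 *		    sc->sc_tx_buf, sc->sc_tx_len);
 *		usbd_xfer_set_frames(xfer, 1);
 *		usbd_transfer_submit(xfer);
 *		break;
 */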
2323
2324 /*------------------------------------------------------------------------*
2325 * usb_callback_proc - factored out code
2326 *
2327 * This function performs USB callbacks.
2328 *------------------------------------------------------------------------*/
2329 static void
2330 usb_callback_proc(struct usb_proc_msg *_pm)
2331 {
2332 struct usb_done_msg *pm = (void *)_pm;
2333 struct usb_xfer_root *info = pm->xroot;
2334
2335 /* Change locking order */
2336 USB_BUS_UNLOCK(info->bus);
2337
2338 /*
2339 * We exploit the fact that the mutex is the same for all
2340 * callbacks that will be called from this thread:
2341 */
2342 USB_MTX_LOCK(info->xfer_mtx);
2343 USB_BUS_LOCK(info->bus);
2344
2345 /* Continue where we lost track */
2346 usb_command_wrapper(&info->done_q,
2347 info->done_q.curr);
2348
2349 USB_MTX_UNLOCK(info->xfer_mtx);
2350 }
2351
2352 /*------------------------------------------------------------------------*
2353 * usbd_callback_ss_done_defer
2354 *
2355 * This function will defer the start, stop and done callback to the
2356 * correct thread.
2357 *------------------------------------------------------------------------*/
2358 static void
2359 usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2360 {
2361 struct usb_xfer_root *info = xfer->xroot;
2362 struct usb_xfer_queue *pq = &info->done_q;
2363
2364 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2365
2366 if (pq->curr != xfer) {
2367 usbd_transfer_enqueue(pq, xfer);
2368 }
2369 if (!pq->recurse_1) {
2370 /*
2371 * We have to postpone the callback due to the fact we
2372 * will have a Lock Order Reversal, LOR, if we try to
2373 * proceed !
2374 */
2375 (void) usb_proc_msignal(info->done_p,
2376 &info->done_m[0], &info->done_m[1]);
2377 } else {
2378 /* clear second recurse flag */
2379 pq->recurse_2 = 0;
2380 }
2381 return;
2382
2383 }
2384
2385 /*------------------------------------------------------------------------*
2386 * usbd_callback_wrapper
2387 *
2388 * This is a wrapper for USB callbacks. This wrapper does some
2389 * auto-magic things like figuring out if we can call the callback
2390 * directly from the current context or if we need to wakeup the
2391 * interrupt process.
2392 *------------------------------------------------------------------------*/
2393 static void
2394 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2395 {
2396 struct usb_xfer *xfer = pq->curr;
2397 struct usb_xfer_root *info = xfer->xroot;
2398
2399 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2400 if ((pq->recurse_3 != 0 || mtx_owned(info->xfer_mtx) == 0) &&
2401 USB_IN_POLLING_MODE_FUNC() == 0) {
2402 /*
2403 * Cases that end up here:
2404 *
2405 * 5) HW interrupt done callback or other source.
2406 * 6) HW completed transfer during callback
2407 */
2408 DPRINTFN(3, "case 5 and 6\n");
2409
2410 /*
2411 * We have to postpone the callback due to the fact we
2412 * will have a Lock Order Reversal, LOR, if we try to
2413 * proceed!
2414 *
2415 * Postponing the callback also ensures that other USB
2416 * transfer queues get a chance.
2417 */
2418 (void) usb_proc_msignal(info->done_p,
2419 &info->done_m[0], &info->done_m[1]);
2420 return;
2421 }
2422 /*
2423 * Cases that end up here:
2424 *
2425 * 1) We are starting a transfer
2426 * 2) We are prematurely calling back a transfer
2427 * 3) We are stopping a transfer
2428 * 4) We are doing an ordinary callback
2429 */
2430 DPRINTFN(3, "case 1-4\n");
2431 /* get next USB transfer in the queue */
2432 info->done_q.curr = NULL;
2433
2434 /* set flag in case of drain */
2435 xfer->flags_int.doing_callback = 1;
2436
2437 USB_BUS_UNLOCK(info->bus);
2438 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2439
2440 /* set correct USB state for callback */
2441 if (!xfer->flags_int.transferring) {
2442 xfer->usb_state = USB_ST_SETUP;
2443 if (!xfer->flags_int.started) {
2444 /* we got stopped before we even got started */
2445 USB_BUS_LOCK(info->bus);
2446 goto done;
2447 }
2448 } else {
2449 if (usbd_callback_wrapper_sub(xfer)) {
2450 /* the callback has been deferred */
2451 USB_BUS_LOCK(info->bus);
2452 goto done;
2453 }
2454 #if USB_HAVE_POWERD
2455 /* decrement power reference */
2456 usbd_transfer_power_ref(xfer, -1);
2457 #endif
2458 xfer->flags_int.transferring = 0;
2459
2460 if (xfer->error) {
2461 xfer->usb_state = USB_ST_ERROR;
2462 } else {
2463 /* set transferred state */
2464 xfer->usb_state = USB_ST_TRANSFERRED;
2465 #if USB_HAVE_BUSDMA
2466 /* sync DMA memory, if any */
2467 if (xfer->flags_int.bdma_enable &&
2468 (!xfer->flags_int.bdma_no_post_sync)) {
2469 usb_bdma_post_sync(xfer);
2470 }
2471 #endif
2472 }
2473 }
2474
2475 #if USB_HAVE_PF
2476 if (xfer->usb_state != USB_ST_SETUP) {
2477 USB_BUS_LOCK(info->bus);
2478 usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2479 USB_BUS_UNLOCK(info->bus);
2480 }
2481 #endif
2482 /* call processing routine */
2483 (xfer->callback) (xfer, xfer->error);
2484
2485 /* pickup the USB mutex again */
2486 USB_BUS_LOCK(info->bus);
2487
2488 /*
2489	 * Check if we got started after we got cancelled, but
2490 * before we managed to do the callback.
2491 */
2492 if ((!xfer->flags_int.open) &&
2493 (xfer->flags_int.started) &&
2494 (xfer->usb_state == USB_ST_ERROR)) {
2495 /* clear flag in case of drain */
2496 xfer->flags_int.doing_callback = 0;
2497		/* try to loop, but not recursively */
2498 usb_command_wrapper(&info->done_q, xfer);
2499 return;
2500 }
2501
2502 done:
2503 /* clear flag in case of drain */
2504 xfer->flags_int.doing_callback = 0;
2505
2506 /*
2507 * Check if we are draining.
2508 */
2509 if (xfer->flags_int.draining &&
2510 (!xfer->flags_int.transferring)) {
2511 /* "usbd_transfer_drain()" is waiting for end of transfer */
2512 xfer->flags_int.draining = 0;
2513 cv_broadcast(&info->cv_drain);
2514 }
2515
2516 /* do the next callback, if any */
2517 usb_command_wrapper(&info->done_q,
2518 info->done_q.curr);
2519 }
2520
2521 /*------------------------------------------------------------------------*
2522 * usb_dma_delay_done_cb
2523 *
2524 * This function is called when the DMA delay has been executed, and
2525 * will make sure that the callback is called to complete the USB
2526 * transfer. This code path is usually only used when there is an USB
2527 * error like USB_ERR_CANCELLED.
2528 *------------------------------------------------------------------------*/
2529 void
2530 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2531 {
2532 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2533
2534 DPRINTFN(3, "Completed %p\n", xfer);
2535
2536 /* queue callback for execution, again */
2537 usbd_transfer_done(xfer, 0);
2538 }
2539
2540 /*------------------------------------------------------------------------*
2541 * usbd_transfer_dequeue
2542 *
2543 * - This function is used to remove an USB transfer from a USB
2544 * transfer queue.
2545 *
2546 * - This function can be called multiple times in a row.
2547 *------------------------------------------------------------------------*/
2548 void
2549 usbd_transfer_dequeue(struct usb_xfer *xfer)
2550 {
2551 struct usb_xfer_queue *pq;
2552
2553 pq = xfer->wait_queue;
2554 if (pq) {
2555 TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2556 xfer->wait_queue = NULL;
2557 }
2558 }
2559
2560 /*------------------------------------------------------------------------*
2561 * usbd_transfer_enqueue
2562 *
2563 * - This function is used to insert an USB transfer into a USB
2564 * transfer queue.
2565 *
2566 * - This function can be called multiple times in a row.
2567 *------------------------------------------------------------------------*/
2568 void
2569 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2570 {
2571 /*
2572 * Insert the USB transfer into the queue, if it is not
2573 * already on a USB transfer queue:
2574 */
2575 if (xfer->wait_queue == NULL) {
2576 xfer->wait_queue = pq;
2577 TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2578 }
2579 }
2580
2581 /*------------------------------------------------------------------------*
2582 * usbd_transfer_done
2583 *
2584 * - This function is used to remove an USB transfer from the busdma,
2585 * pipe or interrupt queue.
2586 *
2587 * - This function is used to queue the USB transfer on the done
2588 * queue.
2589 *
2590 * - This function is used to stop any USB transfer timeouts.
2591 *------------------------------------------------------------------------*/
2592 void
2593 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2594 {
2595 struct usb_xfer_root *info = xfer->xroot;
2596
2597 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2598
2599 DPRINTF("err=%s\n", usbd_errstr(error));
2600
2601 /*
2602 * If we are not transferring then just return.
2603 * This can happen during transfer cancel.
2604 */
2605 if (!xfer->flags_int.transferring) {
2606 DPRINTF("not transferring\n");
2607 /* end of control transfer, if any */
2608 xfer->flags_int.control_act = 0;
2609 return;
2610 }
2611 /* only set transfer error, if not already set */
2612 if (xfer->error == USB_ERR_NORMAL_COMPLETION)
2613 xfer->error = error;
2614
2615 /* stop any callouts */
2616 usb_callout_stop(&xfer->timeout_handle);
2617
2618 /*
2619 * If we are waiting on a queue, just remove the USB transfer
2620 * from the queue, if any. We should have the required locks
2621 * locked to do the remove when this function is called.
2622 */
2623 usbd_transfer_dequeue(xfer);
2624
2625 #if USB_HAVE_BUSDMA
2626 if (mtx_owned(info->xfer_mtx)) {
2627 struct usb_xfer_queue *pq;
2628
2629 /*
2630 * If the private USB lock is not locked, then we assume
2631 * that the BUS-DMA load stage has been passed:
2632 */
2633 pq = &info->dma_q;
2634
2635 if (pq->curr == xfer) {
2636 /* start the next BUS-DMA load, if any */
2637 usb_command_wrapper(pq, NULL);
2638 }
2639 }
2640 #endif
2641 /* keep some statistics */
2642 if (xfer->error == USB_ERR_CANCELLED) {
2643 info->udev->stats_cancelled.uds_requests
2644 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2645 } else if (xfer->error != USB_ERR_NORMAL_COMPLETION) {
2646 info->udev->stats_err.uds_requests
2647 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2648 } else {
2649 info->udev->stats_ok.uds_requests
2650 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2651 }
2652
2653 /* call the USB transfer callback */
2654 usbd_callback_ss_done_defer(xfer);
2655 }
2656
2657 /*------------------------------------------------------------------------*
2658 * usbd_transfer_start_cb
2659 *
2660 * This function is called to start the USB transfer when
2661 * "xfer->interval" is greater than zero, and and the endpoint type is
2662 * BULK or CONTROL.
2663 *------------------------------------------------------------------------*/
2664 static void
2665 usbd_transfer_start_cb(void *arg)
2666 {
2667 struct usb_xfer *xfer = arg;
2668 struct usb_endpoint *ep = xfer->endpoint;
2669
2670 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2671
2672 DPRINTF("start\n");
2673
2674 #if USB_HAVE_PF
2675 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2676 #endif
2677
2678 /* the transfer can now be cancelled */
2679 xfer->flags_int.can_cancel_immed = 1;
2680
2681 /* start USB transfer, if no error */
2682 if (xfer->error == 0)
2683 (ep->methods->start) (xfer);
2684
2685 /* check for transfer error */
2686 if (xfer->error) {
2687 /* some error has happened */
2688 usbd_transfer_done(xfer, 0);
2689 }
2690 }
2691
2692 /*------------------------------------------------------------------------*
2693 * usbd_xfer_set_stall
2694 *
2695 * This function is used to set the stall flag outside the
2696 * callback. This function is NULL safe.
2697 *------------------------------------------------------------------------*/
2698 void
2699 usbd_xfer_set_stall(struct usb_xfer *xfer)
2700 {
2701 if (xfer == NULL) {
2702 /* tearing down */
2703 return;
2704 }
2705 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2706
2707 /* avoid any races by locking the USB mutex */
2708 USB_BUS_LOCK(xfer->xroot->bus);
2709 xfer->flags.stall_pipe = 1;
2710 USB_BUS_UNLOCK(xfer->xroot->bus);
2711 }
2712
2713 int
2714 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2715 {
2716 return (xfer->endpoint->is_stalled);
2717 }
2718
2719 /*------------------------------------------------------------------------*
2720 * usbd_transfer_clear_stall
2721 *
2722 * This function is used to clear the stall flag outside the
2723 * callback. This function is NULL safe.
2724 *------------------------------------------------------------------------*/
2725 void
2726 usbd_transfer_clear_stall(struct usb_xfer *xfer)
2727 {
2728 if (xfer == NULL) {
2729 /* tearing down */
2730 return;
2731 }
2732 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2733
2734 /* avoid any races by locking the USB mutex */
2735 USB_BUS_LOCK(xfer->xroot->bus);
2736
2737 xfer->flags.stall_pipe = 0;
2738
2739 USB_BUS_UNLOCK(xfer->xroot->bus);
2740 }
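/*
 * Illustrative sketch of a common host-side recovery pattern: when a
 * transfer callback hits its error path, request a clear-stall on the
 * next start and retry.  Here "tr_setup" is assumed to label the
 * setup/resubmit path of that callback; this is an example, not a
 * requirement of the API.
 *
 *	default:	// error state of the transfer callback
 *		if (error != USB_ERR_CANCELLED) {
 *			// ask for a clear-stall before the next start
 *			usbd_xfer_set_stall(xfer);
 *			goto tr_setup;
 *		}
 *		break;
 */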
2741
2742 /*------------------------------------------------------------------------*
2743 * usbd_pipe_start
2744 *
2745 * This function is used to add an USB transfer to the pipe transfer list.
2746 *------------------------------------------------------------------------*/
2747 void
2748 usbd_pipe_start(struct usb_xfer_queue *pq)
2749 {
2750 struct usb_endpoint *ep;
2751 struct usb_xfer *xfer;
2752 uint8_t type;
2753
2754 xfer = pq->curr;
2755 ep = xfer->endpoint;
2756
2757 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2758
2759 /*
2760 * If the endpoint is already stalled we do nothing !
2761 */
2762 if (ep->is_stalled) {
2763 return;
2764 }
2765 /*
2766 * Check if we are supposed to stall the endpoint:
2767 */
2768 if (xfer->flags.stall_pipe) {
2769 struct usb_device *udev;
2770 struct usb_xfer_root *info;
2771
2772 /* clear stall command */
2773 xfer->flags.stall_pipe = 0;
2774
2775 /* get pointer to USB device */
2776 info = xfer->xroot;
2777 udev = info->udev;
2778
2779 /*
2780 * Only stall BULK and INTERRUPT endpoints.
2781 */
2782 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2783 if ((type == UE_BULK) ||
2784 (type == UE_INTERRUPT)) {
2785 uint8_t did_stall;
2786
2787 did_stall = 1;
2788
2789 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2790 (udev->bus->methods->set_stall) (
2791 udev, ep, &did_stall);
2792 } else if (udev->ctrl_xfer[1]) {
2793 info = udev->ctrl_xfer[1]->xroot;
2794 usb_proc_msignal(
2795 USB_BUS_CS_PROC(info->bus),
2796 &udev->cs_msg[0], &udev->cs_msg[1]);
2797 } else {
2798 /* should not happen */
2799 DPRINTFN(0, "No stall handler\n");
2800 }
2801 /*
2802 * Check if we should stall. Some USB hardware
2803 * handles set- and clear-stall in hardware.
2804 */
2805 if (did_stall) {
2806 /*
2807 * The transfer will be continued when
2808 * the clear-stall control endpoint
2809 * message is received.
2810 */
2811 ep->is_stalled = 1;
2812 return;
2813 }
2814 } else if (type == UE_ISOCHRONOUS) {
2815 /*
2816 * Make sure any FIFO overflow or other FIFO
2817 * error conditions go away by resetting the
2818 * endpoint FIFO through the clear stall
2819 * method.
2820 */
2821 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2822 (udev->bus->methods->clear_stall) (udev, ep);
2823 }
2824 }
2825 }
2826 /* Set or clear stall complete - special case */
2827 if (xfer->nframes == 0) {
2828 /* we are complete */
2829 xfer->aframes = 0;
2830 usbd_transfer_done(xfer, 0);
2831 return;
2832 }
2833 /*
2834 * Handled cases:
2835 *
2836 * 1) Start the first transfer queued.
2837 *
2838 * 2) Re-start the current USB transfer.
2839 */
2840 /*
2841 * Check if there should be any
2842	 * pre-transfer start delay:
2843 */
2844 if (xfer->interval > 0) {
2845 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2846 if ((type == UE_BULK) ||
2847 (type == UE_CONTROL)) {
2848 usbd_transfer_timeout_ms(xfer,
2849 &usbd_transfer_start_cb,
2850 xfer->interval);
2851 return;
2852 }
2853 }
2854 DPRINTF("start\n");
2855
2856 #if USB_HAVE_PF
2857 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2858 #endif
2859 /* the transfer can now be cancelled */
2860 xfer->flags_int.can_cancel_immed = 1;
2861
2862 /* start USB transfer, if no error */
2863 if (xfer->error == 0)
2864 (ep->methods->start) (xfer);
2865
2866 /* check for transfer error */
2867 if (xfer->error) {
2868 /* some error has happened */
2869 usbd_transfer_done(xfer, 0);
2870 }
2871 }
2872
2873 /*------------------------------------------------------------------------*
2874 * usbd_transfer_timeout_ms
2875 *
2876 * This function is used to setup a timeout on the given USB
2877 * transfer. If the timeout has been deferred the callback given by
2878 * "cb" will get called after "ms" milliseconds.
2879 *------------------------------------------------------------------------*/
2880 void
2881 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2882 void (*cb) (void *arg), usb_timeout_t ms)
2883 {
2884 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2885
2886 /* defer delay */
2887 usb_callout_reset(&xfer->timeout_handle,
2888 USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer);
2889 }
2890
2891 /*------------------------------------------------------------------------*
2892 * usbd_callback_wrapper_sub
2893 *
2894 * - This function will update variables in an USB transfer after
2895 * the USB transfer is complete.
2896 *
2897 * - This function is used to start the next USB transfer on the
2898 * ep transfer queue, if any.
2899 *
2900 * NOTE: In some special cases the USB transfer will not be removed from
2901 * the pipe queue, but remain first. To enforce USB transfer removal call
2902 * this function passing the error code "USB_ERR_CANCELLED".
2903 *
2904 * Return values:
2905 * 0: Success.
2906 * Else: The callback has been deferred.
2907 *------------------------------------------------------------------------*/
2908 static uint8_t
2909 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2910 {
2911 struct usb_endpoint *ep;
2912 struct usb_bus *bus;
2913 usb_frcount_t x;
2914
2915 bus = xfer->xroot->bus;
2916
2917 if ((!xfer->flags_int.open) &&
2918 (!xfer->flags_int.did_close)) {
2919 DPRINTF("close\n");
2920 USB_BUS_LOCK(bus);
2921 (xfer->endpoint->methods->close) (xfer);
2922 USB_BUS_UNLOCK(bus);
2923 /* only close once */
2924 xfer->flags_int.did_close = 1;
2925 return (1); /* wait for new callback */
2926 }
2927 /*
2928 * If we have a non-hardware induced error we
2929 * need to do the DMA delay!
2930 */
2931 if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2932 (xfer->error == USB_ERR_CANCELLED ||
2933 xfer->error == USB_ERR_TIMEOUT ||
2934 bus->methods->start_dma_delay != NULL)) {
2935 usb_timeout_t temp;
2936
2937 /* only delay once */
2938 xfer->flags_int.did_dma_delay = 1;
2939
2940 /* we can not cancel this delay */
2941 xfer->flags_int.can_cancel_immed = 0;
2942
2943 temp = usbd_get_dma_delay(xfer->xroot->udev);
2944
2945 DPRINTFN(3, "DMA delay, %u ms, "
2946 "on %p\n", temp, xfer);
2947
2948 if (temp != 0) {
2949 USB_BUS_LOCK(bus);
2950 /*
2951 * Some hardware solutions have dedicated
2952 * events when it is safe to free DMA'ed
2953 * memory. For the other hardware platforms we
2954 * use a static delay.
2955 */
2956 if (bus->methods->start_dma_delay != NULL) {
2957 (bus->methods->start_dma_delay) (xfer);
2958 } else {
2959 usbd_transfer_timeout_ms(xfer,
2960 (void (*)(void *))&usb_dma_delay_done_cb,
2961 temp);
2962 }
2963 USB_BUS_UNLOCK(bus);
2964 return (1); /* wait for new callback */
2965 }
2966 }
2967 /* check actual number of frames */
2968 if (xfer->aframes > xfer->nframes) {
2969 if (xfer->error == 0) {
2970 panic("%s: actual number of frames, %d, is "
2971 "greater than initial number of frames, %d\n",
2972 __FUNCTION__, xfer->aframes, xfer->nframes);
2973 } else {
2974 /* just set some valid value */
2975 xfer->aframes = xfer->nframes;
2976 }
2977 }
2978 /* compute actual length */
2979 xfer->actlen = 0;
2980
2981 for (x = 0; x != xfer->aframes; x++) {
2982 xfer->actlen += xfer->frlengths[x];
2983 }
2984
2985 /*
2986 * Frames that were not transferred get zero actual length in
2987 * case the USB device driver does not check the actual number
2988 * of frames transferred, "xfer->aframes":
2989 */
2990 for (; x < xfer->nframes; x++) {
2991 usbd_xfer_set_frame_len(xfer, x, 0);
2992 }
2993
2994 /* check actual length */
2995 if (xfer->actlen > xfer->sumlen) {
2996 if (xfer->error == 0) {
2997 panic("%s: actual length, %d, is greater than "
2998 "initial length, %d\n",
2999 __FUNCTION__, xfer->actlen, xfer->sumlen);
3000 } else {
3001 /* just set some valid value */
3002 xfer->actlen = xfer->sumlen;
3003 }
3004 }
3005 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
3006 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
3007 xfer->aframes, xfer->nframes);
3008
3009 if (xfer->error) {
3010 /* end of control transfer, if any */
3011 xfer->flags_int.control_act = 0;
3012
3013 #if USB_HAVE_TT_SUPPORT
3014 switch (xfer->error) {
3015 case USB_ERR_NORMAL_COMPLETION:
3016 case USB_ERR_SHORT_XFER:
3017 case USB_ERR_STALLED:
3018 case USB_ERR_CANCELLED:
3019 /* nothing to do */
3020 break;
3021 default:
3022 /* try to reset the TT, if any */
3023 USB_BUS_LOCK(bus);
3024 uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint);
3025 USB_BUS_UNLOCK(bus);
3026 break;
3027 }
3028 #endif
3029 /* check if we should block the execution queue */
3030 if ((xfer->error != USB_ERR_CANCELLED) &&
3031 (xfer->flags.pipe_bof)) {
3032 DPRINTFN(2, "xfer=%p: Block On Failure "
3033 "on endpoint=%p\n", xfer, xfer->endpoint);
3034 goto done;
3035 }
3036 } else {
3037 /* check for short transfers */
3038 if (xfer->actlen < xfer->sumlen) {
3039 /* end of control transfer, if any */
3040 xfer->flags_int.control_act = 0;
3041
3042 if (!xfer->flags_int.short_xfer_ok) {
3043 xfer->error = USB_ERR_SHORT_XFER;
3044 if (xfer->flags.pipe_bof) {
3045 DPRINTFN(2, "xfer=%p: Block On Failure on "
3046 "Short Transfer on endpoint %p.\n",
3047 xfer, xfer->endpoint);
3048 goto done;
3049 }
3050 }
3051 } else {
3052 /*
3053 * Check if we are in the middle of a
3054 * control transfer:
3055 */
3056 if (xfer->flags_int.control_act) {
3057 DPRINTFN(5, "xfer=%p: Control transfer "
3058 "active on endpoint=%p\n", xfer, xfer->endpoint);
3059 goto done;
3060 }
3061 }
3062 }
3063
3064 ep = xfer->endpoint;
3065
3066 /*
3067 * If the current USB transfer is completing we need to start the
3068 * next one:
3069 */
3070 USB_BUS_LOCK(bus);
3071 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
3072 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
3073
3074 if (ep->endpoint_q[xfer->stream_id].curr != NULL ||
3075 TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) {
3076 /* there is another USB transfer waiting */
3077 } else {
3078 /* this is the last USB transfer */
3079 /* clear isochronous sync flag */
3080 xfer->endpoint->is_synced = 0;
3081 }
3082 }
3083 USB_BUS_UNLOCK(bus);
3084 done:
3085 return (0);
3086 }
3087
3088 /*------------------------------------------------------------------------*
3089 * usb_command_wrapper
3090 *
3091 * This function is used to execute commands non-recursively on an USB
3092 * transfer.
3093 *------------------------------------------------------------------------*/
3094 void
3095 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
3096 {
3097 if (xfer) {
3098 /*
3099 * If the transfer is not already processing,
3100 * queue it!
3101 */
3102 if (pq->curr != xfer) {
3103 usbd_transfer_enqueue(pq, xfer);
3104 if (pq->curr != NULL) {
3105 /* something is already processing */
3106 DPRINTFN(6, "busy %p\n", pq->curr);
3107 return;
3108 }
3109 }
3110 } else {
3111 /* Get next element in queue */
3112 pq->curr = NULL;
3113 }
3114
3115 if (!pq->recurse_1) {
3116 /* clear third recurse flag */
3117 pq->recurse_3 = 0;
3118
3119 do {
3120 /* set two first recurse flags */
3121 pq->recurse_1 = 1;
3122 pq->recurse_2 = 1;
3123
3124 if (pq->curr == NULL) {
3125 xfer = TAILQ_FIRST(&pq->head);
3126 if (xfer) {
3127 TAILQ_REMOVE(&pq->head, xfer,
3128 wait_entry);
3129 xfer->wait_queue = NULL;
3130 pq->curr = xfer;
3131 } else {
3132 break;
3133 }
3134 }
3135 DPRINTFN(6, "cb %p (enter)\n", pq->curr);
3136 (pq->command) (pq);
3137 DPRINTFN(6, "cb %p (leave)\n", pq->curr);
3138
3139 /*
3140 * Set third recurse flag to indicate
3141 * recursion happened:
3142 */
3143 pq->recurse_3 = 1;
3144
3145 } while (!pq->recurse_2);
3146
3147 /* clear first recurse flag */
3148 pq->recurse_1 = 0;
3149
3150 } else {
3151 /* clear second recurse flag */
3152 pq->recurse_2 = 0;
3153 }
3154 }
3155
3156 /*------------------------------------------------------------------------*
3157 * usbd_ctrl_transfer_setup
3158 *
3159 * This function is used to setup the default USB control endpoint
3160 * transfer.
3161 *------------------------------------------------------------------------*/
3162 void
3163 usbd_ctrl_transfer_setup(struct usb_device *udev)
3164 {
3165 struct usb_xfer *xfer;
3166 uint8_t no_resetup;
3167 uint8_t iface_index;
3168
3169 /* check for root HUB */
3170 if (udev->parent_hub == NULL)
3171 return;
3172 repeat:
3173
3174 xfer = udev->ctrl_xfer[0];
3175 if (xfer) {
3176 USB_XFER_LOCK(xfer);
3177 no_resetup =
3178 ((xfer->address == udev->address) &&
3179 (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3180 udev->ddesc.bMaxPacketSize));
3181 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3182 if (no_resetup) {
3183 /*
3184 * NOTE: checking "xfer->address" and
3185 * starting the USB transfer must be
3186 * atomic!
3187 */
3188 usbd_transfer_start(xfer);
3189 }
3190 }
3191 USB_XFER_UNLOCK(xfer);
3192 } else {
3193 no_resetup = 0;
3194 }
3195
3196 if (no_resetup) {
3197 /*
3198		 * All parameters are exactly the same as before.
3199 * Just return.
3200 */
3201 return;
3202 }
3203 /*
3204 * Update wMaxPacketSize for the default control endpoint:
3205 */
3206 udev->ctrl_ep_desc.wMaxPacketSize[0] =
3207 udev->ddesc.bMaxPacketSize;
3208
3209 /*
3210 * Unsetup any existing USB transfer:
3211 */
3212 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3213
3214 /*
3215 * Reset clear stall error counter.
3216 */
3217 udev->clear_stall_errors = 0;
3218
3219 /*
3220 * Try to setup a new USB transfer for the
3221 * default control endpoint:
3222 */
3223 iface_index = 0;
3224 if (usbd_transfer_setup(udev, &iface_index,
3225 udev->ctrl_xfer, udev->bus->control_ep_quirk ?
3226 usb_control_ep_quirk_cfg : usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3227 &udev->device_mtx)) {
3228 DPRINTFN(0, "could not setup default "
3229 "USB transfer\n");
3230 } else {
3231 goto repeat;
3232 }
3233 }
3234
3235 /*------------------------------------------------------------------------*
3236 * usbd_clear_stall_locked - factored out code
3237 *
3238 * NOTE: the intention of this function is not to reset the hardware
3239 * data toggle.
3240 *------------------------------------------------------------------------*/
3241 void
3242 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3243 {
3244 USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3245
3246 /* check that we have a valid case */
3247 if (udev->flags.usb_mode == USB_MODE_HOST &&
3248 udev->parent_hub != NULL &&
3249 udev->bus->methods->clear_stall != NULL &&
3250 ep->methods != NULL) {
3251 (udev->bus->methods->clear_stall) (udev, ep);
3252 }
3253 }
3254
3255 /*------------------------------------------------------------------------*
3256 * usbd_clear_data_toggle - factored out code
3257 *
3258 * NOTE: the intention of this function is not to reset the hardware
3259 * data toggle on the USB device side.
3260 *------------------------------------------------------------------------*/
3261 void
3262 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3263 {
3264 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3265
3266 USB_BUS_LOCK(udev->bus);
3267 ep->toggle_next = 0;
3268 /* some hardware needs a callback to clear the data toggle */
3269 usbd_clear_stall_locked(udev, ep);
3270 USB_BUS_UNLOCK(udev->bus);
3271 }
3272
3273 /*------------------------------------------------------------------------*
3274 * usbd_clear_stall_callback - factored out clear stall callback
3275 *
3276 * Input parameters:
3277 * xfer1: Clear Stall Control Transfer
3278 * xfer2: Stalled USB Transfer
3279 *
3280 * This function is NULL safe.
3281 *
3282 * Return values:
3283 * 0: In progress
3284 * Else: Finished
3285 *
3286 * Clear stall config example:
3287 *
3288 * static const struct usb_config my_clearstall = {
3289 * .type = UE_CONTROL,
3290 * .endpoint = 0,
3291 * .direction = UE_DIR_ANY,
3292 * .interval = 50, //50 milliseconds
3293 * .bufsize = sizeof(struct usb_device_request),
3294 * .timeout = 1000, //1.000 seconds
3295 * .callback = &my_clear_stall_callback, // **
3296 * .usb_mode = USB_MODE_HOST,
3297 * };
3298 *
3299 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3300 * passing the correct parameters.
3301 *------------------------------------------------------------------------*/
3302 uint8_t
3303 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3304 struct usb_xfer *xfer2)
3305 {
3306 struct usb_device_request req;
3307
3308 if (xfer2 == NULL) {
3309 /* looks like we are tearing down */
3310 DPRINTF("NULL input parameter\n");
3311 return (0);
3312 }
3313 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3314 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3315
3316 switch (USB_GET_STATE(xfer1)) {
3317 case USB_ST_SETUP:
3318
3319 /*
3320 * pre-clear the data toggle to DATA0 ("umass.c" and
3321 * "ata-usb.c" depends on this)
3322 */
3323
3324 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3325
3326 /* setup a clear-stall packet */
3327
3328 req.bmRequestType = UT_WRITE_ENDPOINT;
3329 req.bRequest = UR_CLEAR_FEATURE;
3330 USETW(req.wValue, UF_ENDPOINT_HALT);
3331 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3332 req.wIndex[1] = 0;
3333 USETW(req.wLength, 0);
3334
3335 /*
3336 * "usbd_transfer_setup_sub()" will ensure that
3337 * we have sufficient room in the buffer for
3338 * the request structure!
3339 */
3340
3341 /* copy in the transfer */
3342
3343 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3344
3345 /* set length */
3346 xfer1->frlengths[0] = sizeof(req);
3347 xfer1->nframes = 1;
3348
3349 usbd_transfer_submit(xfer1);
3350 return (0);
3351
3352 case USB_ST_TRANSFERRED:
3353 break;
3354
3355 default: /* Error */
3356 if (xfer1->error == USB_ERR_CANCELLED) {
3357 return (0);
3358 }
3359 break;
3360 }
3361 return (1); /* Clear Stall Finished */
3362 }
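/*
 * Sketch of the "my_clear_stall_callback" referred to in the comment
 * above.  The softc layout ("my_softc", "sc_data_xfer") is
 * hypothetical:
 *
 *	static void
 *	my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct my_softc *sc = usbd_xfer_softc(xfer);
 *
 *		if (usbd_clear_stall_callback(xfer, sc->sc_data_xfer)) {
 *			// clear-stall finished; restart the stalled transfer
 *			usbd_transfer_start(sc->sc_data_xfer);
 *		}
 *	}
 */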
3363
3364 /*------------------------------------------------------------------------*
3365 * usbd_transfer_poll
3366 *
3367 * The following function gets called from the USB keyboard driver and
3368 * UMASS when the system has panicked.
3369 *
3370 * NOTE: It is currently not possible to resume normal operation on
3371 * the USB controller which has been polled, due to clearing of the
3372 * "up_dsleep" and "up_msleep" flags.
3373 *------------------------------------------------------------------------*/
3374 void
3375 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3376 {
3377 struct usb_xfer *xfer;
3378 struct usb_xfer_root *xroot;
3379 struct usb_device *udev;
3380 struct usb_proc_msg *pm;
3381 struct usb_bus *bus;
3382 uint16_t n;
3383 uint16_t drop_bus_spin;
3384 uint16_t drop_bus;
3385 uint16_t drop_xfer;
3386
3387 for (n = 0; n != max; n++) {
3388 /* Extra checks to avoid panic */
3389 xfer = ppxfer[n];
3390 if (xfer == NULL)
3391 continue; /* no USB transfer */
3392 xroot = xfer->xroot;
3393 if (xroot == NULL)
3394 continue; /* no USB root */
3395 udev = xroot->udev;
3396 if (udev == NULL)
3397 continue; /* no USB device */
3398 bus = udev->bus;
3399 if (bus == NULL)
3400 continue; /* no BUS structure */
3401 if (bus->methods == NULL)
3402 continue; /* no BUS methods */
3403 if (bus->methods->xfer_poll == NULL)
3404 continue; /* no poll method */
3405
3406 drop_bus_spin = 0;
3407 drop_bus = 0;
3408 drop_xfer = 0;
3409
3410 if (USB_IN_POLLING_MODE_FUNC() == 0) {
3411 /* make sure that the BUS spin mutex is not locked */
3412 while (mtx_owned(&bus->bus_spin_lock)) {
3413 mtx_unlock_spin(&bus->bus_spin_lock);
3414 drop_bus_spin++;
3415 }
3416
3417 /* make sure that the BUS mutex is not locked */
3418 while (mtx_owned(&bus->bus_mtx)) {
3419 mtx_unlock(&bus->bus_mtx);
3420 drop_bus++;
3421 }
3422
3423 /* make sure that the transfer mutex is not locked */
3424 while (mtx_owned(xroot->xfer_mtx)) {
3425 mtx_unlock(xroot->xfer_mtx);
3426 drop_xfer++;
3427 }
3428 }
3429
3430		/* Make sure cv_signal() and cv_broadcast() are not called */
3431 USB_BUS_CONTROL_XFER_PROC(bus)->up_msleep = 0;
3432 USB_BUS_EXPLORE_PROC(bus)->up_msleep = 0;
3433 USB_BUS_GIANT_PROC(bus)->up_msleep = 0;
3434 USB_BUS_NON_GIANT_ISOC_PROC(bus)->up_msleep = 0;
3435 USB_BUS_NON_GIANT_BULK_PROC(bus)->up_msleep = 0;
3436
3437 /* poll USB hardware */
3438 (bus->methods->xfer_poll) (bus);
3439
3440 USB_BUS_LOCK(xroot->bus);
3441
3442 /* check for clear stall */
3443 if (udev->ctrl_xfer[1] != NULL) {
3444 /* poll clear stall start */
3445 pm = &udev->cs_msg[0].hdr;
3446 (pm->pm_callback) (pm);
3447 /* poll clear stall done thread */
3448 pm = &udev->ctrl_xfer[1]->
3449 xroot->done_m[0].hdr;
3450 (pm->pm_callback) (pm);
3451 }
3452
3453 /* poll done thread */
3454 pm = &xroot->done_m[0].hdr;
3455 (pm->pm_callback) (pm);
3456
3457 USB_BUS_UNLOCK(xroot->bus);
3458
3459 /* restore transfer mutex */
3460 while (drop_xfer--)
3461 mtx_lock(xroot->xfer_mtx);
3462
3463 /* restore BUS mutex */
3464 while (drop_bus--)
3465 mtx_lock(&bus->bus_mtx);
3466
3467 /* restore BUS spin mutex */
3468 while (drop_bus_spin--)
3469 mtx_lock_spin(&bus->bus_spin_lock);
3470 }
3471 }
3472
3473 static void
3474 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3475 uint8_t type, enum usb_dev_speed speed)
3476 {
3477 static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3478 [USB_SPEED_LOW] = 8,
3479 [USB_SPEED_FULL] = 64,
3480 [USB_SPEED_HIGH] = 1024,
3481 [USB_SPEED_VARIABLE] = 1024,
3482 [USB_SPEED_SUPER] = 1024,
3483 };
3484
3485 static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3486 [USB_SPEED_LOW] = 0, /* invalid */
3487 [USB_SPEED_FULL] = 1023,
3488 [USB_SPEED_HIGH] = 1024,
3489 [USB_SPEED_VARIABLE] = 3584,
3490 [USB_SPEED_SUPER] = 1024,
3491 };
3492
3493 static const uint16_t control_min[USB_SPEED_MAX] = {
3494 [USB_SPEED_LOW] = 8,
3495 [USB_SPEED_FULL] = 8,
3496 [USB_SPEED_HIGH] = 64,
3497 [USB_SPEED_VARIABLE] = 512,
3498 [USB_SPEED_SUPER] = 512,
3499 };
3500
3501 static const uint16_t bulk_min[USB_SPEED_MAX] = {
3502 [USB_SPEED_LOW] = 8,
3503 [USB_SPEED_FULL] = 8,
3504 [USB_SPEED_HIGH] = 512,
3505 [USB_SPEED_VARIABLE] = 512,
3506 [USB_SPEED_SUPER] = 1024,
3507 };
3508
3509 uint16_t temp;
3510
3511 memset(ptr, 0, sizeof(*ptr));
3512
3513 switch (type) {
3514 case UE_INTERRUPT:
3515 ptr->range.max = intr_range_max[speed];
3516 break;
3517 case UE_ISOCHRONOUS:
3518 ptr->range.max = isoc_range_max[speed];
3519 break;
3520 default:
3521 if (type == UE_BULK)
3522 temp = bulk_min[speed];
3523 else /* UE_CONTROL */
3524 temp = control_min[speed];
3525
3526 /* default is fixed */
3527 ptr->fixed[0] = temp;
3528 ptr->fixed[1] = temp;
3529 ptr->fixed[2] = temp;
3530 ptr->fixed[3] = temp;
3531
3532 if (speed == USB_SPEED_FULL) {
3533 /* multiple sizes */
3534 ptr->fixed[1] = 16;
3535 ptr->fixed[2] = 32;
3536 ptr->fixed[3] = 64;
3537 }
3538 if ((speed == USB_SPEED_VARIABLE) &&
3539 (type == UE_BULK)) {
3540 /* multiple sizes */
3541 ptr->fixed[2] = 1024;
3542 ptr->fixed[3] = 1536;
3543 }
3544 break;
3545 }
3546 }
3547
3548 void *
3549 usbd_xfer_softc(struct usb_xfer *xfer)
3550 {
3551 return (xfer->priv_sc);
3552 }
3553
3554 void *
3555 usbd_xfer_get_priv(struct usb_xfer *xfer)
3556 {
3557 return (xfer->priv_fifo);
3558 }
3559
3560 void
3561 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3562 {
3563 xfer->priv_fifo = ptr;
3564 }
3565
3566 uint8_t
3567 usbd_xfer_state(struct usb_xfer *xfer)
3568 {
3569 return (xfer->usb_state);
3570 }
3571
3572 void
3573 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3574 {
3575 switch (flag) {
3576 case USB_FORCE_SHORT_XFER:
3577 xfer->flags.force_short_xfer = 1;
3578 break;
3579 case USB_SHORT_XFER_OK:
3580 xfer->flags.short_xfer_ok = 1;
3581 break;
3582 case USB_MULTI_SHORT_OK:
3583 xfer->flags.short_frames_ok = 1;
3584 break;
3585 case USB_MANUAL_STATUS:
3586 xfer->flags.manual_status = 1;
3587 break;
3588 }
3589 }
3590
3591 void
3592 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3593 {
3594 switch (flag) {
3595 case USB_FORCE_SHORT_XFER:
3596 xfer->flags.force_short_xfer = 0;
3597 break;
3598 case USB_SHORT_XFER_OK:
3599 xfer->flags.short_xfer_ok = 0;
3600 break;
3601 case USB_MULTI_SHORT_OK:
3602 xfer->flags.short_frames_ok = 0;
3603 break;
3604 case USB_MANUAL_STATUS:
3605 xfer->flags.manual_status = 0;
3606 break;
3607 }
3608 }
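/*
 * Illustrative sketch: these flags may be toggled at runtime,
 * typically from the transfer callback or with the private USB lock
 * held, to change how the next submitted transfer behaves:
 *
 *	usbd_xfer_set_flag(xfer, USB_SHORT_XFER_OK);	// accept short reads
 *	...
 *	usbd_xfer_clr_flag(xfer, USB_SHORT_XFER_OK);	// require full length
 */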
3609
3610 /*
3611 * The following function returns the time, in milliseconds, at which
3612 * the isochronous transfer was completed by the hardware. The
3613 * returned value wraps around 65536 milliseconds.
3614 */
3615 uint16_t
3616 usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3617 {
3618 return (xfer->isoc_time_complete);
3619 }
3620
3621 /*
3622 * The following function returns non-zero if the max packet size
3623 * field was clamped to a valid value. Else it returns zero.
3624 */
3625 uint8_t
3626 usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer)
3627 {
3628 return (xfer->flags_int.maxp_was_clamped);
3629 }
3630
3631 /*
3632 * The following function computes the next isochronous frame number
3633 * where the first isochronous packet should be queued.
3634 *
3635 * The function returns non-zero if there was a discontinuity.
3636 * Else zero is returned for normal operation.
3637 */
3638 uint8_t
3639 usbd_xfer_get_isochronous_start_frame(struct usb_xfer *xfer, uint32_t frame_curr,
3640 uint32_t frame_min, uint32_t frame_ms, uint32_t frame_mask, uint32_t *p_frame_start)
3641 {
3642 uint32_t duration;
3643 uint32_t delta;
3644 uint8_t retval;
3645 uint8_t shift;
3646
3647 /* Compute time ahead of current schedule. */
3648 delta = (xfer->endpoint->isoc_next - frame_curr) & frame_mask;
3649
3650 /*
3651 * Check if it is the first transfer or if the future frame
3652 * delta is less than one millisecond or if the frame delta is
3653 * negative:
3654 */
3655 if (xfer->endpoint->is_synced == 0 ||
3656 delta < (frame_ms + frame_min) ||
3657 delta > (frame_mask / 2)) {
3658 /* Schedule transfer 2 milliseconds into the future. */
3659 xfer->endpoint->isoc_next = (frame_curr + 2 * frame_ms + frame_min) & frame_mask;
3660 xfer->endpoint->is_synced = 1;
3661
3662 retval = 1;
3663 } else {
3664 retval = 0;
3665 }
3666
3667 /* Store start time, if any. */
3668 if (p_frame_start != NULL)
3669 *p_frame_start = xfer->endpoint->isoc_next & frame_mask;
3670
3671 /* Get relative completion time, in milliseconds. */
3672 delta = xfer->endpoint->isoc_next - frame_curr + (frame_curr % frame_ms);
3673 delta &= frame_mask;
3674 delta /= frame_ms;
3675
3676 switch (usbd_get_speed(xfer->xroot->udev)) {
3677 case USB_SPEED_FULL:
3678 shift = 3;
3679 break;
3680 default:
3681 shift = usbd_xfer_get_fps_shift(xfer);
3682 break;
3683 }
3684
3685 /* Get duration in milliseconds, rounded up. */
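	/* For example, nframes = 32 and shift = 1 gives ((32 << 1) + 7) / 8 = 8 ms. */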
3686 duration = ((xfer->nframes << shift) + 7) / 8;
3687
3688 /* Compute full 32-bit completion time, in milliseconds. */
3689 xfer->isoc_time_complete =
3690 usb_isoc_time_expand(xfer->xroot->bus, frame_curr / frame_ms) +
3691 delta + duration;
3692
3693 /* Compute next isochronous frame. */
3694 xfer->endpoint->isoc_next += duration * frame_ms;
3695 xfer->endpoint->isoc_next &= frame_mask;
3696
3697 return (retval);
3698 }
3699