1 /* $FreeBSD$ */
2 /*-
3 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 *
5 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #ifdef USB_GLOBAL_INCLUDE_FILE
30 #include USB_GLOBAL_INCLUDE_FILE
31 #else
32 #include <sys/stdint.h>
33 #include <sys/stddef.h>
34 #include <sys/param.h>
35 #include <sys/queue.h>
36 #include <sys/types.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/bus.h>
40 #include <sys/module.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/condvar.h>
44 #include <sys/sysctl.h>
45 #include <sys/sx.h>
46 #include <sys/unistd.h>
47 #include <sys/callout.h>
48 #include <sys/malloc.h>
49 #include <sys/priv.h>
50
51 #include <dev/usb/usb.h>
52 #include <dev/usb/usbdi.h>
53 #include <dev/usb/usbdi_util.h>
54
55 #define USB_DEBUG_VAR usb_debug
56
57 #include <dev/usb/usb_core.h>
58 #include <dev/usb/usb_busdma.h>
59 #include <dev/usb/usb_process.h>
60 #include <dev/usb/usb_transfer.h>
61 #include <dev/usb/usb_device.h>
62 #include <dev/usb/usb_debug.h>
63 #include <dev/usb/usb_util.h>
64
65 #include <dev/usb/usb_controller.h>
66 #include <dev/usb/usb_bus.h>
67 #include <dev/usb/usb_pf.h>
68 #endif /* USB_GLOBAL_INCLUDE_FILE */
69
70 struct usb_std_packet_size {
71 struct {
72 uint16_t min; /* inclusive */
73 uint16_t max; /* inclusive */
74 } range;
75
76 uint16_t fixed[4];
77 };
78
79 static usb_callback_t usb_request_callback;
80
81 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
82
83 /* This transfer is used for generic control endpoint transfers */
84
85 [0] = {
86 .type = UE_CONTROL,
87 .endpoint = 0x00, /* Control endpoint */
88 .direction = UE_DIR_ANY,
89 .bufsize = USB_EP0_BUFSIZE, /* bytes */
90 .flags = {.proxy_buffer = 1,},
91 .callback = &usb_request_callback,
92 .usb_mode = USB_MODE_DUAL, /* both modes */
93 },
94
95 /* This transfer is used for generic clear stall only */
96
97 [1] = {
98 .type = UE_CONTROL,
99 .endpoint = 0x00, /* Control pipe */
100 .direction = UE_DIR_ANY,
101 .bufsize = sizeof(struct usb_device_request),
102 .callback = &usb_do_clear_stall_callback,
103 .timeout = 1000, /* 1 second */
104 .interval = 50, /* 50ms */
105 .usb_mode = USB_MODE_HOST,
106 },
107 };
108
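/*
 * Note: the "quirk" variant below differs from "usb_control_ep_cfg"
 * above mainly in its much larger 64 KByte control buffer. It is
 * intended for devices whose quirks require oversized control
 * transfers.
 */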
109 static const struct usb_config usb_control_ep_quirk_cfg[USB_CTRL_XFER_MAX] = {
110
111 /* This transfer is used for generic control endpoint transfers */
112
113 [0] = {
114 .type = UE_CONTROL,
115 .endpoint = 0x00, /* Control endpoint */
116 .direction = UE_DIR_ANY,
117 .bufsize = 65535, /* bytes */
118 .callback = &usb_request_callback,
119 .usb_mode = USB_MODE_DUAL, /* both modes */
120 },
121
122 /* This transfer is used for generic clear stall only */
123
124 [1] = {
125 .type = UE_CONTROL,
126 .endpoint = 0x00, /* Control pipe */
127 .direction = UE_DIR_ANY,
128 .bufsize = sizeof(struct usb_device_request),
129 .callback = &usb_do_clear_stall_callback,
130 .timeout = 1000, /* 1 second */
131 .interval = 50, /* 50ms */
132 .usb_mode = USB_MODE_HOST,
133 },
134 };
135
136 /* function prototypes */
137
138 static void usbd_update_max_frame_size(struct usb_xfer *);
139 static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
140 static void usbd_control_transfer_init(struct usb_xfer *);
141 static int usbd_setup_ctrl_transfer(struct usb_xfer *);
142 static void usb_callback_proc(struct usb_proc_msg *);
143 static void usbd_callback_ss_done_defer(struct usb_xfer *);
144 static void usbd_callback_wrapper(struct usb_xfer_queue *);
145 static void usbd_transfer_start_cb(void *);
146 static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *);
147 static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
148 uint8_t type, enum usb_dev_speed speed);
149
150 /*------------------------------------------------------------------------*
151 * usb_request_callback
152 *------------------------------------------------------------------------*/
153 static void
154 usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
155 {
156 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
157 usb_handle_request_callback(xfer, error);
158 else
159 usbd_do_request_callback(xfer, error);
160 }
161
162 /*------------------------------------------------------------------------*
163 * usbd_update_max_frame_size
164 *
165 * This function updates the maximum frame size, since high speed USB
166 * can transfer multiple consecutive packets per frame.
167 *------------------------------------------------------------------------*/
168 static void
169 usbd_update_max_frame_size(struct usb_xfer *xfer)
170 {
171 /* compute maximum frame size */
172 /* this computation should not overflow 16-bit */
173 /* max = 15 * 1024 */
174
175 xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
176 }
177
178 /*------------------------------------------------------------------------*
179 * usbd_get_dma_delay
180 *
181 * The following function is called when we need to
182 * synchronize with DMA hardware.
183 *
184 * Returns:
185 * 0: no DMA delay required
186 * Else: milliseconds of DMA delay
187 *------------------------------------------------------------------------*/
188 usb_timeout_t
189 usbd_get_dma_delay(struct usb_device *udev)
190 {
191 const struct usb_bus_methods *mtod;
192 uint32_t temp;
193
194 mtod = udev->bus->methods;
195 temp = 0;
196
197 if (mtod->get_dma_delay) {
198 (mtod->get_dma_delay) (udev, &temp);
199 /*
200 * Round up and convert to milliseconds. Note that we divide
201 * by 1024 instead of 1000, to save a division.
202 */
203 temp += 0x3FF;
204 temp /= 0x400;
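		/*
		 * Worked example (assuming the controller method reports
		 * microseconds): a value of 1500 becomes
		 * (1500 + 0x3FF) / 0x400 = 2 milliseconds.
		 */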
205 }
206 return (temp);
207 }
208
209 /*------------------------------------------------------------------------*
210 * usbd_transfer_setup_sub_malloc
211 *
212 * This function will allocate one or more DMA'able memory chunks
213 * according to the "size", "align" and "count" arguments. Afterwards
214 * "ppc" points to a linear array of USB page caches.
215 *
216 * If the "align" argument is equal to "1" a non-contiguous allocation
217 * can happen. Else if the "align" argument is greater than "1", the
218 * allocation will always be contiguous in memory.
219 *
220 * Returns:
221 * 0: Success
222 * Else: Failure
223 *------------------------------------------------------------------------*/
224 #if USB_HAVE_BUSDMA
225 uint8_t
226 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
227 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
228 usb_size_t count)
229 {
230 struct usb_page_cache *pc;
231 struct usb_page *pg;
232 void *buf;
233 usb_size_t n_dma_pc;
234 usb_size_t n_dma_pg;
235 usb_size_t n_obj;
236 usb_size_t x;
237 usb_size_t y;
238 usb_size_t r;
239 usb_size_t z;
240
241 USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
242 align));
243 USB_ASSERT(size > 0, ("Invalid size = 0\n"));
244
245 if (count == 0) {
246 return (0); /* nothing to allocate */
247 }
248 /*
249 * Make sure that the size is aligned properly.
250 */
251 size = -((-size) & (-align));
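	/*
	 * With a power-of-two "align" the expression above rounds "size"
	 * up to the next multiple of "align", e.g. size = 100 and
	 * align = 64 results in size = 128.
	 */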
252
253 /*
254 * Try multi-allocation chunks to reduce the number of DMA
255 * allocations, since DMA allocations are slow.
256 */
257 if (align == 1) {
258 /* special case - non-cached multi page DMA memory */
259 n_dma_pc = count;
260 n_dma_pg = (2 + (size / USB_PAGE_SIZE));
261 n_obj = 1;
262 } else if (size >= USB_PAGE_SIZE) {
263 n_dma_pc = count;
264 n_dma_pg = 1;
265 n_obj = 1;
266 } else {
267 /* compute number of objects per page */
268 #ifdef USB_DMA_SINGLE_ALLOC
269 n_obj = 1;
270 #else
271 n_obj = (USB_PAGE_SIZE / size);
272 #endif
273 /*
274 * Compute number of DMA chunks, rounded up
275 * to nearest one:
276 */
277 n_dma_pc = howmany(count, n_obj);
278 n_dma_pg = 1;
279 }
280
281 /*
282 * DMA memory is allocated once, but mapped twice. That's why
283 * there is one list for auto-free and another list for
284 * non-auto-free which only holds the mapping and not the
285 * allocation.
286 */
287 if (parm->buf == NULL) {
288 /* reserve memory (auto-free) */
289 parm->dma_page_ptr += n_dma_pc * n_dma_pg;
290 parm->dma_page_cache_ptr += n_dma_pc;
291
292 /* reserve memory (no-auto-free) */
293 parm->dma_page_ptr += count * n_dma_pg;
294 parm->xfer_page_cache_ptr += count;
295 return (0);
296 }
297 for (x = 0; x != n_dma_pc; x++) {
298 /* need to initialize the page cache */
299 parm->dma_page_cache_ptr[x].tag_parent =
300 &parm->curr_xfer->xroot->dma_parent_tag;
301 }
302 for (x = 0; x != count; x++) {
303 /* need to initialize the page cache */
304 parm->xfer_page_cache_ptr[x].tag_parent =
305 &parm->curr_xfer->xroot->dma_parent_tag;
306 }
307
308 if (ppc != NULL) {
309 if (n_obj != 1)
310 *ppc = parm->xfer_page_cache_ptr;
311 else
312 *ppc = parm->dma_page_cache_ptr;
313 }
314 r = count; /* set remainder count */
315 z = n_obj * size; /* set allocation size */
316 pc = parm->xfer_page_cache_ptr;
317 pg = parm->dma_page_ptr;
318
319 if (n_obj == 1) {
320 /*
321 * Avoid mapping memory twice if only a single object
322 * should be allocated per page cache:
323 */
324 for (x = 0; x != n_dma_pc; x++) {
325 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
326 pg, z, align)) {
327 return (1); /* failure */
328 }
329 /* Make room for one DMA page cache and "n_dma_pg" pages */
330 parm->dma_page_cache_ptr++;
331 pg += n_dma_pg;
332 }
333 } else {
334 for (x = 0; x != n_dma_pc; x++) {
335
336 if (r < n_obj) {
337 /* compute last remainder */
338 z = r * size;
339 n_obj = r;
340 }
341 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
342 pg, z, align)) {
343 return (1); /* failure */
344 }
345 /* Set beginning of current buffer */
346 buf = parm->dma_page_cache_ptr->buffer;
347 /* Make room for one DMA page cache and "n_dma_pg" pages */
348 parm->dma_page_cache_ptr++;
349 pg += n_dma_pg;
350
351 for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
352
353 /* Load sub-chunk into DMA */
354 if (usb_pc_dmamap_create(pc, size)) {
355 return (1); /* failure */
356 }
357 pc->buffer = USB_ADD_BYTES(buf, y * size);
358 pc->page_start = pg;
359
360 USB_MTX_LOCK(pc->tag_parent->mtx);
361 if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
362 USB_MTX_UNLOCK(pc->tag_parent->mtx);
363 return (1); /* failure */
364 }
365 USB_MTX_UNLOCK(pc->tag_parent->mtx);
366 }
367 }
368 }
369
370 parm->xfer_page_cache_ptr = pc;
371 parm->dma_page_ptr = pg;
372 return (0);
373 }
374 #endif
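/*
 * Illustrative sketch: a host controller driver's "xfer_setup" method
 * typically reserves its hardware descriptors through the helper
 * above, roughly like this (the descriptor type, alignment and count
 * below are hypothetical):
 *
 *	usbd_transfer_setup_sub_malloc(parm, &pc,
 *	    sizeof(struct xyz_td), XYZ_TD_ALIGN, n_td);
 *
 * The helper is reached twice from "usbd_transfer_setup()": first with
 * "parm->buf" equal to NULL, which only records the space needed, and
 * then again after the backing memory has been allocated.
 */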
375
376 /*------------------------------------------------------------------------*
377 * usbd_transfer_setup_sub - transfer setup subroutine
378 *
379 * This function must be called from the "xfer_setup" callback of the
380 * USB Host or Device controller driver when setting up a USB
381 * transfer. This function will setup correct packet sizes, buffer
382 * sizes, flags and more, that are stored in the "usb_xfer"
383 * structure.
384 *------------------------------------------------------------------------*/
385 void
386 usbd_transfer_setup_sub(struct usb_setup_params *parm)
387 {
388 enum {
389 REQ_SIZE = 8,
390 MIN_PKT = 8,
391 };
392 struct usb_xfer *xfer = parm->curr_xfer;
393 const struct usb_config *setup = parm->curr_setup;
394 struct usb_endpoint_ss_comp_descriptor *ecomp;
395 struct usb_endpoint_descriptor *edesc;
396 struct usb_std_packet_size std_size;
397 usb_frcount_t n_frlengths;
398 usb_frcount_t n_frbuffers;
399 usb_frcount_t x;
400 uint16_t maxp_old;
401 uint8_t type;
402 uint8_t zmps;
403
404 /*
405 * Sanity check. The following parameters must be initialized before
406 * calling this function.
407 */
408 if ((parm->hc_max_packet_size == 0) ||
409 (parm->hc_max_packet_count == 0) ||
410 (parm->hc_max_frame_size == 0)) {
411 parm->err = USB_ERR_INVAL;
412 goto done;
413 }
414 edesc = xfer->endpoint->edesc;
415 ecomp = xfer->endpoint->ecomp;
416
417 type = (edesc->bmAttributes & UE_XFERTYPE);
418
419 xfer->flags = setup->flags;
420 xfer->nframes = setup->frames;
421 xfer->timeout = setup->timeout;
422 xfer->callback = setup->callback;
423 xfer->interval = setup->interval;
424 xfer->endpointno = edesc->bEndpointAddress;
425 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
426 xfer->max_packet_count = 1;
427 /* make a shadow copy: */
428 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
429
430 parm->bufsize = setup->bufsize;
431
432 switch (parm->speed) {
433 case USB_SPEED_HIGH:
434 switch (type) {
435 case UE_ISOCHRONOUS:
436 case UE_INTERRUPT:
437 xfer->max_packet_count +=
438 (xfer->max_packet_size >> 11) & 3;
439
440 /* check for invalid max packet count */
441 if (xfer->max_packet_count > 3)
442 xfer->max_packet_count = 3;
443 break;
444 default:
445 break;
446 }
447 xfer->max_packet_size &= 0x7FF;
448 break;
449 case USB_SPEED_SUPER:
450 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
451
452 if (ecomp != NULL)
453 xfer->max_packet_count += ecomp->bMaxBurst;
454
455 if ((xfer->max_packet_count == 0) ||
456 (xfer->max_packet_count > 16))
457 xfer->max_packet_count = 16;
458
459 switch (type) {
460 case UE_CONTROL:
461 xfer->max_packet_count = 1;
462 break;
463 case UE_ISOCHRONOUS:
464 if (ecomp != NULL) {
465 uint8_t mult;
466
467 mult = UE_GET_SS_ISO_MULT(
468 ecomp->bmAttributes) + 1;
469 if (mult > 3)
470 mult = 3;
471
472 xfer->max_packet_count *= mult;
473 }
474 break;
475 default:
476 break;
477 }
478 xfer->max_packet_size &= 0x7FF;
479 break;
480 default:
481 break;
482 }
483 /* range check "max_packet_count" */
484
485 if (xfer->max_packet_count > parm->hc_max_packet_count) {
486 xfer->max_packet_count = parm->hc_max_packet_count;
487 }
488
489 /* store max packet size value before filtering */
490
491 maxp_old = xfer->max_packet_size;
492
493 /* filter "wMaxPacketSize" according to HC capabilities */
494
495 if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
496 (xfer->max_packet_size == 0)) {
497 xfer->max_packet_size = parm->hc_max_packet_size;
498 }
499 /* filter "wMaxPacketSize" according to standard sizes */
500
501 usbd_get_std_packet_size(&std_size, type, parm->speed);
502
503 if (std_size.range.min || std_size.range.max) {
504
505 if (xfer->max_packet_size < std_size.range.min) {
506 xfer->max_packet_size = std_size.range.min;
507 }
508 if (xfer->max_packet_size > std_size.range.max) {
509 xfer->max_packet_size = std_size.range.max;
510 }
511 } else {
512
513 if (xfer->max_packet_size >= std_size.fixed[3]) {
514 xfer->max_packet_size = std_size.fixed[3];
515 } else if (xfer->max_packet_size >= std_size.fixed[2]) {
516 xfer->max_packet_size = std_size.fixed[2];
517 } else if (xfer->max_packet_size >= std_size.fixed[1]) {
518 xfer->max_packet_size = std_size.fixed[1];
519 } else {
520 /* only one possibility left */
521 xfer->max_packet_size = std_size.fixed[0];
522 }
523 }
524
525 /*
526 * Check if the max packet size was outside its allowed range
527 * and clamped to a valid value:
528 */
529 if (maxp_old != xfer->max_packet_size)
530 xfer->flags_int.maxp_was_clamped = 1;
531
532 /* compute "max_frame_size" */
533
534 usbd_update_max_frame_size(xfer);
535
536 /* check interrupt interval and transfer pre-delay */
537
538 if (type == UE_ISOCHRONOUS) {
539
540 uint16_t frame_limit;
541
542 xfer->interval = 0; /* not used, must be zero */
543 xfer->flags_int.isochronous_xfr = 1; /* set flag */
544
545 if (xfer->timeout == 0) {
546 /*
547 * set a default timeout in
548 * case something goes wrong!
549 */
550 xfer->timeout = 1000 / 4;
551 }
552 switch (parm->speed) {
553 case USB_SPEED_LOW:
554 case USB_SPEED_FULL:
555 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
556 xfer->fps_shift = 0;
557 break;
558 default:
559 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
560 xfer->fps_shift = edesc->bInterval;
561 if (xfer->fps_shift > 0)
562 xfer->fps_shift--;
563 if (xfer->fps_shift > 3)
564 xfer->fps_shift = 3;
565 if (xfer->flags.pre_scale_frames != 0)
566 xfer->nframes <<= (3 - xfer->fps_shift);
567 break;
568 }
569
570 if (xfer->nframes > frame_limit) {
571 /*
572 * this is not going to work
573 * cross hardware
574 */
575 parm->err = USB_ERR_INVAL;
576 goto done;
577 }
578 if (xfer->nframes == 0) {
579 /*
580 * this is not a valid value
581 */
582 parm->err = USB_ERR_ZERO_NFRAMES;
583 goto done;
584 }
585 } else {
586
587 /*
588 * If a value is specified use that else check the
589 * endpoint descriptor!
590 */
591 if (type == UE_INTERRUPT) {
592
593 uint32_t temp;
594
595 if (xfer->interval == 0) {
596
597 xfer->interval = edesc->bInterval;
598
599 switch (parm->speed) {
600 case USB_SPEED_LOW:
601 case USB_SPEED_FULL:
602 break;
603 default:
604 /* 125us -> 1ms */
605 if (xfer->interval < 4)
606 xfer->interval = 1;
607 else if (xfer->interval > 16)
608 xfer->interval = (1 << (16 - 4));
609 else
610 xfer->interval =
611 (1 << (xfer->interval - 4));
612 break;
613 }
614 }
615
616 if (xfer->interval == 0) {
617 /*
618 * One millisecond is the smallest
619 * interval we support:
620 */
621 xfer->interval = 1;
622 }
623
624 xfer->fps_shift = 0;
625 temp = 1;
626
627 while ((temp != 0) && (temp < xfer->interval)) {
628 xfer->fps_shift++;
629 temp *= 2;
630 }
631
632 switch (parm->speed) {
633 case USB_SPEED_LOW:
634 case USB_SPEED_FULL:
635 break;
636 default:
637 xfer->fps_shift += 3;
638 break;
639 }
640 }
641 }
642
643 /*
644 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
645 * to be equal to zero when setting up USB transfers, since
646 * that would lead to a lot of extra code in the USB kernel.
647 */
648
649 if ((xfer->max_frame_size == 0) ||
650 (xfer->max_packet_size == 0)) {
651
652 zmps = 1;
653
654 if ((parm->bufsize <= MIN_PKT) &&
655 (type != UE_CONTROL) &&
656 (type != UE_BULK)) {
657
658 /* workaround */
659 xfer->max_packet_size = MIN_PKT;
660 xfer->max_packet_count = 1;
661 parm->bufsize = 0; /* automatic setup length */
662 usbd_update_max_frame_size(xfer);
663
664 } else {
665 parm->err = USB_ERR_ZERO_MAXP;
666 goto done;
667 }
668
669 } else {
670 zmps = 0;
671 }
672
673 /*
674 * check if we should setup a default
675 * length:
676 */
677
678 if (parm->bufsize == 0) {
679
680 parm->bufsize = xfer->max_frame_size;
681
682 if (type == UE_ISOCHRONOUS) {
683 parm->bufsize *= xfer->nframes;
684 }
685 }
686 /*
687 * check if we are about to setup a proxy
688 * type of buffer:
689 */
690
691 if (xfer->flags.proxy_buffer) {
692
693 /* round bufsize up */
694
695 parm->bufsize += (xfer->max_frame_size - 1);
696
697 if (parm->bufsize < xfer->max_frame_size) {
698 /* length wrapped around */
699 parm->err = USB_ERR_INVAL;
700 goto done;
701 }
702 /* subtract remainder */
703
704 parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
705
706 /* add length of USB device request structure, if any */
707
708 if (type == UE_CONTROL) {
709 parm->bufsize += REQ_SIZE; /* SETUP message */
710 }
711 }
712 xfer->max_data_length = parm->bufsize;
713
714 /* Setup "n_frlengths" and "n_frbuffers" */
715
716 if (type == UE_ISOCHRONOUS) {
717 n_frlengths = xfer->nframes;
718 n_frbuffers = 1;
719 } else {
720
721 if (type == UE_CONTROL) {
722 xfer->flags_int.control_xfr = 1;
723 if (xfer->nframes == 0) {
724 if (parm->bufsize <= REQ_SIZE) {
725 /*
726 * there will never be any data
727 * stage
728 */
729 xfer->nframes = 1;
730 } else {
731 xfer->nframes = 2;
732 }
733 }
734 } else {
735 if (xfer->nframes == 0) {
736 xfer->nframes = 1;
737 }
738 }
739
740 n_frlengths = xfer->nframes;
741 n_frbuffers = xfer->nframes;
742 }
743
744 /*
745 * check if we have room for the
746 * USB device request structure:
747 */
748
749 if (type == UE_CONTROL) {
750
751 if (xfer->max_data_length < REQ_SIZE) {
752 /* length wrapped around or too small bufsize */
753 parm->err = USB_ERR_INVAL;
754 goto done;
755 }
756 xfer->max_data_length -= REQ_SIZE;
757 }
758 /*
759 * Setup "frlengths" and shadow "frlengths" for keeping the
760 * initial frame lengths when a USB transfer is complete. This
761 * information is useful when computing isochronous offsets.
762 */
763 xfer->frlengths = parm->xfer_length_ptr;
764 parm->xfer_length_ptr += 2 * n_frlengths;
765
766 /* setup "frbuffers" */
767 xfer->frbuffers = parm->xfer_page_cache_ptr;
768 parm->xfer_page_cache_ptr += n_frbuffers;
769
770 /* initialize max frame count */
771 xfer->max_frame_count = xfer->nframes;
772
773 /*
774 * check if we need to setup
775 * a local buffer:
776 */
777
778 if (!xfer->flags.ext_buffer) {
779 #if USB_HAVE_BUSDMA
780 struct usb_page_search page_info;
781 struct usb_page_cache *pc;
782
783 if (usbd_transfer_setup_sub_malloc(parm,
784 &pc, parm->bufsize, 1, 1)) {
785 parm->err = USB_ERR_NOMEM;
786 } else if (parm->buf != NULL) {
787
788 usbd_get_page(pc, 0, &page_info);
789
790 xfer->local_buffer = page_info.buffer;
791
792 usbd_xfer_set_frame_offset(xfer, 0, 0);
793
794 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
795 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
796 }
797 }
798 #else
799 /* align data */
800 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
801
802 if (parm->buf != NULL) {
803 xfer->local_buffer =
804 USB_ADD_BYTES(parm->buf, parm->size[0]);
805
806 usbd_xfer_set_frame_offset(xfer, 0, 0);
807
808 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
809 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
810 }
811 }
812 parm->size[0] += parm->bufsize;
813
814 /* align data again */
815 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
816 #endif
817 }
818 /*
819 * Compute maximum buffer size
820 */
821
822 if (parm->bufsize_max < parm->bufsize) {
823 parm->bufsize_max = parm->bufsize;
824 }
825 #if USB_HAVE_BUSDMA
826 if (xfer->flags_int.bdma_enable) {
827 /*
828 * Setup "dma_page_ptr".
829 *
830 * Proof for formula below:
831 *
832 * Assume there are three USB frames having length "a", "b" and
833 * "c". These USB frames will at maximum need "z"
834 * "usb_page" structures. "z" is given by:
835 *
836 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
837 * ((c / USB_PAGE_SIZE) + 2);
838 *
839 * Constraining "a", "b" and "c" like this:
840 *
841 * (a + b + c) <= parm->bufsize
842 *
843 * We know that:
844 *
845 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
846 *
847 * Here is the general formula:
848 */
849 xfer->dma_page_ptr = parm->dma_page_ptr;
850 parm->dma_page_ptr += (2 * n_frbuffers);
851 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
852 }
853 #endif
854 if (zmps) {
855 /* correct maximum data length */
856 xfer->max_data_length = 0;
857 }
858 /* subtract USB frame remainder from "hc_max_frame_size" */
859
860 xfer->max_hc_frame_size =
861 (parm->hc_max_frame_size -
862 (parm->hc_max_frame_size % xfer->max_frame_size));
863
864 if (xfer->max_hc_frame_size == 0) {
865 parm->err = USB_ERR_INVAL;
866 goto done;
867 }
868
869 /* initialize frame buffers */
870
871 if (parm->buf) {
872 for (x = 0; x != n_frbuffers; x++) {
873 xfer->frbuffers[x].tag_parent =
874 &xfer->xroot->dma_parent_tag;
875 #if USB_HAVE_BUSDMA
876 if (xfer->flags_int.bdma_enable &&
877 (parm->bufsize_max > 0)) {
878
879 if (usb_pc_dmamap_create(
880 xfer->frbuffers + x,
881 parm->bufsize_max)) {
882 parm->err = USB_ERR_NOMEM;
883 goto done;
884 }
885 }
886 #endif
887 }
888 }
889 done:
890 if (parm->err) {
891 /*
892 * Set some dummy values so that we avoid division by zero:
893 */
894 xfer->max_hc_frame_size = 1;
895 xfer->max_frame_size = 1;
896 xfer->max_packet_size = 1;
897 xfer->max_data_length = 0;
898 xfer->nframes = 0;
899 xfer->max_frame_count = 0;
900 }
901 }
902
903 static uint8_t
904 usbd_transfer_setup_has_bulk(const struct usb_config *setup_start,
905 uint16_t n_setup)
906 {
907 while (n_setup--) {
908 uint8_t type = setup_start[n_setup].type;
909 if (type == UE_BULK || type == UE_BULK_INTR ||
910 type == UE_TYPE_ANY)
911 return (1);
912 }
913 return (0);
914 }
915
916 /*------------------------------------------------------------------------*
917 * usbd_transfer_setup - setup an array of USB transfers
918 *
919 * NOTE: You must always call "usbd_transfer_unsetup" after calling
920 * "usbd_transfer_setup" if success was returned.
921 *
922 * The idea is that the USB device driver should pre-allocate all its
923 * transfers by one call to this function.
924 *
925 * Return values:
926 * 0: Success
927 * Else: Failure
928 *------------------------------------------------------------------------*/
929 usb_error_t
930 usbd_transfer_setup(struct usb_device *udev,
931 const uint8_t *ifaces, struct usb_xfer **ppxfer,
932 const struct usb_config *setup_start, uint16_t n_setup,
933 void *priv_sc, struct mtx *xfer_mtx)
934 {
935 const struct usb_config *setup_end = setup_start + n_setup;
936 const struct usb_config *setup;
937 struct usb_setup_params *parm;
938 struct usb_endpoint *ep;
939 struct usb_xfer_root *info;
940 struct usb_xfer *xfer;
941 void *buf = NULL;
942 usb_error_t error = 0;
943 uint16_t n;
944 uint16_t refcount;
945 uint8_t do_unlock;
946
947 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
948 "usbd_transfer_setup can sleep!");
949
950 /* do some checking first */
951
952 if (n_setup == 0) {
953 DPRINTFN(6, "setup array has zero length!\n");
954 return (USB_ERR_INVAL);
955 }
956 if (ifaces == NULL) {
957 DPRINTFN(6, "ifaces array is NULL!\n");
958 return (USB_ERR_INVAL);
959 }
960 if (xfer_mtx == NULL) {
961 DPRINTFN(6, "using global lock\n");
962 xfer_mtx = &Giant;
963 }
964
965 /* more sanity checks */
966
967 for (setup = setup_start, n = 0;
968 setup != setup_end; setup++, n++) {
969 if (setup->bufsize == (usb_frlength_t)-1) {
970 error = USB_ERR_BAD_BUFSIZE;
971 DPRINTF("invalid bufsize\n");
972 }
973 if (setup->callback == NULL) {
974 error = USB_ERR_NO_CALLBACK;
975 DPRINTF("no callback\n");
976 }
977 ppxfer[n] = NULL;
978 }
979
980 if (error)
981 return (error);
982
983 /* Protect scratch area */
984 do_unlock = usbd_ctrl_lock(udev);
985
986 refcount = 0;
987 info = NULL;
988
989 parm = &udev->scratch.xfer_setup[0].parm;
990 memset(parm, 0, sizeof(*parm));
991
992 parm->udev = udev;
993 parm->speed = usbd_get_speed(udev);
994 parm->hc_max_packet_count = 1;
995
996 if (parm->speed >= USB_SPEED_MAX) {
997 parm->err = USB_ERR_INVAL;
998 goto done;
999 }
1000 /* setup all transfers */
1001
1002 while (1) {
1003
1004 if (buf) {
1005 /*
1006 * Initialize the "usb_xfer_root" structure,
1007 * which is common for all our USB transfers.
1008 */
1009 info = USB_ADD_BYTES(buf, 0);
1010
1011 info->memory_base = buf;
1012 info->memory_size = parm->size[0];
1013
1014 #if USB_HAVE_BUSDMA
1015 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
1016 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
1017 #endif
1018 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
1019 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
1020
1021 cv_init(&info->cv_drain, "WDRAIN");
1022
1023 info->xfer_mtx = xfer_mtx;
1024 #if USB_HAVE_BUSDMA
1025 usb_dma_tag_setup(&info->dma_parent_tag,
1026 parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
1027 xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits,
1028 parm->dma_tag_max);
1029 #endif
1030
1031 info->bus = udev->bus;
1032 info->udev = udev;
1033
1034 TAILQ_INIT(&info->done_q.head);
1035 info->done_q.command = &usbd_callback_wrapper;
1036 #if USB_HAVE_BUSDMA
1037 TAILQ_INIT(&info->dma_q.head);
1038 info->dma_q.command = &usb_bdma_work_loop;
1039 #endif
1040 info->done_m[0].hdr.pm_callback = &usb_callback_proc;
1041 info->done_m[0].xroot = info;
1042 info->done_m[1].hdr.pm_callback = &usb_callback_proc;
1043 info->done_m[1].xroot = info;
1044
1045 /*
1046 * In device side mode control endpoint
1047 * requests need to run from a separate
1048 * context, else there is a chance of
1049 * deadlock!
1050 */
1051 if (setup_start == usb_control_ep_cfg ||
1052 setup_start == usb_control_ep_quirk_cfg)
1053 info->done_p =
1054 USB_BUS_CONTROL_XFER_PROC(udev->bus);
1055 else if (xfer_mtx == &Giant)
1056 info->done_p =
1057 USB_BUS_GIANT_PROC(udev->bus);
1058 else if (usbd_transfer_setup_has_bulk(setup_start, n_setup))
1059 info->done_p =
1060 USB_BUS_NON_GIANT_BULK_PROC(udev->bus);
1061 else
1062 info->done_p =
1063 USB_BUS_NON_GIANT_ISOC_PROC(udev->bus);
1064 }
1065 /* reset sizes */
1066
1067 parm->size[0] = 0;
1068 parm->buf = buf;
1069 parm->size[0] += sizeof(info[0]);
1070
1071 for (setup = setup_start, n = 0;
1072 setup != setup_end; setup++, n++) {
1073
1074 /* skip USB transfers without callbacks: */
1075 if (setup->callback == NULL) {
1076 continue;
1077 }
1078 /* see if there is a matching endpoint */
1079 ep = usbd_get_endpoint(udev,
1080 ifaces[setup->if_index], setup);
1081
1082 /*
1083 * Check that the USB PIPE is valid and that
1084 * the endpoint mode is proper.
1085 *
1086 * Make sure we don't allocate a streams
1087 * transfer when such a combination is not
1088 * valid.
1089 */
1090 if ((ep == NULL) || (ep->methods == NULL) ||
1091 ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1092 (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1093 (setup->stream_id != 0 &&
1094 (setup->stream_id >= USB_MAX_EP_STREAMS ||
1095 (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1096 if (setup->flags.no_pipe_ok)
1097 continue;
1098 if ((setup->usb_mode != USB_MODE_DUAL) &&
1099 (setup->usb_mode != udev->flags.usb_mode))
1100 continue;
1101 parm->err = USB_ERR_NO_PIPE;
1102 goto done;
1103 }
1104
1105 /* align data properly */
1106 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1107
1108 /* store current setup pointer */
1109 parm->curr_setup = setup;
1110
1111 if (buf) {
1112 /*
1113 * Common initialization of the
1114 * "usb_xfer" structure.
1115 */
1116 xfer = USB_ADD_BYTES(buf, parm->size[0]);
1117 xfer->address = udev->address;
1118 xfer->priv_sc = priv_sc;
1119 xfer->xroot = info;
1120
1121 usb_callout_init_mtx(&xfer->timeout_handle,
1122 &udev->bus->bus_mtx, 0);
1123 } else {
1124 /*
1125 * Setup a dummy xfer, hence we are
1126 * writing to the "usb_xfer"
1127 * structure pointed to by "xfer"
1128 * before we have allocated any
1129 * memory:
1130 */
1131 xfer = &udev->scratch.xfer_setup[0].dummy;
1132 memset(xfer, 0, sizeof(*xfer));
1133 refcount++;
1134 }
1135
1136 /* set transfer endpoint pointer */
1137 xfer->endpoint = ep;
1138
1139 /* set transfer stream ID */
1140 xfer->stream_id = setup->stream_id;
1141
1142 parm->size[0] += sizeof(xfer[0]);
1143 parm->methods = xfer->endpoint->methods;
1144 parm->curr_xfer = xfer;
1145
1146 /*
1147 * Call the Host or Device controller transfer
1148 * setup routine:
1149 */
1150 (udev->bus->methods->xfer_setup) (parm);
1151
1152 /* check for error */
1153 if (parm->err)
1154 goto done;
1155
1156 if (buf) {
1157 /*
1158 * Increment the endpoint refcount. This
1159 * basically prevents setting a new
1160 * configuration and alternate setting
1161 * when USB transfers are in use on
1162 * the given interface. Search the USB
1163 * code for "endpoint->refcount_alloc" if you
1164 * want more information.
1165 */
1166 USB_BUS_LOCK(info->bus);
1167 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1168 parm->err = USB_ERR_INVAL;
1169
1170 xfer->endpoint->refcount_alloc++;
1171
1172 if (xfer->endpoint->refcount_alloc == 0)
1173 panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1174 USB_BUS_UNLOCK(info->bus);
1175
1176 /*
1177 * Whenever we set ppxfer[] then we
1178 * also need to increment the
1179 * "setup_refcount":
1180 */
1181 info->setup_refcount++;
1182
1183 /*
1184 * Transfer is successfully setup and
1185 * can be used:
1186 */
1187 ppxfer[n] = xfer;
1188 }
1189
1190 /* check for error */
1191 if (parm->err)
1192 goto done;
1193 }
1194
1195 if (buf != NULL || parm->err != 0)
1196 goto done;
1197
1198 /* if no transfers, nothing to do */
1199 if (refcount == 0)
1200 goto done;
1201
1202 /* align data properly */
1203 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1204
1205 /* store offset temporarily */
1206 parm->size[1] = parm->size[0];
1207
1208 /*
1209 * The number of DMA tags required depends on
1210 * the number of endpoints. The current estimate
1211 * for maximum number of DMA tags per endpoint
1212 * is three:
1213 * 1) for loading memory
1214 * 2) for allocating memory
1215 * 3) for fixing memory [UHCI]
1216 */
1217 parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1218
1219 /*
1220 * DMA tags for QH, TD, Data and more.
1221 */
1222 parm->dma_tag_max += 8;
1223
1224 parm->dma_tag_p += parm->dma_tag_max;
1225
1226 parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1227 ((uint8_t *)0);
1228
1229 /* align data properly */
1230 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1231
1232 /* store offset temporarily */
1233 parm->size[3] = parm->size[0];
1234
1235 parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1236 ((uint8_t *)0);
1237
1238 /* align data properly */
1239 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1240
1241 /* store offset temporarily */
1242 parm->size[4] = parm->size[0];
1243
1244 parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1245 ((uint8_t *)0);
1246
1247 /* store end offset temporarily */
1248 parm->size[5] = parm->size[0];
1249
1250 parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1251 ((uint8_t *)0);
1252
1253 /* store end offset temporarily */
1254
1255 parm->size[2] = parm->size[0];
1256
1257 /* align data properly */
1258 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1259
1260 parm->size[6] = parm->size[0];
1261
1262 parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1263 ((uint8_t *)0);
1264
1265 /* align data properly */
1266 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1267
1268 /* allocate zeroed memory */
1269 buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1270
1271 if (buf == NULL) {
1272 parm->err = USB_ERR_NOMEM;
1273 DPRINTFN(0, "cannot allocate memory block for "
1274 "configuration (%d bytes)\n",
1275 parm->size[0]);
1276 goto done;
1277 }
1278 parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1279 parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1280 parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1281 parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1282 parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1283 }
1284
1285 done:
1286 if (buf) {
1287 if (info->setup_refcount == 0) {
1288 /*
1289 * "usbd_transfer_unsetup_sub" will unlock
1290 * the bus mutex before returning !
1291 */
1292 USB_BUS_LOCK(info->bus);
1293
1294 /* something went wrong */
1295 usbd_transfer_unsetup_sub(info, 0);
1296 }
1297 }
1298
1299 /* check if any errors happened */
1300 if (parm->err)
1301 usbd_transfer_unsetup(ppxfer, n_setup);
1302
1303 error = parm->err;
1304
1305 if (do_unlock)
1306 usbd_ctrl_unlock(udev);
1307
1308 return (error);
1309 }
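/*------------------------------------------------------------------------*
 * Example - typical peripheral driver usage (illustrative sketch only)
 *
 * The "xyz_" names, the buffer size and the use of a single transfer
 * below are hypothetical and not part of this file:
 *
 *	static const struct usb_config xyz_config[1] = {
 *		[0] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_IN,
 *			.bufsize = 512,
 *			.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
 *			.callback = &xyz_bulk_read_callback,
 *		},
 *	};
 *
 *	static void
 *	xyz_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		switch (USB_GET_STATE(xfer)) {
 *		case USB_ST_TRANSFERRED:
 *			(consume the received data here, then fall through)
 *		case USB_ST_SETUP:
 *			usbd_xfer_set_frame_len(xfer, 0,
 *			    usbd_xfer_max_len(xfer));
 *			usbd_transfer_submit(xfer);
 *			break;
 *		default:
 *			(handle "error", typically USB_ERR_CANCELLED)
 *			break;
 *		}
 *	}
 *
 * Attach, with "iface_index" set to the interface the endpoint
 * belongs to:
 *
 *	error = usbd_transfer_setup(sc->sc_udev, &iface_index,
 *	    sc->sc_xfer, xyz_config, 1, sc, &sc->sc_mtx);
 *
 * Detach, which must undo every successful setup:
 *
 *	usbd_transfer_unsetup(sc->sc_xfer, 1);
 *------------------------------------------------------------------------*/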
1310
1311 /*------------------------------------------------------------------------*
1312 * usbd_transfer_unsetup_sub - factored out code
1313 *------------------------------------------------------------------------*/
1314 static void
1315 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1316 {
1317 #if USB_HAVE_BUSDMA
1318 struct usb_page_cache *pc;
1319 #endif
1320
1321 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1322
1323 /* wait for any outstanding DMA operations */
1324
1325 if (needs_delay) {
1326 usb_timeout_t temp;
1327 temp = usbd_get_dma_delay(info->udev);
1328 if (temp != 0) {
1329 usb_pause_mtx(&info->bus->bus_mtx,
1330 USB_MS_TO_TICKS(temp));
1331 }
1332 }
1333
1334 /* make sure that our done messages are not queued anywhere */
1335 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1336
1337 USB_BUS_UNLOCK(info->bus);
1338
1339 #if USB_HAVE_BUSDMA
1340 /* free DMA'able memory, if any */
1341 pc = info->dma_page_cache_start;
1342 while (pc != info->dma_page_cache_end) {
1343 usb_pc_free_mem(pc);
1344 pc++;
1345 }
1346
1347 /* free DMA maps in all "xfer->frbuffers" */
1348 pc = info->xfer_page_cache_start;
1349 while (pc != info->xfer_page_cache_end) {
1350 usb_pc_dmamap_destroy(pc);
1351 pc++;
1352 }
1353
1354 /* free all DMA tags */
1355 usb_dma_tag_unsetup(&info->dma_parent_tag);
1356 #endif
1357
1358 cv_destroy(&info->cv_drain);
1359
1360 /*
1361 * free the "memory_base" last, since the "info" structure is
1362 * contained within the "memory_base"!
1363 */
1364 free(info->memory_base, M_USB);
1365 }
1366
1367 /*------------------------------------------------------------------------*
1368 * usbd_transfer_unsetup - unsetup/free an array of USB transfers
1369 *
1370 * NOTE: All USB transfers in progress will get called back passing
1371 * the error code "USB_ERR_CANCELLED" before this function
1372 * returns.
1373 *------------------------------------------------------------------------*/
1374 void
1375 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1376 {
1377 struct usb_xfer *xfer;
1378 struct usb_xfer_root *info;
1379 uint8_t needs_delay = 0;
1380
1381 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1382 "usbd_transfer_unsetup can sleep!");
1383
1384 while (n_setup--) {
1385 xfer = pxfer[n_setup];
1386
1387 if (xfer == NULL)
1388 continue;
1389
1390 info = xfer->xroot;
1391
1392 USB_XFER_LOCK(xfer);
1393 USB_BUS_LOCK(info->bus);
1394
1395 /*
1396 * HINT: when you start/stop a transfer, it might be a
1397 * good idea to directly use the "pxfer[]" structure:
1398 *
1399 * usbd_transfer_start(sc->pxfer[0]);
1400 * usbd_transfer_stop(sc->pxfer[0]);
1401 *
1402 * That way, if your code has many parts that will not
1403 * stop running under the same lock, in other words
1404 * "xfer_mtx", the usbd_transfer_start and
1405 * usbd_transfer_stop functions will simply return
1406 * when they detect a NULL pointer argument.
1407 *
1408 * To avoid any races we clear the "pxfer[]" pointer
1409 * while holding the private mutex of the driver:
1410 */
1411 pxfer[n_setup] = NULL;
1412
1413 USB_BUS_UNLOCK(info->bus);
1414 USB_XFER_UNLOCK(xfer);
1415
1416 usbd_transfer_drain(xfer);
1417
1418 #if USB_HAVE_BUSDMA
1419 if (xfer->flags_int.bdma_enable)
1420 needs_delay = 1;
1421 #endif
1422 /*
1423 * NOTE: default endpoint does not have an
1424 * interface, even if endpoint->iface_index == 0
1425 */
1426 USB_BUS_LOCK(info->bus);
1427 xfer->endpoint->refcount_alloc--;
1428 USB_BUS_UNLOCK(info->bus);
1429
1430 usb_callout_drain(&xfer->timeout_handle);
1431
1432 USB_BUS_LOCK(info->bus);
1433
1434 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1435 "reference count\n"));
1436
1437 info->setup_refcount--;
1438
1439 if (info->setup_refcount == 0) {
1440 usbd_transfer_unsetup_sub(info,
1441 needs_delay);
1442 } else {
1443 USB_BUS_UNLOCK(info->bus);
1444 }
1445 }
1446 }
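/*
 * Example: like "usbd_transfer_setup()" this function can sleep and is
 * typically called from the driver's detach routine without the
 * transfer mutex held, e.g.:
 *
 *	usbd_transfer_unsetup(sc->sc_xfer, XYZ_N_TRANSFER);
 *
 * where "XYZ_N_TRANSFER" is the hypothetical size of the "sc_xfer"
 * array that was passed to "usbd_transfer_setup()".
 */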
1447
1448 /*------------------------------------------------------------------------*
1449 * usbd_control_transfer_init - factored out code
1450 *
1451 * In USB Device Mode we have to wait for the SETUP packet which
1452 * contains the "struct usb_device_request" structure, before we can
1453 * transfer any data. In USB Host Mode we already have the SETUP
1454 * packet at the moment the USB transfer is started. This leads us to
1455 * having to setup the USB transfer at two different places in
1456 * time. This function just contains factored out control transfer
1457 * initialisation code, so that we don't duplicate the code.
1458 *------------------------------------------------------------------------*/
1459 static void
1460 usbd_control_transfer_init(struct usb_xfer *xfer)
1461 {
1462 struct usb_device_request req;
1463
1464 /* copy out the USB request header */
1465
1466 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1467
1468 /* setup remainder */
1469
1470 xfer->flags_int.control_rem = UGETW(req.wLength);
1471
1472 /* copy direction to endpoint variable */
1473
1474 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1475 xfer->endpointno |=
1476 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1477 }
1478
1479 /*------------------------------------------------------------------------*
1480 * usbd_control_transfer_did_data
1481 *
1482 * This function returns non-zero if a control endpoint has
1483 * transferred the first DATA packet after the SETUP packet.
1484 * Else it returns zero.
1485 *------------------------------------------------------------------------*/
1486 static uint8_t
1487 usbd_control_transfer_did_data(struct usb_xfer *xfer)
1488 {
1489 struct usb_device_request req;
1490
1491 /* SETUP packet is not yet sent */
1492 if (xfer->flags_int.control_hdr != 0)
1493 return (0);
1494
1495 /* copy out the USB request header */
1496 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1497
1498 /* compare remainder to the initial value */
1499 return (xfer->flags_int.control_rem != UGETW(req.wLength));
1500 }
1501
1502 /*------------------------------------------------------------------------*
1503 * usbd_setup_ctrl_transfer
1504 *
1505 * This function handles initialisation of control transfers. Control
1506 * transfers are special in that they can both transmit
1507 * and receive data.
1508 *
1509 * Return values:
1510 * 0: Success
1511 * Else: Failure
1512 *------------------------------------------------------------------------*/
1513 static int
1514 usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1515 {
1516 usb_frlength_t len;
1517
1518 /* Check for control endpoint stall */
1519 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1520 /* the control transfer is no longer active */
1521 xfer->flags_int.control_stall = 1;
1522 xfer->flags_int.control_act = 0;
1523 } else {
1524 /* don't stall control transfer by default */
1525 xfer->flags_int.control_stall = 0;
1526 }
1527
1528 /* Check for invalid number of frames */
1529 if (xfer->nframes > 2) {
1530 /*
1531 * If you need to split a control transfer, you
1532 * have to do one part at a time. Only with
1533 * non-control transfers you can do multiple
1534 * parts a time.
1535 */
1536 DPRINTFN(0, "Too many frames: %u\n",
1537 (unsigned int)xfer->nframes);
1538 goto error;
1539 }
1540
1541 /*
1542 * Check if there is a control
1543 * transfer in progress:
1544 */
1545 if (xfer->flags_int.control_act) {
1546
1547 if (xfer->flags_int.control_hdr) {
1548
1549 /* clear send header flag */
1550
1551 xfer->flags_int.control_hdr = 0;
1552
1553 /* setup control transfer */
1554 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1555 usbd_control_transfer_init(xfer);
1556 }
1557 }
1558 /* get data length */
1559
1560 len = xfer->sumlen;
1561
1562 } else {
1563
1564 /* the size of the SETUP structure is hardcoded ! */
1565
1566 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1567 DPRINTFN(0, "Wrong framelength %u != %zu\n",
1568 xfer->frlengths[0], sizeof(struct
1569 usb_device_request));
1570 goto error;
1571 }
1572 /* check USB mode */
1573 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1574
1575 /* check number of frames */
1576 if (xfer->nframes != 1) {
1577 /*
1578 * We need to receive the setup
1579 * message first so that we know the
1580 * data direction!
1581 */
1582 DPRINTF("Misconfigured transfer\n");
1583 goto error;
1584 }
1585 /*
1586 * Set a dummy "control_rem" value. This
1587 * variable will be overwritten later by a
1588 * call to "usbd_control_transfer_init()" !
1589 */
1590 xfer->flags_int.control_rem = 0xFFFF;
1591 } else {
1592
1593 /* setup "endpoint" and "control_rem" */
1594
1595 usbd_control_transfer_init(xfer);
1596 }
1597
1598 /* set transfer-header flag */
1599
1600 xfer->flags_int.control_hdr = 1;
1601
1602 /* get data length */
1603
1604 len = (xfer->sumlen - sizeof(struct usb_device_request));
1605 }
1606
1607 /* update did data flag */
1608
1609 xfer->flags_int.control_did_data =
1610 usbd_control_transfer_did_data(xfer);
1611
1612 /* check if there is a length mismatch */
1613
1614 if (len > xfer->flags_int.control_rem) {
1615 DPRINTFN(0, "Length (%d) greater than "
1616 "remaining length (%d)\n", len,
1617 xfer->flags_int.control_rem);
1618 goto error;
1619 }
1620 /* check if we are doing a short transfer */
1621
1622 if (xfer->flags.force_short_xfer) {
1623 xfer->flags_int.control_rem = 0;
1624 } else {
1625 if ((len != xfer->max_data_length) &&
1626 (len != xfer->flags_int.control_rem) &&
1627 (xfer->nframes != 1)) {
1628 DPRINTFN(0, "Short control transfer without "
1629 "force_short_xfer set\n");
1630 goto error;
1631 }
1632 xfer->flags_int.control_rem -= len;
1633 }
1634
1635 /* the status part is executed when "control_act" is 0 */
1636
1637 if ((xfer->flags_int.control_rem > 0) ||
1638 (xfer->flags.manual_status)) {
1639 /* don't execute the STATUS stage yet */
1640 xfer->flags_int.control_act = 1;
1641
1642 /* sanity check */
1643 if ((!xfer->flags_int.control_hdr) &&
1644 (xfer->nframes == 1)) {
1645 /*
1646 * This is not a valid operation!
1647 */
1648 DPRINTFN(0, "Invalid parameter "
1649 "combination\n");
1650 goto error;
1651 }
1652 } else {
1653 /* time to execute the STATUS stage */
1654 xfer->flags_int.control_act = 0;
1655 }
1656 return (0); /* success */
1657
1658 error:
1659 return (1); /* failure */
1660 }
1661
1662 /*------------------------------------------------------------------------*
1663 * usbd_transfer_submit - start USB hardware for the given transfer
1664 *
1665 * This function should only be called from the USB callback.
1666 *------------------------------------------------------------------------*/
1667 void
1668 usbd_transfer_submit(struct usb_xfer *xfer)
1669 {
1670 struct usb_xfer_root *info;
1671 struct usb_bus *bus;
1672 usb_frcount_t x;
1673
1674 info = xfer->xroot;
1675 bus = info->bus;
1676
1677 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1678 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1679 "read" : "write");
1680
1681 #ifdef USB_DEBUG
1682 if (USB_DEBUG_VAR > 0) {
1683 USB_BUS_LOCK(bus);
1684
1685 usb_dump_endpoint(xfer->endpoint);
1686
1687 USB_BUS_UNLOCK(bus);
1688 }
1689 #endif
1690
1691 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1692 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1693
1694 /* Only open the USB transfer once! */
1695 if (!xfer->flags_int.open) {
1696 xfer->flags_int.open = 1;
1697
1698 DPRINTF("open\n");
1699
1700 USB_BUS_LOCK(bus);
1701 (xfer->endpoint->methods->open) (xfer);
1702 USB_BUS_UNLOCK(bus);
1703 }
1704 /* set "transferring" flag */
1705 xfer->flags_int.transferring = 1;
1706
1707 #if USB_HAVE_POWERD
1708 /* increment power reference */
1709 usbd_transfer_power_ref(xfer, 1);
1710 #endif
1711 /*
1712 * Check if the transfer is waiting on a queue, most
1713 * frequently the "done_q":
1714 */
1715 if (xfer->wait_queue) {
1716 USB_BUS_LOCK(bus);
1717 usbd_transfer_dequeue(xfer);
1718 USB_BUS_UNLOCK(bus);
1719 }
1720 /* clear "did_dma_delay" flag */
1721 xfer->flags_int.did_dma_delay = 0;
1722
1723 /* clear "did_close" flag */
1724 xfer->flags_int.did_close = 0;
1725
1726 #if USB_HAVE_BUSDMA
1727 /* clear "bdma_setup" flag */
1728 xfer->flags_int.bdma_setup = 0;
1729 #endif
1730 /* by default we cannot cancel any USB transfer immediately */
1731 xfer->flags_int.can_cancel_immed = 0;
1732
1733 /* clear lengths and frame counts by default */
1734 xfer->sumlen = 0;
1735 xfer->actlen = 0;
1736 xfer->aframes = 0;
1737
1738 /* clear any previous errors */
1739 xfer->error = 0;
1740
1741 /* Check if the device is still alive */
1742 if (info->udev->state < USB_STATE_POWERED) {
1743 USB_BUS_LOCK(bus);
1744 /*
1745 * Must return cancelled error code else
1746 * device drivers can hang.
1747 */
1748 usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1749 USB_BUS_UNLOCK(bus);
1750 return;
1751 }
1752
1753 /* sanity check */
1754 if (xfer->nframes == 0) {
1755 if (xfer->flags.stall_pipe) {
1756 /*
1757 * Special case - want to stall without transferring
1758 * any data:
1759 */
1760 DPRINTF("xfer=%p nframes=0: stall "
1761 "or clear stall!\n", xfer);
1762 USB_BUS_LOCK(bus);
1763 xfer->flags_int.can_cancel_immed = 1;
1764 /* start the transfer */
1765 usb_command_wrapper(&xfer->endpoint->
1766 endpoint_q[xfer->stream_id], xfer);
1767 USB_BUS_UNLOCK(bus);
1768 return;
1769 }
1770 USB_BUS_LOCK(bus);
1771 usbd_transfer_done(xfer, USB_ERR_INVAL);
1772 USB_BUS_UNLOCK(bus);
1773 return;
1774 }
1775 /* compute some variables */
1776
1777 for (x = 0; x != xfer->nframes; x++) {
1778 /* make a copy of the frlenghts[] */
1779 xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1780 /* compute total transfer length */
1781 xfer->sumlen += xfer->frlengths[x];
1782 if (xfer->sumlen < xfer->frlengths[x]) {
1783 /* length wrapped around */
1784 USB_BUS_LOCK(bus);
1785 usbd_transfer_done(xfer, USB_ERR_INVAL);
1786 USB_BUS_UNLOCK(bus);
1787 return;
1788 }
1789 }
1790
1791 /* clear some internal flags */
1792
1793 xfer->flags_int.short_xfer_ok = 0;
1794 xfer->flags_int.short_frames_ok = 0;
1795
1796 /* check if this is a control transfer */
1797
1798 if (xfer->flags_int.control_xfr) {
1799
1800 if (usbd_setup_ctrl_transfer(xfer)) {
1801 USB_BUS_LOCK(bus);
1802 usbd_transfer_done(xfer, USB_ERR_STALLED);
1803 USB_BUS_UNLOCK(bus);
1804 return;
1805 }
1806 }
1807 /*
1808 * Setup filtered version of some transfer flags,
1809 * in case of data read direction
1810 */
1811 if (USB_GET_DATA_ISREAD(xfer)) {
1812
1813 if (xfer->flags.short_frames_ok) {
1814 xfer->flags_int.short_xfer_ok = 1;
1815 xfer->flags_int.short_frames_ok = 1;
1816 } else if (xfer->flags.short_xfer_ok) {
1817 xfer->flags_int.short_xfer_ok = 1;
1818
1819 /* check for control transfer */
1820 if (xfer->flags_int.control_xfr) {
1821 /*
1822 * 1) Control transfers do not support
1823 * reception of multiple short USB
1824 * frames in host mode and device side
1825 * mode, with exception of:
1826 *
1827 * 2) Due to sometimes buggy device
1828 * side firmware we need to do a
1829 * STATUS stage in case of short
1830 * control transfers in USB host mode.
1831 * The STATUS stage then becomes the
1832 * "alt_next" to the DATA stage.
1833 */
1834 xfer->flags_int.short_frames_ok = 1;
1835 }
1836 }
1837 }
1838 /*
1839 * Check if BUS-DMA support is enabled and try to load virtual
1840 * buffers into DMA, if any:
1841 */
1842 #if USB_HAVE_BUSDMA
1843 if (xfer->flags_int.bdma_enable) {
1844 /* insert the USB transfer last in the BUS-DMA queue */
1845 usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1846 return;
1847 }
1848 #endif
1849 /*
1850 * Enter the USB transfer into the Host Controller or
1851 * Device Controller schedule:
1852 */
1853 usbd_pipe_enter(xfer);
1854 }
1855
1856 /*------------------------------------------------------------------------*
1857 * usbd_pipe_enter - factored out code
1858 *------------------------------------------------------------------------*/
1859 void
1860 usbd_pipe_enter(struct usb_xfer *xfer)
1861 {
1862 struct usb_endpoint *ep;
1863
1864 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1865
1866 USB_BUS_LOCK(xfer->xroot->bus);
1867
1868 ep = xfer->endpoint;
1869
1870 DPRINTF("enter\n");
1871
1872 /* the transfer can now be cancelled */
1873 xfer->flags_int.can_cancel_immed = 1;
1874
1875 /* enter the transfer */
1876 (ep->methods->enter) (xfer);
1877
1878 /* check for transfer error */
1879 if (xfer->error) {
1880 /* some error has happened */
1881 usbd_transfer_done(xfer, 0);
1882 USB_BUS_UNLOCK(xfer->xroot->bus);
1883 return;
1884 }
1885
1886 /* start the transfer */
1887 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1888 USB_BUS_UNLOCK(xfer->xroot->bus);
1889 }
1890
1891 /*------------------------------------------------------------------------*
1892 * usbd_transfer_start - start a USB transfer
1893 *
1894 * NOTE: Calling this function more than one time will only
1895 * result in a single transfer start, until the USB transfer
1896 * completes.
1897 *------------------------------------------------------------------------*/
1898 void
1899 usbd_transfer_start(struct usb_xfer *xfer)
1900 {
1901 if (xfer == NULL) {
1902 /* transfer is gone */
1903 return;
1904 }
1905 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1906
1907 /* mark the USB transfer started */
1908
1909 if (!xfer->flags_int.started) {
1910 /* lock the BUS lock to avoid races updating flags_int */
1911 USB_BUS_LOCK(xfer->xroot->bus);
1912 xfer->flags_int.started = 1;
1913 USB_BUS_UNLOCK(xfer->xroot->bus);
1914 }
1915 /* check if the USB transfer callback is already transferring */
1916
1917 if (xfer->flags_int.transferring) {
1918 return;
1919 }
1920 USB_BUS_LOCK(xfer->xroot->bus);
1921 /* call the USB transfer callback */
1922 usbd_callback_ss_done_defer(xfer);
1923 USB_BUS_UNLOCK(xfer->xroot->bus);
1924 }
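/*
 * Example: the start/stop pair is always invoked with the "xfer_mtx"
 * that was given to "usbd_transfer_setup()" held (the "sc_mtx" name
 * below is illustrative):
 *
 *	mtx_lock(&sc->sc_mtx);
 *	usbd_transfer_start(sc->sc_xfer[0]);
 *	mtx_unlock(&sc->sc_mtx);
 */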
1925
1926 /*------------------------------------------------------------------------*
1927 * usbd_transfer_stop - stop a USB transfer
1928 *
1929 * NOTE: Calling this function more than one time will only
1930 * result in a single transfer stop.
1931 * NOTE: When this function returns it is not safe to free nor
1932 * reuse any DMA buffers. See "usbd_transfer_drain()".
1933 *------------------------------------------------------------------------*/
1934 void
1935 usbd_transfer_stop(struct usb_xfer *xfer)
1936 {
1937 struct usb_endpoint *ep;
1938
1939 if (xfer == NULL) {
1940 /* transfer is gone */
1941 return;
1942 }
1943 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1944
1945 /* check if the USB transfer was ever opened */
1946
1947 if (!xfer->flags_int.open) {
1948 if (xfer->flags_int.started) {
1949 /* nothing to do except clearing the "started" flag */
1950 /* lock the BUS lock to avoid races updating flags_int */
1951 USB_BUS_LOCK(xfer->xroot->bus);
1952 xfer->flags_int.started = 0;
1953 USB_BUS_UNLOCK(xfer->xroot->bus);
1954 }
1955 return;
1956 }
1957 /* try to stop the current USB transfer */
1958
1959 USB_BUS_LOCK(xfer->xroot->bus);
1960 /* override any previous error */
1961 xfer->error = USB_ERR_CANCELLED;
1962
1963 /*
1964 * Clear "open" and "started" when both private and USB lock
1965 * is locked so that we don't get a race updating "flags_int"
1966 */
1967 xfer->flags_int.open = 0;
1968 xfer->flags_int.started = 0;
1969
1970 /*
1971 * Check if we can cancel the USB transfer immediately.
1972 */
1973 if (xfer->flags_int.transferring) {
1974 if (xfer->flags_int.can_cancel_immed &&
1975 (!xfer->flags_int.did_close)) {
1976 DPRINTF("close\n");
1977 /*
1978 * The following will lead to a USB_ERR_CANCELLED
1979 * error code being passed to the USB callback.
1980 */
1981 (xfer->endpoint->methods->close) (xfer);
1982 /* only close once */
1983 xfer->flags_int.did_close = 1;
1984 } else {
1985 /* need to wait for the next done callback */
1986 }
1987 } else {
1988 DPRINTF("close\n");
1989
1990 /* close here and now */
1991 (xfer->endpoint->methods->close) (xfer);
1992
1993 /*
1994 * Any additional DMA delay is done by
1995 * "usbd_transfer_unsetup()".
1996 */
1997
1998 /*
1999 * Special case. Check if we need to restart a blocked
2000 * endpoint.
2001 */
2002 ep = xfer->endpoint;
2003
2004 /*
2005 * If the current USB transfer is completing we need
2006 * to start the next one:
2007 */
2008 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
2009 usb_command_wrapper(
2010 &ep->endpoint_q[xfer->stream_id], NULL);
2011 }
2012 }
2013
2014 USB_BUS_UNLOCK(xfer->xroot->bus);
2015 }
2016
2017 /*------------------------------------------------------------------------*
2018 * usbd_transfer_pending
2019 *
2020  * This function will check if an USB transfer is pending, which is a
2021  * little bit complicated!
2022 * Return values:
2023 * 0: Not pending
2024 * 1: Pending: The USB transfer will receive a callback in the future.
2025 *------------------------------------------------------------------------*/
2026 uint8_t
2027 usbd_transfer_pending(struct usb_xfer *xfer)
2028 {
2029 struct usb_xfer_root *info;
2030 struct usb_xfer_queue *pq;
2031
2032 if (xfer == NULL) {
2033 /* transfer is gone */
2034 return (0);
2035 }
2036 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2037
2038 if (xfer->flags_int.transferring) {
2039 /* trivial case */
2040 return (1);
2041 }
2042 USB_BUS_LOCK(xfer->xroot->bus);
2043 if (xfer->wait_queue) {
2044 /* we are waiting on a queue somewhere */
2045 USB_BUS_UNLOCK(xfer->xroot->bus);
2046 return (1);
2047 }
2048 info = xfer->xroot;
2049 pq = &info->done_q;
2050
2051 if (pq->curr == xfer) {
2052 /* we are currently scheduled for callback */
2053 USB_BUS_UNLOCK(xfer->xroot->bus);
2054 return (1);
2055 }
2056 /* we are not pending */
2057 USB_BUS_UNLOCK(xfer->xroot->bus);
2058 return (0);
2059 }
2060
2061 /*------------------------------------------------------------------------*
2062 * usbd_transfer_drain
2063 *
2064 * This function will stop the USB transfer and wait for any
2065 * additional BUS-DMA and HW-DMA operations to complete. Buffers that
2066  * are loaded into DMA can safely be freed or reused after this
2067 * function has returned.
2068 *------------------------------------------------------------------------*/
2069 void
2070 usbd_transfer_drain(struct usb_xfer *xfer)
2071 {
2072 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2073 "usbd_transfer_drain can sleep!");
2074
2075 if (xfer == NULL) {
2076 /* transfer is gone */
2077 return;
2078 }
2079 if (xfer->xroot->xfer_mtx != &Giant) {
2080 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
2081 }
2082 USB_XFER_LOCK(xfer);
2083
2084 usbd_transfer_stop(xfer);
2085
2086 while (usbd_transfer_pending(xfer) ||
2087 xfer->flags_int.doing_callback) {
2088
2089 /*
2090 		 * The callback is allowed to drop its transfer
2091 		 * mutex. In that case, checking only
2092 * "usbd_transfer_pending()" is not enough to tell if
2093 * the USB transfer is fully drained. We also need to
2094 * check the internal "doing_callback" flag.
2095 */
2096 xfer->flags_int.draining = 1;
2097
2098 /*
2099 * Wait until the current outstanding USB
2100 * transfer is complete !
2101 */
2102 cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
2103 }
2104 USB_XFER_UNLOCK(xfer);
2105 }
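
/*
 * Example (editor's sketch, not from the original source): draining a
 * transfer in a detach or stop path so that DMA buffers may be freed.
 * The transfer mutex must not be held when calling
 * "usbd_transfer_drain()". The "xyz_softc" fields are hypothetical:
 *
 *	static void
 *	xyz_stop_read(struct xyz_softc *sc)
 *	{
 *		// stops the transfer and waits for DMA to settle
 *		usbd_transfer_drain(sc->sc_xfer[XYZ_BULK_RD]);
 *
 *		// now it is safe to free or reuse the loaded buffer
 *		free(sc->sc_rx_buf, M_DEVBUF);
 *		sc->sc_rx_buf = NULL;
 *	}
 */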
2106
2107 struct usb_page_cache *
2108 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
2109 {
2110 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2111
2112 return (&xfer->frbuffers[frindex]);
2113 }
2114
2115 void *
2116 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2117 {
2118 struct usb_page_search page_info;
2119
2120 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2121
2122 usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2123 return (page_info.buffer);
2124 }
2125
2126 /*------------------------------------------------------------------------*
2127 * usbd_xfer_get_fps_shift
2128 *
2129 * The following function is only useful for isochronous transfers. It
2130 * returns how many times the frame execution rate has been shifted
2131 * down.
2132 *
2133 * Return value:
2134 * Success: 0..3
2135 * Failure: 0
2136 *------------------------------------------------------------------------*/
2137 uint8_t
2138 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2139 {
2140 return (xfer->fps_shift);
2141 }
2142
2143 usb_frlength_t
2144 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2145 {
2146 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2147
2148 return (xfer->frlengths[frindex]);
2149 }
2150
2151 /*------------------------------------------------------------------------*
2152 * usbd_xfer_set_frame_data
2153 *
2154  * This function sets the pointer of the buffer that should be
2155 * loaded directly into DMA for the given USB frame. Passing "ptr"
2156 * equal to NULL while the corresponding "frlength" is greater
2157 * than zero gives undefined results!
2158 *------------------------------------------------------------------------*/
2159 void
2160 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2161 void *ptr, usb_frlength_t len)
2162 {
2163 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2164
2165 /* set virtual address to load and length */
2166 xfer->frbuffers[frindex].buffer = ptr;
2167 usbd_xfer_set_frame_len(xfer, frindex, len);
2168 }
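
/*
 * Example (editor's sketch, not from the original source): loading an
 * externally allocated buffer into frame zero from the USB_ST_SETUP
 * case of a callback. This assumes the transfer was configured with
 * ".flags = {.ext_buffer = 1,}". The "sc_tx_buf" and "sc_tx_len"
 * fields are hypothetical:
 *
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_frame_data(xfer, 0, sc->sc_tx_buf, sc->sc_tx_len);
 *		usbd_xfer_set_frames(xfer, 1);
 *		usbd_transfer_submit(xfer);
 *		break;
 */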
2169
2170 void
2171 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2172 void **ptr, int *len)
2173 {
2174 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2175
2176 if (ptr != NULL)
2177 *ptr = xfer->frbuffers[frindex].buffer;
2178 if (len != NULL)
2179 *len = xfer->frlengths[frindex];
2180 }
2181
2182 /*------------------------------------------------------------------------*
2183 * usbd_xfer_old_frame_length
2184 *
2185 * This function returns the framelength of the given frame at the
2186 * time the transfer was submitted. This function can be used to
2187 * compute the starting data pointer of the next isochronous frame
2188 * when an isochronous transfer has completed.
2189 *------------------------------------------------------------------------*/
2190 usb_frlength_t
2191 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2192 {
2193 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2194
2195 return (xfer->frlengths[frindex + xfer->max_frame_count]);
2196 }
2197
2198 void
2199 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2200 int *nframes)
2201 {
2202 if (actlen != NULL)
2203 *actlen = xfer->actlen;
2204 if (sumlen != NULL)
2205 *sumlen = xfer->sumlen;
2206 if (aframes != NULL)
2207 *aframes = xfer->aframes;
2208 if (nframes != NULL)
2209 *nframes = xfer->nframes;
2210 }
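
/*
 * Example (editor's sketch, not from the original source): a minimal
 * bulk-IN callback that uses "usbd_xfer_status()" to fetch the actual
 * length of the completed transfer. "xyz_softc" and "xyz_rx_data()"
 * are hypothetical:
 *
 *	static void
 *	xyz_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct xyz_softc *sc = usbd_xfer_softc(xfer);
 *		int actlen;
 *
 *		usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *
 *		switch (USB_GET_STATE(xfer)) {
 *		case USB_ST_TRANSFERRED:
 *			// "actlen" bytes are now valid in frame zero
 *			xyz_rx_data(sc, usbd_xfer_get_frame(xfer, 0), actlen);
 *			// FALLTHROUGH
 *		case USB_ST_SETUP:
 *			usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
 *			usbd_transfer_submit(xfer);
 *			break;
 *		default:
 *			// error handling omitted, see "usbd_xfer_set_stall()"
 *			break;
 *		}
 *	}
 */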
2211
2212 /*------------------------------------------------------------------------*
2213 * usbd_xfer_set_frame_offset
2214 *
2215 * This function sets the frame data buffer offset relative to the beginning
2216 * of the USB DMA buffer allocated for this USB transfer.
2217 *------------------------------------------------------------------------*/
2218 void
2219 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2220 usb_frcount_t frindex)
2221 {
2222 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2223 "when the USB buffer is external\n"));
2224 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2225
2226 /* set virtual address to load */
2227 xfer->frbuffers[frindex].buffer =
2228 USB_ADD_BYTES(xfer->local_buffer, offset);
2229 }
2230
2231 void
2232 usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2233 {
2234 xfer->interval = i;
2235 }
2236
2237 void
2238 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2239 {
2240 xfer->timeout = t;
2241 }
2242
2243 void
2244 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2245 {
2246 xfer->nframes = n;
2247 }
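
/*
 * Example (editor's sketch, not from the original source): adjusting
 * the timeout and frame count from the USB_ST_SETUP case of a
 * callback before submitting, overriding the values taken from
 * "struct usb_config":
 *
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_timeout(xfer, 250);	// milliseconds
 *		usbd_xfer_set_frames(xfer, 1);
 *		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
 *		usbd_transfer_submit(xfer);
 *		break;
 */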
2248
2249 usb_frcount_t
2250 usbd_xfer_max_frames(struct usb_xfer *xfer)
2251 {
2252 return (xfer->max_frame_count);
2253 }
2254
2255 usb_frlength_t
2256 usbd_xfer_max_len(struct usb_xfer *xfer)
2257 {
2258 return (xfer->max_data_length);
2259 }
2260
2261 usb_frlength_t
2262 usbd_xfer_max_framelen(struct usb_xfer *xfer)
2263 {
2264 return (xfer->max_frame_size);
2265 }
2266
2267 void
2268 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2269 usb_frlength_t len)
2270 {
2271 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2272
2273 xfer->frlengths[frindex] = len;
2274 }
2275
2276 /*------------------------------------------------------------------------*
2277 * usb_callback_proc - factored out code
2278 *
2279 * This function performs USB callbacks.
2280 *------------------------------------------------------------------------*/
2281 static void
2282 usb_callback_proc(struct usb_proc_msg *_pm)
2283 {
2284 struct usb_done_msg *pm = (void *)_pm;
2285 struct usb_xfer_root *info = pm->xroot;
2286
2287 /* Change locking order */
2288 USB_BUS_UNLOCK(info->bus);
2289
2290 /*
2291 * We exploit the fact that the mutex is the same for all
2292 * callbacks that will be called from this thread:
2293 */
2294 USB_MTX_LOCK(info->xfer_mtx);
2295 USB_BUS_LOCK(info->bus);
2296
2297 /* Continue where we lost track */
2298 usb_command_wrapper(&info->done_q,
2299 info->done_q.curr);
2300
2301 USB_MTX_UNLOCK(info->xfer_mtx);
2302 }
2303
2304 /*------------------------------------------------------------------------*
2305 * usbd_callback_ss_done_defer
2306 *
2307 * This function will defer the start, stop and done callback to the
2308 * correct thread.
2309 *------------------------------------------------------------------------*/
2310 static void
2311 usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2312 {
2313 struct usb_xfer_root *info = xfer->xroot;
2314 struct usb_xfer_queue *pq = &info->done_q;
2315
2316 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2317
2318 if (pq->curr != xfer) {
2319 usbd_transfer_enqueue(pq, xfer);
2320 }
2321 if (!pq->recurse_1) {
2322
2323 /*
2324 * We have to postpone the callback due to the fact we
2325 * will have a Lock Order Reversal, LOR, if we try to
2326 * proceed !
2327 */
2328 (void) usb_proc_msignal(info->done_p,
2329 &info->done_m[0], &info->done_m[1]);
2330 } else {
2331 /* clear second recurse flag */
2332 pq->recurse_2 = 0;
2333 }
2334 return;
2335
2336 }
2337
2338 /*------------------------------------------------------------------------*
2339 * usbd_callback_wrapper
2340 *
2341 * This is a wrapper for USB callbacks. This wrapper does some
2342 * auto-magic things like figuring out if we can call the callback
2343 * directly from the current context or if we need to wakeup the
2344 * interrupt process.
2345 *------------------------------------------------------------------------*/
2346 static void
2347 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2348 {
2349 struct usb_xfer *xfer = pq->curr;
2350 struct usb_xfer_root *info = xfer->xroot;
2351
2352 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2353 if ((pq->recurse_3 != 0 || mtx_owned(info->xfer_mtx) == 0) &&
2354 USB_IN_POLLING_MODE_FUNC() == 0) {
2355 /*
2356 * Cases that end up here:
2357 *
2358 * 5) HW interrupt done callback or other source.
2359 * 6) HW completed transfer during callback
2360 */
2361 DPRINTFN(3, "case 5 and 6\n");
2362
2363 /*
2364 * We have to postpone the callback due to the fact we
2365 * will have a Lock Order Reversal, LOR, if we try to
2366 * proceed!
2367 *
2368 * Postponing the callback also ensures that other USB
2369 * transfer queues get a chance.
2370 */
2371 (void) usb_proc_msignal(info->done_p,
2372 &info->done_m[0], &info->done_m[1]);
2373 return;
2374 }
2375 /*
2376 * Cases that end up here:
2377 *
2378 * 1) We are starting a transfer
2379 * 2) We are prematurely calling back a transfer
2380 * 3) We are stopping a transfer
2381 * 4) We are doing an ordinary callback
2382 */
2383 DPRINTFN(3, "case 1-4\n");
2384 /* get next USB transfer in the queue */
2385 info->done_q.curr = NULL;
2386
2387 /* set flag in case of drain */
2388 xfer->flags_int.doing_callback = 1;
2389
2390 USB_BUS_UNLOCK(info->bus);
2391 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2392
2393 /* set correct USB state for callback */
2394 if (!xfer->flags_int.transferring) {
2395 xfer->usb_state = USB_ST_SETUP;
2396 if (!xfer->flags_int.started) {
2397 /* we got stopped before we even got started */
2398 USB_BUS_LOCK(info->bus);
2399 goto done;
2400 }
2401 } else {
2402
2403 if (usbd_callback_wrapper_sub(xfer)) {
2404 /* the callback has been deferred */
2405 USB_BUS_LOCK(info->bus);
2406 goto done;
2407 }
2408 #if USB_HAVE_POWERD
2409 /* decrement power reference */
2410 usbd_transfer_power_ref(xfer, -1);
2411 #endif
2412 xfer->flags_int.transferring = 0;
2413
2414 if (xfer->error) {
2415 xfer->usb_state = USB_ST_ERROR;
2416 } else {
2417 /* set transferred state */
2418 xfer->usb_state = USB_ST_TRANSFERRED;
2419 #if USB_HAVE_BUSDMA
2420 /* sync DMA memory, if any */
2421 if (xfer->flags_int.bdma_enable &&
2422 (!xfer->flags_int.bdma_no_post_sync)) {
2423 usb_bdma_post_sync(xfer);
2424 }
2425 #endif
2426 }
2427 }
2428
2429 #if USB_HAVE_PF
2430 if (xfer->usb_state != USB_ST_SETUP) {
2431 USB_BUS_LOCK(info->bus);
2432 usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2433 USB_BUS_UNLOCK(info->bus);
2434 }
2435 #endif
2436 /* call processing routine */
2437 (xfer->callback) (xfer, xfer->error);
2438
2439 /* pickup the USB mutex again */
2440 USB_BUS_LOCK(info->bus);
2441
2442 /*
2443 	 * Check if we got started after we got cancelled, but
2444 * before we managed to do the callback.
2445 */
2446 if ((!xfer->flags_int.open) &&
2447 (xfer->flags_int.started) &&
2448 (xfer->usb_state == USB_ST_ERROR)) {
2449 /* clear flag in case of drain */
2450 xfer->flags_int.doing_callback = 0;
2451 		/* try to loop, but not recursively */
2452 usb_command_wrapper(&info->done_q, xfer);
2453 return;
2454 }
2455
2456 done:
2457 /* clear flag in case of drain */
2458 xfer->flags_int.doing_callback = 0;
2459
2460 /*
2461 * Check if we are draining.
2462 */
2463 if (xfer->flags_int.draining &&
2464 (!xfer->flags_int.transferring)) {
2465 /* "usbd_transfer_drain()" is waiting for end of transfer */
2466 xfer->flags_int.draining = 0;
2467 cv_broadcast(&info->cv_drain);
2468 }
2469
2470 /* do the next callback, if any */
2471 usb_command_wrapper(&info->done_q,
2472 info->done_q.curr);
2473 }
2474
2475 /*------------------------------------------------------------------------*
2476 * usb_dma_delay_done_cb
2477 *
2478  * This function is called when the DMA delay has been executed, and
2479 * will make sure that the callback is called to complete the USB
2480 * transfer. This code path is usually only used when there is an USB
2481 * error like USB_ERR_CANCELLED.
2482 *------------------------------------------------------------------------*/
2483 void
2484 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2485 {
2486 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2487
2488 DPRINTFN(3, "Completed %p\n", xfer);
2489
2490 /* queue callback for execution, again */
2491 usbd_transfer_done(xfer, 0);
2492 }
2493
2494 /*------------------------------------------------------------------------*
2495 * usbd_transfer_dequeue
2496 *
2497 * - This function is used to remove an USB transfer from a USB
2498 * transfer queue.
2499 *
2500 * - This function can be called multiple times in a row.
2501 *------------------------------------------------------------------------*/
2502 void
2503 usbd_transfer_dequeue(struct usb_xfer *xfer)
2504 {
2505 struct usb_xfer_queue *pq;
2506
2507 pq = xfer->wait_queue;
2508 if (pq) {
2509 TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2510 xfer->wait_queue = NULL;
2511 }
2512 }
2513
2514 /*------------------------------------------------------------------------*
2515 * usbd_transfer_enqueue
2516 *
2517  * - This function is used to insert an USB transfer into a USB
2518 * transfer queue.
2519 *
2520 * - This function can be called multiple times in a row.
2521 *------------------------------------------------------------------------*/
2522 void
2523 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2524 {
2525 /*
2526 * Insert the USB transfer into the queue, if it is not
2527 * already on a USB transfer queue:
2528 */
2529 if (xfer->wait_queue == NULL) {
2530 xfer->wait_queue = pq;
2531 TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2532 }
2533 }
2534
2535 /*------------------------------------------------------------------------*
2536 * usbd_transfer_done
2537 *
2538 * - This function is used to remove an USB transfer from the busdma,
2539 * pipe or interrupt queue.
2540 *
2541 * - This function is used to queue the USB transfer on the done
2542 * queue.
2543 *
2544 * - This function is used to stop any USB transfer timeouts.
2545 *------------------------------------------------------------------------*/
2546 void
2547 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2548 {
2549 struct usb_xfer_root *info = xfer->xroot;
2550
2551 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2552
2553 DPRINTF("err=%s\n", usbd_errstr(error));
2554
2555 /*
2556 * If we are not transferring then just return.
2557 * This can happen during transfer cancel.
2558 */
2559 if (!xfer->flags_int.transferring) {
2560 DPRINTF("not transferring\n");
2561 /* end of control transfer, if any */
2562 xfer->flags_int.control_act = 0;
2563 return;
2564 }
2565 /* only set transfer error, if not already set */
2566 if (xfer->error == USB_ERR_NORMAL_COMPLETION)
2567 xfer->error = error;
2568
2569 /* stop any callouts */
2570 usb_callout_stop(&xfer->timeout_handle);
2571
2572 /*
2573 * If we are waiting on a queue, just remove the USB transfer
2574 * from the queue, if any. We should have the required locks
2575 * locked to do the remove when this function is called.
2576 */
2577 usbd_transfer_dequeue(xfer);
2578
2579 #if USB_HAVE_BUSDMA
2580 if (mtx_owned(info->xfer_mtx)) {
2581 struct usb_xfer_queue *pq;
2582
2583 /*
2584 * If the private USB lock is not locked, then we assume
2585 * that the BUS-DMA load stage has been passed:
2586 */
2587 pq = &info->dma_q;
2588
2589 if (pq->curr == xfer) {
2590 /* start the next BUS-DMA load, if any */
2591 usb_command_wrapper(pq, NULL);
2592 }
2593 }
2594 #endif
2595 /* keep some statistics */
2596 if (xfer->error) {
2597 info->bus->stats_err.uds_requests
2598 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2599 } else {
2600 info->bus->stats_ok.uds_requests
2601 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2602 }
2603
2604 /* call the USB transfer callback */
2605 usbd_callback_ss_done_defer(xfer);
2606 }
2607
2608 /*------------------------------------------------------------------------*
2609 * usbd_transfer_start_cb
2610 *
2611 * This function is called to start the USB transfer when
2612  * "xfer->interval" is greater than zero, and the endpoint type is
2613 * BULK or CONTROL.
2614 *------------------------------------------------------------------------*/
2615 static void
2616 usbd_transfer_start_cb(void *arg)
2617 {
2618 struct usb_xfer *xfer = arg;
2619 struct usb_endpoint *ep = xfer->endpoint;
2620
2621 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2622
2623 DPRINTF("start\n");
2624
2625 #if USB_HAVE_PF
2626 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2627 #endif
2628
2629 /* the transfer can now be cancelled */
2630 xfer->flags_int.can_cancel_immed = 1;
2631
2632 /* start USB transfer, if no error */
2633 if (xfer->error == 0)
2634 (ep->methods->start) (xfer);
2635
2636 /* check for transfer error */
2637 if (xfer->error) {
2638 /* some error has happened */
2639 usbd_transfer_done(xfer, 0);
2640 }
2641 }
2642
2643 /*------------------------------------------------------------------------*
2644 * usbd_xfer_set_stall
2645 *
2646 * This function is used to set the stall flag outside the
2647 * callback. This function is NULL safe.
2648 *------------------------------------------------------------------------*/
2649 void
2650 usbd_xfer_set_stall(struct usb_xfer *xfer)
2651 {
2652 if (xfer == NULL) {
2653 /* tearing down */
2654 return;
2655 }
2656 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2657
2658 /* avoid any races by locking the USB mutex */
2659 USB_BUS_LOCK(xfer->xroot->bus);
2660 xfer->flags.stall_pipe = 1;
2661 USB_BUS_UNLOCK(xfer->xroot->bus);
2662 }
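
/*
 * Example (editor's sketch, not from the original source): the common
 * error-case idiom in a transfer callback, which requests a stall
 * clear and then retries from the setup case (assuming a "tr_setup"
 * label at the USB_ST_SETUP case):
 *
 *	default:			// error case
 *		if (error != USB_ERR_CANCELLED) {
 *			// try to clear stall first
 *			usbd_xfer_set_stall(xfer);
 *			goto tr_setup;
 *		}
 *		break;
 */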
2663
2664 int
2665 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2666 {
2667 return (xfer->endpoint->is_stalled);
2668 }
2669
2670 /*------------------------------------------------------------------------*
2671 * usbd_transfer_clear_stall
2672 *
2673 * This function is used to clear the stall flag outside the
2674 * callback. This function is NULL safe.
2675 *------------------------------------------------------------------------*/
2676 void
2677 usbd_transfer_clear_stall(struct usb_xfer *xfer)
2678 {
2679 if (xfer == NULL) {
2680 /* tearing down */
2681 return;
2682 }
2683 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2684
2685 /* avoid any races by locking the USB mutex */
2686 USB_BUS_LOCK(xfer->xroot->bus);
2687
2688 xfer->flags.stall_pipe = 0;
2689
2690 USB_BUS_UNLOCK(xfer->xroot->bus);
2691 }
2692
2693 /*------------------------------------------------------------------------*
2694 * usbd_pipe_start
2695 *
2696 * This function is used to add an USB transfer to the pipe transfer list.
2697 *------------------------------------------------------------------------*/
2698 void
2699 usbd_pipe_start(struct usb_xfer_queue *pq)
2700 {
2701 struct usb_endpoint *ep;
2702 struct usb_xfer *xfer;
2703 uint8_t type;
2704
2705 xfer = pq->curr;
2706 ep = xfer->endpoint;
2707
2708 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2709
2710 /*
2711 * If the endpoint is already stalled we do nothing !
2712 */
2713 if (ep->is_stalled) {
2714 return;
2715 }
2716 /*
2717 * Check if we are supposed to stall the endpoint:
2718 */
2719 if (xfer->flags.stall_pipe) {
2720 struct usb_device *udev;
2721 struct usb_xfer_root *info;
2722
2723 /* clear stall command */
2724 xfer->flags.stall_pipe = 0;
2725
2726 /* get pointer to USB device */
2727 info = xfer->xroot;
2728 udev = info->udev;
2729
2730 /*
2731 * Only stall BULK and INTERRUPT endpoints.
2732 */
2733 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2734 if ((type == UE_BULK) ||
2735 (type == UE_INTERRUPT)) {
2736 uint8_t did_stall;
2737
2738 did_stall = 1;
2739
2740 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2741 (udev->bus->methods->set_stall) (
2742 udev, ep, &did_stall);
2743 } else if (udev->ctrl_xfer[1]) {
2744 info = udev->ctrl_xfer[1]->xroot;
2745 usb_proc_msignal(
2746 USB_BUS_CS_PROC(info->bus),
2747 &udev->cs_msg[0], &udev->cs_msg[1]);
2748 } else {
2749 /* should not happen */
2750 DPRINTFN(0, "No stall handler\n");
2751 }
2752 /*
2753 * Check if we should stall. Some USB hardware
2754 * handles set- and clear-stall in hardware.
2755 */
2756 if (did_stall) {
2757 /*
2758 * The transfer will be continued when
2759 * the clear-stall control endpoint
2760 * message is received.
2761 */
2762 ep->is_stalled = 1;
2763 return;
2764 }
2765 } else if (type == UE_ISOCHRONOUS) {
2766
2767 /*
2768 * Make sure any FIFO overflow or other FIFO
2769 * error conditions go away by resetting the
2770 * endpoint FIFO through the clear stall
2771 * method.
2772 */
2773 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2774 (udev->bus->methods->clear_stall) (udev, ep);
2775 }
2776 }
2777 }
2778 /* Set or clear stall complete - special case */
2779 if (xfer->nframes == 0) {
2780 /* we are complete */
2781 xfer->aframes = 0;
2782 usbd_transfer_done(xfer, 0);
2783 return;
2784 }
2785 /*
2786 * Handled cases:
2787 *
2788 * 1) Start the first transfer queued.
2789 *
2790 * 2) Re-start the current USB transfer.
2791 */
2792 /*
2793 * Check if there should be any
2794 * pre transfer start delay:
2795 */
2796 if (xfer->interval > 0) {
2797 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2798 if ((type == UE_BULK) ||
2799 (type == UE_CONTROL)) {
2800 usbd_transfer_timeout_ms(xfer,
2801 &usbd_transfer_start_cb,
2802 xfer->interval);
2803 return;
2804 }
2805 }
2806 DPRINTF("start\n");
2807
2808 #if USB_HAVE_PF
2809 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2810 #endif
2811 /* the transfer can now be cancelled */
2812 xfer->flags_int.can_cancel_immed = 1;
2813
2814 /* start USB transfer, if no error */
2815 if (xfer->error == 0)
2816 (ep->methods->start) (xfer);
2817
2818 /* check for transfer error */
2819 if (xfer->error) {
2820 /* some error has happened */
2821 usbd_transfer_done(xfer, 0);
2822 }
2823 }
2824
2825 /*------------------------------------------------------------------------*
2826 * usbd_transfer_timeout_ms
2827 *
2828 * This function is used to setup a timeout on the given USB
2829 * transfer. If the timeout has been deferred the callback given by
2830 * "cb" will get called after "ms" milliseconds.
2831 *------------------------------------------------------------------------*/
2832 void
2833 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2834 void (*cb) (void *arg), usb_timeout_t ms)
2835 {
2836 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2837
2838 /* defer delay */
2839 usb_callout_reset(&xfer->timeout_handle,
2840 USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer);
2841 }
2842
2843 /*------------------------------------------------------------------------*
2844 * usbd_callback_wrapper_sub
2845 *
2846 * - This function will update variables in an USB transfer after
2847  * the USB transfer is complete.
2848 *
2849 * - This function is used to start the next USB transfer on the
2850 * ep transfer queue, if any.
2851 *
2852 * NOTE: In some special cases the USB transfer will not be removed from
2853 * the pipe queue, but remain first. To enforce USB transfer removal call
2854 * this function passing the error code "USB_ERR_CANCELLED".
2855 *
2856 * Return values:
2857 * 0: Success.
2858 * Else: The callback has been deferred.
2859 *------------------------------------------------------------------------*/
2860 static uint8_t
2861 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2862 {
2863 struct usb_endpoint *ep;
2864 struct usb_bus *bus;
2865 usb_frcount_t x;
2866
2867 bus = xfer->xroot->bus;
2868
2869 if ((!xfer->flags_int.open) &&
2870 (!xfer->flags_int.did_close)) {
2871 DPRINTF("close\n");
2872 USB_BUS_LOCK(bus);
2873 (xfer->endpoint->methods->close) (xfer);
2874 USB_BUS_UNLOCK(bus);
2875 /* only close once */
2876 xfer->flags_int.did_close = 1;
2877 return (1); /* wait for new callback */
2878 }
2879 /*
2880 * If we have a non-hardware induced error we
2881 * need to do the DMA delay!
2882 */
2883 if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2884 (xfer->error == USB_ERR_CANCELLED ||
2885 xfer->error == USB_ERR_TIMEOUT ||
2886 bus->methods->start_dma_delay != NULL)) {
2887
2888 usb_timeout_t temp;
2889
2890 /* only delay once */
2891 xfer->flags_int.did_dma_delay = 1;
2892
2893 /* we can not cancel this delay */
2894 xfer->flags_int.can_cancel_immed = 0;
2895
2896 temp = usbd_get_dma_delay(xfer->xroot->udev);
2897
2898 DPRINTFN(3, "DMA delay, %u ms, "
2899 "on %p\n", temp, xfer);
2900
2901 if (temp != 0) {
2902 USB_BUS_LOCK(bus);
2903 /*
2904 * Some hardware solutions have dedicated
2905 * events when it is safe to free DMA'ed
2906 * memory. For the other hardware platforms we
2907 * use a static delay.
2908 */
2909 if (bus->methods->start_dma_delay != NULL) {
2910 (bus->methods->start_dma_delay) (xfer);
2911 } else {
2912 usbd_transfer_timeout_ms(xfer,
2913 (void (*)(void *))&usb_dma_delay_done_cb,
2914 temp);
2915 }
2916 USB_BUS_UNLOCK(bus);
2917 return (1); /* wait for new callback */
2918 }
2919 }
2920 /* check actual number of frames */
2921 if (xfer->aframes > xfer->nframes) {
2922 if (xfer->error == 0) {
2923 panic("%s: actual number of frames, %d, is "
2924 "greater than initial number of frames, %d\n",
2925 __FUNCTION__, xfer->aframes, xfer->nframes);
2926 } else {
2927 /* just set some valid value */
2928 xfer->aframes = xfer->nframes;
2929 }
2930 }
2931 /* compute actual length */
2932 xfer->actlen = 0;
2933
2934 for (x = 0; x != xfer->aframes; x++) {
2935 xfer->actlen += xfer->frlengths[x];
2936 }
2937
2938 /*
2939 * Frames that were not transferred get zero actual length in
2940 * case the USB device driver does not check the actual number
2941 * of frames transferred, "xfer->aframes":
2942 */
2943 for (; x < xfer->nframes; x++) {
2944 usbd_xfer_set_frame_len(xfer, x, 0);
2945 }
2946
2947 /* check actual length */
2948 if (xfer->actlen > xfer->sumlen) {
2949 if (xfer->error == 0) {
2950 panic("%s: actual length, %d, is greater than "
2951 "initial length, %d\n",
2952 __FUNCTION__, xfer->actlen, xfer->sumlen);
2953 } else {
2954 /* just set some valid value */
2955 xfer->actlen = xfer->sumlen;
2956 }
2957 }
2958 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2959 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2960 xfer->aframes, xfer->nframes);
2961
2962 if (xfer->error) {
2963 /* end of control transfer, if any */
2964 xfer->flags_int.control_act = 0;
2965
2966 #if USB_HAVE_TT_SUPPORT
2967 switch (xfer->error) {
2968 case USB_ERR_NORMAL_COMPLETION:
2969 case USB_ERR_SHORT_XFER:
2970 case USB_ERR_STALLED:
2971 case USB_ERR_CANCELLED:
2972 /* nothing to do */
2973 break;
2974 default:
2975 /* try to reset the TT, if any */
2976 USB_BUS_LOCK(bus);
2977 uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint);
2978 USB_BUS_UNLOCK(bus);
2979 break;
2980 }
2981 #endif
2982 /* check if we should block the execution queue */
2983 if ((xfer->error != USB_ERR_CANCELLED) &&
2984 (xfer->flags.pipe_bof)) {
2985 DPRINTFN(2, "xfer=%p: Block On Failure "
2986 "on endpoint=%p\n", xfer, xfer->endpoint);
2987 goto done;
2988 }
2989 } else {
2990 /* check for short transfers */
2991 if (xfer->actlen < xfer->sumlen) {
2992
2993 /* end of control transfer, if any */
2994 xfer->flags_int.control_act = 0;
2995
2996 if (!xfer->flags_int.short_xfer_ok) {
2997 xfer->error = USB_ERR_SHORT_XFER;
2998 if (xfer->flags.pipe_bof) {
2999 DPRINTFN(2, "xfer=%p: Block On Failure on "
3000 "Short Transfer on endpoint %p.\n",
3001 xfer, xfer->endpoint);
3002 goto done;
3003 }
3004 }
3005 } else {
3006 /*
3007 * Check if we are in the middle of a
3008 * control transfer:
3009 */
3010 if (xfer->flags_int.control_act) {
3011 DPRINTFN(5, "xfer=%p: Control transfer "
3012 "active on endpoint=%p\n", xfer, xfer->endpoint);
3013 goto done;
3014 }
3015 }
3016 }
3017
3018 ep = xfer->endpoint;
3019
3020 /*
3021 * If the current USB transfer is completing we need to start the
3022 * next one:
3023 */
3024 USB_BUS_LOCK(bus);
3025 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
3026 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
3027
3028 if (ep->endpoint_q[xfer->stream_id].curr != NULL ||
3029 TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) {
3030 /* there is another USB transfer waiting */
3031 } else {
3032 /* this is the last USB transfer */
3033 /* clear isochronous sync flag */
3034 xfer->endpoint->is_synced = 0;
3035 }
3036 }
3037 USB_BUS_UNLOCK(bus);
3038 done:
3039 return (0);
3040 }
3041
3042 /*------------------------------------------------------------------------*
3043 * usb_command_wrapper
3044 *
3045  * This function is used to execute commands non-recursively on an USB
3046 * transfer.
3047 *------------------------------------------------------------------------*/
3048 void
3049 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
3050 {
3051 if (xfer) {
3052 /*
3053 * If the transfer is not already processing,
3054 * queue it!
3055 */
3056 if (pq->curr != xfer) {
3057 usbd_transfer_enqueue(pq, xfer);
3058 if (pq->curr != NULL) {
3059 /* something is already processing */
3060 DPRINTFN(6, "busy %p\n", pq->curr);
3061 return;
3062 }
3063 }
3064 } else {
3065 /* Get next element in queue */
3066 pq->curr = NULL;
3067 }
3068
3069 if (!pq->recurse_1) {
3070
3071 /* clear third recurse flag */
3072 pq->recurse_3 = 0;
3073
3074 do {
3075 /* set two first recurse flags */
3076 pq->recurse_1 = 1;
3077 pq->recurse_2 = 1;
3078
3079 if (pq->curr == NULL) {
3080 xfer = TAILQ_FIRST(&pq->head);
3081 if (xfer) {
3082 TAILQ_REMOVE(&pq->head, xfer,
3083 wait_entry);
3084 xfer->wait_queue = NULL;
3085 pq->curr = xfer;
3086 } else {
3087 break;
3088 }
3089 }
3090 DPRINTFN(6, "cb %p (enter)\n", pq->curr);
3091 (pq->command) (pq);
3092 DPRINTFN(6, "cb %p (leave)\n", pq->curr);
3093
3094 /*
3095 * Set third recurse flag to indicate
3096 * recursion happened:
3097 */
3098 pq->recurse_3 = 1;
3099
3100 } while (!pq->recurse_2);
3101
3102 /* clear first recurse flag */
3103 pq->recurse_1 = 0;
3104
3105 } else {
3106 /* clear second recurse flag */
3107 pq->recurse_2 = 0;
3108 }
3109 }
3110
3111 /*------------------------------------------------------------------------*
3112 * usbd_ctrl_transfer_setup
3113 *
3114 * This function is used to setup the default USB control endpoint
3115 * transfer.
3116 *------------------------------------------------------------------------*/
3117 void
3118 usbd_ctrl_transfer_setup(struct usb_device *udev)
3119 {
3120 struct usb_xfer *xfer;
3121 uint8_t no_resetup;
3122 uint8_t iface_index;
3123
3124 /* check for root HUB */
3125 if (udev->parent_hub == NULL)
3126 return;
3127 repeat:
3128
3129 xfer = udev->ctrl_xfer[0];
3130 if (xfer) {
3131 USB_XFER_LOCK(xfer);
3132 no_resetup =
3133 ((xfer->address == udev->address) &&
3134 (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3135 udev->ddesc.bMaxPacketSize));
3136 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3137 if (no_resetup) {
3138 /*
3139 * NOTE: checking "xfer->address" and
3140 * starting the USB transfer must be
3141 * atomic!
3142 */
3143 usbd_transfer_start(xfer);
3144 }
3145 }
3146 USB_XFER_UNLOCK(xfer);
3147 } else {
3148 no_resetup = 0;
3149 }
3150
3151 if (no_resetup) {
3152 /*
3153 		 * All parameters are exactly the same as before.
3154 * Just return.
3155 */
3156 return;
3157 }
3158 /*
3159 * Update wMaxPacketSize for the default control endpoint:
3160 */
3161 udev->ctrl_ep_desc.wMaxPacketSize[0] =
3162 udev->ddesc.bMaxPacketSize;
3163
3164 /*
3165 * Unsetup any existing USB transfer:
3166 */
3167 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3168
3169 /*
3170 * Reset clear stall error counter.
3171 */
3172 udev->clear_stall_errors = 0;
3173
3174 /*
3175 * Try to setup a new USB transfer for the
3176 * default control endpoint:
3177 */
3178 iface_index = 0;
3179 if (usbd_transfer_setup(udev, &iface_index,
3180 udev->ctrl_xfer, udev->bus->control_ep_quirk ?
3181 usb_control_ep_quirk_cfg : usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3182 &udev->device_mtx)) {
3183 DPRINTFN(0, "could not setup default "
3184 "USB transfer\n");
3185 } else {
3186 goto repeat;
3187 }
3188 }
3189
3190 /*------------------------------------------------------------------------*
3191  *	usbd_clear_stall_locked - factored out code
3192 *
3193 * NOTE: the intention of this function is not to reset the hardware
3194 * data toggle.
3195 *------------------------------------------------------------------------*/
3196 void
3197 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3198 {
3199 USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3200
3201 /* check that we have a valid case */
3202 if (udev->flags.usb_mode == USB_MODE_HOST &&
3203 udev->parent_hub != NULL &&
3204 udev->bus->methods->clear_stall != NULL &&
3205 ep->methods != NULL) {
3206 (udev->bus->methods->clear_stall) (udev, ep);
3207 }
3208 }
3209
3210 /*------------------------------------------------------------------------*
3211 * usbd_clear_data_toggle - factored out code
3212 *
3213 * NOTE: the intention of this function is not to reset the hardware
3214 * data toggle on the USB device side.
3215 *------------------------------------------------------------------------*/
3216 void
3217 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3218 {
3219 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3220
3221 USB_BUS_LOCK(udev->bus);
3222 ep->toggle_next = 0;
3223 /* some hardware needs a callback to clear the data toggle */
3224 usbd_clear_stall_locked(udev, ep);
3225 USB_BUS_UNLOCK(udev->bus);
3226 }
3227
3228 /*------------------------------------------------------------------------*
3229 * usbd_clear_stall_callback - factored out clear stall callback
3230 *
3231 * Input parameters:
3232 * xfer1: Clear Stall Control Transfer
3233 * xfer2: Stalled USB Transfer
3234 *
3235 * This function is NULL safe.
3236 *
3237 * Return values:
3238 * 0: In progress
3239 * Else: Finished
3240 *
3241 * Clear stall config example:
3242 *
3243 * static const struct usb_config my_clearstall = {
3244 * .type = UE_CONTROL,
3245 * .endpoint = 0,
3246 * .direction = UE_DIR_ANY,
3247 * .interval = 50, //50 milliseconds
3248 * .bufsize = sizeof(struct usb_device_request),
3249 * .timeout = 1000, //1.000 seconds
3250 * .callback = &my_clear_stall_callback, // **
3251 * .usb_mode = USB_MODE_HOST,
3252 * };
3253 *
3254 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3255 * passing the correct parameters.
3256 *------------------------------------------------------------------------*/
3257 uint8_t
3258 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3259 struct usb_xfer *xfer2)
3260 {
3261 struct usb_device_request req;
3262
3263 if (xfer2 == NULL) {
3264 /* looks like we are tearing down */
3265 DPRINTF("NULL input parameter\n");
3266 return (0);
3267 }
3268 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3269 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3270
3271 switch (USB_GET_STATE(xfer1)) {
3272 case USB_ST_SETUP:
3273
3274 /*
3275 * pre-clear the data toggle to DATA0 ("umass.c" and
3276 * "ata-usb.c" depends on this)
3277 */
3278
3279 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3280
3281 /* setup a clear-stall packet */
3282
3283 req.bmRequestType = UT_WRITE_ENDPOINT;
3284 req.bRequest = UR_CLEAR_FEATURE;
3285 USETW(req.wValue, UF_ENDPOINT_HALT);
3286 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3287 req.wIndex[1] = 0;
3288 USETW(req.wLength, 0);
3289
3290 /*
3291 * "usbd_transfer_setup_sub()" will ensure that
3292 * we have sufficient room in the buffer for
3293 * the request structure!
3294 */
3295
3296 /* copy in the transfer */
3297
3298 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3299
3300 /* set length */
3301 xfer1->frlengths[0] = sizeof(req);
3302 xfer1->nframes = 1;
3303
3304 usbd_transfer_submit(xfer1);
3305 return (0);
3306
3307 case USB_ST_TRANSFERRED:
3308 break;
3309
3310 default: /* Error */
3311 if (xfer1->error == USB_ERR_CANCELLED) {
3312 return (0);
3313 }
3314 break;
3315 }
3316 return (1); /* Clear Stall Finished */
3317 }
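
/*
 * Example (editor's sketch, not from the original source): a wrapper
 * callback like the "my_clear_stall_callback" mentioned above. The
 * "xyz_softc" fields and the "XYZ_FLAG_RD_STALL" flag are hypothetical:
 *
 *	static void
 *	xyz_read_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 *	{
 *		struct xyz_softc *sc = usbd_xfer_softc(xfer);
 *		struct usb_xfer *xfer_other = sc->sc_xfer[XYZ_BULK_RD];
 *
 *		if (usbd_clear_stall_callback(xfer, xfer_other)) {
 *			// clear stall has finished - restart the stalled transfer
 *			sc->sc_flags &= ~XYZ_FLAG_RD_STALL;
 *			usbd_transfer_start(xfer_other);
 *		}
 *	}
 */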
3318
3319 /*------------------------------------------------------------------------*
3320 * usbd_transfer_poll
3321 *
3322 * The following function gets called from the USB keyboard driver and
3323  * UMASS when the system has panicked.
3324 *
3325 * NOTE: It is currently not possible to resume normal operation on
3326 * the USB controller which has been polled, due to clearing of the
3327 * "up_dsleep" and "up_msleep" flags.
3328 *------------------------------------------------------------------------*/
3329 void
3330 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3331 {
3332 struct usb_xfer *xfer;
3333 struct usb_xfer_root *xroot;
3334 struct usb_device *udev;
3335 struct usb_proc_msg *pm;
3336 struct usb_bus *bus;
3337 uint16_t n;
3338 uint16_t drop_bus_spin;
3339 uint16_t drop_bus;
3340 uint16_t drop_xfer;
3341
3342 for (n = 0; n != max; n++) {
3343 /* Extra checks to avoid panic */
3344 xfer = ppxfer[n];
3345 if (xfer == NULL)
3346 continue; /* no USB transfer */
3347 xroot = xfer->xroot;
3348 if (xroot == NULL)
3349 continue; /* no USB root */
3350 udev = xroot->udev;
3351 if (udev == NULL)
3352 continue; /* no USB device */
3353 bus = udev->bus;
3354 if (bus == NULL)
3355 continue; /* no BUS structure */
3356 if (bus->methods == NULL)
3357 continue; /* no BUS methods */
3358 if (bus->methods->xfer_poll == NULL)
3359 continue; /* no poll method */
3360
3361 drop_bus_spin = 0;
3362 drop_bus = 0;
3363 drop_xfer = 0;
3364
3365 if (USB_IN_POLLING_MODE_FUNC() == 0) {
3366 /* make sure that the BUS spin mutex is not locked */
3367 while (mtx_owned(&bus->bus_spin_lock)) {
3368 mtx_unlock_spin(&bus->bus_spin_lock);
3369 drop_bus_spin++;
3370 }
3371
3372 /* make sure that the BUS mutex is not locked */
3373 while (mtx_owned(&bus->bus_mtx)) {
3374 mtx_unlock(&bus->bus_mtx);
3375 drop_bus++;
3376 }
3377
3378 /* make sure that the transfer mutex is not locked */
3379 while (mtx_owned(xroot->xfer_mtx)) {
3380 mtx_unlock(xroot->xfer_mtx);
3381 drop_xfer++;
3382 }
3383 }
3384
3385 /* Make sure cv_signal() and cv_broadcast() is not called */
3386 USB_BUS_CONTROL_XFER_PROC(bus)->up_msleep = 0;
3387 USB_BUS_EXPLORE_PROC(bus)->up_msleep = 0;
3388 USB_BUS_GIANT_PROC(bus)->up_msleep = 0;
3389 USB_BUS_NON_GIANT_ISOC_PROC(bus)->up_msleep = 0;
3390 USB_BUS_NON_GIANT_BULK_PROC(bus)->up_msleep = 0;
3391
3392 /* poll USB hardware */
3393 (bus->methods->xfer_poll) (bus);
3394
3395 USB_BUS_LOCK(xroot->bus);
3396
3397 /* check for clear stall */
3398 if (udev->ctrl_xfer[1] != NULL) {
3399
3400 /* poll clear stall start */
3401 pm = &udev->cs_msg[0].hdr;
3402 (pm->pm_callback) (pm);
3403 /* poll clear stall done thread */
3404 pm = &udev->ctrl_xfer[1]->
3405 xroot->done_m[0].hdr;
3406 (pm->pm_callback) (pm);
3407 }
3408
3409 /* poll done thread */
3410 pm = &xroot->done_m[0].hdr;
3411 (pm->pm_callback) (pm);
3412
3413 USB_BUS_UNLOCK(xroot->bus);
3414
3415 /* restore transfer mutex */
3416 while (drop_xfer--)
3417 mtx_lock(xroot->xfer_mtx);
3418
3419 /* restore BUS mutex */
3420 while (drop_bus--)
3421 mtx_lock(&bus->bus_mtx);
3422
3423 /* restore BUS spin mutex */
3424 while (drop_bus_spin--)
3425 mtx_lock_spin(&bus->bus_spin_lock);
3426 }
3427 }
3428
3429 static void
3430 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3431 uint8_t type, enum usb_dev_speed speed)
3432 {
3433 static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3434 [USB_SPEED_LOW] = 8,
3435 [USB_SPEED_FULL] = 64,
3436 [USB_SPEED_HIGH] = 1024,
3437 [USB_SPEED_VARIABLE] = 1024,
3438 [USB_SPEED_SUPER] = 1024,
3439 };
3440
3441 static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3442 [USB_SPEED_LOW] = 0, /* invalid */
3443 [USB_SPEED_FULL] = 1023,
3444 [USB_SPEED_HIGH] = 1024,
3445 [USB_SPEED_VARIABLE] = 3584,
3446 [USB_SPEED_SUPER] = 1024,
3447 };
3448
3449 static const uint16_t control_min[USB_SPEED_MAX] = {
3450 [USB_SPEED_LOW] = 8,
3451 [USB_SPEED_FULL] = 8,
3452 [USB_SPEED_HIGH] = 64,
3453 [USB_SPEED_VARIABLE] = 512,
3454 [USB_SPEED_SUPER] = 512,
3455 };
3456
3457 static const uint16_t bulk_min[USB_SPEED_MAX] = {
3458 [USB_SPEED_LOW] = 8,
3459 [USB_SPEED_FULL] = 8,
3460 [USB_SPEED_HIGH] = 512,
3461 [USB_SPEED_VARIABLE] = 512,
3462 [USB_SPEED_SUPER] = 1024,
3463 };
3464
3465 uint16_t temp;
3466
3467 memset(ptr, 0, sizeof(*ptr));
3468
3469 switch (type) {
3470 case UE_INTERRUPT:
3471 ptr->range.max = intr_range_max[speed];
3472 break;
3473 case UE_ISOCHRONOUS:
3474 ptr->range.max = isoc_range_max[speed];
3475 break;
3476 default:
3477 if (type == UE_BULK)
3478 temp = bulk_min[speed];
3479 else /* UE_CONTROL */
3480 temp = control_min[speed];
3481
3482 /* default is fixed */
3483 ptr->fixed[0] = temp;
3484 ptr->fixed[1] = temp;
3485 ptr->fixed[2] = temp;
3486 ptr->fixed[3] = temp;
3487
3488 if (speed == USB_SPEED_FULL) {
3489 /* multiple sizes */
3490 ptr->fixed[1] = 16;
3491 ptr->fixed[2] = 32;
3492 ptr->fixed[3] = 64;
3493 }
3494 if ((speed == USB_SPEED_VARIABLE) &&
3495 (type == UE_BULK)) {
3496 /* multiple sizes */
3497 ptr->fixed[2] = 1024;
3498 ptr->fixed[3] = 1536;
3499 }
3500 break;
3501 }
3502 }
3503
3504 void *
3505 usbd_xfer_softc(struct usb_xfer *xfer)
3506 {
3507 return (xfer->priv_sc);
3508 }
3509
3510 void *
3511 usbd_xfer_get_priv(struct usb_xfer *xfer)
3512 {
3513 return (xfer->priv_fifo);
3514 }
3515
3516 void
3517 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3518 {
3519 xfer->priv_fifo = ptr;
3520 }
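
/*
 * Example (editor's sketch, not from the original source): stashing a
 * per-submission pointer on the transfer and picking it up again when
 * the transfer completes. The "cmd" pointer and its type are
 * hypothetical:
 *
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_priv(xfer, cmd);
 *		usbd_transfer_submit(xfer);
 *		break;
 *
 *	case USB_ST_TRANSFERRED:
 *		cmd = usbd_xfer_get_priv(xfer);
 *		break;
 */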
3521
3522 uint8_t
3523 usbd_xfer_state(struct usb_xfer *xfer)
3524 {
3525 return (xfer->usb_state);
3526 }
3527
3528 void
3529 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3530 {
3531 switch (flag) {
3532 case USB_FORCE_SHORT_XFER:
3533 xfer->flags.force_short_xfer = 1;
3534 break;
3535 case USB_SHORT_XFER_OK:
3536 xfer->flags.short_xfer_ok = 1;
3537 break;
3538 case USB_MULTI_SHORT_OK:
3539 xfer->flags.short_frames_ok = 1;
3540 break;
3541 case USB_MANUAL_STATUS:
3542 xfer->flags.manual_status = 1;
3543 break;
3544 }
3545 }
3546
3547 void
3548 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3549 {
3550 switch (flag) {
3551 case USB_FORCE_SHORT_XFER:
3552 xfer->flags.force_short_xfer = 0;
3553 break;
3554 case USB_SHORT_XFER_OK:
3555 xfer->flags.short_xfer_ok = 0;
3556 break;
3557 case USB_MULTI_SHORT_OK:
3558 xfer->flags.short_frames_ok = 0;
3559 break;
3560 case USB_MANUAL_STATUS:
3561 xfer->flags.manual_status = 0;
3562 break;
3563 }
3564 }
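
/*
 * Example (editor's sketch, not from the original source): toggling
 * the short-transfer policy at run time, before the next submission:
 *
 *	if (want_short_ok)
 *		usbd_xfer_set_flag(xfer, USB_SHORT_XFER_OK);
 *	else
 *		usbd_xfer_clr_flag(xfer, USB_SHORT_XFER_OK);
 */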
3565
3566 /*
3567  * The following function returns, in milliseconds, the time at which the
3568  * isochronous transfer was completed by the hardware. The returned value
3569  * wraps around 65536 milliseconds.
3570 */
3571 uint16_t
3572 usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3573 {
3574 return (xfer->isoc_time_complete);
3575 }
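
/*
 * Example (editor's sketch, not from the original source): because the
 * timestamp wraps at 65536 ms, elapsed time should be computed with
 * unsigned 16-bit arithmetic. "sc_last_ts" is a hypothetical field:
 *
 *	uint16_t delta;
 *
 *	delta = usbd_xfer_get_timestamp(xfer) - sc->sc_last_ts;
 *	sc->sc_last_ts += delta;
 */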
3576
3577 /*
3578 * The following function returns non-zero if the max packet size
3579 * field was clamped to a valid value. Else it returns zero.
3580 */
3581 uint8_t
3582 usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer)
3583 {
3584 return (xfer->flags_int.maxp_was_clamped);
3585 }
3586