/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011, Bryan Venteicher <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */
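
/*
 * A rough sketch of how a device driver uses this interface once its
 * transport has set up the queue (illustrative only; error handling is
 * omitted and the request layout is hypothetical):
 *
 *	error = virtqueue_enqueue(vq, req, sg, nreadable, nwritable);
 *	if (error == 0)
 *		virtqueue_notify(vq);
 *	...
 *	req = virtqueue_dequeue(vq, &len);
 */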

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sdt.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

struct virtqueue {
	device_t		 vq_dev;
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;
	uint32_t		 vq_flags;
#define	VIRTQUEUE_FLAG_MODERN	 0x0001
#define	VIRTQUEUE_FLAG_INDIRECT	 0x0002
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0004

	int			 vq_max_indirect_size;
	bus_size_t		 vq_notify_offset;
	virtqueue_intr_t	*vq_intrhand;
	void			*vq_intrhand_arg;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	void			*vq_ring_mem;
	int			 vq_indirect_mem_size;
	int			 vq_alignment;
	int			 vq_ring_size;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];

	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define	VQ_RING_DESC_CHAIN_END	32768

#define	VQASSERT(_vq, _exp, _msg, ...)				\
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
	##__VA_ARGS__))

#define	VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	"invalid ring index: %d, max: %d", (_idx),		\
	(_vq)->vq_nentries)

#define	VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
	VQ_RING_DESC_CHAIN_END, "full ring terminated "		\
	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int	virtqueue_init_indirect(struct virtqueue *vq, int);
static void	virtqueue_free_indirect(struct virtqueue *vq);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static int	vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int	vq_ring_must_notify_host(struct virtqueue *);
static void	vq_ring_notify_host(struct virtqueue *);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);

SDT_PROVIDER_DEFINE(virtqueue);
SDT_PROBE_DEFINE6(virtqueue, , enqueue_segments, entry, "struct virtqueue *",
    "struct vring_desc *", "uint16_t", "struct sglist *", "int", "int");
SDT_PROBE_DEFINE1(virtqueue, , enqueue_segments, return, "uint16_t");

#define	vq_modern(_vq)		(((_vq)->vq_flags & VIRTQUEUE_FLAG_MODERN) != 0)
#define	vq_htog16(_vq, _val)	virtio_htog16(vq_modern(_vq), _val)
#define	vq_htog32(_vq, _val)	virtio_htog32(vq_modern(_vq), _val)
#define	vq_htog64(_vq, _val)	virtio_htog64(vq_modern(_vq), _val)
#define	vq_gtoh16(_vq, _val)	virtio_gtoh16(vq_modern(_vq), _val)
#define	vq_gtoh32(_vq, _val)	virtio_gtoh32(vq_modern(_vq), _val)
#define	vq_gtoh64(_vq, _val)	virtio_gtoh64(vq_modern(_vq), _val)

int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size,
    bus_size_t notify_offset, int align, vm_paddr_t highaddr,
    struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = malloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_notify_offset = notify_offset;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_F_VERSION_1) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_MODERN;
	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}
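
/*
 * Minimal sketch of a transport-side caller of virtqueue_alloc() (the
 * interrupt handler "mydev_vq_intr", its softc argument, and the queue
 * parameters below are all hypothetical):
 *
 *	struct vq_alloc_info info;
 *	struct virtqueue *vq;
 *
 *	VQ_ALLOC_INFO_INIT(&info, 0, mydev_vq_intr, sc, &vq, "%s request",
 *	    device_get_nameunit(dev));
 *	error = virtqueue_alloc(dev, 0, 256, notify_offset, PAGE_SIZE,
 *	    ~(vm_paddr_t)0, &info, &vq);
 */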

static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		free(dxp->indirect, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = vq_gtoh16(vq, i + 1);
	indirect[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
}

int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{

	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring_mem));
}

vm_paddr_t
virtqueue_desc_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.desc));
}

vm_paddr_t
virtqueue_avail_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.avail));
}

vm_paddr_t
virtqueue_used_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring.used));
}

uint16_t
virtqueue_index(struct virtqueue *vq)
{

	return (vq->vq_queue_index);
}

int
virtqueue_size(struct virtqueue *vq)
{

	return (vq->vq_nentries);
}

int
virtqueue_nfree(struct virtqueue *vq)
{

	return (vq->vq_free_cnt);
}

int
virtqueue_empty(struct virtqueue *vq)
{

	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

	return (vq->vq_free_cnt == 0);
}

void
virtqueue_notify(struct virtqueue *vq)
{

	/* Ensure updated avail->idx is visible to host. */
	mb();

	if (vq_ring_must_notify_host(vq))
		vq_ring_notify_host(vq);
	vq->vq_queued_cnt = 0;
}

int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq_htog16(vq, vq->vq_ring.used->idx);

	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}

int
virtqueue_intr_filter(struct virtqueue *vq)
{

	if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
		return (0);

	virtqueue_disable_intr(vq);

	return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

	vq->vq_intrhand(vq->vq_intrhand_arg);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

	return (vq_ring_enable_interrupt(vq, 0));
}
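
/*
 * Callers must handle the race where more entries are consumed after the
 * last dequeue but before interrupts are re-enabled; a nonzero return
 * above signals it. A common pattern (sketch only):
 *
 *	for (;;) {
 *		while ((cookie = virtqueue_dequeue(vq, &len)) != NULL)
 *			...process the completed request...
 *		if (virtqueue_enable_intr(vq) == 0)
 *			break;
 *		virtqueue_disable_intr(vq);
 *	}
 */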

int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
	uint16_t ndesc, avail_idx;

	avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
	ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

	switch (hint) {
	case VQ_POSTPONE_SHORT:
		ndesc = ndesc / 4;
		break;
	case VQ_POSTPONE_LONG:
		ndesc = (ndesc * 3) / 4;
		break;
	case VQ_POSTPONE_EMPTIED:
		break;
	}

	return (vq_ring_enable_interrupt(vq, ndesc));
}
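
/*
 * For example, with the EVENT_IDX feature negotiated and 8 descriptors
 * outstanding, VQ_POSTPONE_SHORT requests an interrupt after 2 more are
 * consumed (8 / 4), VQ_POSTPONE_LONG after 6 ((8 * 3) / 4), and
 * VQ_POSTPONE_EMPTIED after all 8.
 */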

/*
 * Note this is only considered a hint to the host.
 */
void
virtqueue_disable_intr(struct virtqueue *vq)
{

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) = vq_gtoh16(vq,
		    vq->vq_used_cons_idx - vq->vq_nentries - 1);
		return;
	}

	vq->vq_ring.avail->flags |= vq_gtoh16(vq, VRING_AVAIL_F_NO_INTERRUPT);
}

int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}
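
/*
 * Building the scatter/gather list for an enqueue, as a minimal sketch
 * (the request layout is hypothetical: a device-readable header followed
 * by a device-writable status byte, so readable = 1 and writable = 1):
 *
 *	struct sglist_seg segs[2];
 *	struct sglist sg;
 *
 *	sglist_init(&sg, 2, segs);
 *	sglist_append(&sg, &req->hdr, sizeof(req->hdr));
 *	sglist_append(&sg, &req->status, sizeof(req->status));
 *	error = virtqueue_enqueue(vq, req, &sg, 1, 1);
 */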

void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq_htog16(vq, vq->vq_ring.used->idx))
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	rmb();
	desc_idx = (uint16_t) vq_htog32(vq, uep->id);
	if (len != NULL)
		*len = vq_htog32(vq, uep->len);

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}

void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	VIRTIO_BUS_POLL(vq->vq_dev);
	while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
		cpu_spinwait();
		VIRTIO_BUS_POLL(vq->vq_dev);
	}

	return (cookie);
}

void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}
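
/*
 * Sketch of the usual detach-time pattern for the drain above (the
 * "mydev_free_request" helper is hypothetical):
 *
 *	int last = 0;
 *	void *cookie;
 *
 *	while ((cookie = virtqueue_drain(vq, &last)) != NULL)
 *		mydev_free_request(cookie);
 */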

void
virtqueue_dump(struct virtqueue *vq)
{

	if (vq == NULL)
		return;

	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; used_event_idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt, virtqueue_nused(vq),
	    vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq_htog16(vq, vq->vq_ring.avail->idx), vq->vq_used_cons_idx,
	    vq_htog16(vq, vq->vq_ring.used->idx),
	    vq_htog16(vq, vring_used_event(&vq->vq_ring)),
	    vq_htog16(vq, vq->vq_ring.avail->flags),
	    vq_htog16(vq, vq->vq_ring.used->flags));
}

static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = vq_gtoh16(vq, i + 1);
	vr->desc[i].next = vq_gtoh16(vq, VQ_RING_DESC_CHAIN_END);
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx, avail_ring_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
	avail_ring_idx = avail_idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_ring_idx] = vq_gtoh16(vq, desc_idx);

	wmb();
	vq->vq_ring.avail->idx = vq_gtoh16(vq, avail_idx + 1);

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}

static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	SDT_PROBE6(virtqueue, , enqueue_segments, entry, vq, desc, head_idx,
	    sg, readable, writable);

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = vq_htog16(vq, dp->next), seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = vq_gtoh64(vq, seg->ss_paddr);
		dp->len = vq_gtoh32(vq, seg->ss_len);
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= vq_gtoh16(vq, VRING_DESC_F_NEXT);
		if (i >= readable)
			dp->flags |= vq_gtoh16(vq, VRING_DESC_F_WRITE);
	}

	SDT_PROBE1(virtqueue, , enqueue_segments, return, idx);
	return (idx);
}

static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	if (needed < 2)
		return (0);

	return (1);
}

static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

	dp->addr = vq_gtoh64(vq, dxp->indirect_paddr);
	dp->len = vq_gtoh32(vq, needed * sizeof(struct vring_desc));
	dp->flags = vq_gtoh16(vq, VRING_DESC_F_INDIRECT);

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = vq_htog16(vq, dp->next);
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}

static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) =
		    vq_gtoh16(vq, vq->vq_used_cons_idx + ndesc);
	} else {
		vq->vq_ring.avail->flags &=
		    vq_gtoh16(vq, ~VRING_AVAIL_F_NO_INTERRUPT);
	}

	mb();

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}

static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx, flags;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq_htog16(vq, vq->vq_ring.avail->idx);
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vq_htog16(vq, vring_avail_event(&vq->vq_ring));

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	flags = vq->vq_ring.used->flags;
	return ((flags & vq_gtoh16(vq, VRING_USED_F_NO_NOTIFY)) == 0);
}
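
/*
 * For reference, vring_need_event() (virtio_ring.h) reduces to
 * (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - prev_idx).
 * For example, with event_idx = 5, prev_idx = 4, and new_idx = 7:
 * 1 < 3, so the avail index crossed the host's requested event index
 * and a notification is required.
 */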

static void
vq_ring_notify_host(struct virtqueue *vq)
{

	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index,
	    vq->vq_notify_offset);
}

static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & vq_gtoh16(vq, VRING_DESC_F_INDIRECT)) == 0) {
		while (dp->flags & vq_gtoh16(vq, VRING_DESC_F_NEXT)) {
			uint16_t next_idx = vq_htog16(vq, dp->next);
			VQ_RING_ASSERT_VALID_IDX(vq, next_idx);
			dp = &vq->vq_ring.desc[next_idx];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, dxp->ndescs == 0,
	    "failed to free entire desc chain, remaining: %d", dxp->ndescs);

	/*
	 * We must append the existing free chain, if any, to the end of
	 * the newly freed chain. If the virtqueue was completely used,
	 * then head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq_gtoh16(vq, vq->vq_desc_head_idx);
	vq->vq_desc_head_idx = desc_idx;
}