/*-
 * Copyright (c) 2016 Akshay Jaggi <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * gntdev.c
 *
 * Interface to /dev/xen/gntdev.
 *
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/selinfo.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/rman.h>
#include <sys/tree.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/bitset.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/md_var.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/error.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <xen/gntdev.h>

MALLOC_DEFINE(M_GNTDEV, "gntdev", "Xen grant-table user-space device");

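/*
 * MAX_OFFSET_COUNT is the total number of PAGE_SIZE-sized slots that a
 * 64-bit file offset can address; the free-offset allocator below hands
 * out mmap offsets from this pool.
 */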
#define MAX_OFFSET_COUNT ((0xffffffffffffffffull >> PAGE_SHIFT) + 1)

static d_open_t gntdev_open;
static d_ioctl_t gntdev_ioctl;
static d_mmap_single_t gntdev_mmap_single;

static struct cdevsw gntdev_devsw = {
	.d_version = D_VERSION,
	.d_open = gntdev_open,
	.d_ioctl = gntdev_ioctl,
	.d_mmap_single = gntdev_mmap_single,
	.d_name = "gntdev",
};

static device_t gntdev_dev = NULL;

struct gntdev_gref;
struct gntdev_gmap;
STAILQ_HEAD(gref_list_head, gntdev_gref);
STAILQ_HEAD(gmap_list_head, gntdev_gmap);
RB_HEAD(gref_tree_head, gntdev_gref);
RB_HEAD(gmap_tree_head, gntdev_gmap);

struct file_offset_struct {
	RB_ENTRY(file_offset_struct)	next;
	uint64_t			file_offset;
	uint64_t			count;
};

static int
offset_cmp(struct file_offset_struct *f1, struct file_offset_struct *f2)
{
	/* Compare explicitly; truncating the 64-bit difference to int
	 * could return the wrong sign for widely separated offsets. */
	if (f1->file_offset < f2->file_offset)
		return (-1);
	return (f1->file_offset > f2->file_offset ? 1 : 0);
}

RB_HEAD(file_offset_head, file_offset_struct);
RB_GENERATE_STATIC(file_offset_head, file_offset_struct, next, offset_cmp);

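/*
 * Per-open-file state, attached with devfs_set_cdevpriv(): the grant
 * allocations, grant mappings, and free file-offset ranges belonging to
 * one open instance of the device.
 */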
struct per_user_data {
	struct mtx		user_data_lock;
	struct gref_tree_head	gref_tree;
	struct gmap_tree_head	gmap_tree;
	struct file_offset_head	file_offset;
};

/*
 * Find a free offset range in the device file, which the userspace
 * program will later use to mmap the corresponding pages.
 */
static int
get_file_offset(struct per_user_data *priv_user, uint32_t count,
    uint64_t *file_offset)
{
	struct file_offset_struct *offset, *offset_tmp;

	if (count == 0)
		return (EINVAL);
	mtx_lock(&priv_user->user_data_lock);
	RB_FOREACH_SAFE(offset, file_offset_head, &priv_user->file_offset,
	    offset_tmp) {
		if (offset->count >= count) {
			offset->count -= count;
			*file_offset = offset->file_offset + offset->count *
			    PAGE_SIZE;
			if (offset->count == 0) {
				RB_REMOVE(file_offset_head,
				    &priv_user->file_offset, offset);
				free(offset, M_GNTDEV);
			}
			mtx_unlock(&priv_user->user_data_lock);
			return (0);
		}
	}
	mtx_unlock(&priv_user->user_data_lock);

	return (ENOSPC);
}
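
/*
 * For example, with a single free run {file_offset 0, count
 * MAX_OFFSET_COUNT}, a request for 4 slots shrinks the run by 4 and
 * returns an offset pointing at the carved-off tail, so the remaining
 * run stays in place in the tree.
 */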
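/*
 * Return a range of file offsets to the free pool, merging it with
 * adjacent free ranges where possible.
 */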
static void
put_file_offset(struct per_user_data *priv_user, uint32_t count,
    uint64_t file_offset)
{
	struct file_offset_struct *offset, *offset_nxt, *offset_prv;

	offset = malloc(sizeof(*offset), M_GNTDEV, M_WAITOK | M_ZERO);
	offset->file_offset = file_offset;
	offset->count = count;

	mtx_lock(&priv_user->user_data_lock);
	RB_INSERT(file_offset_head, &priv_user->file_offset, offset);
	offset_nxt = RB_NEXT(file_offset_head, &priv_user->file_offset, offset);
	offset_prv = RB_PREV(file_offset_head, &priv_user->file_offset, offset);
	if (offset_nxt != NULL &&
	    offset_nxt->file_offset == offset->file_offset + offset->count *
	    PAGE_SIZE) {
		offset->count += offset_nxt->count;
		RB_REMOVE(file_offset_head, &priv_user->file_offset,
		    offset_nxt);
		free(offset_nxt, M_GNTDEV);
	}
	if (offset_prv != NULL &&
	    offset->file_offset == offset_prv->file_offset + offset_prv->count *
	    PAGE_SIZE) {
		offset_prv->count += offset->count;
		RB_REMOVE(file_offset_head, &priv_user->file_offset, offset);
		free(offset, M_GNTDEV);
	}
	mtx_unlock(&priv_user->user_data_lock);
}

static int	gntdev_gmap_pg_ctor(void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color);
static void	gntdev_gmap_pg_dtor(void *handle);
static int	gntdev_gmap_pg_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres);

static struct cdev_pager_ops gntdev_gmap_pg_ops = {
	.cdev_pg_fault = gntdev_gmap_pg_fault,
	.cdev_pg_ctor =	gntdev_gmap_pg_ctor,
	.cdev_pg_dtor =	gntdev_gmap_pg_dtor,
};

struct cleanup_data_struct {
	struct mtx to_kill_grefs_mtx;
	struct mtx to_kill_gmaps_mtx;
	struct gref_list_head to_kill_grefs;
	struct gmap_list_head to_kill_gmaps;
};

static struct cleanup_data_struct cleanup_data = {
	.to_kill_grefs = STAILQ_HEAD_INITIALIZER(cleanup_data.to_kill_grefs),
	.to_kill_gmaps = STAILQ_HEAD_INITIALIZER(cleanup_data.to_kill_gmaps),
};
MTX_SYSINIT(to_kill_grefs_mtx, &cleanup_data.to_kill_grefs_mtx,
    "gntdev to_kill_grefs mutex", MTX_DEF);
MTX_SYSINIT(to_kill_gmaps_mtx, &cleanup_data.to_kill_gmaps_mtx,
    "gntdev to_kill_gmaps mutex", MTX_DEF);

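/*
 * Destruction is deferred: grefs and gmaps to be deleted are queued on
 * the to_kill lists above and reaped by cleanup_function() from a
 * taskqueue context, since freeing may have to wait until all foreign
 * maps and local mmaps of the pages have gone away.
 */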
static void	cleanup_function(void *arg, __unused int pending);
static struct task cleanup_task = TASK_INITIALIZER(0, cleanup_function,
    &cleanup_data);

struct notify_data {
	uint64_t		index;
	uint32_t		action;
	uint32_t		event_channel_port;
	xen_intr_handle_t	notify_evtchn_handle;
};

static void	notify(struct notify_data *notify, vm_page_t page);

/*-------------------- Grant Allocation Methods  -----------------------------*/

struct gntdev_gref {
	union gref_next_union {
		STAILQ_ENTRY(gntdev_gref)		list;
		RB_ENTRY(gntdev_gref)			tree;
	}			gref_next;
	uint64_t		file_index;
	grant_ref_t		gref_id;
	vm_page_t		page;
	struct notify_data	*notify;
};

static int
gref_cmp(struct gntdev_gref *g1, struct gntdev_gref *g2)
{
	/* Explicit comparison avoids truncating a 64-bit difference. */
	if (g1->file_index < g2->file_index)
		return (-1);
	return (g1->file_index > g2->file_index ? 1 : 0);
}

RB_GENERATE_STATIC(gref_tree_head, gntdev_gref, gref_next.tree, gref_cmp);

/*
 * Traverse the device-wide list of to-be-deleted allocated grants, and
 * destroy those to which all accesses, both local mmaps and foreign
 * maps, have ended.
 */
static void
gref_list_dtor(struct cleanup_data_struct *cleanup_data)
{
	struct gref_list_head tmp_grefs;
	struct gntdev_gref *gref, *gref_tmp, *gref_previous;

	STAILQ_INIT(&tmp_grefs);
	mtx_lock(&cleanup_data->to_kill_grefs_mtx);
	STAILQ_SWAP(&cleanup_data->to_kill_grefs, &tmp_grefs, gntdev_gref);
	mtx_unlock(&cleanup_data->to_kill_grefs_mtx);

	gref_previous = NULL;
	STAILQ_FOREACH_SAFE(gref, &tmp_grefs, gref_next.list, gref_tmp) {
		if (gref->page && gref->page->object == NULL) {
			if (gref->notify) {
				notify(gref->notify, gref->page);
			}
			if (gref->gref_id != GRANT_REF_INVALID) {
				if (gnttab_query_foreign_access(gref->gref_id))
					continue;
				if (gnttab_end_foreign_access_ref(gref->gref_id)
				    == 0)
					continue;
				gnttab_free_grant_reference(gref->gref_id);
			}
			vm_page_unwire_noq(gref->page);
			vm_page_free(gref->page);
			gref->page = NULL;
		}
		if (gref->page == NULL) {
			if (gref_previous == NULL)
				STAILQ_REMOVE_HEAD(&tmp_grefs, gref_next.list);
			else
				STAILQ_REMOVE_AFTER(&tmp_grefs, gref_previous,
				    gref_next.list);
			if (gref->notify)
				free(gref->notify, M_GNTDEV);
			free(gref, M_GNTDEV);
		} else
			gref_previous = gref;
	}

	if (!STAILQ_EMPTY(&tmp_grefs)) {
		mtx_lock(&cleanup_data->to_kill_grefs_mtx);
		STAILQ_CONCAT(&cleanup_data->to_kill_grefs, &tmp_grefs);
		mtx_unlock(&cleanup_data->to_kill_grefs_mtx);
	}
}

/*
 * Find "count" contiguous allocated grants for a given userspace
 * program, starting at the file offset "index".
 */
static struct gntdev_gref*
gntdev_find_grefs(struct per_user_data *priv_user,
	uint64_t index, uint32_t count)
{
	struct gntdev_gref find_gref, *gref, *gref_start = NULL;

	find_gref.file_index = index;

	mtx_lock(&priv_user->user_data_lock);
	gref_start = RB_FIND(gref_tree_head, &priv_user->gref_tree, &find_gref);
	for (gref = gref_start; gref != NULL && count > 0; gref =
	    RB_NEXT(gref_tree_head, &priv_user->gref_tree, gref)) {
		if (index != gref->file_index)
			break;
		index += PAGE_SIZE;
		count--;
	}
	mtx_unlock(&priv_user->user_data_lock);

	if (count)
		return (NULL);
	return (gref_start);
}

/*
 * IOCTL_GNTDEV_ALLOC_GREF
 * Allocate the required number of wired pages for the request, grant
 * foreign access to the physical frames of these pages, and record the
 * allocation in the per-user private data, so that the pages can be
 * mmapped by the userspace program.
 */
static int
gntdev_alloc_gref(struct ioctl_gntdev_alloc_gref *arg)
{
	uint32_t i;
	int error, readonly;
	uint64_t file_offset;
	struct gntdev_gref *grefs;
	struct per_user_data *priv_user;

	readonly = !(arg->flags & GNTDEV_ALLOC_FLAG_WRITABLE);

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	/* Cleanup grefs and free pages. */
	taskqueue_enqueue(taskqueue_thread, &cleanup_task);

	/* Get file offset for this request. */
	error = get_file_offset(priv_user, arg->count, &file_offset);
	if (error != 0)
		return (error);

	/*
	 * Allocate grefs.  M_ZERO so that entries left uninitialized by a
	 * mid-loop failure below are still safe to queue for cleanup.
	 */
	grefs = malloc(sizeof(*grefs) * arg->count, M_GNTDEV,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < arg->count; i++) {
		grefs[i].file_index = file_offset + i * PAGE_SIZE;
		grefs[i].gref_id = GRANT_REF_INVALID;
		grefs[i].notify = NULL;
		grefs[i].page = vm_page_alloc_noobj(VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);
		if (grefs[i].page == NULL) {
			log(LOG_ERR, "Page allocation failed.");
			error = ENOMEM;
			break;
		}
		grefs[i].page->valid = VM_PAGE_BITS_ALL;

		error = gnttab_grant_foreign_access(arg->domid,
			(VM_PAGE_TO_PHYS(grefs[i].page) >> PAGE_SHIFT),
			readonly, &grefs[i].gref_id);
		if (error != 0) {
			log(LOG_ERR, "Grant Table Hypercall failed.");
			break;
		}
	}

	/* Copy the output values. */
	arg->index = file_offset;
	for (i = 0; error == 0 && i < arg->count; i++) {
		if (suword32(&arg->gref_ids[i], grefs[i].gref_id) != 0)
			error = EFAULT;
	}

	if (error != 0) {
		/*
		 * If the target domain maps the gref (by guessing the
		 * gref-id), then we can't clean it up yet and we have to
		 * leave the page in place so as to not leak our memory to
		 * that domain.  Add it to a global list to be cleaned up
		 * later.
		 */
		mtx_lock(&cleanup_data.to_kill_grefs_mtx);
		for (i = 0; i < arg->count; i++)
			STAILQ_INSERT_TAIL(&cleanup_data.to_kill_grefs,
			    &grefs[i], gref_next.list);
		mtx_unlock(&cleanup_data.to_kill_grefs_mtx);

		taskqueue_enqueue(taskqueue_thread, &cleanup_task);

		return (error);
	}

	/* Modify the per user private data. */
	mtx_lock(&priv_user->user_data_lock);
	for (i = 0; i < arg->count; i++)
		RB_INSERT(gref_tree_head, &priv_user->gref_tree, &grefs[i]);
	mtx_unlock(&priv_user->user_data_lock);

	return (error);
}
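
/*
 * Illustrative userspace usage (not part of the driver), error handling
 * omitted; "arg" must be heap-allocated with room for gref_ids[count]:
 *
 *	int fd = open("/dev/xen/gntdev", O_RDWR);
 *	arg->domid = peer_domid;
 *	arg->count = 1;
 *	arg->flags = GNTDEV_ALLOC_FLAG_WRITABLE;
 *	ioctl(fd, IOCTL_GNTDEV_ALLOC_GREF, arg);
 *	shared = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, arg->index);
 *
 * arg->gref_ids[0] is then handed to the peer domain out of band.
 */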

/*
 * IOCTL_GNTDEV_DEALLOC_GREF
 * Remove grant allocation information from the per-user private data, so
 * that the grants can no longer be mmapped by the userspace program, and
 * add them to the global device-list of to-be-deleted grants.
 */
static int
gntdev_dealloc_gref(struct ioctl_gntdev_dealloc_gref *arg)
{
	int error;
	uint32_t count;
	struct gntdev_gref *gref, *gref_tmp;
	struct per_user_data *priv_user;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	gref = gntdev_find_grefs(priv_user, arg->index, arg->count);
	if (gref == NULL) {
		log(LOG_ERR, "Can't find requested grant-refs.");
		return (EINVAL);
	}

	/* Remove the grefs from user private data. */
	count = arg->count;
	mtx_lock(&priv_user->user_data_lock);
	mtx_lock(&cleanup_data.to_kill_grefs_mtx);
	for (; gref != NULL && count > 0; gref = gref_tmp) {
		gref_tmp = RB_NEXT(gref_tree_head, &priv_user->gref_tree, gref);
		RB_REMOVE(gref_tree_head, &priv_user->gref_tree, gref);
		STAILQ_INSERT_TAIL(&cleanup_data.to_kill_grefs, gref,
		    gref_next.list);
		count--;
	}
	mtx_unlock(&cleanup_data.to_kill_grefs_mtx);
	mtx_unlock(&priv_user->user_data_lock);

	taskqueue_enqueue(taskqueue_thread, &cleanup_task);
	put_file_offset(priv_user, arg->count, arg->index);

	return (0);
}

/*-------------------- Grant Mapping Methods  --------------------------------*/

struct gntdev_gmap_map {
	vm_object_t	mem;
	struct resource	*pseudo_phys_res;
	int		pseudo_phys_res_id;
	vm_paddr_t	phys_base_addr;
};

struct gntdev_gmap {
	union gmap_next_union {
		STAILQ_ENTRY(gntdev_gmap)		list;
		RB_ENTRY(gntdev_gmap)			tree;
	}				gmap_next;
	uint64_t			file_index;
	uint32_t			count;
	struct gnttab_map_grant_ref	*grant_map_ops;
	struct gntdev_gmap_map		*map;
	struct notify_data		*notify;
};

static int
gmap_cmp(struct gntdev_gmap *g1, struct gntdev_gmap *g2)
{
	/* Explicit comparison avoids truncating a 64-bit difference. */
	if (g1->file_index < g2->file_index)
		return (-1);
	return (g1->file_index > g2->file_index ? 1 : 0);
}

RB_GENERATE_STATIC(gmap_tree_head, gntdev_gmap, gmap_next.tree, gmap_cmp);

/*
 * Traverse the device-wide list of to-be-deleted grant mappings, and if
 * a region is no longer mmapped by anyone, free the memory used to
 * store information about the mapping.
 */
static void
gmap_list_dtor(struct cleanup_data_struct *cleanup_data)
{
	struct gmap_list_head tmp_gmaps;
	struct gntdev_gmap *gmap, *gmap_tmp, *gmap_previous;

	STAILQ_INIT(&tmp_gmaps);
	mtx_lock(&cleanup_data->to_kill_gmaps_mtx);
	STAILQ_SWAP(&cleanup_data->to_kill_gmaps, &tmp_gmaps, gntdev_gmap);
	mtx_unlock(&cleanup_data->to_kill_gmaps_mtx);

	gmap_previous = NULL;
	STAILQ_FOREACH_SAFE(gmap, &tmp_gmaps, gmap_next.list, gmap_tmp) {
		if (gmap->map == NULL) {
			if (gmap_previous == NULL)
				STAILQ_REMOVE_HEAD(&tmp_gmaps, gmap_next.list);
			else
				STAILQ_REMOVE_AFTER(&tmp_gmaps, gmap_previous,
				    gmap_next.list);

			if (gmap->notify)
				free(gmap->notify, M_GNTDEV);
			free(gmap->grant_map_ops, M_GNTDEV);
			free(gmap, M_GNTDEV);
		} else
			gmap_previous = gmap;
	}

	if (!STAILQ_EMPTY(&tmp_gmaps)) {
		mtx_lock(&cleanup_data->to_kill_gmaps_mtx);
		STAILQ_CONCAT(&cleanup_data->to_kill_gmaps, &tmp_gmaps);
		mtx_unlock(&cleanup_data->to_kill_gmaps_mtx);
	}
}

/*
 * Find mapped grants for a given userspace program, by file-offset (index)
 * and count, as supplied during the map-ioctl.
 */
static struct gntdev_gmap*
gntdev_find_gmap(struct per_user_data *priv_user,
	uint64_t index, uint32_t count)
{
	struct gntdev_gmap find_gmap, *gmap;

	find_gmap.file_index = index;

	mtx_lock(&priv_user->user_data_lock);
	gmap = RB_FIND(gmap_tree_head, &priv_user->gmap_tree, &find_gmap);
	mtx_unlock(&priv_user->user_data_lock);

	if (gmap != NULL && gmap->count == count)
		return (gmap);
	return (NULL);
}

/*
 * Remove the pages from the mgtdevice pager, perform the unmap
 * hypercall, and free the xenmem resource.  This function is called
 * during the destruction of the mgtdevice pager, which happens when all
 * mmaps to it have been removed and the unmap-ioctl has been performed.
 */
static int
notify_unmap_cleanup(struct gntdev_gmap *gmap)
{
	uint32_t i;
	int error, count;
	vm_page_t m;
	struct gnttab_unmap_grant_ref *unmap_ops;

	unmap_ops = malloc(sizeof(struct gnttab_unmap_grant_ref) * gmap->count,
			M_GNTDEV, M_WAITOK);

	/* Enumerate freeable maps. */
	count = 0;
	for (i = 0; i < gmap->count; i++) {
		if (gmap->grant_map_ops[i].handle != -1) {
			unmap_ops[count].handle = gmap->grant_map_ops[i].handle;
			unmap_ops[count].host_addr =
				gmap->grant_map_ops[i].host_addr;
			unmap_ops[count].dev_bus_addr = 0;
			count++;
		}
	}

	/* Perform notification. */
	if (count > 0 && gmap->notify) {
		vm_page_t page;
		uint64_t page_offset;

		page_offset = gmap->notify->index - gmap->file_index;
		page = PHYS_TO_VM_PAGE(gmap->map->phys_base_addr + page_offset);
		notify(gmap->notify, page);
	}

	/* Free the pages. */
	VM_OBJECT_WLOCK(gmap->map->mem);
retry:
	for (i = 0; i < gmap->count; i++) {
		m = vm_page_lookup(gmap->map->mem, i);
		if (m == NULL)
			continue;
		if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
			goto retry;
		cdev_pager_free_page(gmap->map->mem, m);
	}
	VM_OBJECT_WUNLOCK(gmap->map->mem);

	/* Perform unmap hypercall. */
	error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
	    unmap_ops, count);

	for (i = 0; i < gmap->count; i++) {
		gmap->grant_map_ops[i].handle = -1;
		gmap->grant_map_ops[i].host_addr = 0;
	}

	if (gmap->map) {
		error = xenmem_free(gntdev_dev, gmap->map->pseudo_phys_res_id,
		    gmap->map->pseudo_phys_res);
		KASSERT(error == 0,
		    ("Unable to release memory resource: %d", error));

		free(gmap->map, M_GNTDEV);
		gmap->map = NULL;
	}

	free(unmap_ops, M_GNTDEV);

	return (error);
}

/*
 * IOCTL_GNTDEV_MAP_GRANT_REF
 * Populate the structures for mapping the grant reference in the per-user
 * private data.  The actual resource allocation and map hypercall are
 * performed during mmap.
 */
static int
gntdev_map_grant_ref(struct ioctl_gntdev_map_grant_ref *arg)
{
	uint32_t i;
	int error;
	struct gntdev_gmap *gmap;
	struct per_user_data *priv_user;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	gmap = malloc(sizeof(*gmap), M_GNTDEV, M_WAITOK | M_ZERO);
	gmap->count = arg->count;
	gmap->grant_map_ops =
	    malloc(sizeof(struct gnttab_map_grant_ref) * arg->count,
	        M_GNTDEV, M_WAITOK | M_ZERO);

	for (i = 0; i < arg->count; i++) {
		struct ioctl_gntdev_grant_ref ref;

		error = copyin(&arg->refs[i], &ref, sizeof(ref));
		if (error != 0) {
			free(gmap->grant_map_ops, M_GNTDEV);
			free(gmap, M_GNTDEV);
			return (error);
		}
		gmap->grant_map_ops[i].dom = ref.domid;
		gmap->grant_map_ops[i].ref = ref.ref;
		gmap->grant_map_ops[i].handle = -1;
		gmap->grant_map_ops[i].flags = GNTMAP_host_map;
	}

	error = get_file_offset(priv_user, arg->count, &gmap->file_index);
	if (error != 0) {
		free(gmap->grant_map_ops, M_GNTDEV);
		free(gmap, M_GNTDEV);
		return (error);
	}

	mtx_lock(&priv_user->user_data_lock);
	RB_INSERT(gmap_tree_head, &priv_user->gmap_tree, gmap);
	mtx_unlock(&priv_user->user_data_lock);

	arg->index = gmap->file_index;

	return (error);
}
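
/*
 * Illustrative userspace usage (not part of the driver), error handling
 * omitted; "arg" must be heap-allocated with room for refs[count]:
 *
 *	arg->count = 1;
 *	arg->refs[0].domid = peer_domid;
 *	arg->refs[0].ref = gref_id_from_peer;
 *	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, arg);
 *	mapped = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, arg->index);
 */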

/*
 * IOCTL_GNTDEV_UNMAP_GRANT_REF
 * Remove the map information from the per user private data and add it
 * to the global device-list of mappings to be deleted. A reference to
 * the mgtdevice pager is also decreased, the reason for which is
 * explained in mmap_gmap().
 */
static int
gntdev_unmap_grant_ref(struct ioctl_gntdev_unmap_grant_ref *arg)
{
	int error;
	struct gntdev_gmap *gmap;
	struct per_user_data *priv_user;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	gmap = gntdev_find_gmap(priv_user, arg->index, arg->count);
	if (gmap == NULL) {
		log(LOG_ERR, "Can't find requested grant-map.");
		return (EINVAL);
	}

	mtx_lock(&priv_user->user_data_lock);
	mtx_lock(&cleanup_data.to_kill_gmaps_mtx);
	RB_REMOVE(gmap_tree_head, &priv_user->gmap_tree, gmap);
	STAILQ_INSERT_TAIL(&cleanup_data.to_kill_gmaps, gmap, gmap_next.list);
	mtx_unlock(&cleanup_data.to_kill_gmaps_mtx);
	mtx_unlock(&priv_user->user_data_lock);

	if (gmap->map)
		vm_object_deallocate(gmap->map->mem);

	taskqueue_enqueue(taskqueue_thread, &cleanup_task);
	put_file_offset(priv_user, arg->count, arg->index);

	return (0);
}

/*
 * IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR
 * Get the file offset and count for a given mapping, from the virtual
 * address at which the mapping is mmapped.
 * Note that this works only for grants mapped by this domain, not for
 * allocated grants, for which a count makes little sense. Also, since
 * this function is present in the Linux gntdev device but not in the
 * Linux gntalloc one, most userspace code only uses it for mapped
 * grants.
 */
static int
gntdev_get_offset_for_vaddr(struct ioctl_gntdev_get_offset_for_vaddr *arg,
	struct thread *td)
{
	int error;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t mem;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	struct gntdev_gmap *gmap;
	int rc;

	map = &td->td_proc->p_vmspace->vm_map;
	error = vm_map_lookup(&map, arg->vaddr, VM_PROT_NONE, &entry,
		    &mem, &pindex, &prot, &wired);
	if (error != KERN_SUCCESS)
		return (EINVAL);

	if ((mem->type != OBJT_MGTDEVICE) ||
	    (mem->un_pager.devp.ops != &gntdev_gmap_pg_ops)) {
		rc = EINVAL;
		goto out;
	}

	gmap = mem->handle;
	if (gmap == NULL ||
	    (entry->end - entry->start) != (gmap->count * PAGE_SIZE)) {
		rc = EINVAL;
		goto out;
	}

	arg->count = gmap->count;
	arg->offset = gmap->file_index;
	rc = 0;

out:
	vm_map_lookup_done(map, entry);
	return (rc);
}

/*-------------------- Grant Mapping Pager  ----------------------------------*/

static int
gntdev_gmap_pg_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	return (0);
}

static void
gntdev_gmap_pg_dtor(void *handle)
{

	notify_unmap_cleanup((struct gntdev_gmap *)handle);
}

static int
gntdev_gmap_pg_fault(vm_object_t object, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct gntdev_gmap *gmap = object->handle;
	vm_pindex_t pidx, ridx;
	vm_page_t page;
	vm_ooffset_t relative_offset;

	if (gmap->map == NULL)
		return (VM_PAGER_FAIL);

	relative_offset = offset - gmap->file_index;

	pidx = OFF_TO_IDX(offset);
	ridx = OFF_TO_IDX(relative_offset);
	if (ridx >= gmap->count ||
	    gmap->grant_map_ops[ridx].status != GNTST_okay)
		return (VM_PAGER_FAIL);

	page = PHYS_TO_VM_PAGE(gmap->map->phys_base_addr + relative_offset);
	if (page == NULL)
		return (VM_PAGER_FAIL);

	KASSERT((page->flags & PG_FICTITIOUS) != 0,
	    ("not fictitious %p", page));
	KASSERT(vm_page_wired(page), ("page %p is not wired", page));
	KASSERT(!vm_page_busied(page), ("page %p is busy", page));

	vm_page_busy_acquire(page, 0);
	vm_page_valid(page);
	if (*mres != NULL)
		vm_page_replace(page, object, pidx, *mres);
	else
		vm_page_insert(page, object, pidx);
	*mres = page;
	return (VM_PAGER_OK);
}

/*------------------ Grant Table Methods  ------------------------------------*/

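/*
 * Perform the unmap notification requested for a grant or mapping: the
 * byte at the registered index is cleared before any event is sent, so
 * that a peer woken by the event observes the cleared byte.
 */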
static void
notify(struct notify_data *notify, vm_page_t page)
{
	if (notify->action & UNMAP_NOTIFY_CLEAR_BYTE) {
		uint8_t *mem;
		uint64_t offset;

		offset = notify->index & PAGE_MASK;
		mem = (uint8_t *)pmap_quick_enter_page(page);
		mem[offset] = 0;
		pmap_quick_remove_page((vm_offset_t)mem);
	}
	if (notify->action & UNMAP_NOTIFY_SEND_EVENT) {
		xen_intr_signal(notify->notify_evtchn_handle);
		xen_intr_unbind(&notify->notify_evtchn_handle);
	}
	notify->action = 0;
}

/*
 * Helper to copy new arguments from the notify ioctl into
 * the existing notify data.
 */
static int
copy_notify_helper(struct notify_data *destination,
    struct ioctl_gntdev_unmap_notify *source)
{
	xen_intr_handle_t handlep = NULL;

	/*
	 * "Get" before "Put"ting previous reference, as we might be
	 * holding the last reference to the event channel port.
	 */
	if (source->action & UNMAP_NOTIFY_SEND_EVENT)
		if (xen_intr_get_evtchn_from_port(source->event_channel_port,
		    &handlep) != 0)
			return (EINVAL);

	if (destination->action & UNMAP_NOTIFY_SEND_EVENT)
		xen_intr_unbind(&destination->notify_evtchn_handle);

	destination->action = source->action;
	destination->event_channel_port = source->event_channel_port;
	destination->index = source->index;
	destination->notify_evtchn_handle = handlep;

	return (0);
}

/*
 * IOCTL_GNTDEV_SET_UNMAP_NOTIFY
 * Set an unmap notification on the appropriate grant; the notification
 * is sent when the grant has been completely munmapped by this domain
 * and is ready for destruction.
 */
static int
gntdev_set_unmap_notify(struct ioctl_gntdev_unmap_notify *arg)
{
	int error;
	uint64_t index;
	struct per_user_data *priv_user;
	struct gntdev_gref *gref = NULL;
	struct gntdev_gmap *gmap;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	if (arg->action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return (EINVAL);

	index = arg->index & ~PAGE_MASK;
	gref = gntdev_find_grefs(priv_user, index, 1);
	if (gref) {
		/* Allocate the full notify_data, not just the ioctl arg. */
		if (gref->notify == NULL)
			gref->notify = malloc(sizeof(*gref->notify), M_GNTDEV,
			    M_WAITOK | M_ZERO);
		return (copy_notify_helper(gref->notify, arg));
	}

	error = EINVAL;
	mtx_lock(&priv_user->user_data_lock);
	RB_FOREACH(gmap, gmap_tree_head, &priv_user->gmap_tree) {
		if (arg->index >= gmap->file_index &&
		    arg->index < gmap->file_index + gmap->count * PAGE_SIZE) {
			if (gmap->notify == NULL)
				gmap->notify = malloc(sizeof(*gmap->notify),
				    M_GNTDEV, M_WAITOK | M_ZERO);
			error = copy_notify_helper(gmap->notify, arg);
			break;
		}
	}
	mtx_unlock(&priv_user->user_data_lock);

	return (error);
}

/*------------------ Gntdev Char Device Methods  -----------------------------*/

static void
cleanup_function(void *arg, __unused int pending)
{

	gref_list_dtor((struct cleanup_data_struct *) arg);
	gmap_list_dtor((struct cleanup_data_struct *) arg);
}

static void
per_user_data_dtor(void *arg)
{
	struct gntdev_gref *gref, *gref_tmp;
	struct gntdev_gmap *gmap, *gmap_tmp;
	struct file_offset_struct *offset, *offset_tmp;
	struct per_user_data *priv_user;

	priv_user = (struct per_user_data *) arg;

	mtx_lock(&priv_user->user_data_lock);

	mtx_lock(&cleanup_data.to_kill_grefs_mtx);
	RB_FOREACH_SAFE(gref, gref_tree_head, &priv_user->gref_tree, gref_tmp) {
		RB_REMOVE(gref_tree_head, &priv_user->gref_tree, gref);
		STAILQ_INSERT_TAIL(&cleanup_data.to_kill_grefs, gref,
		    gref_next.list);
	}
	mtx_unlock(&cleanup_data.to_kill_grefs_mtx);

	mtx_lock(&cleanup_data.to_kill_gmaps_mtx);
	RB_FOREACH_SAFE(gmap, gmap_tree_head, &priv_user->gmap_tree, gmap_tmp) {
		RB_REMOVE(gmap_tree_head, &priv_user->gmap_tree, gmap);
		STAILQ_INSERT_TAIL(&cleanup_data.to_kill_gmaps, gmap,
		    gmap_next.list);
		if (gmap->map)
			vm_object_deallocate(gmap->map->mem);
	}
	mtx_unlock(&cleanup_data.to_kill_gmaps_mtx);

	RB_FOREACH_SAFE(offset, file_offset_head, &priv_user->file_offset,
	    offset_tmp) {
		RB_REMOVE(file_offset_head, &priv_user->file_offset, offset);
		free(offset, M_GNTDEV);
	}

	mtx_unlock(&priv_user->user_data_lock);

	taskqueue_enqueue(taskqueue_thread, &cleanup_task);

	mtx_destroy(&priv_user->user_data_lock);
	free(priv_user, M_GNTDEV);
}

static int
gntdev_open(struct cdev *dev, int flag, int otyp, struct thread *td)
{
	int error;
	struct per_user_data *priv_user;
	struct file_offset_struct *offset;

	priv_user = malloc(sizeof(*priv_user), M_GNTDEV, M_WAITOK | M_ZERO);
	RB_INIT(&priv_user->gref_tree);
	RB_INIT(&priv_user->gmap_tree);
	RB_INIT(&priv_user->file_offset);
	offset = malloc(sizeof(*offset), M_GNTDEV, M_WAITOK | M_ZERO);
	offset->file_offset = 0;
	offset->count = MAX_OFFSET_COUNT;
	RB_INSERT(file_offset_head, &priv_user->file_offset, offset);
	mtx_init(&priv_user->user_data_lock,
	    "per user data mutex", NULL, MTX_DEF);

	error = devfs_set_cdevpriv(priv_user, per_user_data_dtor);
	if (error != 0)
		per_user_data_dtor(priv_user);

	return (error);
}

static int
gntdev_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
	int fflag, struct thread *td)
{
	int error;

	switch (cmd) {
	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		error = gntdev_set_unmap_notify(
		    (struct ioctl_gntdev_unmap_notify*) data);
		break;
	case IOCTL_GNTDEV_ALLOC_GREF:
		error = gntdev_alloc_gref(
		    (struct ioctl_gntdev_alloc_gref*) data);
		break;
	case IOCTL_GNTDEV_DEALLOC_GREF:
		error = gntdev_dealloc_gref(
		    (struct ioctl_gntdev_dealloc_gref*) data);
		break;
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		error = gntdev_map_grant_ref(
		    (struct ioctl_gntdev_map_grant_ref*) data);
		break;
	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		error = gntdev_unmap_grant_ref(
		    (struct ioctl_gntdev_unmap_grant_ref*) data);
		break;
	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		error = gntdev_get_offset_for_vaddr(
		    (struct ioctl_gntdev_get_offset_for_vaddr*) data, td);
		break;
	default:
		error = ENOSYS;
		break;
	}

	return (error);
}

/*
 * MMAP an allocated grant into user memory.
 * Note that the grants must not already be mmapped; otherwise this
 * function will fail.
 */
static int
mmap_gref(struct per_user_data *priv_user, struct gntdev_gref *gref_start,
    uint32_t count, vm_size_t size, struct vm_object **object)
{
	vm_object_t mem_obj;
	struct gntdev_gref *gref;

	mem_obj = vm_pager_allocate(OBJT_PHYS, NULL, size, VM_PROT_ALL, 0,
	    curthread->td_ucred);
	if (mem_obj == NULL)
		return (ENOMEM);

	mtx_lock(&priv_user->user_data_lock);
	VM_OBJECT_WLOCK(mem_obj);
	for (gref = gref_start; gref != NULL && count > 0; gref =
	    RB_NEXT(gref_tree_head, &priv_user->gref_tree, gref)) {
		if (gref->page->object)
			break;

		vm_page_insert(gref->page, mem_obj,
		    OFF_TO_IDX(gref->file_index));

		count--;
	}
	VM_OBJECT_WUNLOCK(mem_obj);
	mtx_unlock(&priv_user->user_data_lock);

	if (count) {
		vm_object_deallocate(mem_obj);
		return (EINVAL);
	}

	*object = mem_obj;

	return (0);
}

/*
 * MMAP a mapped grant into user memory.
 */
static int
mmap_gmap(struct per_user_data *priv_user, struct gntdev_gmap *gmap_start,
    vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, int nprot)
{
	uint32_t i;
	int error;

	/*
	 * The grant map hypercall might already be done.
	 * If that is the case, take a reference to the
	 * vm object and return the already allocated object.
	 */
	if (gmap_start->map) {
		vm_object_reference(gmap_start->map->mem);
		*object = gmap_start->map->mem;
		return (0);
	}

	gmap_start->map = malloc(sizeof(*(gmap_start->map)), M_GNTDEV,
	    M_WAITOK | M_ZERO);

	/* Allocate the xen pseudo physical memory resource. */
	gmap_start->map->pseudo_phys_res_id = 0;
	gmap_start->map->pseudo_phys_res = xenmem_alloc(gntdev_dev,
	    &gmap_start->map->pseudo_phys_res_id, size);
	if (gmap_start->map->pseudo_phys_res == NULL) {
		free(gmap_start->map, M_GNTDEV);
		gmap_start->map = NULL;
		return (ENOMEM);
	}
	gmap_start->map->phys_base_addr =
	    rman_get_start(gmap_start->map->pseudo_phys_res);

	/* Allocate the mgtdevice pager. */
	gmap_start->map->mem = cdev_pager_allocate(gmap_start, OBJT_MGTDEVICE,
	    &gntdev_gmap_pg_ops, size, nprot, *offset, NULL);
	if (gmap_start->map->mem == NULL) {
		xenmem_free(gntdev_dev, gmap_start->map->pseudo_phys_res_id,
		    gmap_start->map->pseudo_phys_res);
		free(gmap_start->map, M_GNTDEV);
		gmap_start->map = NULL;
		return (ENOMEM);
	}

	for (i = 0; i < gmap_start->count; i++) {
		gmap_start->grant_map_ops[i].host_addr =
		    gmap_start->map->phys_base_addr + i * PAGE_SIZE;

		if ((nprot & PROT_WRITE) == 0)
			gmap_start->grant_map_ops[i].flags |= GNTMAP_readonly;
	}
	/* Make the MAP hypercall. */
	error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
	    gmap_start->grant_map_ops, gmap_start->count);
	if (error != 0) {
		/*
		 * Deallocate pager.
		 * Pager deallocation will automatically take care of
		 * xenmem deallocation, etc.
		 */
		vm_object_deallocate(gmap_start->map->mem);

		return (EINVAL);
	}

	/* Retry EAGAIN maps. */
	for (i = 0; i < gmap_start->count; i++) {
		int delay = 1;
		while (delay < 256 &&
		    gmap_start->grant_map_ops[i].status == GNTST_eagain) {
			HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
			    &gmap_start->grant_map_ops[i], 1);
			/*
			 * Sleep in sbintime units; plain pause() expects
			 * a tick count, not an sbintime_t.
			 */
			pause_sbt("gntmap", delay * SBT_1MS, 0, 0);
			delay++;
		}
		if (gmap_start->grant_map_ops[i].status == GNTST_eagain)
			gmap_start->grant_map_ops[i].status = GNTST_bad_page;

		if (gmap_start->grant_map_ops[i].status != GNTST_okay) {
			/*
			 * Deallocate pager.
			 * Pager deallocation will automatically take care of
			 * xenmem deallocation, notification, unmap hypercall,
			 * etc.
			 */
			vm_object_deallocate(gmap_start->map->mem);

			return (EINVAL);
		}
	}

	/*
	 * Add a reference to the vm object. We do not want
	 * the vm object to be deleted when all the mmaps are
	 * unmapped, because it may be re-mmapped. Instead,
	 * we want the object to be deleted only once all mmaps
	 * are gone and the unmap-ioctl has also been processed.
	 */
	vm_object_reference(gmap_start->map->mem);

	*object = gmap_start->map->mem;

	return (0);
}
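/*
 * Handler for mmap(2) on the device: the file offset selects either a
 * range of granted pages created by IOCTL_GNTDEV_ALLOC_GREF or a grant
 * mapping created by IOCTL_GNTDEV_MAP_GRANT_REF.
 */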
static int
gntdev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **object, int nprot)
{
	int error;
	uint32_t count;
	struct gntdev_gref *gref_start;
	struct gntdev_gmap *gmap_start;
	struct per_user_data *priv_user;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	count = OFF_TO_IDX(size);

	gref_start = gntdev_find_grefs(priv_user, *offset, count);
	if (gref_start) {
		error = mmap_gref(priv_user, gref_start, count, size, object);
		return (error);
	}

	gmap_start = gntdev_find_gmap(priv_user, *offset, count);
	if (gmap_start) {
		error = mmap_gmap(priv_user, gmap_start, offset, size, object,
		    nprot);
		return (error);
	}

	return (EINVAL);
}

/*------------------ Private Device Attachment Functions  --------------------*/
static void
gntdev_identify(driver_t *driver, device_t parent)
{

	KASSERT((xen_domain()),
	    ("Trying to attach gntdev device on non Xen domain"));

	if (BUS_ADD_CHILD(parent, 0, "gntdev", 0) == NULL)
		panic("unable to attach gntdev user-space device");
}

static int
gntdev_probe(device_t dev)
{

	gntdev_dev = dev;
	device_set_desc(dev, "Xen grant-table user-space device");
	return (BUS_PROBE_NOWILDCARD);
}

static int
gntdev_attach(device_t dev)
{

	make_dev_credf(MAKEDEV_ETERNAL, &gntdev_devsw, 0, NULL, UID_ROOT,
	    GID_WHEEL, 0600, "xen/gntdev");
	return (0);
}

/*-------------------- Private Device Attachment Data  -----------------------*/
static device_method_t gntdev_methods[] = {
	DEVMETHOD(device_identify, gntdev_identify),
	DEVMETHOD(device_probe, gntdev_probe),
	DEVMETHOD(device_attach, gntdev_attach),
	DEVMETHOD_END
};

static driver_t gntdev_driver = {
	"gntdev",
	gntdev_methods,
	0,
};

DRIVER_MODULE(gntdev, xenpv, gntdev_driver, 0, 0);
MODULE_DEPEND(gntdev, xenpv, 1, 1, 1);
1281