/* xref: /freebsd-src/sys/compat/linuxkpi/common/src/linux_compat.c (revision ffa548ae3e01619b2dd07d722eb8a00a711928bd) */
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/sysctl.h>
38 #include <sys/proc.h>
39 #include <sys/sglist.h>
40 #include <sys/sleepqueue.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/bus.h>
44 #include <sys/fcntl.h>
45 #include <sys/file.h>
46 #include <sys/filio.h>
47 #include <sys/rwlock.h>
48 
49 #include <vm/vm.h>
50 #include <vm/pmap.h>
51 
52 #include <machine/stdarg.h>
53 #include <machine/pmap.h>
54 
55 #include <linux/kobject.h>
56 #include <linux/device.h>
57 #include <linux/slab.h>
58 #include <linux/module.h>
59 #include <linux/cdev.h>
60 #include <linux/file.h>
61 #include <linux/sysfs.h>
62 #include <linux/mm.h>
63 #include <linux/io.h>
64 #include <linux/vmalloc.h>
65 #include <linux/netdevice.h>
66 #include <linux/timer.h>
67 #include <linux/workqueue.h>
68 
69 #include <vm/vm_pager.h>
70 
MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

/* Root kobject of the sysctl-backed "class" hierarchy. */
struct kobject class_root;
/* Root device; its kobject backs the "device" sysctl hierarchy. */
struct device linux_rootdev;
/* Class named "misc", registered in linux_compat_init(). */
struct class miscclass;
/* Lists of registered PCI drivers and devices; guarded by pci_lock. */
struct list_head pci_drivers;
struct list_head pci_devices;
struct net init_net;
spinlock_t pci_lock;

/* (2**n - 1) mask with 2**n >= hz; computed by linux_timer_init(). */
unsigned long linux_timer_hz_mask;
89 
/*
 * Placeholder comparison routine required by RB_GENERATE() below.
 * It must never actually be invoked; presumably the LinuxKPI rbtree
 * wrappers always compare nodes themselves — panic if called.
 */
int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
97 
98 int
99 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
100 {
101 	va_list tmp_va;
102 	int len;
103 	char *old;
104 	char *name;
105 	char dummy;
106 
107 	old = kobj->name;
108 
109 	if (old && fmt == NULL)
110 		return (0);
111 
112 	/* compute length of string */
113 	va_copy(tmp_va, args);
114 	len = vsnprintf(&dummy, 0, fmt, tmp_va);
115 	va_end(tmp_va);
116 
117 	/* account for zero termination */
118 	len++;
119 
120 	/* check for error */
121 	if (len < 1)
122 		return (-EINVAL);
123 
124 	/* allocate memory for string */
125 	name = kzalloc(len, GFP_KERNEL);
126 	if (name == NULL)
127 		return (-ENOMEM);
128 	vsnprintf(name, len, fmt, args);
129 	kobj->name = name;
130 
131 	/* free old string */
132 	kfree(old);
133 
134 	/* filter new string */
135 	for (; *name != '\0'; name++)
136 		if (*name == '/')
137 			*name = '!';
138 	return (0);
139 }
140 
141 int
142 kobject_set_name(struct kobject *kobj, const char *fmt, ...)
143 {
144 	va_list args;
145 	int error;
146 
147 	va_start(args, fmt);
148 	error = kobject_set_name_vargs(kobj, fmt, args);
149 	va_end(args);
150 
151 	return (error);
152 }
153 
154 static inline int
155 kobject_add_complete(struct kobject *kobj, struct kobject *parent)
156 {
157 	struct kobj_type *t;
158 	int error;
159 
160 	kobj->parent = kobject_get(parent);
161 	error = sysfs_create_dir(kobj);
162 	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
163 		struct attribute **attr;
164 		t = kobj->ktype;
165 
166 		for (attr = t->default_attrs; *attr != NULL; attr++) {
167 			error = sysfs_create_file(kobj, *attr);
168 			if (error)
169 				break;
170 		}
171 		if (error)
172 			sysfs_remove_dir(kobj);
173 
174 	}
175 	return (error);
176 }
177 
178 int
179 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
180 {
181 	va_list args;
182 	int error;
183 
184 	va_start(args, fmt);
185 	error = kobject_set_name_vargs(kobj, fmt, args);
186 	va_end(args);
187 	if (error)
188 		return (error);
189 
190 	return kobject_add_complete(kobj, parent);
191 }
192 
/*
 * Final teardown of a kobject once its reference count reaches zero.
 * Removes it from sysfs, drops the parent reference, then runs the
 * ktype release callback.
 */
void
kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	if (kobj->parent)
		kobject_put(kobj->parent);
	kobj->parent = NULL;
	/*
	 * Save the name pointer first: the release callback may free the
	 * kobject itself (e.g. kobject_kfree()), so kobj->name must not
	 * be touched afterwards.
	 */
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}
209 
/* ktype release method for kobjects that were kmalloc()ed: free them. */
static void
kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}
215 
216 static void
217 kobject_kfree_name(struct kobject *kobj)
218 {
219 	if (kobj) {
220 		kfree(kobj->name);
221 	}
222 }
223 
/* Generic ktype whose release simply kfree()s the kobject itself. */
struct kobj_type kfree_type = { .release = kobject_kfree };
225 
/* Release callback for devices allocated by device_create() below. */
static void
dev_release(struct device *dev)
{
	pr_debug("dev_release: %s\n", dev_name(dev));
	kfree(dev);
}
232 
233 struct device *
234 device_create(struct class *class, struct device *parent, dev_t devt,
235     void *drvdata, const char *fmt, ...)
236 {
237 	struct device *dev;
238 	va_list args;
239 
240 	dev = kzalloc(sizeof(*dev), M_WAITOK);
241 	dev->parent = parent;
242 	dev->class = class;
243 	dev->devt = devt;
244 	dev->driver_data = drvdata;
245 	dev->release = dev_release;
246 	va_start(args, fmt);
247 	kobject_set_name_vargs(&dev->kobj, fmt, args);
248 	va_end(args);
249 	device_register(dev);
250 
251 	return (dev);
252 }
253 
254 int
255 kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
256     struct kobject *parent, const char *fmt, ...)
257 {
258 	va_list args;
259 	int error;
260 
261 	kobject_init(kobj, ktype);
262 	kobj->ktype = ktype;
263 	kobj->parent = parent;
264 	kobj->name = NULL;
265 
266 	va_start(args, fmt);
267 	error = kobject_set_name_vargs(kobj, fmt, args);
268 	va_end(args);
269 	if (error)
270 		return (error);
271 	return kobject_add_complete(kobj, parent);
272 }
273 
/*
 * devfs cdevpriv destructor for Linux compat files: runs the driver's
 * release method, drops the vnode hold taken in linux_dev_open() and
 * frees the shim file.
 */
static void
linux_file_dtor(void *cdp)
{
	struct linux_file *filp;

	filp = cdp;
	filp->f_op->release(filp->f_vnode, filp);
	vdrop(filp->f_vnode);
	kfree(filp);
}
284 
285 static int
286 linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
287 {
288 	struct linux_cdev *ldev;
289 	struct linux_file *filp;
290 	struct file *file;
291 	int error;
292 
293 	file = curthread->td_fpop;
294 	ldev = dev->si_drv1;
295 	if (ldev == NULL)
296 		return (ENODEV);
297 	filp = kzalloc(sizeof(*filp), GFP_KERNEL);
298 	filp->f_dentry = &filp->f_dentry_store;
299 	filp->f_op = ldev->ops;
300 	filp->f_flags = file->f_flag;
301 	vhold(file->f_vnode);
302 	filp->f_vnode = file->f_vnode;
303 	if (filp->f_op->open) {
304 		error = -filp->f_op->open(file->f_vnode, filp);
305 		if (error) {
306 			kfree(filp);
307 			return (error);
308 		}
309 	}
310 	error = devfs_set_cdevpriv(filp, linux_file_dtor);
311 	if (error) {
312 		filp->f_op->release(file->f_vnode, filp);
313 		kfree(filp);
314 		return (error);
315 	}
316 
317 	return 0;
318 }
319 
320 static int
321 linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
322 {
323 	struct linux_cdev *ldev;
324 	struct linux_file *filp;
325 	struct file *file;
326 	int error;
327 
328 	file = curthread->td_fpop;
329 	ldev = dev->si_drv1;
330 	if (ldev == NULL)
331 		return (0);
332 	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
333 		return (error);
334 	filp->f_flags = file->f_flag;
335         devfs_clear_cdevpriv();
336 
337 
338 	return (0);
339 }
340 
/*
 * d_ioctl for Linux compat character devices: forwards to the driver's
 * unlocked_ioctl method.  Returns a positive errno.
 */
static int
linux_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	struct file *file;
	int error;

	file = curthread->td_fpop;
	ldev = dev->si_drv1;
	if (ldev == NULL)
		return (0);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	/*
	 * Linux does not have a generic ioctl copyin/copyout layer.  All
	 * linux ioctls must be converted to void ioctls which pass a
	 * pointer to the address of the data.  We want the actual user
	 * address so we dereference here.
	 */
	data = *(void **)data;
	/* Driver methods return negative errno; flip the sign here. */
	if (filp->f_op->unlocked_ioctl)
		error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data);
	else
		error = ENOTTY;

	return (error);
}
371 
/*
 * d_read for Linux compat character devices.  Translates the (single)
 * iovec into the Linux read(filp, buf, len, offset) form, passing the
 * user address straight through, and advances the uio by the number of
 * bytes the driver reports.  Driver read methods return bytes read
 * (>= 0) or a negative errno.
 */
static int
linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	struct file *file;
	ssize_t bytes;
	int error;

	file = curthread->td_fpop;
	ldev = dev->si_drv1;
	if (ldev == NULL)
		return (0);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	/* The translation below only works for a single iovec. */
	if (uio->uio_iovcnt != 1)
		panic("linux_dev_read: uio %p iovcnt %d",
		    uio, uio->uio_iovcnt);
	if (filp->f_op->read) {
		bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset);
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else
			error = -bytes;
	} else
		error = ENXIO;

	return (error);
}
406 
/*
 * d_write for Linux compat character devices; mirror image of
 * linux_dev_read().  Driver write methods return bytes written (>= 0)
 * or a negative errno.
 */
static int
linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	struct file *file;
	ssize_t bytes;
	int error;

	file = curthread->td_fpop;
	ldev = dev->si_drv1;
	if (ldev == NULL)
		return (0);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	/* The translation below only works for a single iovec. */
	if (uio->uio_iovcnt != 1)
		panic("linux_dev_write: uio %p iovcnt %d",
		    uio, uio->uio_iovcnt);
	if (filp->f_op->write) {
		bytes = filp->f_op->write(filp, uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset);
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else
			error = -bytes;
	} else
		error = ENXIO;

	return (error);
}
441 
442 static int
443 linux_dev_poll(struct cdev *dev, int events, struct thread *td)
444 {
445 	struct linux_cdev *ldev;
446 	struct linux_file *filp;
447 	struct file *file;
448 	int revents;
449 	int error;
450 
451 	file = curthread->td_fpop;
452 	ldev = dev->si_drv1;
453 	if (ldev == NULL)
454 		return (0);
455 	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
456 		return (error);
457 	filp->f_flags = file->f_flag;
458 	if (filp->f_op->poll)
459 		revents = filp->f_op->poll(filp, NULL) & events;
460 	else
461 		revents = 0;
462 
463 	return (revents);
464 }
465 
/*
 * d_mmap_single for Linux compat character devices.  Calls the driver's
 * mmap method with a stack-local vm_area_struct, then wraps the
 * resulting physical range in an sglist-backed VM object.
 *
 * NOTE(review): vma.vm_pfn and vma.vm_len are consumed below but only
 * vm_pfn is zeroed here and vm_len never initialized — this assumes the
 * driver's mmap method fills both in (e.g. via io_remap_pfn_range);
 * confirm against the mmap helpers in linux/mm.h.
 */
static int
linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	struct file *file;
	struct vm_area_struct vma;
	int error;

	file = curthread->td_fpop;
	ldev = dev->si_drv1;
	if (ldev == NULL)
		return (ENODEV);
	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
		return (error);
	filp->f_flags = file->f_flag;
	vma.vm_start = 0;
	vma.vm_end = size;
	vma.vm_pgoff = *offset / PAGE_SIZE;
	vma.vm_pfn = 0;
	vma.vm_page_prot = 0;
	if (filp->f_op->mmap) {
		error = -filp->f_op->mmap(filp, &vma);
		if (error == 0) {
			struct sglist *sg;

			sg = sglist_alloc(1, M_WAITOK);
			sglist_append_phys(sg,
			    (vm_paddr_t)vma.vm_pfn << PAGE_SHIFT, vma.vm_len);
			*object = vm_pager_allocate(OBJT_SG, sg, vma.vm_len,
			    nprot, 0, curthread->td_ucred);
		        if (*object == NULL) {
				sglist_free(sg);
				return (EINVAL);
			}
			/* Mapping starts at the beginning of the object. */
			*offset = 0;
			/* Honor a non-default memory attribute if set. */
			if (vma.vm_page_prot != VM_MEMATTR_DEFAULT) {
				VM_OBJECT_WLOCK(*object);
				vm_object_set_memattr(*object,
				    vma.vm_page_prot);
				VM_OBJECT_WUNLOCK(*object);
			}
		}
	} else
		error = ENODEV;

	return (error);
}
515 
/*
 * Character device switch shared by all Linux compat devices; each
 * entry point dispatches into the driver's file_operations via the
 * shim stored as devfs cdevpriv.  D_TRACKCLOSE requests a close call
 * for every open.
 */
struct cdevsw linuxcdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_TRACKCLOSE,
	.d_open = linux_dev_open,
	.d_close = linux_dev_close,
	.d_read = linux_dev_read,
	.d_write = linux_dev_write,
	.d_ioctl = linux_dev_ioctl,
	.d_mmap_single = linux_dev_mmap_single,
	.d_poll = linux_dev_poll,
};
527 
/*
 * fo_read for Linux shim files (struct file backed by a linux_file in
 * f_data, rather than a devfs node).  Same iovec translation as
 * linux_dev_read(): driver methods return bytes read (>= 0) or a
 * negative errno.
 */
static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct linux_file *filp;
	ssize_t bytes;
	int error;

	error = 0;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	/* The translation below only works for a single iovec. */
	if (uio->uio_iovcnt != 1)
		panic("linux_file_read: uio %p iovcnt %d",
		    uio, uio->uio_iovcnt);
	if (filp->f_op->read) {
		bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
		    uio->uio_iov->iov_len, &uio->uio_offset);
		if (bytes >= 0) {
			uio->uio_iov->iov_base =
			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
			uio->uio_iov->iov_len -= bytes;
			uio->uio_resid -= bytes;
		} else
			error = -bytes;
	} else
		error = ENXIO;

	return (error);
}
557 
558 static int
559 linux_file_poll(struct file *file, int events, struct ucred *active_cred,
560     struct thread *td)
561 {
562 	struct linux_file *filp;
563 	int revents;
564 
565 	filp = (struct linux_file *)file->f_data;
566 	filp->f_flags = file->f_flag;
567 	if (filp->f_op->poll)
568 		revents = filp->f_op->poll(filp, NULL) & events;
569 	else
570 		revents = 0;
571 
572 	return (0);
573 }
574 
575 static int
576 linux_file_close(struct file *file, struct thread *td)
577 {
578 	struct linux_file *filp;
579 	int error;
580 
581 	filp = (struct linux_file *)file->f_data;
582 	filp->f_flags = file->f_flag;
583 	error = -filp->f_op->release(NULL, filp);
584 	funsetown(&filp->f_sigio);
585 	kfree(filp);
586 
587 	return (error);
588 }
589 
590 static int
591 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
592     struct thread *td)
593 {
594 	struct linux_file *filp;
595 	int error;
596 
597 	filp = (struct linux_file *)fp->f_data;
598 	filp->f_flags = fp->f_flag;
599 	error = 0;
600 
601 	switch (cmd) {
602 	case FIONBIO:
603 		break;
604 	case FIOASYNC:
605 		if (filp->f_op->fasync == NULL)
606 			break;
607 		error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC);
608 		break;
609 	case FIOSETOWN:
610 		error = fsetown(*(int *)data, &filp->f_sigio);
611 		if (error == 0)
612 			error = filp->f_op->fasync(0, filp,
613 			    fp->f_flag & FASYNC);
614 		break;
615 	case FIOGETOWN:
616 		*(int *)data = fgetown(&filp->f_sigio);
617 		break;
618 	default:
619 		error = ENOTTY;
620 		break;
621 	}
622 	return (error);
623 }
624 
/* fo_stat for Linux shim files: not supported. */
static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{

	return (EOPNOTSUPP);
}
632 
/*
 * fo_fill_kinfo for Linux shim files: nothing to report, but succeed so
 * file-descriptor enumeration does not fail on these files.
 */
static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{

	return (0);
}
640 
/*
 * fileops vector for struct files backed by a linux_file in f_data.
 * Unsupported operations are wired to the generic invfo_* rejectors.
 */
struct fileops linuxfileops = {
	.fo_read = linux_file_read,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = linux_file_stat,
	.fo_fill_kinfo = linux_file_fill_kinfo,
	.fo_poll = linux_file_poll,
	.fo_close = linux_file_close,
	.fo_ioctl = linux_file_ioctl,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
};
655 
656 /*
657  * Hash of vmmap addresses.  This is infrequently accessed and does not
658  * need to be particularly large.  This is done because we must store the
659  * caller's idea of the map size to properly unmap.
660  */
struct vmmap {
	LIST_ENTRY(vmmap)	vm_next;
	void 			*vm_addr;	/* KVA handed to the caller */
	unsigned long		vm_size;	/* size recorded at map time */
};

/* One hash bucket: a singly headed list of vmmap entries. */
struct vmmaphd {
	struct vmmap *lh_first;
};
#define	VMMAP_HASH_SIZE	64
#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
/* Hash on the page number of the mapped address. */
#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
/* Protects all vmmaphead lists. */
static struct mtx vmmaplock;
675 
676 static void
677 vmmap_add(void *addr, unsigned long size)
678 {
679 	struct vmmap *vmmap;
680 
681 	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
682 	mtx_lock(&vmmaplock);
683 	vmmap->vm_size = size;
684 	vmmap->vm_addr = addr;
685 	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
686 	mtx_unlock(&vmmaplock);
687 }
688 
689 static struct vmmap *
690 vmmap_remove(void *addr)
691 {
692 	struct vmmap *vmmap;
693 
694 	mtx_lock(&vmmaplock);
695 	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
696 		if (vmmap->vm_addr == addr)
697 			break;
698 	if (vmmap)
699 		LIST_REMOVE(vmmap, vm_next);
700 	mtx_unlock(&vmmaplock);
701 
702 	return (vmmap);
703 }
704 
705 #if defined(__i386__) || defined(__amd64__)
706 void *
707 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
708 {
709 	void *addr;
710 
711 	addr = pmap_mapdev_attr(phys_addr, size, attr);
712 	if (addr == NULL)
713 		return (NULL);
714 	vmmap_add(addr, size);
715 
716 	return (addr);
717 }
718 #endif
719 
720 void
721 iounmap(void *addr)
722 {
723 	struct vmmap *vmmap;
724 
725 	vmmap = vmmap_remove(addr);
726 	if (vmmap == NULL)
727 		return;
728 #if defined(__i386__) || defined(__amd64__)
729 	pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
730 #endif
731 	kfree(vmmap);
732 }
733 
734 
735 void *
736 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
737 {
738 	vm_offset_t off;
739 	size_t size;
740 
741 	size = count * PAGE_SIZE;
742 	off = kva_alloc(size);
743 	if (off == 0)
744 		return (NULL);
745 	vmmap_add((void *)off, size);
746 	pmap_qenter(off, pages, count);
747 
748 	return ((void *)off);
749 }
750 
751 void
752 vunmap(void *addr)
753 {
754 	struct vmmap *vmmap;
755 
756 	vmmap = vmmap_remove(addr);
757 	if (vmmap == NULL)
758 		return;
759 	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
760 	kva_free((vm_offset_t)addr, vmmap->vm_size);
761 	kfree(vmmap);
762 }
763 
764 char *
765 kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
766 {
767 	unsigned int len;
768 	char *p;
769 	va_list aq;
770 
771 	va_copy(aq, ap);
772 	len = vsnprintf(NULL, 0, fmt, aq);
773 	va_end(aq);
774 
775 	p = kmalloc(len + 1, gfp);
776 	if (p != NULL)
777 		vsnprintf(p, len + 1, fmt, ap);
778 
779 	return (p);
780 }
781 
782 char *
783 kasprintf(gfp_t gfp, const char *fmt, ...)
784 {
785 	va_list ap;
786 	char *p;
787 
788 	va_start(ap, fmt);
789 	p = kvasprintf(gfp, fmt, ap);
790 	va_end(ap);
791 
792 	return (p);
793 }
794 
795 static int
796 linux_timer_jiffies_until(unsigned long expires)
797 {
798 	int delta = expires - jiffies;
799 	/* guard against already expired values */
800 	if (delta < 1)
801 		delta = 1;
802 	return (delta);
803 }
804 
805 static void
806 linux_timer_callback_wrapper(void *context)
807 {
808 	struct timer_list *timer;
809 
810 	timer = context;
811 	timer->function(timer->data);
812 }
813 
814 void
815 mod_timer(struct timer_list *timer, unsigned long expires)
816 {
817 
818 	timer->expires = expires;
819 	callout_reset(&timer->timer_callout,
820 	    linux_timer_jiffies_until(expires),
821 	    &linux_timer_callback_wrapper, timer);
822 }
823 
824 void
825 add_timer(struct timer_list *timer)
826 {
827 
828 	callout_reset(&timer->timer_callout,
829 	    linux_timer_jiffies_until(timer->expires),
830 	    &linux_timer_callback_wrapper, timer);
831 }
832 
833 static void
834 linux_timer_init(void *arg)
835 {
836 
837 	/*
838 	 * Compute an internal HZ value which can divide 2**32 to
839 	 * avoid timer rounding problems when the tick value wraps
840 	 * around 2**32:
841 	 */
842 	linux_timer_hz_mask = 1;
843 	while (linux_timer_hz_mask < (unsigned long)hz)
844 		linux_timer_hz_mask *= 2;
845 	linux_timer_hz_mask--;
846 }
847 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);
848 
/*
 * Signal a completion: bump the done count under the sleepqueue lock
 * and wake either one waiter or all of them.  If a sleeping thread was
 * swapped out, kick proc0 to page it back in.
 */
void
linux_complete_common(struct completion *c, int all)
{
	int wakeup_swapper;

	sleepq_lock(c);
	c->done++;
	if (all)
		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
	else
		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(c);
	if (wakeup_swapper)
		kick_proc0();
}
864 
865 /*
866  * Indefinite wait for done != 0 with or without signals.
867  */
868 long
869 linux_wait_for_common(struct completion *c, int flags)
870 {
871 
872 	if (flags != 0)
873 		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
874 	else
875 		flags = SLEEPQ_SLEEP;
876 	for (;;) {
877 		sleepq_lock(c);
878 		if (c->done)
879 			break;
880 		sleepq_add(c, NULL, "completion", flags, 0);
881 		if (flags & SLEEPQ_INTERRUPTIBLE) {
882 			if (sleepq_wait_sig(c, 0) != 0)
883 				return (-ERESTARTSYS);
884 		} else
885 			sleepq_wait(c, 0);
886 	}
887 	c->done--;
888 	sleepq_release(c);
889 
890 	return (0);
891 }
892 
893 /*
894  * Time limited wait for done != 0 with or without signals.
895  */
896 long
897 linux_wait_for_timeout_common(struct completion *c, long timeout, int flags)
898 {
899 	long end = jiffies + timeout;
900 
901 	if (flags != 0)
902 		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
903 	else
904 		flags = SLEEPQ_SLEEP;
905 	for (;;) {
906 		int ret;
907 
908 		sleepq_lock(c);
909 		if (c->done)
910 			break;
911 		sleepq_add(c, NULL, "completion", flags, 0);
912 		sleepq_set_timeout(c, linux_timer_jiffies_until(end));
913 		if (flags & SLEEPQ_INTERRUPTIBLE)
914 			ret = sleepq_timedwait_sig(c, 0);
915 		else
916 			ret = sleepq_timedwait(c, 0);
917 		if (ret != 0) {
918 			/* check for timeout or signal */
919 			if (ret == EWOULDBLOCK)
920 				return (0);
921 			else
922 				return (-ERESTARTSYS);
923 		}
924 	}
925 	c->done--;
926 	sleepq_release(c);
927 
928 	/* return how many jiffies are left */
929 	return (linux_timer_jiffies_until(end));
930 }
931 
932 int
933 linux_try_wait_for_completion(struct completion *c)
934 {
935 	int isdone;
936 
937 	isdone = 1;
938 	sleepq_lock(c);
939 	if (c->done)
940 		c->done--;
941 	else
942 		isdone = 0;
943 	sleepq_release(c);
944 	return (isdone);
945 }
946 
947 int
948 linux_completion_done(struct completion *c)
949 {
950 	int isdone;
951 
952 	isdone = 1;
953 	sleepq_lock(c);
954 	if (c->done == 0)
955 		isdone = 0;
956 	sleepq_release(c);
957 	return (isdone);
958 }
959 
960 void
961 linux_delayed_work_fn(void *arg)
962 {
963 	struct delayed_work *work;
964 
965 	work = arg;
966 	taskqueue_enqueue(work->work.taskqueue, &work->work.work_task);
967 }
968 
969 void
970 linux_work_fn(void *context, int pending)
971 {
972 	struct work_struct *work;
973 
974 	work = context;
975 	work->fn(work);
976 }
977 
/*
 * Intentionally empty task body.  Presumably queued as a barrier so
 * that draining it guarantees previously queued work has run — confirm
 * against the flush_work()/flush_workqueue() users of this symbol.
 */
void
linux_flush_fn(void *context, int pending)
{
}
982 
983 struct workqueue_struct *
984 linux_create_workqueue_common(const char *name, int cpus)
985 {
986 	struct workqueue_struct *wq;
987 
988 	wq = kmalloc(sizeof(*wq), M_WAITOK);
989 	wq->taskqueue = taskqueue_create(name, M_WAITOK,
990 	    taskqueue_thread_enqueue,  &wq->taskqueue);
991 	atomic_set(&wq->draining, 0);
992 	taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);
993 
994 	return (wq);
995 }
996 
/* Tear down a workqueue: free its taskqueue (waits for its threads). */
void
destroy_workqueue(struct workqueue_struct *wq)
{
	taskqueue_free(wq->taskqueue);
	kfree(wq);
}
1003 
1004 static void
1005 linux_compat_init(void *arg)
1006 {
1007 	struct sysctl_oid *rootoid;
1008 	int i;
1009 
1010 	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
1011 	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
1012 	kobject_init(&class_root, &class_ktype);
1013 	kobject_set_name(&class_root, "class");
1014 	class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
1015 	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
1016 	kobject_init(&linux_rootdev.kobj, &dev_ktype);
1017 	kobject_set_name(&linux_rootdev.kobj, "device");
1018 	linux_rootdev.kobj.oidp = SYSCTL_ADD_NODE(NULL,
1019 	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
1020 	    "device");
1021 	linux_rootdev.bsddev = root_bus;
1022 	miscclass.name = "misc";
1023 	class_register(&miscclass);
1024 	INIT_LIST_HEAD(&pci_drivers);
1025 	INIT_LIST_HEAD(&pci_devices);
1026 	spin_lock_init(&pci_lock);
1027 	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
1028 	for (i = 0; i < VMMAP_HASH_SIZE; i++)
1029 		LIST_INIT(&vmmaphead[i]);
1030 }
1031 SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);
1032 
/* Release the names allocated for the static root kobjects. */
static void
linux_compat_uninit(void *arg)
{
	kobject_kfree_name(&class_root);
	kobject_kfree_name(&linux_rootdev.kobj);
	kobject_kfree_name(&miscclass.kobj);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);
1041 
1042 /*
1043  * NOTE: Linux frequently uses "unsigned long" for pointer to integer
1044  * conversion and vice versa, where in FreeBSD "uintptr_t" would be
1045  * used. Assert these types have the same size, else some parts of the
1046  * LinuxKPI may not work like expected:
1047  */
1048 CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
1049