/*	$NetBSD: subr_copy.c,v 1.18 2023/04/11 10:22:04 riastradh Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2008, 2019
 *	The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.18 2023/04/11 10:22:04 riastradh Exp $");

#define	__UFETCHSTORE_PRIVATE
#define	__UCAS_PRIVATE

#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = vmspace_kernel();
}

int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct vmspace *vm = uio->uio_vmspace;
	struct iovec *iov;
	size_t cnt;
	int error = 0;
	char *cp = buf;

	ASSERT_SLEEPABLE();

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);
	while (n > 0 && uio->uio_resid) {
		KASSERT(uio->uio_iovcnt > 0);
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			KASSERT(uio->uio_iovcnt > 1);
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		if (!VMSPACE_IS_KERNEL_P(vm)) {
			preempt_point();
		}

		if (uio->uio_rw == UIO_READ) {
			error = copyout_vmspace(vm, cp, iov->iov_base,
			    cnt);
		} else {
			error = copyin_vmspace(vm, iov->iov_base, cp,
			    cnt);
		}
		if (error) {
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		KDASSERT(cnt <= n);
		n -= cnt;
	}

	return (error);
}
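
/*
 * Example (illustrative sketch, not part of this file): a typical character
 * device read routine formats a small kernel buffer and lets uiomove()
 * transfer as much of it as the caller's iovecs will hold.  The softc, its
 * fields and foo_lookup() are hypothetical.
 *
 *	static int
 *	foo_read(dev_t dev, struct uio *uio, int flags)
 *	{
 *		struct foo_softc *sc = foo_lookup(dev);	// hypothetical lookup
 *		char msg[64];
 *		int len;
 *
 *		len = snprintf(msg, sizeof(msg), "count %u\n", sc->sc_count);
 *		return uiomove(msg, len, uio);
 *	}
 */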

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.
 */
int
uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
{
	size_t offset;

	if (uio->uio_offset < 0 || /* uio->uio_resid < 0 || */
	    (offset = uio->uio_offset) != uio->uio_offset)
		return (EINVAL);
	if (offset >= buflen)
		return (0);
	return (uiomove((char *)buf + offset, buflen - offset, uio));
}
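
/*
 * Example (sketch): when a read routine exports one complete kernel object,
 * uiomove_frombuf() saves the caller the offset/bounds arithmetic above.
 * "sc" and foo_fill_stats() are hypothetical.
 *
 *	static int
 *	foo_read(dev_t dev, struct uio *uio, int flags)
 *	{
 *		struct foo_stats st;
 *
 *		foo_fill_stats(sc, &st);
 *		return uiomove_frombuf(&st, sizeof(st), uio);
 *	}
 */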

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;

	if (uio->uio_resid <= 0)
		panic("ureadc: non-positive resid");
again:
	if (uio->uio_iovcnt <= 0)
		panic("ureadc: non-positive iovcnt");
	iov = uio->uio_iov;
	if (iov->iov_len <= 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
		int error;
		if ((error = ustore_char(iov->iov_base, c)) != 0)
			return (error);
	} else {
		*(char *)iov->iov_base = c;
	}
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}
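
/*
 * Example (sketch): a driver that queues input characters can feed the
 * reader one character at a time until the request is satisfied:
 *
 *	while (uio->uio_resid > 0 && (c = foo_getc(sc)) != -1) {
 *		if ((error = ureadc(c, uio)) != 0)
 *			break;
 *	}
 *
 * foo_getc() stands in for whatever per-device input queue accessor the
 * driver uses; it is only illustrative.
 */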

/*
 * Like copyin(), but operates on an arbitrary vmspace.
 */
int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;
	int error;

	if (len == 0)
		return (0);

	if (VMSPACE_IS_KERNEL_P(vm)) {
		return kcopy(uaddr, kaddr, len);
	}
	if (__predict_true(vm == curproc->p_vmspace)) {
		return copyin(uaddr, kaddr, len);
	}

	iov.iov_base = kaddr;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)(uintptr_t)uaddr;
	uio.uio_resid = len;
	uio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&uio);
	error = uvm_io(&vm->vm_map, &uio, 0);

	return (error);
}

/*
 * Like copyout(), but operates on an arbitrary vmspace.
 */
int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;
	int error;

	if (len == 0)
		return (0);

	if (VMSPACE_IS_KERNEL_P(vm)) {
		return kcopy(kaddr, uaddr, len);
	}
	if (__predict_true(vm == curproc->p_vmspace)) {
		return copyout(kaddr, uaddr, len);
	}

	iov.iov_base = __UNCONST(kaddr); /* XXXUNCONST cast away const */
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)(uintptr_t)uaddr;
	uio.uio_resid = len;
	uio.uio_rw = UIO_WRITE;
	UIO_SETUP_SYSSPACE(&uio);
	error = uvm_io(&vm->vm_map, &uio, 0);

	return (error);
}
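
/*
 * Example (sketch): code that transfers data on behalf of another process
 * uses the vmspace variants so the copy is applied to the target's address
 * space rather than curproc's, e.g.
 *
 *	error = copyout_vmspace(t->p_vmspace, kbuf, (void *)uva, len);
 *
 * where "t", "kbuf", "uva" and "len" are supplied by the caller and only
 * illustrative here; the caller must already hold a reference that keeps
 * the target vmspace alive.
 */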

/*
 * Like copyin(), but operates on an arbitrary process.
 */
int
copyin_proc(struct proc *p, const void *uaddr, void *kaddr, size_t len)
{
	struct vmspace *vm;
	int error;

	error = proc_vmspace_getref(p, &vm);
	if (error) {
		return error;
	}
	error = copyin_vmspace(vm, uaddr, kaddr, len);
	uvmspace_free(vm);

	return error;
}

/*
 * Like copyout(), but operates on an arbitrary process.
 */
int
copyout_proc(struct proc *p, const void *kaddr, void *uaddr, size_t len)
{
	struct vmspace *vm;
	int error;

	error = proc_vmspace_getref(p, &vm);
	if (error) {
		return error;
	}
	error = copyout_vmspace(vm, kaddr, uaddr, len);
	uvmspace_free(vm);

	return error;
}
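
/*
 * Example (sketch): a debugger-style facility that patches a word in a
 * traced process, given a proc pointer it already holds:
 *
 *	uint32_t insn;
 *
 *	error = copyin_proc(p, (void *)uva, &insn, sizeof(insn));
 *	if (error == 0 && insn != newinsn)
 *		error = copyout_proc(p, &newinsn, (void *)uva, sizeof(insn));
 *
 * "uva" and "newinsn" are illustrative; the vmspace reference counting is
 * handled internally via proc_vmspace_getref()/uvmspace_free().
 */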

/*
 * Like copyin(), but operates on an arbitrary pid.
 */
int
copyin_pid(pid_t pid, const void *uaddr, void *kaddr, size_t len)
{
	struct proc *p;
	struct vmspace *vm;
	int error;

	mutex_enter(&proc_lock);
	p = proc_find(pid);
	if (p == NULL) {
		mutex_exit(&proc_lock);
		return ESRCH;
	}
	mutex_enter(p->p_lock);
	error = proc_vmspace_getref(p, &vm);
	mutex_exit(p->p_lock);
	mutex_exit(&proc_lock);

	if (error == 0) {
		error = copyin_vmspace(vm, uaddr, kaddr, len);
		uvmspace_free(vm);
	}
	return error;
}

/*
 * Like copyin(), except it operates on kernel addresses when the FKIOCTL
 * flag is passed in `ioctlflags' from the ioctl call.
 */
int
ioctl_copyin(int ioctlflags, const void *src, void *dst, size_t len)
{
	if (ioctlflags & FKIOCTL)
		return kcopy(src, dst, len);
	return copyin(src, dst, len);
}

/*
 * Like copyout(), except it operates on kernel addresses when the FKIOCTL
 * flag is passed in `ioctlflags' from the ioctl call.
 */
int
ioctl_copyout(int ioctlflags, const void *src, void *dst, size_t len)
{
	if (ioctlflags & FKIOCTL)
		return kcopy(src, dst, len);
	return copyout(src, dst, len);
}
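
/*
 * Example (sketch): a driver ioctl handler passes its flag argument
 * straight through, so in-kernel (FKIOCTL) callers are served with kcopy()
 * while normal callers get copyin()/copyout().  The request structure and
 * softc fields are hypothetical.
 *
 *	struct foo_fetch *req = data;
 *
 *	error = ioctl_copyout(flag, sc->sc_table, req->fr_buf,
 *	    MIN(req->fr_len, sc->sc_tablelen));
 */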

/*
 * User-space CAS / fetch / store
 */

#ifdef __NO_STRICT_ALIGNMENT
#define	CHECK_ALIGNMENT()	__nothing
#else /* ! __NO_STRICT_ALIGNMENT */
static bool
ufetchstore_aligned(uintptr_t uaddr, size_t size)
{
	return (uaddr & (size - 1)) == 0;
}

#define	CHECK_ALIGNMENT()						\
do {									\
	if (!ufetchstore_aligned((uintptr_t)uaddr, sizeof(*uaddr)))	\
		return EFAULT;						\
} while (/*CONSTCOND*/0)
#endif /* __NO_STRICT_ALIGNMENT */
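
/*
 * The strict-alignment test above assumes the access size is a power of
 * two, so a simple mask suffices: for a 4-byte access, an address such as
 * 0x1000 gives (0x1000 & 3) == 0 and passes, while 0x1002 gives 2 and the
 * calling wrapper fails the access with EFAULT before touching user memory.
 */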

/*
 * __HAVE_UCAS_FULL platforms provide _ucas_32() and _ucas_64() themselves.
 * _RUMPKERNEL also provides its own _ucas_32() and _ucas_64().
 *
 * In all other cases, we provide generic implementations that work on
 * all platforms.
 */

#if !defined(__HAVE_UCAS_FULL) && !defined(_RUMPKERNEL)
#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/once.h>
#include <sys/mutex.h>
#include <sys/ipi.h>

static int ucas_critical_splcookie;
static volatile u_int ucas_critical_pausing_cpus;
static u_int ucas_critical_ipi;
static ONCE_DECL(ucas_critical_init_once)

static void
ucas_critical_cpu_gate(void *arg __unused)
{
	int count = SPINLOCK_BACKOFF_MIN;

	KASSERT(atomic_load_relaxed(&ucas_critical_pausing_cpus) > 0);

	/*
	 * Notify ucas_critical_wait that we have stopped.  Using
	 * store-release ensures all our memory operations up to the
	 * IPI happen before the ucas -- no buffered stores on our end
	 * can clobber it later on, for instance.
	 *
	 * Matches atomic_load_acquire in ucas_critical_wait -- turns
	 * the following atomic_dec_uint into a store-release.
	 */
	membar_release();
	atomic_dec_uint(&ucas_critical_pausing_cpus);

	/*
	 * Wait for ucas_critical_exit to reopen the gate and let us
	 * proceed.  Using a load-acquire ensures the ucas happens
	 * before any of our memory operations when we return from the
	 * IPI and proceed -- we won't observe any stale cached value
	 * that the ucas overwrote, for instance.
	 *
	 * Matches atomic_store_release in ucas_critical_exit.
	 */
	while (atomic_load_acquire(&ucas_critical_pausing_cpus) != (u_int)-1) {
		SPINLOCK_BACKOFF(count);
	}
}

static int
ucas_critical_init(void)
{

	ucas_critical_ipi = ipi_register(ucas_critical_cpu_gate, NULL);
	return 0;
}

static void
ucas_critical_wait(void)
{
	int count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Wait for all CPUs to stop at the gate.  Using a load-acquire
	 * ensures all memory operations before they stop at the gate
	 * happen before the ucas -- no buffered stores in other CPUs
	 * can clobber it later on, for instance.
	 *
	 * Matches membar_release/atomic_dec_uint (store-release) in
	 * ucas_critical_cpu_gate.
	 */
	while (atomic_load_acquire(&ucas_critical_pausing_cpus) > 0) {
		SPINLOCK_BACKOFF(count);
	}
}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

static inline void
ucas_critical_enter(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		RUN_ONCE(&ucas_critical_init_once, ucas_critical_init);

		/*
		 * Acquire the mutex first, then go to splhigh() and
		 * broadcast the IPI to lock all of the other CPUs
		 * behind the gate.
		 *
		 * N.B. Going to splhigh() implicitly disables preemption,
		 * so there's no need to do it explicitly.
		 */
		mutex_enter(&cpu_lock);
		ucas_critical_splcookie = splhigh();
		ucas_critical_pausing_cpus = ncpu - 1;
		ipi_trigger_broadcast(ucas_critical_ipi, true);
		ucas_critical_wait();
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_DISABLE(l);
}

static inline void
ucas_critical_exit(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		/*
		 * Open the gate and notify all CPUs in
		 * ucas_critical_cpu_gate that they can now proceed.
		 * Using a store-release ensures the ucas happens
		 * before any memory operations they issue after the
		 * IPI -- they won't observe any stale cache of the
		 * target word, for instance.
		 *
		 * Matches atomic_load_acquire in ucas_critical_cpu_gate.
		 */
		atomic_store_release(&ucas_critical_pausing_cpus, (u_int)-1);
		splx(ucas_critical_splcookie);
		mutex_exit(&cpu_lock);
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_ENABLE(l);
}

int
_ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{
	lwp_t * const l = curlwp;
	uint32_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
			   VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_32(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_32(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}

#ifdef _LP64
int
_ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{
	lwp_t * const l = curlwp;
	uint64_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
			   VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_64(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_64(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}
#endif /* _LP64 */
#endif /* ! __HAVE_UCAS_FULL && ! _RUMPKERNEL */

int
ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
	if (ncpu > 1) {
		return _ucas_32_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_32(uaddr, old, new, ret);
}

#ifdef _LP64
int
ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
	if (ncpu > 1) {
		return _ucas_64_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_64(uaddr, old, new, ret);
}
#endif /* _LP64 */

__strong_alias(ucas_int,ucas_32);
#ifdef _LP64
__strong_alias(ucas_ptr,ucas_64);
#else
__strong_alias(ucas_ptr,ucas_32);
#endif /* _LP64 */
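
/*
 * Example (sketch): a futex-style increment of a word in user memory
 * retries the compare-and-swap until no other thread has changed the word
 * between the fetch and the swap:
 *
 *	uint32_t cur, actual;
 *
 *	do {
 *		if ((error = ufetch_32(uptr, &cur)) != 0)
 *			return error;
 *		if ((error = ucas_32(uptr, cur, cur + 1, &actual)) != 0)
 *			return error;
 *	} while (actual != cur);
 *
 * "uptr" is a hypothetical user-space pointer.  Both calls may sleep, so
 * this must only be done in thread context (see ASSERT_SLEEPABLE() above).
 */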

int
ufetch_8(const uint8_t *uaddr, uint8_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_8(uaddr, valp);
}

int
ufetch_16(const uint16_t *uaddr, uint16_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_16(uaddr, valp);
}

int
ufetch_32(const uint32_t *uaddr, uint32_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_32(uaddr, valp);
}

#ifdef _LP64
int
ufetch_64(const uint64_t *uaddr, uint64_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_64(uaddr, valp);
}
#endif /* _LP64 */

__strong_alias(ufetch_char,ufetch_8);
__strong_alias(ufetch_short,ufetch_16);
__strong_alias(ufetch_int,ufetch_32);
#ifdef _LP64
__strong_alias(ufetch_long,ufetch_64);
__strong_alias(ufetch_ptr,ufetch_64);
#else
__strong_alias(ufetch_long,ufetch_32);
__strong_alias(ufetch_ptr,ufetch_32);
#endif /* _LP64 */
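
/*
 * Example (sketch): ufetch/ustore are convenient for touching a single
 * user word without building a uio, e.g. reading a flag the process keeps
 * at a known address and then clearing it:
 *
 *	unsigned int val;
 *
 *	if ((error = ufetch_int(uflagp, &val)) != 0)
 *		return error;
 *	if (val != 0)
 *		error = ustore_int(uflagp, 0);
 *
 * "uflagp" is a hypothetical user-space pointer the caller obtained and
 * validated elsewhere.
 */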

int
ustore_8(uint8_t *uaddr, uint8_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_8(uaddr, val);
}

int
ustore_16(uint16_t *uaddr, uint16_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_16(uaddr, val);
}

int
ustore_32(uint32_t *uaddr, uint32_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_32(uaddr, val);
}

#ifdef _LP64
int
ustore_64(uint64_t *uaddr, uint64_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_64(uaddr, val);
}
#endif /* _LP64 */

__strong_alias(ustore_char,ustore_8);
__strong_alias(ustore_short,ustore_16);
__strong_alias(ustore_int,ustore_32);
#ifdef _LP64
__strong_alias(ustore_long,ustore_64);
__strong_alias(ustore_ptr,ustore_64);
#else
__strong_alias(ustore_long,ustore_32);
__strong_alias(ustore_ptr,ustore_32);
#endif /* _LP64 */
709