xref: /netbsd-src/sys/kern/subr_copy.c (revision 819a01cff334f9eaf1e295d38ebf36943614d10a)
1*819a01cfSriastradh /*	$NetBSD: subr_copy.c,v 1.19 2023/05/22 14:07:24 riastradh Exp $	*/
2fcc20a4bSpooka 
3fcc20a4bSpooka /*-
491bfaeb6Sthorpej  * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2008, 2019
591bfaeb6Sthorpej  *	The NetBSD Foundation, Inc.
6fcc20a4bSpooka  * All rights reserved.
7fcc20a4bSpooka  *
8fcc20a4bSpooka  * This code is derived from software contributed to The NetBSD Foundation
9fcc20a4bSpooka  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10fcc20a4bSpooka  * NASA Ames Research Center.
11fcc20a4bSpooka  *
12fcc20a4bSpooka  * Redistribution and use in source and binary forms, with or without
13fcc20a4bSpooka  * modification, are permitted provided that the following conditions
14fcc20a4bSpooka  * are met:
15fcc20a4bSpooka  * 1. Redistributions of source code must retain the above copyright
16fcc20a4bSpooka  *    notice, this list of conditions and the following disclaimer.
17fcc20a4bSpooka  * 2. Redistributions in binary form must reproduce the above copyright
18fcc20a4bSpooka  *    notice, this list of conditions and the following disclaimer in the
19fcc20a4bSpooka  *    documentation and/or other materials provided with the distribution.
20fcc20a4bSpooka  *
21fcc20a4bSpooka  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22fcc20a4bSpooka  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23fcc20a4bSpooka  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24fcc20a4bSpooka  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25fcc20a4bSpooka  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26fcc20a4bSpooka  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27fcc20a4bSpooka  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28fcc20a4bSpooka  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29fcc20a4bSpooka  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30fcc20a4bSpooka  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31fcc20a4bSpooka  * POSSIBILITY OF SUCH DAMAGE.
32fcc20a4bSpooka  */
33fcc20a4bSpooka 
34fcc20a4bSpooka /*
35fcc20a4bSpooka  * Copyright (c) 1982, 1986, 1991, 1993
36fcc20a4bSpooka  *	The Regents of the University of California.  All rights reserved.
37fcc20a4bSpooka  * (c) UNIX System Laboratories, Inc.
38fcc20a4bSpooka  * All or some portions of this file are derived from material licensed
39fcc20a4bSpooka  * to the University of California by American Telephone and Telegraph
40fcc20a4bSpooka  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
41fcc20a4bSpooka  * the permission of UNIX System Laboratories, Inc.
42fcc20a4bSpooka  *
43fcc20a4bSpooka  * Copyright (c) 1992, 1993
44fcc20a4bSpooka  *	The Regents of the University of California.  All rights reserved.
45fcc20a4bSpooka  *
46fcc20a4bSpooka  * This software was developed by the Computer Systems Engineering group
47fcc20a4bSpooka  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
48fcc20a4bSpooka  * contributed to Berkeley.
49fcc20a4bSpooka  *
50fcc20a4bSpooka  * All advertising materials mentioning features or use of this software
51fcc20a4bSpooka  * must display the following acknowledgement:
52fcc20a4bSpooka  *	This product includes software developed by the University of
53fcc20a4bSpooka  *	California, Lawrence Berkeley Laboratory.
54fcc20a4bSpooka  *
55fcc20a4bSpooka  * Redistribution and use in source and binary forms, with or without
56fcc20a4bSpooka  * modification, are permitted provided that the following conditions
57fcc20a4bSpooka  * are met:
58fcc20a4bSpooka  * 1. Redistributions of source code must retain the above copyright
59fcc20a4bSpooka  *    notice, this list of conditions and the following disclaimer.
60fcc20a4bSpooka  * 2. Redistributions in binary form must reproduce the above copyright
61fcc20a4bSpooka  *    notice, this list of conditions and the following disclaimer in the
62fcc20a4bSpooka  *    documentation and/or other materials provided with the distribution.
63fcc20a4bSpooka  * 3. Neither the name of the University nor the names of its contributors
64fcc20a4bSpooka  *    may be used to endorse or promote products derived from this software
65fcc20a4bSpooka  *    without specific prior written permission.
66fcc20a4bSpooka  *
67fcc20a4bSpooka  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
68fcc20a4bSpooka  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69fcc20a4bSpooka  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70fcc20a4bSpooka  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
71fcc20a4bSpooka  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
72fcc20a4bSpooka  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
73fcc20a4bSpooka  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
74fcc20a4bSpooka  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
75fcc20a4bSpooka  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
76fcc20a4bSpooka  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
77fcc20a4bSpooka  * SUCH DAMAGE.
78fcc20a4bSpooka  *
79fcc20a4bSpooka  *	@(#)kern_subr.c	8.4 (Berkeley) 2/14/95
80fcc20a4bSpooka  */
81fcc20a4bSpooka 
82fcc20a4bSpooka #include <sys/cdefs.h>
83*819a01cfSriastradh __KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.19 2023/05/22 14:07:24 riastradh Exp $");
8491bfaeb6Sthorpej 
8591bfaeb6Sthorpej #define	__UFETCHSTORE_PRIVATE
8691bfaeb6Sthorpej #define	__UCAS_PRIVATE
87fcc20a4bSpooka 
88fcc20a4bSpooka #include <sys/param.h>
89fcc20a4bSpooka #include <sys/fcntl.h>
90fcc20a4bSpooka #include <sys/proc.h>
91fcc20a4bSpooka #include <sys/systm.h>
92fcc20a4bSpooka 
93fcc20a4bSpooka #include <uvm/uvm_extern.h>
94fcc20a4bSpooka 
/*
 * uio_setup_sysspace: mark a uio as addressing kernel virtual memory,
 * so the copy routines treat the iovec base addresses as kernel pointers.
 */
void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = vmspace_kernel();
}
101fcc20a4bSpooka 
/*
 * uiomove: transfer up to n bytes between the kernel buffer buf and the
 * address space described by uio, in the direction given by uio->uio_rw
 * (UIO_READ copies out of buf into the iovecs, UIO_WRITE copies from the
 * iovecs into buf).  Consumes the iovec cursor and updates uio_resid and
 * uio_offset as it goes.  May sleep.  Returns 0 on success or an error
 * from the underlying copy routines; on error the uio reflects the data
 * transferred so far.
 */
int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct vmspace *vm = uio->uio_vmspace;
	struct iovec *iov;
	size_t cnt;
	int error = 0;
	char *cp = buf;

	ASSERT_SLEEPABLE();

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);
	while (n > 0 && uio->uio_resid) {
		KASSERT(uio->uio_iovcnt > 0);
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* Skip exhausted iovec entries. */
			KASSERT(uio->uio_iovcnt > 1);
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		if (!VMSPACE_IS_KERNEL_P(vm)) {
			/* Allow preemption between chunks of a user copy. */
			preempt_point();
		}

		if (uio->uio_rw == UIO_READ) {
			error = copyout_vmspace(vm, cp, iov->iov_base,
			    cnt);
		} else {
			error = copyin_vmspace(vm, iov->iov_base, cp,
			    cnt);
		}
		if (error) {
			break;
		}
		/* Advance the iovec cursor and the uio bookkeeping. */
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		KDASSERT(cnt <= n);
		n -= cnt;
	}

	return (error);
}
151fcc20a4bSpooka 
152fcc20a4bSpooka /*
153fcc20a4bSpooka  * Wrapper for uiomove() that validates the arguments against a known-good
154fcc20a4bSpooka  * kernel buffer.
155fcc20a4bSpooka  */
156fcc20a4bSpooka int
uiomove_frombuf(void * buf,size_t buflen,struct uio * uio)157fcc20a4bSpooka uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
158fcc20a4bSpooka {
159fcc20a4bSpooka 	size_t offset;
160fcc20a4bSpooka 
161fcc20a4bSpooka 	if (uio->uio_offset < 0 || /* uio->uio_resid < 0 || */
162fcc20a4bSpooka 	    (offset = uio->uio_offset) != uio->uio_offset)
163fcc20a4bSpooka 		return (EINVAL);
164fcc20a4bSpooka 	if (offset >= buflen)
165fcc20a4bSpooka 		return (0);
166fcc20a4bSpooka 	return (uiomove((char *)buf + offset, buflen - offset, uio));
167fcc20a4bSpooka }
168fcc20a4bSpooka 
/*
 * uiopeek: like uiomove(), but leave the uio untouched -- transfer up to
 * n bytes using private copies of the iovec cursor and residual count,
 * so the caller can later consume the data with uioskip() or retry the
 * operation.  Returns 0 on success or an error from the copy routines.
 */
int
uiopeek(void *buf, size_t n, struct uio *uio)
{
	struct vmspace *vm = uio->uio_vmspace;
	struct iovec *iov;
	size_t cnt;
	int error = 0;
	char *cp = buf;
	size_t resid = uio->uio_resid;	/* local copy; *uio is not updated */
	int iovcnt = uio->uio_iovcnt;	/* local copy of the iovec count */
	char *base;
	size_t len;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);

	if (n == 0 || resid == 0)
		return 0;
	iov = uio->uio_iov;
	base = iov->iov_base;
	len = iov->iov_len;

	while (n > 0 && resid > 0) {
		KASSERT(iovcnt > 0);
		cnt = len;
		if (cnt == 0) {
			/* Exhausted element: advance the local cursor only. */
			KASSERT(iovcnt > 1);
			iov++;
			iovcnt--;
			base = iov->iov_base;
			len = iov->iov_len;
			continue;
		}
		if (cnt > n)
			cnt = n;
		if (!VMSPACE_IS_KERNEL_P(vm)) {
			/* Allow preemption between chunks of a user copy. */
			preempt_point();
		}

		if (uio->uio_rw == UIO_READ) {
			error = copyout_vmspace(vm, cp, base, cnt);
		} else {
			error = copyin_vmspace(vm, base, cp, cnt);
		}
		if (error) {
			break;
		}
		base += cnt;
		len -= cnt;
		resid -= cnt;
		cp += cnt;
		KDASSERT(cnt <= n);
		n -= cnt;
	}

	return error;
}
225*819a01cfSriastradh 
226*819a01cfSriastradh void
uioskip(size_t n,struct uio * uio)227*819a01cfSriastradh uioskip(size_t n, struct uio *uio)
228*819a01cfSriastradh {
229*819a01cfSriastradh 	struct iovec *iov;
230*819a01cfSriastradh 	size_t cnt;
231*819a01cfSriastradh 
232*819a01cfSriastradh 	KASSERTMSG(n <= uio->uio_resid, "n=%zu resid=%zu", n, uio->uio_resid);
233*819a01cfSriastradh 
234*819a01cfSriastradh 	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);
235*819a01cfSriastradh 	while (n > 0 && uio->uio_resid) {
236*819a01cfSriastradh 		KASSERT(uio->uio_iovcnt > 0);
237*819a01cfSriastradh 		iov = uio->uio_iov;
238*819a01cfSriastradh 		cnt = iov->iov_len;
239*819a01cfSriastradh 		if (cnt == 0) {
240*819a01cfSriastradh 			KASSERT(uio->uio_iovcnt > 1);
241*819a01cfSriastradh 			uio->uio_iov++;
242*819a01cfSriastradh 			uio->uio_iovcnt--;
243*819a01cfSriastradh 			continue;
244*819a01cfSriastradh 		}
245*819a01cfSriastradh 		if (cnt > n)
246*819a01cfSriastradh 			cnt = n;
247*819a01cfSriastradh 		iov->iov_base = (char *)iov->iov_base + cnt;
248*819a01cfSriastradh 		iov->iov_len -= cnt;
249*819a01cfSriastradh 		uio->uio_resid -= cnt;
250*819a01cfSriastradh 		uio->uio_offset += cnt;
251*819a01cfSriastradh 		KDASSERT(cnt <= n);
252*819a01cfSriastradh 		n -= cnt;
253*819a01cfSriastradh 	}
254*819a01cfSriastradh }
255*819a01cfSriastradh 
256fcc20a4bSpooka /*
257fcc20a4bSpooka  * Give next character to user as result of read.
258fcc20a4bSpooka  */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;

	if (uio->uio_resid <= 0)
		panic("ureadc: non-positive resid");
again:
	if (uio->uio_iovcnt <= 0)
		panic("ureadc: non-positive iovcnt");
	iov = uio->uio_iov;
	/* Skip exhausted iovec entries until one has room. */
	if (iov->iov_len <= 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
		/* User destination: store through the fault-safe path. */
		int error;
		if ((error = ustore_char(iov->iov_base, c)) != 0)
			return (error);
	} else {
		/* Kernel destination: a direct store is safe. */
		*(char *)iov->iov_base = c;
	}
	/* Account for the single byte delivered. */
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}
288fcc20a4bSpooka 
289fcc20a4bSpooka /*
290fcc20a4bSpooka  * Like copyin(), but operates on an arbitrary vmspace.
291fcc20a4bSpooka  */
int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;
	int error;

	if (len == 0)
		return (0);

	/* Kernel vmspace: a protected kernel-to-kernel copy suffices. */
	if (VMSPACE_IS_KERNEL_P(vm)) {
		return kcopy(uaddr, kaddr, len);
	}
	/* Fast path: the vmspace belongs to the current process. */
	if (__predict_true(vm == curproc->p_vmspace)) {
		return copyin(uaddr, kaddr, len);
	}

	/* Foreign address space: go through uvm_io() on its map. */
	iov.iov_base = kaddr;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)(uintptr_t)uaddr;
	uio.uio_resid = len;
	uio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&uio);
	error = uvm_io(&vm->vm_map, &uio, 0);

	return (error);
}
321fcc20a4bSpooka 
322fcc20a4bSpooka /*
323fcc20a4bSpooka  * Like copyout(), but operates on an arbitrary vmspace.
324fcc20a4bSpooka  */
int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;
	int error;

	if (len == 0)
		return (0);

	/* Kernel vmspace: a protected kernel-to-kernel copy suffices. */
	if (VMSPACE_IS_KERNEL_P(vm)) {
		return kcopy(kaddr, uaddr, len);
	}
	/* Fast path: the vmspace belongs to the current process. */
	if (__predict_true(vm == curproc->p_vmspace)) {
		return copyout(kaddr, uaddr, len);
	}

	/* Foreign address space: go through uvm_io() on its map. */
	iov.iov_base = __UNCONST(kaddr); /* XXXUNCONST cast away const */
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)(uintptr_t)uaddr;
	uio.uio_resid = len;
	uio.uio_rw = UIO_WRITE;
	UIO_SETUP_SYSSPACE(&uio);
	error = uvm_io(&vm->vm_map, &uio, 0);

	return (error);
}
354fcc20a4bSpooka 
355fcc20a4bSpooka /*
356fcc20a4bSpooka  * Like copyin(), but operates on an arbitrary process.
357fcc20a4bSpooka  */
358fcc20a4bSpooka int
copyin_proc(struct proc * p,const void * uaddr,void * kaddr,size_t len)359fcc20a4bSpooka copyin_proc(struct proc *p, const void *uaddr, void *kaddr, size_t len)
360fcc20a4bSpooka {
361fcc20a4bSpooka 	struct vmspace *vm;
362fcc20a4bSpooka 	int error;
363fcc20a4bSpooka 
364fcc20a4bSpooka 	error = proc_vmspace_getref(p, &vm);
365fcc20a4bSpooka 	if (error) {
366fcc20a4bSpooka 		return error;
367fcc20a4bSpooka 	}
368fcc20a4bSpooka 	error = copyin_vmspace(vm, uaddr, kaddr, len);
369fcc20a4bSpooka 	uvmspace_free(vm);
370fcc20a4bSpooka 
371fcc20a4bSpooka 	return error;
372fcc20a4bSpooka }
373fcc20a4bSpooka 
374fcc20a4bSpooka /*
375fcc20a4bSpooka  * Like copyout(), but operates on an arbitrary process.
376fcc20a4bSpooka  */
377fcc20a4bSpooka int
copyout_proc(struct proc * p,const void * kaddr,void * uaddr,size_t len)378fcc20a4bSpooka copyout_proc(struct proc *p, const void *kaddr, void *uaddr, size_t len)
379fcc20a4bSpooka {
380fcc20a4bSpooka 	struct vmspace *vm;
381fcc20a4bSpooka 	int error;
382fcc20a4bSpooka 
383fcc20a4bSpooka 	error = proc_vmspace_getref(p, &vm);
384fcc20a4bSpooka 	if (error) {
385fcc20a4bSpooka 		return error;
386fcc20a4bSpooka 	}
387fcc20a4bSpooka 	error = copyout_vmspace(vm, kaddr, uaddr, len);
388fcc20a4bSpooka 	uvmspace_free(vm);
389fcc20a4bSpooka 
390fcc20a4bSpooka 	return error;
391fcc20a4bSpooka }
392fcc20a4bSpooka 
393fcc20a4bSpooka /*
39451311937Schs  * Like copyin(), but operates on an arbitrary pid.
39551311937Schs  */
int
copyin_pid(pid_t pid, const void *uaddr, void *kaddr, size_t len)
{
	struct proc *p;
	struct vmspace *vm;
	int error;

	/* proc_lock keeps the process from disappearing during lookup. */
	mutex_enter(&proc_lock);
	p = proc_find(pid);
	if (p == NULL) {
		mutex_exit(&proc_lock);
		return ESRCH;
	}
	/* p_lock is required to take a reference on p's vmspace. */
	mutex_enter(p->p_lock);
	error = proc_vmspace_getref(p, &vm);
	mutex_exit(p->p_lock);
	mutex_exit(&proc_lock);

	/* The reference keeps vm valid even after both locks are dropped. */
	if (error == 0) {
		error = copyin_vmspace(vm, uaddr, kaddr, len);
		uvmspace_free(vm);
	}
	return error;
}
42051311937Schs 
42151311937Schs /*
422fcc20a4bSpooka  * Like copyin(), except it operates on kernel addresses when the FKIOCTL
423fcc20a4bSpooka  * flag is passed in `ioctlflags' from the ioctl call.
424fcc20a4bSpooka  */
425fcc20a4bSpooka int
ioctl_copyin(int ioctlflags,const void * src,void * dst,size_t len)426fcc20a4bSpooka ioctl_copyin(int ioctlflags, const void *src, void *dst, size_t len)
427fcc20a4bSpooka {
428fcc20a4bSpooka 	if (ioctlflags & FKIOCTL)
429fcc20a4bSpooka 		return kcopy(src, dst, len);
430fcc20a4bSpooka 	return copyin(src, dst, len);
431fcc20a4bSpooka }
432fcc20a4bSpooka 
433fcc20a4bSpooka /*
434fcc20a4bSpooka  * Like copyout(), except it operates on kernel addresses when the FKIOCTL
435fcc20a4bSpooka  * flag is passed in `ioctlflags' from the ioctl call.
436fcc20a4bSpooka  */
437fcc20a4bSpooka int
ioctl_copyout(int ioctlflags,const void * src,void * dst,size_t len)438fcc20a4bSpooka ioctl_copyout(int ioctlflags, const void *src, void *dst, size_t len)
439fcc20a4bSpooka {
440fcc20a4bSpooka 	if (ioctlflags & FKIOCTL)
441fcc20a4bSpooka 		return kcopy(src, dst, len);
442fcc20a4bSpooka 	return copyout(src, dst, len);
443fcc20a4bSpooka }
44491bfaeb6Sthorpej 
44591bfaeb6Sthorpej /*
44691bfaeb6Sthorpej  * User-space CAS / fetch / store
44791bfaeb6Sthorpej  */
44891bfaeb6Sthorpej 
44991bfaeb6Sthorpej #ifdef __NO_STRICT_ALIGNMENT
45091bfaeb6Sthorpej #define	CHECK_ALIGNMENT(x)	__nothing
45191bfaeb6Sthorpej #else /* ! __NO_STRICT_ALIGNMENT */
/*
 * Report whether uaddr is naturally aligned for an access of the given
 * size.  size is a power of two here (sizeof an integer type), so the
 * mask test is equivalent to uaddr % size == 0.
 */
static bool
ufetchstore_aligned(uintptr_t uaddr, size_t size)
{
	const uintptr_t mask = (uintptr_t)size - 1;

	return (uaddr & mask) == 0;
}
45791bfaeb6Sthorpej 
45891bfaeb6Sthorpej #define	CHECK_ALIGNMENT()						\
45991bfaeb6Sthorpej do {									\
46091bfaeb6Sthorpej 	if (!ufetchstore_aligned((uintptr_t)uaddr, sizeof(*uaddr)))	\
46191bfaeb6Sthorpej 		return EFAULT;						\
46291bfaeb6Sthorpej } while (/*CONSTCOND*/0)
46391bfaeb6Sthorpej #endif /* __NO_STRICT_ALIGNMENT */
46491bfaeb6Sthorpej 
465d4126f8dSthorpej /*
466d4126f8dSthorpej  * __HAVE_UCAS_FULL platforms provide _ucas_32() and _ucas_64() themselves.
467d4126f8dSthorpej  * _RUMPKERNEL also provides it's own _ucas_32() and _ucas_64().
468d4126f8dSthorpej  *
469d4126f8dSthorpej  * In all other cases, we provide generic implementations that work on
470d4126f8dSthorpej  * all platforms.
471d4126f8dSthorpej  */
472d4126f8dSthorpej 
473d4126f8dSthorpej #if !defined(__HAVE_UCAS_FULL) && !defined(_RUMPKERNEL)
47491bfaeb6Sthorpej #if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
47591bfaeb6Sthorpej #include <sys/atomic.h>
47691bfaeb6Sthorpej #include <sys/cpu.h>
47791bfaeb6Sthorpej #include <sys/once.h>
47891bfaeb6Sthorpej #include <sys/mutex.h>
47991bfaeb6Sthorpej #include <sys/ipi.h>
48091bfaeb6Sthorpej 
48191bfaeb6Sthorpej static int ucas_critical_splcookie;
48291bfaeb6Sthorpej static volatile u_int ucas_critical_pausing_cpus;
48391bfaeb6Sthorpej static u_int ucas_critical_ipi;
ONCE_DECL(ucas_critical_init_once)48491bfaeb6Sthorpej static ONCE_DECL(ucas_critical_init_once)
48591bfaeb6Sthorpej 
/*
 * IPI handler: called on every other CPU while one CPU performs an
 * emulated user CAS.  Parks this CPU at a spin gate so it cannot touch
 * the target word until ucas_critical_exit() releases it.
 */
static void
ucas_critical_cpu_gate(void *arg __unused)
{
	int count = SPINLOCK_BACKOFF_MIN;

	KASSERT(atomic_load_relaxed(&ucas_critical_pausing_cpus) > 0);

	/*
	 * Notify ucas_critical_wait that we have stopped.  Using
	 * store-release ensures all our memory operations up to the
	 * IPI happen before the ucas -- no buffered stores on our end
	 * can clobber it later on, for instance.
	 *
	 * Matches atomic_load_acquire in ucas_critical_wait -- turns
	 * the following atomic_dec_uint into a store-release.
	 */
	membar_release();
	atomic_dec_uint(&ucas_critical_pausing_cpus);

	/*
	 * Wait for ucas_critical_exit to reopen the gate and let us
	 * proceed.  Using a load-acquire ensures the ucas happens
	 * before any of our memory operations when we return from the
	 * IPI and proceed -- we won't observe any stale cached value
	 * that the ucas overwrote, for instance.
	 *
	 * Matches atomic_store_release in ucas_critical_exit.
	 */
	while (atomic_load_acquire(&ucas_critical_pausing_cpus) != (u_int)-1) {
		SPINLOCK_BACKOFF(count);
	}
}
51891bfaeb6Sthorpej 
/* One-time setup: register the pause IPI used to gate the other CPUs. */
static int
ucas_critical_init(void)
{

	ucas_critical_ipi = ipi_register(ucas_critical_cpu_gate, NULL);
	return 0;
}
52691bfaeb6Sthorpej 
/* Spin until every other CPU has arrived at ucas_critical_cpu_gate(). */
static void
ucas_critical_wait(void)
{
	int count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Wait for all CPUs to stop at the gate.  Using a load-acquire
	 * ensures all memory operations before they stop at the gate
	 * happen before the ucas -- no buffered stores in other CPUs
	 * can clobber it later on, for instance.
	 *
	 * Matches membar_release/atomic_dec_uint (store-release) in
	 * ucas_critical_cpu_gate.
	 */
	while (atomic_load_acquire(&ucas_critical_pausing_cpus) > 0) {
		SPINLOCK_BACKOFF(count);
	}
}
54591bfaeb6Sthorpej #endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */
54691bfaeb6Sthorpej 
/*
 * Enter the emulated-ucas critical section.  On MP kernels that lack a
 * hardware-assisted ucas, park every other CPU behind an IPI gate at
 * splhigh(); on uniprocessors (or ncpu == 1) disabling preemption is
 * sufficient.  Paired with ucas_critical_exit().
 */
static inline void
ucas_critical_enter(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		RUN_ONCE(&ucas_critical_init_once, ucas_critical_init);

		/*
		 * Acquire the mutex first, then go to splhigh() and
		 * broadcast the IPI to lock all of the other CPUs
		 * behind the gate.
		 *
		 * N.B. Going to splhigh() implicitly disables preemption,
		 * so there's no need to do it explicitly.
		 */
		mutex_enter(&cpu_lock);
		ucas_critical_splcookie = splhigh();
		ucas_critical_pausing_cpus = ncpu - 1;
		ipi_trigger_broadcast(ucas_critical_ipi, true);
		ucas_critical_wait();
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_DISABLE(l);
}
57491bfaeb6Sthorpej 
/*
 * Leave the emulated-ucas critical section: release the gated CPUs (MP
 * case) or re-enable preemption.  Undoes ucas_critical_enter() exactly.
 */
static inline void
ucas_critical_exit(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		/*
		 * Open the gate and notify all CPUs in
		 * ucas_critical_cpu_gate that they can now proceed.
		 * Using a store-release ensures the ucas happens
		 * before any memory operations they issue after the
		 * IPI -- they won't observe any stale cache of the
		 * target word, for instance.
		 *
		 * Matches atomic_load_acquire in ucas_critical_cpu_gate.
		 */
		atomic_store_release(&ucas_critical_pausing_cpus, (u_int)-1);
		splx(ucas_critical_splcookie);
		mutex_exit(&cpu_lock);
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_ENABLE(l);
}
60091bfaeb6Sthorpej 
/*
 * _ucas_32: generic emulated compare-and-swap on a 32-bit user word.
 * Wires the page, enters the critical section so nothing else can run
 * against user memory, then performs a non-atomic fetch/compare/store.
 * Returns 0 with the previous value in *ret, or an error if the address
 * cannot be locked or accessed.
 */
int
_ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{
	lwp_t * const l = curlwp;
	uint32_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
			   VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_32(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_32(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}
62891bfaeb6Sthorpej 
62991bfaeb6Sthorpej #ifdef _LP64
/*
 * _ucas_64: 64-bit variant of _ucas_32 (LP64 only); see that function
 * for the emulation strategy.
 */
int
_ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{
	lwp_t * const l = curlwp;
	uint64_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
			   VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_64(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_64(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}
65791bfaeb6Sthorpej #endif /* _LP64 */
658d4126f8dSthorpej #endif /* ! __HAVE_UCAS_FULL && ! _RUMPKERNEL */
65991bfaeb6Sthorpej 
/*
 * ucas_32: public 32-bit user-space compare-and-swap.  Must be called
 * from a sleepable context; CHECK_ALIGNMENT rejects a misaligned uaddr
 * with EFAULT on strict-alignment platforms.  Dispatches to the MD
 * multiprocessor implementation when one exists and more than one CPU
 * is running, otherwise to the generic _ucas_32().
 */
int
ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
	if (ncpu > 1) {
		return _ucas_32_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_32(uaddr, old, new, ret);
}
67491bfaeb6Sthorpej 
67591bfaeb6Sthorpej #ifdef _LP64
/*
 * ucas_64: 64-bit variant of ucas_32 (LP64 only); same dispatch rules.
 */
int
ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
	if (ncpu > 1) {
		return _ucas_64_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_64(uaddr, old, new, ret);
}
69091bfaeb6Sthorpej #endif /* _LP64 */
69191bfaeb6Sthorpej 
/*
 * C-type-named aliases for the fixed-width ucas primitives:
 * ucas_int is the 32-bit variant, and ucas_ptr follows the platform's
 * pointer width (64-bit on LP64, 32-bit otherwise).
 */
__strong_alias(ucas_int,ucas_32);
#ifdef _LP64
__strong_alias(ucas_ptr,ucas_64);
#else
__strong_alias(ucas_ptr,ucas_32);
#endif /* _LP64 */
69891bfaeb6Sthorpej 
/*
 * ufetch_8: Fetch an 8-bit datum from user space.
 *
 * Asserts the caller is in a sleepable context (the access may fault
 * and block) and runs the alignment precondition check, then hands off
 * to the machine-dependent primitive.  Returns the primitive's status
 * (0 on success).
 */
int
ufetch_8(const uint8_t *uaddr, uint8_t *valp)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ufetch_8(uaddr, valp);
	return error;
}
70791bfaeb6Sthorpej 
/*
 * ufetch_16: Fetch a 16-bit datum from user space.
 *
 * Asserts the caller is in a sleepable context (the access may fault
 * and block) and runs the alignment precondition check, then hands off
 * to the machine-dependent primitive.  Returns the primitive's status
 * (0 on success).
 */
int
ufetch_16(const uint16_t *uaddr, uint16_t *valp)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ufetch_16(uaddr, valp);
	return error;
}
71691bfaeb6Sthorpej 
/*
 * ufetch_32: Fetch a 32-bit datum from user space.
 *
 * Asserts the caller is in a sleepable context (the access may fault
 * and block) and runs the alignment precondition check, then hands off
 * to the machine-dependent primitive.  Returns the primitive's status
 * (0 on success).
 */
int
ufetch_32(const uint32_t *uaddr, uint32_t *valp)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ufetch_32(uaddr, valp);
	return error;
}
72591bfaeb6Sthorpej 
#ifdef _LP64
/*
 * ufetch_64: Fetch a 64-bit datum from user space (LP64 only).
 *
 * Asserts the caller is in a sleepable context (the access may fault
 * and block) and runs the alignment precondition check, then hands off
 * to the machine-dependent primitive.  Returns the primitive's status
 * (0 on success).
 */
int
ufetch_64(const uint64_t *uaddr, uint64_t *valp)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ufetch_64(uaddr, valp);
	return error;
}
#endif /* _LP64 */
73691bfaeb6Sthorpej 
/*
 * C-type-named aliases for the fixed-width ufetch primitives:
 * char/short/int map to the 8/16/32-bit variants, and long/ptr follow
 * the platform word size (64-bit on LP64, 32-bit otherwise).
 */
__strong_alias(ufetch_char,ufetch_8);
__strong_alias(ufetch_short,ufetch_16);
__strong_alias(ufetch_int,ufetch_32);
#ifdef _LP64
__strong_alias(ufetch_long,ufetch_64);
__strong_alias(ufetch_ptr,ufetch_64);
#else
__strong_alias(ufetch_long,ufetch_32);
__strong_alias(ufetch_ptr,ufetch_32);
#endif /* _LP64 */
74791bfaeb6Sthorpej 
/*
 * ustore_8: Store an 8-bit datum to user space.
 *
 * Asserts the caller is in a sleepable context (the access may fault
 * and block) and runs the alignment precondition check, then hands off
 * to the machine-dependent primitive.  Returns the primitive's status
 * (0 on success).
 */
int
ustore_8(uint8_t *uaddr, uint8_t val)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ustore_8(uaddr, val);
	return error;
}
75691bfaeb6Sthorpej 
/*
 * ustore_16: Store a 16-bit datum to user space.
 *
 * Asserts the caller is in a sleepable context (the access may fault
 * and block) and runs the alignment precondition check, then hands off
 * to the machine-dependent primitive.  Returns the primitive's status
 * (0 on success).
 */
int
ustore_16(uint16_t *uaddr, uint16_t val)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ustore_16(uaddr, val);
	return error;
}
76591bfaeb6Sthorpej 
/*
 * ustore_32: Store a 32-bit datum to user space.
 *
 * Asserts the caller is in a sleepable context (the access may fault
 * and block) and runs the alignment precondition check, then hands off
 * to the machine-dependent primitive.  Returns the primitive's status
 * (0 on success).
 */
int
ustore_32(uint32_t *uaddr, uint32_t val)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ustore_32(uaddr, val);
	return error;
}
77491bfaeb6Sthorpej 
#ifdef _LP64
/*
 * ustore_64: Store a 64-bit datum to user space (LP64 only).
 *
 * Asserts the caller is in a sleepable context (the access may fault
 * and block) and runs the alignment precondition check, then hands off
 * to the machine-dependent primitive.  Returns the primitive's status
 * (0 on success).
 */
int
ustore_64(uint64_t *uaddr, uint64_t val)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ustore_64(uaddr, val);
	return error;
}
#endif /* _LP64 */
78591bfaeb6Sthorpej 
/*
 * C-type-named aliases for the fixed-width ustore primitives:
 * char/short/int map to the 8/16/32-bit variants, and long/ptr follow
 * the platform word size (64-bit on LP64, 32-bit otherwise).
 */
__strong_alias(ustore_char,ustore_8);
__strong_alias(ustore_short,ustore_16);
__strong_alias(ustore_int,ustore_32);
#ifdef _LP64
__strong_alias(ustore_long,ustore_64);
__strong_alias(ustore_ptr,ustore_64);
#else
__strong_alias(ustore_long,ustore_32);
__strong_alias(ustore_ptr,ustore_32);
#endif /* _LP64 */
796