xref: /netbsd-src/sys/kern/subr_copy.c (revision 5dd36a3bc8bf2a9dec29ceb6349550414570c447)
1 /*	$NetBSD: subr_copy.c,v 1.12 2020/02/22 21:59:30 chs Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2008, 2019
5  *	The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10  * NASA Ames Research Center.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (c) 1982, 1986, 1991, 1993
36  *	The Regents of the University of California.  All rights reserved.
37  * (c) UNIX System Laboratories, Inc.
38  * All or some portions of this file are derived from material licensed
39  * to the University of California by American Telephone and Telegraph
40  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
41  * the permission of UNIX System Laboratories, Inc.
42  *
43  * Copyright (c) 1992, 1993
44  *	The Regents of the University of California.  All rights reserved.
45  *
46  * This software was developed by the Computer Systems Engineering group
47  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
48  * contributed to Berkeley.
49  *
50  * All advertising materials mentioning features or use of this software
51  * must display the following acknowledgement:
52  *	This product includes software developed by the University of
53  *	California, Lawrence Berkeley Laboratory.
54  *
55  * Redistribution and use in source and binary forms, with or without
56  * modification, are permitted provided that the following conditions
57  * are met:
58  * 1. Redistributions of source code must retain the above copyright
59  *    notice, this list of conditions and the following disclaimer.
60  * 2. Redistributions in binary form must reproduce the above copyright
61  *    notice, this list of conditions and the following disclaimer in the
62  *    documentation and/or other materials provided with the distribution.
63  * 3. Neither the name of the University nor the names of its contributors
64  *    may be used to endorse or promote products derived from this software
65  *    without specific prior written permission.
66  *
67  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
68  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
71  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
72  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
73  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
74  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
75  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
76  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
77  * SUCH DAMAGE.
78  *
79  *	@(#)kern_subr.c	8.4 (Berkeley) 2/14/95
80  */
81 
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.12 2020/02/22 21:59:30 chs Exp $");
84 
85 #define	__UFETCHSTORE_PRIVATE
86 #define	__UCAS_PRIVATE
87 
88 #include <sys/param.h>
89 #include <sys/fcntl.h>
90 #include <sys/proc.h>
91 #include <sys/systm.h>
92 
93 #include <uvm/uvm_extern.h>
94 
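/*
 * Initialize a uio for a kernel-space transfer: point uio_vmspace at
 * the kernel vmspace so that uiomove() treats the iovec addresses as
 * kernel addresses and copies with kcopy() instead of going through
 * copyin()/copyout().
 */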
95 void
96 uio_setup_sysspace(struct uio *uio)
97 {
98 
99 	uio->uio_vmspace = vmspace_kernel();
100 }
101 
102 int
103 uiomove(void *buf, size_t n, struct uio *uio)
104 {
105 	struct vmspace *vm = uio->uio_vmspace;
106 	struct iovec *iov;
107 	size_t cnt;
108 	int error = 0;
109 	char *cp = buf;
110 
111 	ASSERT_SLEEPABLE();
112 
113 	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);
114 	while (n > 0 && uio->uio_resid) {
115 		iov = uio->uio_iov;
116 		cnt = iov->iov_len;
117 		if (cnt == 0) {
118 			KASSERT(uio->uio_iovcnt > 0);
119 			uio->uio_iov++;
120 			uio->uio_iovcnt--;
121 			continue;
122 		}
123 		if (cnt > n)
124 			cnt = n;
125 		if (!VMSPACE_IS_KERNEL_P(vm)) {
126 			if (curcpu()->ci_schedstate.spc_flags &
127 			    SPCF_SHOULDYIELD)
128 				preempt();
129 		}
130 
131 		if (uio->uio_rw == UIO_READ) {
132 			error = copyout_vmspace(vm, cp, iov->iov_base,
133 			    cnt);
134 		} else {
135 			error = copyin_vmspace(vm, iov->iov_base, cp,
136 			    cnt);
137 		}
138 		if (error) {
139 			break;
140 		}
141 		iov->iov_base = (char *)iov->iov_base + cnt;
142 		iov->iov_len -= cnt;
143 		uio->uio_resid -= cnt;
144 		uio->uio_offset += cnt;
145 		cp += cnt;
146 		KDASSERT(cnt <= n);
147 		n -= cnt;
148 	}
149 
150 	return (error);
151 }
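
/*
 * Usage sketch (illustrative only; "sc", "sc_buf" and "sc_buflen" are
 * hypothetical driver softc members): a read(9) routine typically hands
 * its kernel buffer to uiomove() and lets it walk the caller's iovecs:
 *
 *	if (uio->uio_offset >= sc->sc_buflen)
 *		return 0;
 *	error = uiomove((char *)sc->sc_buf + uio->uio_offset,
 *	    sc->sc_buflen - uio->uio_offset, uio);
 *
 * uiomove() consumes iovecs as it goes and updates uio_resid and
 * uio_offset, so a short transfer leaves the uio positioned for the
 * next call.
 */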
152 
153 /*
154  * Wrapper for uiomove() that validates the arguments against a known-good
155  * kernel buffer.
156  */
157 int
158 uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
159 {
160 	size_t offset;
161 
162 	if (uio->uio_offset < 0 || /* uio->uio_resid < 0 || */
163 	    (offset = uio->uio_offset) != uio->uio_offset)
164 		return (EINVAL);
165 	if (offset >= buflen)
166 		return (0);
167 	return (uiomove((char *)buf + offset, buflen - offset, uio));
168 }
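
/*
 * Illustrative use (the "st" structure and exdev_fill_stats() helper
 * are hypothetical): exposing a fixed-size kernel object through a
 * read routine without open-coding the offset and length checks:
 *
 *	struct exdev_stats st;
 *
 *	exdev_fill_stats(sc, &st);
 *	return uiomove_frombuf(&st, sizeof(st), uio);
 *
 * Reads starting at or beyond the end of the buffer transfer nothing
 * (EOF), and a negative or unrepresentable uio_offset yields EINVAL.
 */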
169 
170 /*
171  * Give next character to user as result of read.
172  */
173 int
174 ureadc(int c, struct uio *uio)
175 {
176 	struct iovec *iov;
177 
178 	if (uio->uio_resid <= 0)
179 		panic("ureadc: non-positive resid");
180 again:
181 	if (uio->uio_iovcnt <= 0)
182 		panic("ureadc: non-positive iovcnt");
183 	iov = uio->uio_iov;
184 	if (iov->iov_len <= 0) {
185 		uio->uio_iovcnt--;
186 		uio->uio_iov++;
187 		goto again;
188 	}
189 	if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
190 		int error;
191 		if ((error = ustore_char(iov->iov_base, c)) != 0)
192 			return (error);
193 	} else {
194 		*(char *)iov->iov_base = c;
195 	}
196 	iov->iov_base = (char *)iov->iov_base + 1;
197 	iov->iov_len--;
198 	uio->uio_resid--;
199 	uio->uio_offset++;
200 	return (0);
201 }
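
/*
 * Typical pattern (sketch; exdev_getchar() is a hypothetical
 * per-character source): feed single characters to the caller until
 * the request is satisfied or the source runs dry:
 *
 *	while (uio->uio_resid > 0 && (c = exdev_getchar(sc)) != -1) {
 *		if ((error = ureadc(c, uio)) != 0)
 *			break;
 *	}
 */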
202 
203 /*
204  * Like copyin(), but operates on an arbitrary vmspace.
205  */
206 int
207 copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
208 {
209 	struct iovec iov;
210 	struct uio uio;
211 	int error;
212 
213 	if (len == 0)
214 		return (0);
215 
216 	if (VMSPACE_IS_KERNEL_P(vm)) {
217 		return kcopy(uaddr, kaddr, len);
218 	}
219 	if (__predict_true(vm == curproc->p_vmspace)) {
220 		return copyin(uaddr, kaddr, len);
221 	}
222 
223 	iov.iov_base = kaddr;
224 	iov.iov_len = len;
225 	uio.uio_iov = &iov;
226 	uio.uio_iovcnt = 1;
227 	uio.uio_offset = (off_t)(uintptr_t)uaddr;
228 	uio.uio_resid = len;
229 	uio.uio_rw = UIO_READ;
230 	UIO_SETUP_SYSSPACE(&uio);
231 	error = uvm_io(&vm->vm_map, &uio, 0);
232 
233 	return (error);
234 }
235 
236 /*
237  * Like copyout(), but operates on an arbitrary vmspace.
238  */
239 int
240 copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
241 {
242 	struct iovec iov;
243 	struct uio uio;
244 	int error;
245 
246 	if (len == 0)
247 		return (0);
248 
249 	if (VMSPACE_IS_KERNEL_P(vm)) {
250 		return kcopy(kaddr, uaddr, len);
251 	}
252 	if (__predict_true(vm == curproc->p_vmspace)) {
253 		return copyout(kaddr, uaddr, len);
254 	}
255 
256 	iov.iov_base = __UNCONST(kaddr); /* XXXUNCONST cast away const */
257 	iov.iov_len = len;
258 	uio.uio_iov = &iov;
259 	uio.uio_iovcnt = 1;
260 	uio.uio_offset = (off_t)(uintptr_t)uaddr;
261 	uio.uio_resid = len;
262 	uio.uio_rw = UIO_WRITE;
263 	UIO_SETUP_SYSSPACE(&uio);
264 	error = uvm_io(&vm->vm_map, &uio, 0);
265 
266 	return (error);
267 }
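
/*
 * Note on the uvm_io() path used by both routines above: the single
 * iovec describes the kernel buffer (hence UIO_SETUP_SYSSPACE), while
 * uio_offset carries the address in the foreign vmspace; uvm_io()
 * performs the transfer against that map directly, so neither side
 * needs to be the current process.
 */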
268 
269 /*
270  * Like copyin(), but operates on an arbitrary process.
271  */
272 int
273 copyin_proc(struct proc *p, const void *uaddr, void *kaddr, size_t len)
274 {
275 	struct vmspace *vm;
276 	int error;
277 
278 	error = proc_vmspace_getref(p, &vm);
279 	if (error) {
280 		return error;
281 	}
282 	error = copyin_vmspace(vm, uaddr, kaddr, len);
283 	uvmspace_free(vm);
284 
285 	return error;
286 }
287 
288 /*
289  * Like copyout(), but operates on an arbitrary process.
290  */
291 int
292 copyout_proc(struct proc *p, const void *kaddr, void *uaddr, size_t len)
293 {
294 	struct vmspace *vm;
295 	int error;
296 
297 	error = proc_vmspace_getref(p, &vm);
298 	if (error) {
299 		return error;
300 	}
301 	error = copyout_vmspace(vm, kaddr, uaddr, len);
302 	uvmspace_free(vm);
303 
304 	return error;
305 }
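
/*
 * Example (sketch; "t" and "addr" are a hypothetical target process
 * and target address): code that inspects another process, such as a
 * ptrace(2) or procfs backend, can read the target's memory without
 * switching to its context:
 *
 *	int word;
 *
 *	error = copyin_proc(t, (void *)addr, &word, sizeof(word));
 *
 * The target's vmspace is held via proc_vmspace_getref() for the
 * duration of the copy, so it cannot be torn down mid-transfer.
 */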
306 
307 /*
308  * Like copyin(), but operates on an arbitrary pid.
309  */
310 int
311 copyin_pid(pid_t pid, const void *uaddr, void *kaddr, size_t len)
312 {
313 	struct proc *p;
314 	struct vmspace *vm;
315 	int error;
316 
317 	mutex_enter(proc_lock);
318 	p = proc_find(pid);
319 	if (p == NULL) {
320 		mutex_exit(proc_lock);
321 		return ESRCH;
322 	}
323 	mutex_enter(p->p_lock);
324 	error = proc_vmspace_getref(p, &vm);
325 	mutex_exit(p->p_lock);
326 	mutex_exit(proc_lock);
327 
328 	if (error == 0) {
329 		error = copyin_vmspace(vm, uaddr, kaddr, len);
330 		uvmspace_free(vm);
331 	}
332 	return error;
333 }
334 
335 /*
336  * Like copyin(), except it operates on kernel addresses when the FKIOCTL
337  * flag is passed in `ioctlflags' from the ioctl call.
338  */
339 int
340 ioctl_copyin(int ioctlflags, const void *src, void *dst, size_t len)
341 {
342 	if (ioctlflags & FKIOCTL)
343 		return kcopy(src, dst, len);
344 	return copyin(src, dst, len);
345 }
346 
347 /*
348  * Like copyout(), except it operates on kernel addresses when the FKIOCTL
349  * flag is passed in `ioctlflags' from the ioctl call.
350  */
351 int
352 ioctl_copyout(int ioctlflags, const void *src, void *dst, size_t len)
353 {
354 	if (ioctlflags & FKIOCTL)
355 		return kcopy(src, dst, len);
356 	return copyout(src, dst, len);
357 }
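
/*
 * Example (sketch; the EXDEVGETBUF command and struct exdev_getbuf are
 * hypothetical): an ioctl handler that copies a result to a user
 * pointer embedded in its argument can pass the caller's flag word
 * straight through, so in-kernel callers that set FKIOCTL and supply
 * kernel addresses work unchanged:
 *
 *	case EXDEVGETBUF:
 *		return ioctl_copyout(flag, sc->sc_buf,
 *		    ((struct exdev_getbuf *)data)->egb_buf, sc->sc_buflen);
 */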
358 
359 /*
360  * User-space CAS / fetch / store
361  */
362 
363 #ifdef __NO_STRICT_ALIGNMENT
364 #define	CHECK_ALIGNMENT(x)	__nothing
365 #else /* ! __NO_STRICT_ALIGNMENT */
366 static bool
367 ufetchstore_aligned(uintptr_t uaddr, size_t size)
368 {
369 	return (uaddr & (size - 1)) == 0;
370 }
371 
372 #define	CHECK_ALIGNMENT()						\
373 do {									\
374 	if (!ufetchstore_aligned((uintptr_t)uaddr, sizeof(*uaddr)))	\
375 		return EFAULT;						\
376 } while (/*CONSTCOND*/0)
377 #endif /* __NO_STRICT_ALIGNMENT */
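
/*
 * For example, on a strict-alignment machine a 4-byte access to user
 * address 0x1002 fails the check (0x1002 & 3 != 0), and the wrappers
 * below return EFAULT without touching user memory.
 */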
378 
379 /*
380  * __HAVE_UCAS_FULL platforms provide _ucas_32() and _ucas_64() themselves.
381  * _RUMPKERNEL also provides its own _ucas_32() and _ucas_64().
382  *
383  * In all other cases, we provide generic implementations that work on
384  * all platforms.
385  */
386 
387 #if !defined(__HAVE_UCAS_FULL) && !defined(_RUMPKERNEL)
388 #if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
389 #include <sys/atomic.h>
390 #include <sys/cpu.h>
391 #include <sys/once.h>
392 #include <sys/mutex.h>
393 #include <sys/ipi.h>
394 
395 static int ucas_critical_splcookie;
396 static volatile u_int ucas_critical_pausing_cpus;
397 static u_int ucas_critical_ipi;
398 static ONCE_DECL(ucas_critical_init_once);
399 
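/*
 * IPI handler run on every other CPU while a generic ucas operation is
 * in progress: check in by decrementing ucas_critical_pausing_cpus,
 * then spin until ucas_critical_exit() releases the gate by setting
 * the counter to (u_int)-1.
 */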
400 static void
401 ucas_critical_cpu_gate(void *arg __unused)
402 {
403 	int count = SPINLOCK_BACKOFF_MIN;
404 
405 	KASSERT(ucas_critical_pausing_cpus > 0);
406 	atomic_dec_uint(&ucas_critical_pausing_cpus);
407 	while (ucas_critical_pausing_cpus != (u_int)-1) {
408 		SPINLOCK_BACKOFF(count);
409 	}
410 }
411 
412 static int
413 ucas_critical_init(void)
414 {
415 	ucas_critical_ipi = ipi_register(ucas_critical_cpu_gate, NULL);
416 	return 0;
417 }
418 
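/*
 * Spin until every other CPU has checked in at the gate; only then may
 * the initiating CPU perform its fetch/compare/store sequence.
 */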
419 static void
420 ucas_critical_wait(void)
421 {
422 	int count = SPINLOCK_BACKOFF_MIN;
423 
424 	while (ucas_critical_pausing_cpus > 0) {
425 		SPINLOCK_BACKOFF(count);
426 	}
427 }
428 #endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */
429 
430 static inline void
431 ucas_critical_enter(lwp_t * const l)
432 {
433 
434 #if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
435 	if (ncpu > 1) {
436 		RUN_ONCE(&ucas_critical_init_once, ucas_critical_init);
437 
438 		/*
439 		 * Acquire the mutex first, then go to splhigh() and
440 		 * broadcast the IPI to lock all of the other CPUs
441 		 * behind the gate.
442 		 *
443 		 * N.B. Going to splhigh() implicitly disables preemption,
444 		 * so there's no need to do it explicitly.
445 		 */
446 		mutex_enter(&cpu_lock);
447 		ucas_critical_splcookie = splhigh();
448 		ucas_critical_pausing_cpus = ncpu - 1;
449 		membar_enter();
450 
451 		ipi_trigger_broadcast(ucas_critical_ipi, true);
452 		ucas_critical_wait();
453 		return;
454 	}
455 #endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */
456 
457 	KPREEMPT_DISABLE(l);
458 }
459 
460 static inline void
461 ucas_critical_exit(lwp_t * const l)
462 {
463 
464 #if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
465 	if (ncpu > 1) {
466 		membar_exit();
467 		ucas_critical_pausing_cpus = (u_int)-1;
468 		splx(ucas_critical_splcookie);
469 		mutex_exit(&cpu_lock);
470 		return;
471 	}
472 #endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */
473 
474 	KPREEMPT_ENABLE(l);
475 }
476 
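/*
 * Generic compare-and-swap on a user-space word.  With all other CPUs
 * parked behind the gate (or preemption disabled on a uniprocessor),
 * the fetch/compare/store sequence below cannot be interleaved with
 * another update of the same word, so it behaves atomically.
 */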
477 int
478 _ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
479 {
480 	lwp_t * const l = curlwp;
481 	uint32_t *uva = ((void *)(uintptr_t)uaddr);
482 	int error;
483 
484 	/*
485 	 * Wire the user address down to avoid taking a page fault during
486 	 * the critical section.
487 	 */
488 	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
489 			   VM_PROT_READ | VM_PROT_WRITE);
490 	if (error)
491 		return error;
492 
493 	ucas_critical_enter(l);
494 	error = _ufetch_32(uva, ret);
495 	if (error == 0 && *ret == old) {
496 		error = _ustore_32(uva, new);
497 	}
498 	ucas_critical_exit(l);
499 
500 	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));
501 
502 	return error;
503 }
504 
505 #ifdef _LP64
506 int
507 _ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
508 {
509 	lwp_t * const l = curlwp;
510 	uint64_t *uva = ((void *)(uintptr_t)uaddr);
511 	int error;
512 
513 	/*
514 	 * Wire the user address down to avoid taking a page fault during
515 	 * the critical section.
516 	 */
517 	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
518 			   VM_PROT_READ | VM_PROT_WRITE);
519 	if (error)
520 		return error;
521 
522 	ucas_critical_enter(l);
523 	error = _ufetch_64(uva, ret);
524 	if (error == 0 && *ret == old) {
525 		error = _ustore_64(uva, new);
526 	}
527 	ucas_critical_exit(l);
528 
529 	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));
530 
531 	return error;
532 }
533 #endif /* _LP64 */
534 #endif /* ! __HAVE_UCAS_FULL && ! _RUMPKERNEL */
535 
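/*
 * ucas_32()/ucas_64(): public compare-and-swap entry points.  They
 * check alignment and then dispatch either to the platform's
 * MP-capable primitive (when __HAVE_UCAS_MP and more than one CPU is
 * running) or to _ucas_32()/_ucas_64(), which is either the platform's
 * own implementation or the generic one above.
 */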
536 int
537 ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
538 {
539 
540 	ASSERT_SLEEPABLE();
541 	CHECK_ALIGNMENT();
542 #if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
543     !defined(_RUMPKERNEL)
544 	if (ncpu > 1) {
545 		return _ucas_32_mp(uaddr, old, new, ret);
546 	}
547 #endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
548 	return _ucas_32(uaddr, old, new, ret);
549 }
550 
551 #ifdef _LP64
552 int
553 ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
554 {
555 
556 	ASSERT_SLEEPABLE();
557 	CHECK_ALIGNMENT();
558 #if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
559     !defined(_RUMPKERNEL)
560 	if (ncpu > 1) {
561 		return _ucas_64_mp(uaddr, old, new, ret);
562 	}
563 #endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
564 	return _ucas_64(uaddr, old, new, ret);
565 }
566 #endif /* _LP64 */
567 
568 __strong_alias(ucas_int,ucas_32);
569 #ifdef _LP64
570 __strong_alias(ucas_ptr,ucas_64);
571 #else
572 __strong_alias(ucas_ptr,ucas_32);
573 #endif /* _LP64 */
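
/*
 * Usage sketch ("lockp" and LK_WANTED are hypothetical): the classic
 * compare-and-swap retry loop against a word shared with user space:
 *
 *	uint32_t expected, actual;
 *	int error;
 *
 *	do {
 *		if ((error = ufetch_32(lockp, &expected)) != 0)
 *			break;
 *		error = ucas_32(lockp, expected, expected | LK_WANTED,
 *		    &actual);
 *	} while (error == 0 && actual != expected);
 *
 * ucas_*() may sleep (the target page is wired first), so callers must
 * not hold spin locks or be in interrupt context.
 */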
574 
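/*
 * ufetch_*(): fetch a single naturally-sized value from user space.
 * These may sleep on a page fault and return 0 on success, or EFAULT
 * for an inaccessible or (on strict-alignment machines) misaligned
 * address.
 */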
575 int
576 ufetch_8(const uint8_t *uaddr, uint8_t *valp)
577 {
578 
579 	ASSERT_SLEEPABLE();
580 	CHECK_ALIGNMENT();
581 	return _ufetch_8(uaddr, valp);
582 }
583 
584 int
585 ufetch_16(const uint16_t *uaddr, uint16_t *valp)
586 {
587 
588 	ASSERT_SLEEPABLE();
589 	CHECK_ALIGNMENT();
590 	return _ufetch_16(uaddr, valp);
591 }
592 
593 int
594 ufetch_32(const uint32_t *uaddr, uint32_t *valp)
595 {
596 
597 	ASSERT_SLEEPABLE();
598 	CHECK_ALIGNMENT();
599 	return _ufetch_32(uaddr, valp);
600 }
601 
602 #ifdef _LP64
603 int
604 ufetch_64(const uint64_t *uaddr, uint64_t *valp)
605 {
606 
607 	ASSERT_SLEEPABLE();
608 	CHECK_ALIGNMENT();
609 	return _ufetch_64(uaddr, valp);
610 }
611 #endif /* _LP64 */
612 
613 __strong_alias(ufetch_char,ufetch_8);
614 __strong_alias(ufetch_short,ufetch_16);
615 __strong_alias(ufetch_int,ufetch_32);
616 #ifdef _LP64
617 __strong_alias(ufetch_long,ufetch_64);
618 __strong_alias(ufetch_ptr,ufetch_64);
619 #else
620 __strong_alias(ufetch_long,ufetch_32);
621 __strong_alias(ufetch_ptr,ufetch_32);
622 #endif /* _LP64 */
623 
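/*
 * ustore_*(): store a single naturally-sized value to user space, with
 * the same sleeping and error behaviour as the ufetch_*() routines
 * above.
 */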
624 int
625 ustore_8(uint8_t *uaddr, uint8_t val)
626 {
627 
628 	ASSERT_SLEEPABLE();
629 	CHECK_ALIGNMENT();
630 	return _ustore_8(uaddr, val);
631 }
632 
633 int
634 ustore_16(uint16_t *uaddr, uint16_t val)
635 {
636 
637 	ASSERT_SLEEPABLE();
638 	CHECK_ALIGNMENT();
639 	return _ustore_16(uaddr, val);
640 }
641 
642 int
643 ustore_32(uint32_t *uaddr, uint32_t val)
644 {
645 
646 	ASSERT_SLEEPABLE();
647 	CHECK_ALIGNMENT();
648 	return _ustore_32(uaddr, val);
649 }
650 
651 #ifdef _LP64
652 int
653 ustore_64(uint64_t *uaddr, uint64_t val)
654 {
655 
656 	ASSERT_SLEEPABLE();
657 	CHECK_ALIGNMENT();
658 	return _ustore_64(uaddr, val);
659 }
660 #endif /* _LP64 */
661 
662 __strong_alias(ustore_char,ustore_8);
663 __strong_alias(ustore_short,ustore_16);
664 __strong_alias(ustore_int,ustore_32);
665 #ifdef _LP64
666 __strong_alias(ustore_long,ustore_64);
667 __strong_alias(ustore_ptr,ustore_64);
668 #else
669 __strong_alias(ustore_long,ustore_32);
670 __strong_alias(ustore_ptr,ustore_32);
671 #endif /* _LP64 */
672