/*	$NetBSD: subr_copy.c,v 1.17 2023/02/24 11:02:27 riastradh Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2008, 2019
 *	The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.17 2023/02/24 11:02:27 riastradh Exp $");

#define	__UFETCHSTORE_PRIVATE
#define	__UCAS_PRIVATE

#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = vmspace_kernel();
}
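
/*
 * Example (illustrative sketch, not part of the original source): building
 * a kernel-space uio by hand.  UIO_SETUP_SYSSPACE() expands to a call to
 * uio_setup_sysspace(), marking the transfer as kernel<->kernel so that a
 * later uiomove() uses kcopy() rather than copyin()/copyout().  The
 * consumer example_consume_uio() is hypothetical.
 */
#if 0	/* example only, not compiled */
static int
example_kernel_uio(void *kbuf, size_t len)
{
	struct iovec iov;
	struct uio uio;

	iov.iov_base = kbuf;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = len;
	uio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&uio);

	return example_consume_uio(&uio);
}
#endif	/* example */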

int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct vmspace *vm = uio->uio_vmspace;
	struct iovec *iov;
	size_t cnt;
	int error = 0;
	char *cp = buf;

	ASSERT_SLEEPABLE();

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);
	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			KASSERT(uio->uio_iovcnt > 0);
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		if (!VMSPACE_IS_KERNEL_P(vm)) {
			preempt_point();
		}

		if (uio->uio_rw == UIO_READ) {
			error = copyout_vmspace(vm, cp, iov->iov_base,
			    cnt);
		} else {
			error = copyin_vmspace(vm, iov->iov_base, cp,
			    cnt);
		}
		if (error) {
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		KDASSERT(cnt <= n);
		n -= cnt;
	}

	return (error);
}
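
/*
 * Example (illustrative sketch): a character-device read routine typically
 * hands a kernel buffer to uiomove(), which walks the caller's iovecs and
 * copies min(n, uio_resid) bytes in the direction given by uio_rw.  The
 * softc layout (sc_data, sc_len) and example_sc are hypothetical.
 */
#if 0	/* example only, not compiled */
static int
example_read(dev_t dev, struct uio *uio, int flags)
{
	struct example_softc *sc = example_sc;
	size_t len = MIN(sc->sc_len, uio->uio_resid);

	/* uio_rw == UIO_READ here: data flows from the kernel to the caller. */
	return uiomove(sc->sc_data, len, uio);
}
#endif	/* example */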

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.
 */
int
uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
{
	size_t offset;

	if (uio->uio_offset < 0 || /* uio->uio_resid < 0 || */
	    (offset = uio->uio_offset) != uio->uio_offset)
		return (EINVAL);
	if (offset >= buflen)
		return (0);
	return (uiomove((char *)buf + offset, buflen - offset, uio));
}
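
/*
 * Example (illustrative sketch): uiomove_frombuf() suits read routines that
 * expose one complete, fixed-size kernel object, letting uio_offset index
 * into it; offsets at or past buflen simply return 0 (EOF) rather than an
 * error.  example_snapshot and example_snaplen are hypothetical.
 */
#if 0	/* example only, not compiled */
static int
example_snapshot_read(dev_t dev, struct uio *uio, int flags)
{

	return uiomove_frombuf(example_snapshot, example_snaplen, uio);
}
#endif	/* example */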

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;

	if (uio->uio_resid <= 0)
		panic("ureadc: non-positive resid");
again:
	if (uio->uio_iovcnt <= 0)
		panic("ureadc: non-positive iovcnt");
	iov = uio->uio_iov;
	if (iov->iov_len <= 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
		int error;
		if ((error = ustore_char(iov->iov_base, c)) != 0)
			return (error);
	} else {
		*(char *)iov->iov_base = c;
	}
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}
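
/*
 * Example (illustrative sketch): ureadc() delivers one character per call,
 * which fits producers that yield single bytes, such as a ring buffer.
 * example_getc() is a hypothetical non-blocking getter returning -1 when
 * the buffer is empty.
 */
#if 0	/* example only, not compiled */
static int
example_byte_read(dev_t dev, struct uio *uio, int flags)
{
	int c, error = 0;

	while (uio->uio_resid > 0 && (c = example_getc()) != -1) {
		if ((error = ureadc(c, uio)) != 0)
			break;
	}
	return error;
}
#endif	/* example */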

/*
 * Like copyin(), but operates on an arbitrary vmspace.
 */
int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;
	int error;

	if (len == 0)
		return (0);

	if (VMSPACE_IS_KERNEL_P(vm)) {
		return kcopy(uaddr, kaddr, len);
	}
	if (__predict_true(vm == curproc->p_vmspace)) {
		return copyin(uaddr, kaddr, len);
	}

	iov.iov_base = kaddr;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)(uintptr_t)uaddr;
	uio.uio_resid = len;
	uio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&uio);
	error = uvm_io(&vm->vm_map, &uio, 0);

	return (error);
}

/*
 * Like copyout(), but operates on an arbitrary vmspace.
 */
int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;
	int error;

	if (len == 0)
		return (0);

	if (VMSPACE_IS_KERNEL_P(vm)) {
		return kcopy(kaddr, uaddr, len);
	}
	if (__predict_true(vm == curproc->p_vmspace)) {
		return copyout(kaddr, uaddr, len);
	}

	iov.iov_base = __UNCONST(kaddr); /* XXXUNCONST cast away const */
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)(uintptr_t)uaddr;
	uio.uio_resid = len;
	uio.uio_rw = UIO_WRITE;
	UIO_SETUP_SYSSPACE(&uio);
	error = uvm_io(&vm->vm_map, &uio, 0);

	return (error);
}
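
/*
 * Example (illustrative sketch): the _vmspace variants choose the right
 * primitive themselves -- kcopy() for the kernel vmspace, copyin()/copyout()
 * when vm is curproc's, and uvm_io() for a foreign process.  Here a single
 * word is read from a caller-supplied vmspace reference; the helper name is
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static int
example_peek_word(struct vmspace *vm, const void *uva, long *valp)
{

	return copyin_vmspace(vm, uva, valp, sizeof(*valp));
}
#endif	/* example */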

/*
 * Like copyin(), but operates on an arbitrary process.
 */
int
copyin_proc(struct proc *p, const void *uaddr, void *kaddr, size_t len)
{
	struct vmspace *vm;
	int error;

	error = proc_vmspace_getref(p, &vm);
	if (error) {
		return error;
	}
	error = copyin_vmspace(vm, uaddr, kaddr, len);
	uvmspace_free(vm);

	return error;
}

/*
 * Like copyout(), but operates on an arbitrary process.
 */
int
copyout_proc(struct proc *p, const void *kaddr, void *uaddr, size_t len)
{
	struct vmspace *vm;
	int error;

	error = proc_vmspace_getref(p, &vm);
	if (error) {
		return error;
	}
	error = copyout_vmspace(vm, kaddr, uaddr, len);
	uvmspace_free(vm);

	return error;
}

/*
 * Like copyin(), but operates on an arbitrary pid.
 */
int
copyin_pid(pid_t pid, const void *uaddr, void *kaddr, size_t len)
{
	struct proc *p;
	struct vmspace *vm;
	int error;

	mutex_enter(&proc_lock);
	p = proc_find(pid);
	if (p == NULL) {
		mutex_exit(&proc_lock);
		return ESRCH;
	}
	mutex_enter(p->p_lock);
	error = proc_vmspace_getref(p, &vm);
	mutex_exit(p->p_lock);
	mutex_exit(&proc_lock);

	if (error == 0) {
		error = copyin_vmspace(vm, uaddr, kaddr, len);
		uvmspace_free(vm);
	}
	return error;
}
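
/*
 * Example (illustrative sketch): the _proc and _pid variants take and drop
 * the vmspace reference (and, for copyin_pid(), proc_lock/p_lock) on the
 * caller's behalf, so a debugger-style poke into another process reduces to
 * a single call.  The helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static int
example_poke_proc(struct proc *p, void *uva, long val)
{

	return copyout_proc(p, &val, uva, sizeof(val));
}
#endif	/* example */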

/*
 * Like copyin(), except it operates on kernel addresses when the FKIOCTL
 * flag is passed in `ioctlflags' from the ioctl call.
 */
int
ioctl_copyin(int ioctlflags, const void *src, void *dst, size_t len)
{
	if (ioctlflags & FKIOCTL)
		return kcopy(src, dst, len);
	return copyin(src, dst, len);
}

/*
 * Like copyout(), except it operates on kernel addresses when the FKIOCTL
 * flag is passed in `ioctlflags' from the ioctl call.
 */
int
ioctl_copyout(int ioctlflags, const void *src, void *dst, size_t len)
{
	if (ioctlflags & FKIOCTL)
		return kcopy(src, dst, len);
	return copyout(src, dst, len);
}
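
/*
 * Example (illustrative sketch): ioctl_copyin()/ioctl_copyout() are meant
 * for pointers embedded in an ioctl argument.  When the ioctl originates in
 * the kernel (FKIOCTL set in `flag'), those embedded pointers are kernel
 * addresses and kcopy() is used; otherwise copyin()/copyout() validate them
 * as usual.  struct example_fetch, EXAMPLE_FETCH and example_state are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
struct example_fetch {
	void	*ef_buf;	/* user (or, with FKIOCTL, kernel) buffer */
	size_t	 ef_len;
};

static char example_state[128];

static int
example_ioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct example_fetch *ef = data;

	switch (cmd) {
	case EXAMPLE_FETCH:
		return ioctl_copyout(flag, example_state, ef->ef_buf,
		    MIN(ef->ef_len, sizeof(example_state)));
	default:
		return ENOTTY;
	}
}
#endif	/* example */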

/*
 * User-space CAS / fetch / store
 */

#ifdef __NO_STRICT_ALIGNMENT
#define	CHECK_ALIGNMENT(x)	__nothing
#else /* ! __NO_STRICT_ALIGNMENT */
static bool
ufetchstore_aligned(uintptr_t uaddr, size_t size)
{
	return (uaddr & (size - 1)) == 0;
}

#define	CHECK_ALIGNMENT()						\
do {									\
	if (!ufetchstore_aligned((uintptr_t)uaddr, sizeof(*uaddr)))	\
		return EFAULT;						\
} while (/*CONSTCOND*/0)
#endif /* __NO_STRICT_ALIGNMENT */
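
/*
 * Example (illustrative sketch): on strict-alignment platforms the wrappers
 * below refuse user addresses that are not naturally aligned for the access
 * size, before any access is attempted.  Assuming `uva' is 4-byte aligned,
 * the fetch below fails with EFAULT on such platforms.
 */
#if 0	/* example only, not compiled */
static int
example_misaligned_fetch(uintptr_t uva)
{
	uint32_t val;

	/* 4-byte fetch from a 2-byte-aligned address: rejected early. */
	return ufetch_32((const uint32_t *)(uva + 2), &val);
}
#endif	/* example */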

/*
 * __HAVE_UCAS_FULL platforms provide _ucas_32() and _ucas_64() themselves.
 * _RUMPKERNEL also provides its own _ucas_32() and _ucas_64().
 *
 * In all other cases, we provide generic implementations that work on
 * all platforms.
 */

#if !defined(__HAVE_UCAS_FULL) && !defined(_RUMPKERNEL)
#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/once.h>
#include <sys/mutex.h>
#include <sys/ipi.h>

static int ucas_critical_splcookie;
static volatile u_int ucas_critical_pausing_cpus;
static u_int ucas_critical_ipi;
static ONCE_DECL(ucas_critical_init_once)

static void
ucas_critical_cpu_gate(void *arg __unused)
{
	int count = SPINLOCK_BACKOFF_MIN;

	KASSERT(atomic_load_relaxed(&ucas_critical_pausing_cpus) > 0);

	/*
	 * Notify ucas_critical_wait that we have stopped.  Using
	 * store-release ensures all our memory operations up to the
	 * IPI happen before the ucas -- no buffered stores on our end
	 * can clobber it later on, for instance.
	 *
	 * Matches atomic_load_acquire in ucas_critical_wait -- turns
	 * the following atomic_dec_uint into a store-release.
	 */
	membar_release();
	atomic_dec_uint(&ucas_critical_pausing_cpus);

	/*
	 * Wait for ucas_critical_exit to reopen the gate and let us
	 * proceed.  Using a load-acquire ensures the ucas happens
	 * before any of our memory operations when we return from the
	 * IPI and proceed -- we won't observe any stale cached value
	 * that the ucas overwrote, for instance.
	 *
	 * Matches atomic_store_release in ucas_critical_exit.
	 */
	while (atomic_load_acquire(&ucas_critical_pausing_cpus) != (u_int)-1) {
		SPINLOCK_BACKOFF(count);
	}
}

static int
ucas_critical_init(void)
{

	ucas_critical_ipi = ipi_register(ucas_critical_cpu_gate, NULL);
	return 0;
}

static void
ucas_critical_wait(void)
{
	int count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Wait for all CPUs to stop at the gate.  Using a load-acquire
	 * ensures all memory operations before they stop at the gate
	 * happen before the ucas -- no buffered stores in other CPUs
	 * can clobber it later on, for instance.
	 *
	 * Matches membar_release/atomic_dec_uint (store-release) in
	 * ucas_critical_cpu_gate.
	 */
	while (atomic_load_acquire(&ucas_critical_pausing_cpus) > 0) {
		SPINLOCK_BACKOFF(count);
	}
}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

static inline void
ucas_critical_enter(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		RUN_ONCE(&ucas_critical_init_once, ucas_critical_init);

		/*
		 * Acquire the mutex first, then go to splhigh() and
		 * broadcast the IPI to lock all of the other CPUs
		 * behind the gate.
		 *
		 * N.B. Going to splhigh() implicitly disables preemption,
		 * so there's no need to do it explicitly.
		 */
		mutex_enter(&cpu_lock);
		ucas_critical_splcookie = splhigh();
		ucas_critical_pausing_cpus = ncpu - 1;
		ipi_trigger_broadcast(ucas_critical_ipi, true);
		ucas_critical_wait();
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_DISABLE(l);
}

static inline void
ucas_critical_exit(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		/*
		 * Open the gate and notify all CPUs in
		 * ucas_critical_cpu_gate that they can now proceed.
		 * Using a store-release ensures the ucas happens
		 * before any memory operations they issue after the
		 * IPI -- they won't observe any stale cache of the
		 * target word, for instance.
		 *
		 * Matches atomic_load_acquire in ucas_critical_cpu_gate.
		 */
		atomic_store_release(&ucas_critical_pausing_cpus, (u_int)-1);
		splx(ucas_critical_splcookie);
		mutex_exit(&cpu_lock);
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_ENABLE(l);
}

int
_ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{
	lwp_t * const l = curlwp;
	uint32_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
	    VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_32(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_32(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}

#ifdef _LP64
int
_ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{
	lwp_t * const l = curlwp;
	uint64_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
	    VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_64(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_64(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}
#endif /* _LP64 */
#endif /* ! __HAVE_UCAS_FULL && ! _RUMPKERNEL */

int
ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
	if (ncpu > 1) {
		return _ucas_32_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_32(uaddr, old, new, ret);
}

#ifdef _LP64
int
ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
	if (ncpu > 1) {
		return _ucas_64_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_64(uaddr, old, new, ret);
}
#endif /* _LP64 */

__strong_alias(ucas_int,ucas_32);
#ifdef _LP64
__strong_alias(ucas_ptr,ucas_64);
#else
__strong_alias(ucas_ptr,ucas_32);
#endif /* _LP64 */
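
/*
 * Example (illustrative sketch): ucas_32() is normally used in a
 * fetch/compare-and-swap retry loop on a word in user memory, here an
 * atomic increment of a user-space counter.  example_user_inc() is
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static int
example_user_inc(uint32_t *uaddr)
{
	uint32_t old, actual;
	int error;

	do {
		if ((error = ufetch_32(uaddr, &old)) != 0)
			return error;
		if ((error = ucas_32(uaddr, old, old + 1, &actual)) != 0)
			return error;
	} while (actual != old);	/* raced with the user program; retry */

	return 0;
}
#endif	/* example */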

int
ufetch_8(const uint8_t *uaddr, uint8_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_8(uaddr, valp);
}

int
ufetch_16(const uint16_t *uaddr, uint16_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_16(uaddr, valp);
}

int
ufetch_32(const uint32_t *uaddr, uint32_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_32(uaddr, valp);
}

#ifdef _LP64
int
ufetch_64(const uint64_t *uaddr, uint64_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_64(uaddr, valp);
}
#endif /* _LP64 */

__strong_alias(ufetch_char,ufetch_8);
__strong_alias(ufetch_short,ufetch_16);
__strong_alias(ufetch_int,ufetch_32);
#ifdef _LP64
__strong_alias(ufetch_long,ufetch_64);
__strong_alias(ufetch_ptr,ufetch_64);
#else
__strong_alias(ufetch_long,ufetch_32);
__strong_alias(ufetch_ptr,ufetch_32);
#endif /* _LP64 */
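
/*
 * Example (illustrative sketch): ufetch_*() and ustore_*() perform single,
 * word-sized user accesses with fault handling, e.g. updating a flag word
 * the kernel shares with user space.  Note the read-modify-write below is
 * not atomic against the user program; ucas_32() above provides that.
 */
#if 0	/* example only, not compiled */
static int
example_set_user_flag(uint32_t *uflagp, uint32_t bit)
{
	uint32_t flags;
	int error;

	if ((error = ufetch_32(uflagp, &flags)) != 0)
		return error;
	return ustore_32(uflagp, flags | bit);
}
#endif	/* example */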

int
ustore_8(uint8_t *uaddr, uint8_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_8(uaddr, val);
}

int
ustore_16(uint16_t *uaddr, uint16_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_16(uaddr, val);
}

int
ustore_32(uint32_t *uaddr, uint32_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_32(uaddr, val);
}

#ifdef _LP64
int
ustore_64(uint64_t *uaddr, uint64_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_64(uaddr, val);
}
#endif /* _LP64 */

__strong_alias(ustore_char,ustore_8);
__strong_alias(ustore_short,ustore_16);
__strong_alias(ustore_int,ustore_32);
#ifdef _LP64
__strong_alias(ustore_long,ustore_64);
__strong_alias(ustore_ptr,ustore_64);
#else
__strong_alias(ustore_long,ustore_32);
__strong_alias(ustore_ptr,ustore_32);
#endif /* _LP64 */