/*	$NetBSD: copyout.c,v 1.1.2.1 2011/01/07 01:26:19 matt Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: copyout.c,v 1.1.2.1 2011/01/07 01:26:19 matt Exp $");

#include <sys/param.h>
#include <sys/lwp.h>

#include <machine/pcb.h>

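/*
 * These helpers copy data from kernel space to user space on a Book E
 * CPU.  On this port the kernel and user mappings live in different
 * translation spaces, so the kernel cannot dereference a user address
 * directly; data accesses made with MSR[DS] (PSL_DS) set are translated
 * in the user space instead.  Each helper therefore saves the MSR,
 * installs the caller-supplied ds_msr (the current MSR with PSL_DS set),
 * performs the user-space stores, and restores the MSR, with each mtmsr
 * followed by sync; isync so the address-space change is in effect for
 * the stores in between.  Faults taken on the user stores are caught via
 * the setfault()/pcb_onfault protocol in copyout() and copyoutstr()
 * below.
 */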
static inline void
copyout_uint8(uint8_t *udaddr, uint8_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stb	%[data],0(%[udaddr])"		/* store user byte */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}

static inline void
copyout_uint16(uint16_t *udaddr, uint16_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"sth	%[data],0(%[udaddr])"		/* store user halfword */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}

static inline void
copyout_uint32(uint32_t * const udaddr, uint32_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stw	%[data],0(%[udaddr])"		/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}

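/*
 * The *_le32 helpers below use byte-reversed loads and stores
 * (lwbrx/stwbrx), so the least significant byte of the register ends up
 * at the lowest user address.  They exist for the word-at-a-time
 * copyoutstr() variant further down, which assembles string bytes into a
 * word least-significant-byte first.
 */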
static inline void
copyout_le32(uint32_t * const udaddr, uint32_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stwbrx	%[data],0,%[udaddr]"		/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}

static inline void
copyout_le32_with_mask(uint32_t * const udaddr, uint32_t data,
	uint32_t mask, register_t ds_msr)
{
	register_t msr;
	uint32_t tmp;
	KASSERT((data & ~mask) == 0);
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"lwbrx	%[tmp],0,%[udaddr]"		/* fetch user data */
	"\n\t"	"andc	%[tmp],%[tmp],%[mask]"		/* mask out new data */
	"\n\t"	"or	%[tmp],%[tmp],%[data]"		/* merge new data */
	"\n\t"	"stwbrx	%[tmp],0,%[udaddr]"		/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr), [tmp] "=&r" (tmp)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data),
	      [mask] "r" (mask), [udaddr] "b" (udaddr));
}

static inline void
copyout_16uint8s(const uint8_t *ksaddr8, uint8_t *udaddr8, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stb	%[data0],0(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data1],1(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data2],2(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data3],3(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data4],4(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data5],5(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data6],6(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data7],7(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data8],8(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data9],9(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data10],10(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data11],11(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data12],12(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data13],13(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data14],14(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data15],15(%[udaddr8])"	/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [udaddr8] "b" (udaddr8),
	      [data0] "r" (ksaddr8[0]), [data1] "r" (ksaddr8[1]),
	      [data2] "r" (ksaddr8[2]), [data3] "r" (ksaddr8[3]),
	      [data4] "r" (ksaddr8[4]), [data5] "r" (ksaddr8[5]),
	      [data6] "r" (ksaddr8[6]), [data7] "r" (ksaddr8[7]),
	      [data8] "r" (ksaddr8[8]), [data9] "r" (ksaddr8[9]),
	      [data10] "r" (ksaddr8[10]), [data11] "r" (ksaddr8[11]),
	      [data12] "r" (ksaddr8[12]), [data13] "r" (ksaddr8[13]),
	      [data14] "r" (ksaddr8[14]), [data15] "r" (ksaddr8[15]));
}

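/*
 * The multi-word helpers below store 32 or 64 bytes per MSR round-trip.
 * When the destination is aligned to the start of a data cache line,
 * dcba (data cache block allocate) is used to establish the line in the
 * cache without first fetching its old contents from memory, since the
 * stores that follow overwrite the whole line.  The 64-byte variant
 * issues a second dcba when the line size is 32 bytes, so both lines it
 * touches are allocated.
 */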
static inline void
copyout_8uint32s(const uint32_t * const ksaddr32, uint32_t * const udaddr32,
	const register_t ds_msr, const size_t line_mask)
{
	register_t msr;
	register_t tmp;
	__asm volatile(
		"and.	%[tmp],%[line_mask],%[udaddr32]"
	"\n\t"	"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"bne	0,1f"
	"\n\t"	"dcba	0,%[udaddr32]"
	"\n"	"1:"
	"\n\t"	"stw	%[data0],0(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data1],4(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data2],8(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data3],12(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data4],16(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data5],20(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data6],24(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data7],28(%[udaddr32])"	/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr), [tmp] "=&r" (tmp)
	    : [ds_msr] "r" (ds_msr), [udaddr32] "b" (udaddr32),
	      [line_mask] "r" (line_mask),
	      [data0] "r" (ksaddr32[0]), [data1] "r" (ksaddr32[1]),
	      [data2] "r" (ksaddr32[2]), [data3] "r" (ksaddr32[3]),
	      [data4] "r" (ksaddr32[4]), [data5] "r" (ksaddr32[5]),
	      [data6] "r" (ksaddr32[6]), [data7] "r" (ksaddr32[7])
	    : "cr0");
}

static inline void
copyout_16uint32s(const uint32_t * const ksaddr32, uint32_t * const udaddr32,
	const register_t ds_msr, const size_t line_mask)
{
	KASSERT(((uintptr_t)udaddr32 & line_mask) == 0);
	register_t msr;
	register_t tmp;
	__asm volatile(
		"and.	%[tmp],%[line_mask],%[udaddr32]"
	"\n\t"	"cmplwi	2,%[line_size],32"
	"\n\t"	"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"bne	0,1f"
	"\n\t"	"dcba	0,%[udaddr32]"
	"\n\t"	"bne	2,1f"
	"\n\t"	"dcba	%[line_size],%[udaddr32]"
	"\n"	"1:"
	"\n\t"	"stw	%[data0],0(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data1],4(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data2],8(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data3],12(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data4],16(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data5],20(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data6],24(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data7],28(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data8],32(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data9],36(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data10],40(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data11],44(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data12],48(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data13],52(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data14],56(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data15],60(%[udaddr32])"	/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr), [tmp] "=&r" (tmp)
	    : [ds_msr] "r" (ds_msr), [udaddr32] "b" (udaddr32),
	      [line_size] "r" (line_mask + 1), [line_mask] "r" (line_mask),
	      [data0] "r" (ksaddr32[0]), [data1] "r" (ksaddr32[1]),
	      [data2] "r" (ksaddr32[2]), [data3] "r" (ksaddr32[3]),
	      [data4] "r" (ksaddr32[4]), [data5] "r" (ksaddr32[5]),
	      [data6] "r" (ksaddr32[6]), [data7] "r" (ksaddr32[7]),
	      [data8] "r" (ksaddr32[8]), [data9] "r" (ksaddr32[9]),
	      [data10] "r" (ksaddr32[10]), [data11] "r" (ksaddr32[11]),
	      [data12] "r" (ksaddr32[12]), [data13] "r" (ksaddr32[13]),
	      [data14] "r" (ksaddr32[14]), [data15] "r" (ksaddr32[15])
	    : "cr0", "cr2");
}

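/*
 * Byte-granular copy: move 16 bytes per MSR round-trip while prefetching
 * the next source block, then finish one byte at a time.
 */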
static inline void
copyout_uint8s(vaddr_t ksaddr, vaddr_t udaddr, size_t len, register_t ds_msr)
{
	const uint8_t *ksaddr8 = (void *)ksaddr;
	uint8_t *udaddr8 = (void *)udaddr;

	__builtin_prefetch(ksaddr8, 0, 1);

	for (; len >= 16; len -= 16, ksaddr8 += 16, udaddr8 += 16) {
		__builtin_prefetch(ksaddr8 + 16, 0, 1);
		copyout_16uint8s(ksaddr8, udaddr8, ds_msr);
	}

	while (len-- > 0) {
		copyout_uint8(udaddr8++, *ksaddr8++, ds_msr);
	}
}

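/*
 * Word-granular copy of len bytes (the caller passes a multiple of 4 and
 * both addresses word-aligned).  If the destination does not start on a
 * data cache line boundary and the copy spills into the next line, copy
 * words up to the line boundary first so the main loop can work on
 * whole, line-aligned 64-byte blocks and take advantage of dcba.
 */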
static inline void
copyout_uint32s(vaddr_t ksaddr, vaddr_t udaddr, size_t len, register_t ds_msr)
{
	const size_t line_size = curcpu()->ci_ci.dcache_line_size;
	const size_t line_mask = line_size - 1;
	const size_t udalignment = udaddr & line_mask;
	KASSERT((ksaddr & 3) == 0);
	KASSERT((udaddr & 3) == 0);
	const uint32_t *ksaddr32 = (void *)ksaddr;
	uint32_t *udaddr32 = (void *)udaddr;
	len >>= 2;
	__builtin_prefetch(ksaddr32, 0, 1);
	if (udalignment != 0 && udalignment + 4*len > line_size) {
		size_t slen = (line_size - udalignment) >> 2;
		len -= slen;
		for (; slen >= 8; ksaddr32 += 8, udaddr32 += 8, slen -= 8) {
			copyout_8uint32s(ksaddr32, udaddr32, ds_msr, line_mask);
		}
		while (slen-- > 0) {
			copyout_uint32(udaddr32++, *ksaddr32++, ds_msr);
		}
		if (len == 0)
			return;
	}
	__builtin_prefetch(ksaddr32, 0, 1);
	while (len >= 16) {
		__builtin_prefetch(ksaddr32 + 8, 0, 1);
		__builtin_prefetch(ksaddr32 + 16, 0, 1);
		copyout_16uint32s(ksaddr32, udaddr32, ds_msr, line_mask);
		ksaddr32 += 16, udaddr32 += 16, len -= 16;
	}
	KASSERT(len <= 16);
	if (len >= 8) {
		__builtin_prefetch(ksaddr32 + 8, 0, 1);
		copyout_8uint32s(ksaddr32, udaddr32, ds_msr, line_mask);
		ksaddr32 += 8, udaddr32 += 8, len -= 8;
	}
	while (len-- > 0) {
		copyout_uint32(udaddr32++, *ksaddr32++, ds_msr);
	}
}

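/*
 * copyout(9): copy len bytes from the kernel address vksaddr to the user
 * address vudaddr of the current process.  Returns 0 on success, or the
 * error (typically EFAULT) raised by a fault on the user address.
 *
 * A minimal usage sketch (the struct and syscall argument names are
 * illustrative, not taken from this file):
 *
 *	struct timeval tv;
 *	...
 *	error = copyout(&tv, SCARG(uap, tp), sizeof(tv));
 *	if (error != 0)
 *		return error;
 */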
int
copyout(const void *vksaddr, void *vudaddr, size_t len)
{
	struct faultbuf env;
	vaddr_t udaddr = (vaddr_t) vudaddr;
	vaddr_t ksaddr = (vaddr_t) vksaddr;

	if (__predict_false(len == 0)) {
		return 0;
	}

	const register_t ds_msr = mfmsr() | PSL_DS;

	int rv = setfault(&env);
	if (rv != 0) {
		curpcb->pcb_onfault = NULL;
		return rv;
	}

	if (__predict_false(len < 4)) {
		copyout_uint8s(ksaddr, udaddr, len, ds_msr);
		curpcb->pcb_onfault = NULL;
		return 0;
	}

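	/*
	 * If the kernel and user addresses have the same alignment within
	 * a word, copy leading bytes until the pointers are word-aligned,
	 * then copy as many whole words as possible; otherwise (mutually
	 * misaligned) fall through and copy everything a byte at a time.
	 */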
	const size_t alignment = (udaddr ^ ksaddr) & 3;
	if (__predict_true(alignment == 0)) {
		size_t slen;
		if (__predict_false(ksaddr & 3)) {
			slen = 4 - (ksaddr & 3);
			copyout_uint8s(ksaddr, udaddr, slen, ds_msr);
			udaddr += slen, ksaddr += slen, len -= slen;
		}
		slen = len & ~3;
		if (__predict_true(slen >= 4)) {
			copyout_uint32s(ksaddr, udaddr, slen, ds_msr);
			udaddr += slen, ksaddr += slen, len -= slen;
		}
	}

	if (len > 0) {
		copyout_uint8s(ksaddr, udaddr, len, ds_msr);
	}
	curpcb->pcb_onfault = NULL;
	return 0;
}

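/*
 * copyoutstr(9): copy a NUL-terminated string of at most len bytes from
 * kernel space to user space.  On return, *lenp (if non-NULL) holds the
 * number of bytes copied, including the terminating NUL when one was
 * found within len bytes.  Returns 0, or EFAULT if a fault is taken on
 * the user address.
 */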
int
copyoutstr(const void *ksaddr, void *udaddr, size_t len, size_t *lenp)
{
	struct faultbuf env;

	if (__predict_false(len == 0)) {
		if (lenp)
			*lenp = 0;
		return 0;
	}

	if (setfault(&env)) {
		curpcb->pcb_onfault = NULL;
		if (lenp)
			*lenp = 0;
		return EFAULT;
	}

	const register_t ds_msr = mfmsr() | PSL_DS;
	const uint8_t *ksaddr8 = ksaddr;
	size_t copylen = 0;

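	/*
	 * The simple byte-at-a-time loop below is the variant that is
	 * compiled in.  The disabled alternative in the #else branch
	 * assembles whole 32-bit words and writes them with the
	 * byte-reversed helpers above, cutting down on the number of MSR
	 * round-trips per character at the cost of extra masking at the
	 * edges of the buffer.
	 */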
#if 1
	uint8_t *udaddr8 = (void *)udaddr;

	while (copylen < len) {
		const uint8_t data = *ksaddr8++;
		copyout_uint8(udaddr8++, data, ds_msr);
		copylen++;
		if (data == 0)
			break;
	}
#else
	uint32_t *udaddr32 = (void *)((uintptr_t)udaddr & ~3);

	size_t boff = (uintptr_t)udaddr & 3;
	bool done = false;
	size_t wlen = 0;
	uint32_t data = 0;

	/*
	 * If the destination buffer doesn't start on a 32-bit boundary,
	 * try to partially fill in the first word.  If we succeed we can
	 * finish writing it while preserving the bytes in front of it.
	 * Because the user-space stores are byte-reversed (stwbrx), the
	 * byte destined for offset boff+i within the word is placed at
	 * bits 8*(boff+i) of the register, least significant byte first.
	 */
	if (boff > 0) {
		KASSERT(len > 0);
		do {
			const uint8_t ch = *ksaddr8++;
			data |= (uint32_t)ch << (8 * (boff + wlen));
			wlen++;
			done = (ch == 0 || len == wlen);
		} while (!done && boff + wlen < 4);
		KASSERT(wlen > 0);
		if (!done || boff + wlen == 4) {
			uint32_t mask = 0xffffffff << (8 * boff);
			copyout_le32_with_mask(udaddr32++, data, mask, ds_msr);
			boff = 0;
			copylen = wlen;
			wlen = 0;
			data = 0;
		}
	}

	/*
	 * Now we get to the heart of the routine.  Build up complete words
	 * if possible.  When we have one, write it to the user's address
	 * space and go on to the next.  If we run out of space or find the
	 * end of the string, stop building.  If we managed to build a
	 * complete word, just write it and be happy.  Otherwise we have to
	 * deal with the trailing bytes below.
	 */
	KASSERT(done || boff == 0);
	KASSERT(done || copylen < len);
	while (!done) {
		KASSERT(wlen == 0);
		KASSERT(copylen < len);
		do {
			const uint8_t ch = *ksaddr8++;
			data |= (uint32_t)ch << (8 * wlen);
			wlen++;
			done = (ch == 0 || copylen + wlen == len);
		} while (!done && wlen < 4);
		KASSERT(done || wlen == 4);
		if (__predict_true(wlen == 4)) {
			copyout_le32(udaddr32++, data, ds_msr);
			data = 0;
			copylen += wlen;
			wlen = 0;
			KASSERT(copylen < len || done);
		}
	}
	KASSERT(wlen < 4);
	if (wlen) {
		/*
		 * Remember that even though we are running big-endian, we
		 * are using byte-reversed load/stores, so we need to deal
		 * with things as little-endian.
		 *
		 * wlen=1 boff=0:
		 * (~(~0 <<  8) <<  0) -> (~(0xffffff00) <<  0) -> 0x000000ff
		 * wlen=1 boff=1:
		 * (~(~0 <<  8) <<  8) -> (~(0xffffff00) <<  8) -> 0x0000ff00
		 * wlen=1 boff=2:
		 * (~(~0 <<  8) << 16) -> (~(0xffffff00) << 16) -> 0x00ff0000
		 * wlen=1 boff=3:
		 * (~(~0 <<  8) << 24) -> (~(0xffffff00) << 24) -> 0xff000000
		 * wlen=2 boff=0:
		 * (~(~0 << 16) <<  0) -> (~(0xffff0000) <<  0) -> 0x0000ffff
		 * wlen=2 boff=1:
		 * (~(~0 << 16) <<  8) -> (~(0xffff0000) <<  8) -> 0x00ffff00
		 * wlen=2 boff=2:
		 * (~(~0 << 16) << 16) -> (~(0xffff0000) << 16) -> 0xffff0000
		 * wlen=3 boff=0:
		 * (~(~0 << 24) <<  0) -> (~(0xff000000) <<  0) -> 0x00ffffff
		 * wlen=3 boff=1:
		 * (~(~0 << 24) <<  8) -> (~(0xff000000) <<  8) -> 0xffffff00
		 */
		KASSERT(boff + wlen <= 4);
		uint32_t mask = (~(~0 << (8 * wlen))) << (8 * boff);
		KASSERT(mask != 0xffffffff);
		copyout_le32_with_mask(udaddr32, data, mask, ds_msr);
		copylen += wlen;
	}
#endif

	curpcb->pcb_onfault = NULL;
	if (lenp)
		*lenp = copylen;
	return 0;
}