/*	$NetBSD: copyout.c,v 1.4 2014/07/24 23:29:02 joerg Exp $	*/

/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: copyout.c,v 1.4 2014/07/24 23:29:02 joerg Exp $");

#include <sys/param.h>
#include <sys/lwp.h>

#include <powerpc/pcb.h>

#include <powerpc/booke/cpuvar.h>

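/*
 * The helpers below copy kernel data to user space by briefly setting
 * MSR[DS] (via the caller-computed ds_msr value, mfmsr() | PSL_DS) so that
 * data accesses translate through the user address space, issuing the user
 * stores, and then restoring the saved MSR.  The sync/isync pairs make the
 * MSR changes take effect in order with respect to the surrounding stores.
 * Faults on the user stores are caught by the setfault()/pcb_onfault
 * handler that copyout() and copyoutstr() install before using them.
 */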
static inline void
copyout_uint8(uint8_t *udaddr, uint8_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stb	%[data],0(%[udaddr])"		/* store user byte */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}

#if 0
static inline void
copyout_uint16(uint16_t *udaddr, uint16_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"sth	%[data],0(%[udaddr])"		/* store user halfword */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}
#endif

static inline void
copyout_uint32(uint32_t * const udaddr, uint32_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stw	%[data],0(%[udaddr])"		/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}

#if 0
static inline void
copyout_le32(uint32_t * const udaddr, uint32_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stwbrx	%[data],0,%[udaddr]"		/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}

static inline void
copyout_le32_with_mask(uint32_t * const udaddr, uint32_t data,
	uint32_t mask, register_t ds_msr)
{
	register_t msr;
	uint32_t tmp;
	KASSERT((data & ~mask) == 0);
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"lwbrx	%[tmp],0,%[udaddr]"		/* fetch user data */
	"\n\t"	"andc	%[tmp],%[tmp],%[mask]"		/* mask out new data */
	"\n\t"	"or	%[tmp],%[tmp],%[data]"		/* merge new data */
	"\n\t"	"stwbrx	%[tmp],0,%[udaddr]"		/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr), [tmp] "=&r" (tmp)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data),
	      [mask] "r" (mask), [udaddr] "b" (udaddr));
}
#endif

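/*
 * Store 16 bytes inside a single MSR[DS] window so the overhead of the
 * mtmsr/sync/isync sequences is amortized over 16 stores instead of being
 * paid for every byte.
 */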
static inline void
copyout_16uint8s(const uint8_t *ksaddr8, uint8_t *udaddr8, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stb	%[data0],0(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data1],1(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data2],2(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data3],3(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data4],4(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data5],5(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data6],6(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data7],7(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data8],8(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data9],9(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data10],10(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data11],11(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data12],12(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data13],13(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data14],14(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data15],15(%[udaddr8])"	/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [udaddr8] "b" (udaddr8),
	      [data0] "r" (ksaddr8[0]), [data1] "r" (ksaddr8[1]),
	      [data2] "r" (ksaddr8[2]), [data3] "r" (ksaddr8[3]),
	      [data4] "r" (ksaddr8[4]), [data5] "r" (ksaddr8[5]),
	      [data6] "r" (ksaddr8[6]), [data7] "r" (ksaddr8[7]),
	      [data8] "r" (ksaddr8[8]), [data9] "r" (ksaddr8[9]),
	      [data10] "r" (ksaddr8[10]), [data11] "r" (ksaddr8[11]),
	      [data12] "r" (ksaddr8[12]), [data13] "r" (ksaddr8[13]),
	      [data14] "r" (ksaddr8[14]), [data15] "r" (ksaddr8[15]));
}

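/*
 * Copy 8 word-aligned 32-bit words in one MSR[DS] window.  If the
 * destination happens to start on a data cache line boundary (the "and."
 * against line_mask tests this), "dcba" establishes the destination line
 * in the cache before the stores; dcba is only a hint, so the stores are
 * still correct on implementations where it is a no-op.
 */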
static inline void
copyout_8uint32s(const uint32_t * const ksaddr32, uint32_t * const udaddr32,
	const register_t ds_msr, const size_t line_mask)
{
	register_t msr;
	register_t tmp;
	__asm volatile(
		"and.	%[tmp],%[line_mask],%[udaddr32]"
	"\n\t"	"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"bne	0,1f"
	"\n\t"	"dcba	0,%[udaddr32]"
	"\n"	"1:"
	"\n\t"	"stw	%[data0],0(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data1],4(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data2],8(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data3],12(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data4],16(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data5],20(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data6],24(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data7],28(%[udaddr32])"	/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr), [tmp] "=&r" (tmp)
	    : [ds_msr] "r" (ds_msr), [udaddr32] "b" (udaddr32),
	      [line_mask] "r" (line_mask),
	      [data0] "r" (ksaddr32[0]), [data1] "r" (ksaddr32[1]),
	      [data2] "r" (ksaddr32[2]), [data3] "r" (ksaddr32[3]),
	      [data4] "r" (ksaddr32[4]), [data5] "r" (ksaddr32[5]),
	      [data6] "r" (ksaddr32[6]), [data7] "r" (ksaddr32[7])
	    : "cr0");
}

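/*
 * Like copyout_8uint32s() but for 16 words (64 bytes): with a 32-byte
 * cache line the stores span two lines, so a second conditional dcba
 * (guarded by the cmplwi of line_size against 32) pre-allocates the
 * second line as well.
 */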
static inline void
copyout_16uint32s(const uint32_t * const ksaddr32, uint32_t * const udaddr32,
	const register_t ds_msr, const size_t line_mask)
{
	KASSERT(((uintptr_t)udaddr32 & line_mask) == 0);
	register_t msr;
	register_t tmp;
	__asm volatile(
		"and.	%[tmp],%[line_mask],%[udaddr32]"
	"\n\t"	"cmplwi	2,%[line_size],32"
	"\n\t"	"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"bne	0,1f"
	"\n\t"	"dcba	0,%[udaddr32]"
	"\n\t"	"bne	2,1f"
	"\n\t"	"dcba	%[line_size],%[udaddr32]"
	"\n"	"1:"
	"\n\t"	"stw	%[data0],0(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data1],4(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data2],8(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data3],12(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data4],16(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data5],20(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data6],24(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data7],28(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data8],32(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data9],36(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data10],40(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data11],44(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data12],48(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data13],52(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data14],56(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data15],60(%[udaddr32])"	/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr), [tmp] "=&r" (tmp)
	    : [ds_msr] "r" (ds_msr), [udaddr32] "b" (udaddr32),
	      [line_size] "r" (line_mask + 1), [line_mask] "r" (line_mask),
	      [data0] "r" (ksaddr32[0]), [data1] "r" (ksaddr32[1]),
	      [data2] "r" (ksaddr32[2]), [data3] "r" (ksaddr32[3]),
	      [data4] "r" (ksaddr32[4]), [data5] "r" (ksaddr32[5]),
	      [data6] "r" (ksaddr32[6]), [data7] "r" (ksaddr32[7]),
	      [data8] "r" (ksaddr32[8]), [data9] "r" (ksaddr32[9]),
	      [data10] "r" (ksaddr32[10]), [data11] "r" (ksaddr32[11]),
	      [data12] "r" (ksaddr32[12]), [data13] "r" (ksaddr32[13]),
	      [data14] "r" (ksaddr32[14]), [data15] "r" (ksaddr32[15])
	    : "cr0", "cr2");
}

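/*
 * Byte-granular copy: prefetch the kernel source, hand full 16-byte chunks
 * to copyout_16uint8s(), and store any remaining bytes one at a time.
 */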
static inline void
copyout_uint8s(vaddr_t ksaddr, vaddr_t udaddr, size_t len, register_t ds_msr)
{
	const uint8_t *ksaddr8 = (void *)ksaddr;
	uint8_t *udaddr8 = (void *)udaddr;

	__builtin_prefetch(ksaddr8, 0, 1);

	for (; len >= 16; len -= 16, ksaddr8 += 16, udaddr8 += 16) {
		__builtin_prefetch(ksaddr8 + 16, 0, 1);
		copyout_16uint8s(ksaddr8, udaddr8, ds_msr);
	}

	while (len-- > 0) {
		copyout_uint8(udaddr8++, *ksaddr8++, ds_msr);
	}
}

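/*
 * Word-granular copy of a multiple of 4 bytes.  If the user destination is
 * not cache-line aligned and the copy crosses a line boundary, first copy
 * enough leading words to reach the next line boundary.  Then copy 16 (or
 * 8) words per MSR[DS] window so dcba can pre-allocate whole destination
 * lines, and finally mop up any remaining words one at a time.
 */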
static inline void
copyout_uint32s(vaddr_t ksaddr, vaddr_t udaddr, size_t len, register_t ds_msr)
{
	const size_t line_size = curcpu()->ci_ci.dcache_line_size;
	const size_t line_mask = line_size - 1;
	const size_t udalignment = udaddr & line_mask;
	KASSERT((ksaddr & 3) == 0);
	KASSERT((udaddr & 3) == 0);
	const uint32_t *ksaddr32 = (void *)ksaddr;
	uint32_t *udaddr32 = (void *)udaddr;
	len >>= 2;
	__builtin_prefetch(ksaddr32, 0, 1);
	if (udalignment != 0 && udalignment + 4*len > line_size) {
		size_t slen = (line_size - udalignment) >> 2;
		len -= slen;
		for (; slen >= 8; ksaddr32 += 8, udaddr32 += 8, slen -= 8) {
			copyout_8uint32s(ksaddr32, udaddr32, ds_msr, line_mask);
		}
		while (slen-- > 0) {
			copyout_uint32(udaddr32++, *ksaddr32++, ds_msr);
		}
		if (len == 0)
			return;
	}
	__builtin_prefetch(ksaddr32, 0, 1);
	while (len >= 16) {
		__builtin_prefetch(ksaddr32 + 8, 0, 1);
		__builtin_prefetch(ksaddr32 + 16, 0, 1);
		copyout_16uint32s(ksaddr32, udaddr32, ds_msr, line_mask);
		ksaddr32 += 16, udaddr32 += 16, len -= 16;
	}
	KASSERT(len <= 16);
	if (len >= 8) {
		__builtin_prefetch(ksaddr32 + 8, 0, 1);
		copyout_8uint32s(ksaddr32, udaddr32, ds_msr, line_mask);
		ksaddr32 += 8, udaddr32 += 8, len -= 8;
	}
	while (len-- > 0) {
		copyout_uint32(udaddr32++, *ksaddr32++, ds_msr);
	}
}

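/*
 * copyout(): install a fault handler via setfault(), then copy.  Very
 * short copies go byte by byte.  When the kernel source and user
 * destination are mutually word aligned (their low two address bits
 * agree), the bulk of the data goes through the word routines, with byte
 * copies for the unaligned head and tail; otherwise everything is copied
 * byte by byte.  A fault in any user store unwinds through setfault() and
 * its error value is returned to the caller.
 */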
int
copyout(const void *vksaddr, void *vudaddr, size_t len)
{
	struct pcb * const pcb = lwp_getpcb(curlwp);
	struct faultbuf env;
	vaddr_t udaddr = (vaddr_t) vudaddr;
	vaddr_t ksaddr = (vaddr_t) vksaddr;

	if (__predict_false(len == 0)) {
		return 0;
	}

	const register_t ds_msr = mfmsr() | PSL_DS;

	int rv = setfault(&env);
	if (rv != 0) {
		pcb->pcb_onfault = NULL;
		return rv;
	}

	if (__predict_false(len < 4)) {
		copyout_uint8s(ksaddr, udaddr, len, ds_msr);
		pcb->pcb_onfault = NULL;
		return 0;
	}

	const size_t alignment = (udaddr ^ ksaddr) & 3;
	if (__predict_true(alignment == 0)) {
		size_t slen;
		if (__predict_false(ksaddr & 3)) {
			slen = 4 - (ksaddr & 3);
			copyout_uint8s(ksaddr, udaddr, slen, ds_msr);
			udaddr += slen, ksaddr += slen, len -= slen;
		}
		slen = len & ~3;
		if (__predict_true(slen >= 4)) {
			copyout_uint32s(ksaddr, udaddr, slen, ds_msr);
			udaddr += slen, ksaddr += slen, len -= slen;
		}
	}

	if (len > 0) {
		copyout_uint8s(ksaddr, udaddr, len, ds_msr);
	}
	pcb->pcb_onfault = NULL;
	return 0;
}

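/*
 * Hypothetical example of a copyout() caller, for illustration only and
 * not part of this file: a typical consumer copies a kernel buffer to a
 * user-supplied pointer and propagates the error, which is nonzero
 * (normally EFAULT) if the user address faults.
 */
#if 0
static int
example_copyout_caller(void *user_ptr)
{
	char kbuf[16] = "hello, userland";

	return copyout(kbuf, user_ptr, sizeof(kbuf));
}
#endif
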
int
copyoutstr(const void *ksaddr, void *udaddr, size_t len, size_t *lenp)
{
	struct pcb * const pcb = lwp_getpcb(curlwp);
	struct faultbuf env;

	if (__predict_false(len == 0)) {
		if (lenp)
			*lenp = 0;
		return 0;
	}

	if (setfault(&env)) {
		pcb->pcb_onfault = NULL;
		if (lenp)
			*lenp = 0;
		return EFAULT;
	}

	const register_t ds_msr = mfmsr() | PSL_DS;
	const uint8_t *ksaddr8 = ksaddr;
	size_t copylen = 0;

#if 1
	uint8_t *udaddr8 = (void *)udaddr;

	while (copylen++ < len) {
		const uint8_t data = *ksaddr8++;
		copyout_uint8(udaddr8++, data, ds_msr);
		if (data == 0)
			break;
	}
#else
	uint32_t *udaddr32 = (void *)((uintptr_t)udaddr & ~3);

	size_t boff = (uintptr_t)udaddr & 3;
	bool done = false;
	size_t wlen = 0;
	size_t data = 0;

	/*
	 * If the destination buffer doesn't start on a 32-bit boundary
	 * try to partially fill in the first word.  If we succeed we can
	 * finish writing it while preserving the bytes on front.
	 */
	if (boff > 0) {
		KASSERT(len > 0);
		do {
			data = (data << 8) | *ksaddr8++;
			wlen++;
			done = ((uint8_t)data == 0 || len == wlen);
		} while (!done && boff + wlen < 4);
		KASSERT(wlen > 0);
		data <<= 8 * boff;
		if (!done || boff + wlen == 4) {
			uint32_t mask = 0xffffffff << (8 * boff);
			copyout_le32_with_mask(udaddr32++, data, mask, ds_msr);
			boff = 0;
			copylen = wlen;
			wlen = 0;
			data = 0;
		}
	}

	/*
	 * Now we get to the heart of the routine.  Build up complete words
	 * if possible.  When we have one, write it to the user's address
	 * space and go for the next.  If we run out of space or we find the
	 * end of the string, stop building.  If we managed to build a
	 * complete word, just write it and be happy.  Otherwise we have to
	 * deal with the trailing bytes.
	 */
	KASSERT(done || boff == 0);
	KASSERT(done || copylen < len);
	while (!done) {
		KASSERT(wlen == 0);
		KASSERT(copylen < len);
		do {
			data = (data << 8) | *ksaddr8++;
			wlen++;
			done = ((uint8_t)data == 0 || copylen + wlen == len);
		} while (!done && wlen < 4);
		KASSERT(done || wlen == 4);
		if (__predict_true(wlen == 4)) {
			copyout_le32(udaddr32++, data, ds_msr);
			data = 0;
			copylen += wlen;
			wlen = 0;
			KASSERT(copylen < len || done);
		}
	}
	KASSERT(wlen <= 3);
	if (wlen) {
		/*
		 * Remember even though we are running big-endian we are using
		 * byte reversed load/stores so we need to deal with things as
		 * little endian.
		 *
		 * wlen=1 boff=0:
		 * (~(~0 <<  8) <<  0) -> (~(0xffffff00) <<  0) -> 0x000000ff
		 * wlen=1 boff=1:
		 * (~(~0 <<  8) <<  8) -> (~(0xffffff00) <<  8) -> 0x0000ff00
		 * wlen=1 boff=2:
		 * (~(~0 <<  8) << 16) -> (~(0xffffff00) << 16) -> 0x00ff0000
		 * wlen=1 boff=3:
		 * (~(~0 <<  8) << 24) -> (~(0xffffff00) << 24) -> 0xff000000
		 * wlen=2 boff=0:
		 * (~(~0 << 16) <<  0) -> (~(0xffff0000) <<  0) -> 0x0000ffff
		 * wlen=2 boff=1:
		 * (~(~0 << 16) <<  8) -> (~(0xffff0000) <<  8) -> 0x00ffff00
		 * wlen=2 boff=2:
		 * (~(~0 << 16) << 16) -> (~(0xffff0000) << 16) -> 0xffff0000
		 * wlen=3 boff=0:
		 * (~(~0 << 24) <<  0) -> (~(0xff000000) <<  0) -> 0x00ffffff
		 * wlen=3 boff=1:
		 * (~(~0 << 24) <<  8) -> (~(0xff000000) <<  8) -> 0xffffff00
		 */
		KASSERT(boff + wlen <= 4);
		uint32_t mask = (~(~0 << (8 * wlen))) << (8 * boff);
		KASSERT(mask != 0xffffffff);
		copyout_le32_with_mask(udaddr32, data, mask, ds_msr);
		copylen += wlen;
	}
#endif

	pcb->pcb_onfault = NULL;
	if (lenp)
		*lenp = copylen;
	return 0;
}