/*	$NetBSD: copyout.c,v 1.3 2011/06/20 05:17:24 matt Exp $	*/

/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: copyout.c,v 1.3 2011/06/20 05:17:24 matt Exp $");

#include <sys/param.h>
#include <sys/lwp.h>

#include <powerpc/pcb.h>

#include <powerpc/booke/cpuvar.h>

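/*
 * Stores to user space are done with the MSR[DS] bit temporarily set so
 * that data accesses are translated through the user address space.  Each
 * helper below saves the current MSR, switches to ds_msr (the caller's
 * MSR with PSL_DS set), performs its store(s), and then restores the MSR,
 * bracketing the switches with sync/isync.  Faults taken while DS is set
 * are caught through the onfault handler installed with setfault() in
 * copyout()/copyoutstr() below.
 */

/*
 * Store a single byte at a user address.
 */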
static inline void
copyout_uint8(uint8_t *udaddr, uint8_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stb	%[data],0(%[udaddr])"		/* store user byte */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}

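/*
 * Store a 16-bit halfword at a user address.
 */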
static inline void
copyout_uint16(uint16_t *udaddr, uint16_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"sth	%[data],0(%[udaddr])"		/* store user halfword */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}

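/*
 * Store a 32-bit word at a user address.
 */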
static inline void
copyout_uint32(uint32_t * const udaddr, uint32_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stw	%[data],0(%[udaddr])"		/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}

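/*
 * Store a 32-bit word at a user address with its bytes reversed (stwbrx),
 * i.e. treating the destination word as little-endian.  Used by the
 * (currently disabled) word-at-a-time copyoutstr() variant below.
 */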
static inline void
copyout_le32(uint32_t * const udaddr, uint32_t data, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stwbrx	%[data],0,%[udaddr]"		/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data), [udaddr] "b" (udaddr));
}

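/*
 * Read-modify-write a user word: fetch the existing word (byte-reversed),
 * clear the bytes selected by mask, merge in the new data, and store the
 * result back byte-reversed.  This writes a partial word without
 * disturbing the neighbouring user bytes.
 */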
static inline void
copyout_le32_with_mask(uint32_t * const udaddr, uint32_t data,
	uint32_t mask, register_t ds_msr)
{
	register_t msr;
	uint32_t tmp;
	KASSERT((data & ~mask) == 0);
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"lwbrx	%[tmp],0,%[udaddr]"		/* fetch user data */
	"\n\t"	"andc	%[tmp],%[tmp],%[mask]"		/* mask out new data */
	"\n\t"	"or	%[tmp],%[tmp],%[data]"		/* merge new data */
	"\n\t"	"stwbrx	%[tmp],0,%[udaddr]"		/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr), [tmp] "=&r" (tmp)
	    : [ds_msr] "r" (ds_msr), [data] "r" (data),
	      [mask] "r" (mask), [udaddr] "b" (udaddr));
}

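/*
 * Store 16 bytes at a user address with a single MSR round-trip, so the
 * sync/isync overhead is paid once per 16 bytes instead of once per byte.
 */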
static inline void
copyout_16uint8s(const uint8_t *ksaddr8, uint8_t *udaddr8, register_t ds_msr)
{
	register_t msr;
	__asm volatile(
		"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"stb	%[data0],0(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data1],1(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data2],2(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data3],3(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data4],4(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data5],5(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data6],6(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data7],7(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data8],8(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data9],9(%[udaddr8])"		/* store user data */
	"\n\t"	"stb	%[data10],10(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data11],11(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data12],12(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data13],13(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data14],14(%[udaddr8])"	/* store user data */
	"\n\t"	"stb	%[data15],15(%[udaddr8])"	/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr)
	    : [ds_msr] "r" (ds_msr), [udaddr8] "b" (udaddr8),
	      [data0] "r" (ksaddr8[0]), [data1] "r" (ksaddr8[1]),
	      [data2] "r" (ksaddr8[2]), [data3] "r" (ksaddr8[3]),
	      [data4] "r" (ksaddr8[4]), [data5] "r" (ksaddr8[5]),
	      [data6] "r" (ksaddr8[6]), [data7] "r" (ksaddr8[7]),
	      [data8] "r" (ksaddr8[8]), [data9] "r" (ksaddr8[9]),
	      [data10] "r" (ksaddr8[10]), [data11] "r" (ksaddr8[11]),
	      [data12] "r" (ksaddr8[12]), [data13] "r" (ksaddr8[13]),
	      [data14] "r" (ksaddr8[14]), [data15] "r" (ksaddr8[15]));
}

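/*
 * Store 8 words (32 bytes) at a user address.  If the destination is
 * aligned to the start of a cache line, dcba is used to establish the
 * line without first fetching its old contents.
 */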
static inline void
copyout_8uint32s(const uint32_t * const ksaddr32, uint32_t * const udaddr32,
	const register_t ds_msr, const size_t line_mask)
{
	register_t msr;
	register_t tmp;
	__asm volatile(
		"and.	%[tmp],%[line_mask],%[udaddr32]"
	"\n\t"	"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"bne	0,1f"
	"\n\t"	"dcba	0,%[udaddr32]"
	"\n"	"1:"
	"\n\t"	"stw	%[data0],0(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data1],4(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data2],8(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data3],12(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data4],16(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data5],20(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data6],24(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data7],28(%[udaddr32])"	/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr), [tmp] "=&r" (tmp)
	    : [ds_msr] "r" (ds_msr), [udaddr32] "b" (udaddr32),
	      [line_mask] "r" (line_mask),
	      [data0] "r" (ksaddr32[0]), [data1] "r" (ksaddr32[1]),
	      [data2] "r" (ksaddr32[2]), [data3] "r" (ksaddr32[3]),
	      [data4] "r" (ksaddr32[4]), [data5] "r" (ksaddr32[5]),
	      [data6] "r" (ksaddr32[6]), [data7] "r" (ksaddr32[7])
	    : "cr0");
}

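/*
 * Store 16 words (64 bytes) at a cache-line aligned user address.  As
 * above, dcba is used to establish the destination line(s); with a
 * 32-byte cache line the 64-byte block spans two lines, so a second dcba
 * is issued for the second line.
 */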
static inline void
copyout_16uint32s(const uint32_t * const ksaddr32, uint32_t * const udaddr32,
	const register_t ds_msr, const size_t line_mask)
{
	KASSERT(((uintptr_t)udaddr32 & line_mask) == 0);
	register_t msr;
	register_t tmp;
	__asm volatile(
		"and.	%[tmp],%[line_mask],%[udaddr32]"
	"\n\t"	"cmplwi	2,%[line_size],32"
	"\n\t"	"mfmsr	%[msr]"				/* Save MSR */
	"\n\t"	"mtmsr	%[ds_msr]; sync; isync"		/* DS on */
	"\n\t"	"bne	0,1f"
	"\n\t"	"dcba	0,%[udaddr32]"
	"\n\t"	"bne	2,1f"
	"\n\t"	"dcba	%[line_size],%[udaddr32]"
	"\n"	"1:"
	"\n\t"	"stw	%[data0],0(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data1],4(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data2],8(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data3],12(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data4],16(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data5],20(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data6],24(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data7],28(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data8],32(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data9],36(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data10],40(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data11],44(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data12],48(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data13],52(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data14],56(%[udaddr32])"	/* store user data */
	"\n\t"	"stw	%[data15],60(%[udaddr32])"	/* store user data */
	"\n\t"	"mtmsr	%[msr]; sync; isync"		/* DS off */
	    : [msr] "=&r" (msr), [tmp] "=&r" (tmp)
	    : [ds_msr] "r" (ds_msr), [udaddr32] "b" (udaddr32),
	      [line_size] "r" (line_mask + 1), [line_mask] "r" (line_mask),
	      [data0] "r" (ksaddr32[0]), [data1] "r" (ksaddr32[1]),
	      [data2] "r" (ksaddr32[2]), [data3] "r" (ksaddr32[3]),
	      [data4] "r" (ksaddr32[4]), [data5] "r" (ksaddr32[5]),
	      [data6] "r" (ksaddr32[6]), [data7] "r" (ksaddr32[7]),
	      [data8] "r" (ksaddr32[8]), [data9] "r" (ksaddr32[9]),
	      [data10] "r" (ksaddr32[10]), [data11] "r" (ksaddr32[11]),
	      [data12] "r" (ksaddr32[12]), [data13] "r" (ksaddr32[13]),
	      [data14] "r" (ksaddr32[14]), [data15] "r" (ksaddr32[15])
	    : "cr0", "cr2");
}

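/*
 * Byte-wise bulk copy: move 16 bytes at a time (prefetching the kernel
 * source as we go) and finish the remainder one byte at a time.
 */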
static inline void
copyout_uint8s(vaddr_t ksaddr, vaddr_t udaddr, size_t len, register_t ds_msr)
{
	const uint8_t *ksaddr8 = (void *)ksaddr;
	uint8_t *udaddr8 = (void *)udaddr;

	__builtin_prefetch(ksaddr8, 0, 1);

	for (; len >= 16; len -= 16, ksaddr8 += 16, udaddr8 += 16) {
		__builtin_prefetch(ksaddr8 + 16, 0, 1);
		copyout_16uint8s(ksaddr8, udaddr8, ds_msr);
	}

	while (len-- > 0) {
		copyout_uint8(udaddr8++, *ksaddr8++, ds_msr);
	}
}

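/*
 * Word-wise copy: len must be a multiple of 4 and both addresses must be
 * word aligned.  If the user destination starts part-way into a cache
 * line and the copy extends beyond that line, copy up to the next line
 * boundary first, then move line-sized blocks (using dcba), then the
 * remaining words.
 */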
static inline void
copyout_uint32s(vaddr_t ksaddr, vaddr_t udaddr, size_t len, register_t ds_msr)
{
	const size_t line_size = curcpu()->ci_ci.dcache_line_size;
	const size_t line_mask = line_size - 1;
	const size_t udalignment = udaddr & line_mask;
	KASSERT((ksaddr & 3) == 0);
	KASSERT((udaddr & 3) == 0);
	const uint32_t *ksaddr32 = (void *)ksaddr;
	uint32_t *udaddr32 = (void *)udaddr;
	len >>= 2;
	__builtin_prefetch(ksaddr32, 0, 1);
	if (udalignment != 0 && udalignment + 4*len > line_size) {
		size_t slen = (line_size - udalignment) >> 2;
		len -= slen;
		for (; slen >= 8; ksaddr32 += 8, udaddr32 += 8, slen -= 8) {
			copyout_8uint32s(ksaddr32, udaddr32, ds_msr, line_mask);
		}
		while (slen-- > 0) {
			copyout_uint32(udaddr32++, *ksaddr32++, ds_msr);
		}
		if (len == 0)
			return;
	}
	__builtin_prefetch(ksaddr32, 0, 1);
	while (len >= 16) {
		__builtin_prefetch(ksaddr32 + 8, 0, 1);
		__builtin_prefetch(ksaddr32 + 16, 0, 1);
		copyout_16uint32s(ksaddr32, udaddr32, ds_msr, line_mask);
		ksaddr32 += 16, udaddr32 += 16, len -= 16;
	}
	KASSERT(len <= 16);
	if (len >= 8) {
		__builtin_prefetch(ksaddr32 + 8, 0, 1);
		copyout_8uint32s(ksaddr32, udaddr32, ds_msr, line_mask);
		ksaddr32 += 8, udaddr32 += 8, len -= 8;
	}
	while (len-- > 0) {
		copyout_uint32(udaddr32++, *ksaddr32++, ds_msr);
	}
}

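/*
 * copyout(9): copy len bytes from the kernel address vksaddr to the user
 * address vudaddr.  Returns 0 on success, or the error set up by the
 * fault handler (normally EFAULT) if the user address is not writable.
 *
 * Typical use (illustrative only; names are not from this file):
 *
 *	error = copyout(&kdata, udatap, sizeof(kdata));
 *	if (error != 0)
 *		return error;
 */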
int
copyout(const void *vksaddr, void *vudaddr, size_t len)
{
	struct pcb * const pcb = lwp_getpcb(curlwp);
	struct faultbuf env;
	vaddr_t udaddr = (vaddr_t) vudaddr;
	vaddr_t ksaddr = (vaddr_t) vksaddr;

	if (__predict_false(len == 0)) {
		return 0;
	}

	const register_t ds_msr = mfmsr() | PSL_DS;

	int rv = setfault(&env);
	if (rv != 0) {
		pcb->pcb_onfault = NULL;
		return rv;
	}

	if (__predict_false(len < 4)) {
		copyout_uint8s(ksaddr, udaddr, len, ds_msr);
		pcb->pcb_onfault = NULL;
		return 0;
	}

	const size_t alignment = (udaddr ^ ksaddr) & 3;
	if (__predict_true(alignment == 0)) {
		size_t slen;
		if (__predict_false(ksaddr & 3)) {
			slen = 4 - (ksaddr & 3);
			copyout_uint8s(ksaddr, udaddr, slen, ds_msr);
			udaddr += slen, ksaddr += slen, len -= slen;
		}
		slen = len & ~3;
		if (__predict_true(slen >= 4)) {
			copyout_uint32s(ksaddr, udaddr, slen, ds_msr);
			udaddr += slen, ksaddr += slen, len -= slen;
		}
	}

	if (len > 0) {
		copyout_uint8s(ksaddr, udaddr, len, ds_msr);
	}
	pcb->pcb_onfault = NULL;
	return 0;
}

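/*
 * copyoutstr(9): copy a NUL-terminated string from kernel space to user
 * space, writing at most len bytes.  If lenp is not NULL it is set to the
 * number of bytes actually copied, including the terminating NUL when one
 * is copied.
 */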
int
copyoutstr(const void *ksaddr, void *udaddr, size_t len, size_t *lenp)
{
	struct pcb * const pcb = lwp_getpcb(curlwp);
	struct faultbuf env;

	if (__predict_false(len == 0)) {
		if (lenp)
			*lenp = 0;
		return 0;
	}

	if (setfault(&env)) {
		pcb->pcb_onfault = NULL;
		if (lenp)
			*lenp = 0;
		return EFAULT;
	}

	const register_t ds_msr = mfmsr() | PSL_DS;
	const uint8_t *ksaddr8 = ksaddr;
	size_t copylen = 0;

#if 1
	uint8_t *udaddr8 = (void *)udaddr;

	while (copylen < len) {
		const uint8_t data = *ksaddr8++;
		copyout_uint8(udaddr8++, data, ds_msr);
		copylen++;
		if (data == 0)
			break;
	}
#else
	uint32_t *udaddr32 = (void *)((uintptr_t)udaddr & ~3);

	size_t boff = (uintptr_t)udaddr & 3;
	bool done = false;
	size_t wlen = 0;
	uint32_t data = 0;

	/*
	 * If the destination buffer doesn't start on a 32-bit boundary
	 * try to partially fill in the first word.  If we succeed we can
	 * finish writing it while preserving the bytes in front.
	 *
	 * The word is assembled in little-endian byte order (the byte for
	 * the lowest user address in the least significant byte) since it
	 * will be written with a byte-reversed store (stwbrx).
	 */
	if (boff > 0) {
		KASSERT(len > 0);
		do {
			const uint8_t ch = *ksaddr8++;
			data |= (uint32_t)ch << (8 * (boff + wlen));
			wlen++;
			done = (ch == 0 || len == wlen);
		} while (!done && boff + wlen < 4);
		KASSERT(wlen > 0);
		if (!done || boff + wlen == 4) {
			uint32_t mask = 0xffffffff << (8 * boff);
			copyout_le32_with_mask(udaddr32++, data, mask, ds_msr);
			boff = 0;
			copylen = wlen;
			wlen = 0;
			data = 0;
		}
	}

	/*
	 * Now we get to the heart of the routine.  Build up complete words
	 * if possible.  When we have one, write it to the user's address
	 * space and go for the next.  If we ran out of space or we found the
	 * end of the string, stop building.  If we managed to build a complete
	 * word, just write it and be happy.  Otherwise we have to deal with
	 * the trailing bytes.
	 */
	KASSERT(done || boff == 0);
	KASSERT(done || copylen < len);
	while (!done) {
		KASSERT(wlen == 0);
		KASSERT(copylen < len);
		do {
			const uint8_t ch = *ksaddr8++;
			data |= (uint32_t)ch << (8 * wlen);
			wlen++;
			done = (ch == 0 || copylen + wlen == len);
		} while (!done && wlen < 4);
		KASSERT(done || wlen == 4);
		if (__predict_true(wlen == 4)) {
			copyout_le32(udaddr32++, data, ds_msr);
			data = 0;
			copylen += wlen;
			wlen = 0;
			KASSERT(copylen < len || done);
		}
	}
	KASSERT(wlen <= 3);
	if (wlen) {
		/*
		 * Remember even though we are running big-endian we are using
		 * byte reversed load/stores so we need to deal with things as
		 * little endian.
		 *
		 * wlen=1 boff=0:
		 * (~(~0 <<  8) <<  0) -> (~(0xffffff00) <<  0) -> 0x000000ff
		 * wlen=1 boff=1:
		 * (~(~0 <<  8) <<  8) -> (~(0xffffff00) <<  8) -> 0x0000ff00
		 * wlen=1 boff=2:
		 * (~(~0 <<  8) << 16) -> (~(0xffffff00) << 16) -> 0x00ff0000
		 * wlen=1 boff=3:
		 * (~(~0 <<  8) << 24) -> (~(0xffffff00) << 24) -> 0xff000000
		 * wlen=2 boff=0:
		 * (~(~0 << 16) <<  0) -> (~(0xffff0000) <<  0) -> 0x0000ffff
		 * wlen=2 boff=1:
		 * (~(~0 << 16) <<  8) -> (~(0xffff0000) <<  8) -> 0x00ffff00
		 * wlen=2 boff=2:
		 * (~(~0 << 16) << 16) -> (~(0xffff0000) << 16) -> 0xffff0000
		 * wlen=3 boff=0:
		 * (~(~0 << 24) <<  0) -> (~(0xff000000) <<  0) -> 0x00ffffff
		 * wlen=3 boff=1:
		 * (~(~0 << 24) <<  8) -> (~(0xff000000) <<  8) -> 0xffffff00
		 */
		KASSERT(boff + wlen <= 4);
		uint32_t mask = (~(~0 << (8 * wlen))) << (8 * boff);
		KASSERT(mask != 0xffffffff);
		copyout_le32_with_mask(udaddr32, data, mask, ds_msr);
		copylen += wlen;
	}
#endif

	pcb->pcb_onfault = NULL;
	if (lenp)
		*lenp = copylen;
	return 0;
}
    474