      1 /*	$NetBSD: bus_space.c,v 1.47 2022/07/17 08:33:48 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
      9  * Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: bus_space.c,v 1.47 2022/07/17 08:33:48 riastradh Exp $");
     35 
     36 #include <sys/param.h>
     37 #include <sys/systm.h>
     38 #include <sys/malloc.h>
     39 #include <sys/extent.h>
     40 #include <sys/kmem.h>
     41 
     42 #include <uvm/uvm_extern.h>
     43 
     44 #include <dev/isa/isareg.h>
     45 
     46 #include <sys/bus.h>
     47 #include <machine/pio.h>
     48 #include <machine/isa_machdep.h>
     49 
     50 #ifdef XEN
     51 #include <xen/hypervisor.h>
     52 #endif
     53 
     54 /*
     55  * Macros for sanity-checking the aligned-ness of pointers passed to
      56  * bus space ops.  Alignment is not strictly required on the x86, but
      57  * aligned accesses can be faster, and these checks help catch driver
      58  * bugs that would surface on stricter architectures.
     59  */
     60 #ifdef BUS_SPACE_DEBUG
     61 #define	BUS_SPACE_ALIGNED_ADDRESS(p, t)				\
     62 	((((u_long)(p)) & (sizeof(t)-1)) == 0)
     63 
     64 #define	BUS_SPACE_ADDRESS_SANITY(p, t, d)				\
     65 ({									\
     66 	if (BUS_SPACE_ALIGNED_ADDRESS((p), t) == 0) {			\
     67 		printf("%s 0x%lx not aligned to %zu bytes %s:%d\n",	\
     68 		    d, (u_long)(p), sizeof(t), __FILE__, __LINE__);	\
     69 	}								\
     70 	(void) 0;							\
     71 })
     72 #else
     73 #define	BUS_SPACE_ADDRESS_SANITY(p,t,d)	(void) 0
     74 #endif /* BUS_SPACE_DEBUG */
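         /*
          * For example, with BUS_SPACE_DEBUG defined, a 16-bit access through
          * a handle that lands on an odd address is still performed, but the
          * check above would log something along these lines (the address and
          * line number here are hypothetical):
          *
          *	bus addr 0xc1001 not aligned to 2 bytes bus_space.c:694
          */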
     75 
     76 /*
      77  * Extent maps to manage I/O and memory space.  Allocate static
      78  * storage for 16 I/O port regions and 64 memory regions, initially.
      79  * Later, ioport_malloc_safe will indicate that it's safe to use
      80  * malloc() to dynamically allocate region descriptors.
     81  *
     82  * N.B. At least two regions are _always_ allocated from the iomem
      83  * extent map: (0 -> ISA hole) and (end of ISA hole -> end of RAM).
     84  *
     85  * The extent maps are not static!  Machine-dependent ISA and EISA
     86  * routines need access to them for bus address space allocation.
     87  */
     88 static	long ioport_ex_storage[EXTENT_FIXED_STORAGE_SIZE(16) / sizeof(long)];
     89 static	long iomem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(64) / sizeof(long)];
     90 struct	extent *ioport_ex;
     91 struct	extent *iomem_ex;
     92 static	int ioport_malloc_safe;
     93 
     94 static struct bus_space_tag x86_io = { .bst_type = X86_BUS_SPACE_IO };
     95 static struct bus_space_tag x86_mem = { .bst_type = X86_BUS_SPACE_MEM };
     96 
     97 bus_space_tag_t x86_bus_space_io = &x86_io;
     98 bus_space_tag_t x86_bus_space_mem = &x86_mem;
     99 
    100 int x86_mem_add_mapping(bus_addr_t, bus_size_t,
    101 	    int, bus_space_handle_t *);
    102 
    103 static inline bool
    104 x86_bus_space_is_io(bus_space_tag_t t)
    105 {
    106 	return t->bst_type == X86_BUS_SPACE_IO;
    107 }
    108 
    109 static inline bool
    110 x86_bus_space_is_mem(bus_space_tag_t t)
    111 {
    112 	return t->bst_type == X86_BUS_SPACE_MEM;
    113 }
    114 
    115 void
    116 x86_bus_space_init(void)
    117 {
    118 	/*
    119 	 * Initialize the I/O port and I/O mem extent maps.
     120 	 * Note: we don't have to check the return values, since
     121 	 * creating a fixed extent map can never fail (the descriptor
     122 	 * storage has already been allocated).
    123 	 *
    124 	 * N.B. The iomem extent manages _all_ physical addresses
    125 	 * on the machine.  When the amount of RAM is found, the two
    126 	 * extents of RAM are allocated from the map (0 -> ISA hole
    127 	 * and end of ISA hole -> end of RAM).
    128 	 */
    129 	ioport_ex = extent_create("ioport", 0x0, 0xffff,
    130 	    (void *)ioport_ex_storage, sizeof(ioport_ex_storage),
    131 	    EX_NOCOALESCE|EX_NOWAIT);
    132 	iomem_ex = extent_create("iomem", 0x0, MAXIOMEM,
    133 	    (void *)iomem_ex_storage, sizeof(iomem_ex_storage),
    134 	    EX_NOCOALESCE|EX_NOWAIT);
    135 
    136 #ifdef XENPV
     137 	/* We are a privileged guest OS and should have I/O privileges. */
    138 	if (xendomain_is_privileged()) {
    139 		struct physdev_set_iopl set_iopl;
    140 		memset(&set_iopl, 0, sizeof(set_iopl));
    141 		set_iopl.iopl = 1;
    142 		if (HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl) != 0)
    143 			panic("Unable to obtain IOPL, "
    144 			    "despite being SIF_PRIVILEGED");
    145 	}
    146 #endif	/* XENPV */
    147 }
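         /*
          * Illustrative sketch (not actual boot code): machine-dependent
          * callers that need to set aside a fixed range, for instance keeping
          * the ISA hole out of general allocation, can use the extent(9)
          * interface on these maps directly, e.g.:
          *
          *	if (extent_alloc_region(iomem_ex, IOM_BEGIN,
          *	    IOM_END - IOM_BEGIN, EX_NOWAIT) != 0)
          *		printf("WARNING: can't reserve ISA hole\n");
          */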
    148 
    149 void
    150 x86_bus_space_mallocok(void)
    151 {
    152 
    153 	ioport_malloc_safe = 1;
    154 }
    155 
    156 int
    157 bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
    158 		int flags, bus_space_handle_t *bshp)
    159 {
    160 	bus_space_reservation_t bsr;
    161 	bus_space_tag_t it;
    162 	int error;
    163 
    164 	if ((t->bst_exists & BUS_SPACE_OVERRIDE_MAP) == 0)
    165 		;	/* skip override */
    166 	else for (it = t; it != NULL; it = it->bst_super) {
    167 		if ((it->bst_present & BUS_SPACE_OVERRIDE_MAP) == 0)
    168 			continue;
    169 		return (*it->bst_ov->ov_space_map)(it->bst_ctx, t, bpa, size,
    170 		    flags, bshp);
    171 	}
    172 
    173 	error = bus_space_reserve(t, bpa, size, flags, &bsr);
    174 	if (error != 0)
    175 		return error;
    176 
    177 	error = bus_space_reservation_map(t, &bsr, flags, bshp);
    178 	if (error != 0)
    179 		bus_space_release(t, &bsr);
    180 
    181 	return error;
    182 }
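         /*
          * Typical driver usage (an illustrative sketch; the softc fields,
          * base address, size and MYDEV_CSR register offset are hypothetical,
          * not part of this file):
          *
          *	bus_space_handle_t ioh;
          *	uint32_t csr;
          *
          *	if (bus_space_map(sc->sc_iot, sc->sc_iobase, sc->sc_iosize,
          *	    0, &ioh) != 0) {
          *		aprint_error_dev(self, "can't map registers\n");
          *		return;
          *	}
          *	csr = bus_space_read_4(sc->sc_iot, ioh, MYDEV_CSR);
          *	...
          *	bus_space_unmap(sc->sc_iot, ioh, sc->sc_iosize);
          */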
    183 
    184 int
    185 bus_space_reservation_map(bus_space_tag_t t, bus_space_reservation_t *bsr,
    186     int flags, bus_space_handle_t *bshp)
    187 {
    188 	bus_addr_t bpa;
    189 	bus_size_t size;
    190 	bus_space_tag_t it;
    191 
    192 	if ((t->bst_exists & BUS_SPACE_OVERRIDE_RESERVATION_MAP) == 0)
    193 		;	/* skip override */
    194 	else for (it = t; it != NULL; it = it->bst_super) {
    195 		if ((it->bst_present & BUS_SPACE_OVERRIDE_RESERVATION_MAP) == 0)
    196 			continue;
    197 		return (*it->bst_ov->ov_space_reservation_map)(it->bst_ctx, t,
    198 		    bsr, flags, bshp);
    199 	}
    200 
    201 	bpa = bus_space_reservation_addr(bsr);
    202 	size = bus_space_reservation_size(bsr);
    203 
    204 	/*
    205 	 * For I/O space, that's all she wrote.
    206 	 */
    207 	if (x86_bus_space_is_io(t)) {
    208 		*bshp = bpa;
    209 		return 0;
    210 	}
    211 
    212 #ifndef XENPV
    213 	if (bpa >= IOM_BEGIN && (bpa + size) != 0 && (bpa + size) <= IOM_END) {
    214 		*bshp = (bus_space_handle_t)ISA_HOLE_VADDR(bpa);
    215 		return 0;
    216 	}
    217 #endif	/* !XENPV */
    218 
    219 	/*
    220 	 * For memory space, map the bus physical address to
    221 	 * a kernel virtual address.
    222 	 */
    223 	return x86_mem_add_mapping(bpa, size, flags, bshp);
    224 }
    225 
    226 int
    227 _x86_memio_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
    228 		int flags, bus_space_handle_t *bshp)
    229 {
    230 
    231 	/*
    232 	 * For I/O space, just fill in the handle.
    233 	 */
    234 	if (x86_bus_space_is_io(t)) {
    235 		if (flags & BUS_SPACE_MAP_LINEAR)
    236 			return (EOPNOTSUPP);
    237 		*bshp = bpa;
    238 		return (0);
    239 	}
    240 
    241 	/*
    242 	 * For memory space, map the bus physical address to
    243 	 * a kernel virtual address.
    244 	 */
    245 	return x86_mem_add_mapping(bpa, size, flags, bshp);
    246 }
    247 
    248 int
    249 bus_space_reserve(bus_space_tag_t t,
    250     bus_addr_t bpa,
    251     bus_size_t size,
    252     int flags, bus_space_reservation_t *bsrp)
    253 {
    254 	struct extent *ex;
    255 	int error;
    256 	bus_space_tag_t it;
    257 
    258 	if ((t->bst_exists & BUS_SPACE_OVERRIDE_RESERVE) == 0)
    259 		;	/* skip override */
    260 	else for (it = t; it != NULL; it = it->bst_super) {
    261 		if ((it->bst_present & BUS_SPACE_OVERRIDE_RESERVE) == 0)
    262 			continue;
    263 		return (*it->bst_ov->ov_space_reserve)(it->bst_ctx, t,
    264 		    bpa, size, flags, bsrp);
    265 	}
    266 
    267 	/*
    268 	 * Pick the appropriate extent map.
    269 	 */
    270 	if (x86_bus_space_is_io(t)) {
    271 		if (flags & BUS_SPACE_MAP_LINEAR)
    272 			return (EOPNOTSUPP);
    273 		ex = ioport_ex;
    274 	} else if (x86_bus_space_is_mem(t))
    275 		ex = iomem_ex;
    276 	else
     277 		panic("%s: bad bus space tag", __func__);
    278 
    279 	/*
    280 	 * Before we go any further, let's make sure that this
    281 	 * region is available.
    282 	 */
    283 	error = extent_alloc_region(ex, bpa, size,
    284 	    EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0));
    285 
    286 	if (error != 0)
    287 		return error;
    288 
    289 	bus_space_reservation_init(bsrp, bpa, size);
    290 
    291 	return 0;
    292 }
    293 
    294 int
    295 bus_space_reserve_subregion(bus_space_tag_t t,
    296     bus_addr_t rstart, bus_addr_t rend,
    297     const bus_size_t size, const bus_size_t alignment,
    298     const bus_size_t boundary,
    299     const int flags, bus_space_reservation_t *bsrp)
    300 {
    301 	bus_space_reservation_t bsr;
    302 	struct extent *ex;
    303 	u_long bpa;
    304 	int error;
    305 	bus_space_tag_t it;
    306 
    307 	if ((t->bst_exists & BUS_SPACE_OVERRIDE_RESERVE_SUBREGION) == 0)
    308 		;	/* skip override */
    309 	else for (it = t; it != NULL; it = it->bst_super) {
    310 		if ((it->bst_present & BUS_SPACE_OVERRIDE_RESERVE_SUBREGION) ==
    311 		    0)
    312 			continue;
    313 		return (*it->bst_ov->ov_space_reserve_subregion)(it->bst_ctx, t,
    314 		    rstart, rend, size, alignment, boundary, flags, bsrp);
    315 	}
    316 
    317 	/*
    318 	 * Pick the appropriate extent map.
    319 	 */
    320 	if (x86_bus_space_is_io(t)) {
    321 		if (flags & BUS_SPACE_MAP_LINEAR)
    322 			return (EOPNOTSUPP);
    323 		ex = ioport_ex;
    324 	} else if (x86_bus_space_is_mem(t))
    325 		ex = iomem_ex;
    326 	else
     327 		panic("%s: bad bus space tag", __func__);
    328 
    329 	/*
    330 	 * Sanity check the allocation against the extent's boundaries.
    331 	 */
    332 	rstart = MAX(rstart, ex->ex_start);
    333 	rend = MIN(rend, ex->ex_end);
    334 	if (rstart >= rend)
     335 		panic("%s: bad region start/end", __func__);
    336 
    337 	/*
    338 	 * Do the requested allocation.
    339 	 */
    340 	error = extent_alloc_subregion(ex, rstart, rend, size, alignment,
    341 	    boundary,
     342 	    EX_FAST | EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0),
    343 	    &bpa);
    344 
    345 	if (error)
    346 		return (error);
    347 
    348 	bus_space_reservation_init(&bsr, bpa, size);
    349 
    350 	*bsrp = bsr;
    351 
    352 	return 0;
    353 }
    354 
    355 void
    356 bus_space_release(bus_space_tag_t t, bus_space_reservation_t *bsr)
    357 {
    358 	struct extent *ex;
    359 	bus_space_tag_t it;
    360 
    361 	if ((t->bst_exists & BUS_SPACE_OVERRIDE_RELEASE) == 0)
    362 		;	/* skip override */
    363 	else for (it = t; it != NULL; it = it->bst_super) {
    364 		if ((it->bst_present & BUS_SPACE_OVERRIDE_RELEASE) == 0)
    365 			continue;
    366 		(*it->bst_ov->ov_space_release)(it->bst_ctx, t, bsr);
    367 		return;
    368 	}
    369 
    370 	/*
    371 	 * Pick the appropriate extent map.
    372 	 */
    373 	if (x86_bus_space_is_io(t)) {
    374 		ex = ioport_ex;
    375 	} else if (x86_bus_space_is_mem(t))
    376 		ex = iomem_ex;
    377 	else
     378 		panic("%s: bad bus space tag", __func__);
    379 
    380 	if (extent_free(ex, bus_space_reservation_addr(bsr),
    381 	    bus_space_reservation_size(bsr), EX_NOWAIT |
    382 	    (ioport_malloc_safe ? EX_MALLOCOK : 0))) {
    383 		printf("%s: pa 0x%jx, size 0x%jx\n", __func__,
    384 		    (uintmax_t)bus_space_reservation_addr(bsr),
    385 		    (uintmax_t)bus_space_reservation_size(bsr));
    386 		printf("%s: can't free region\n", __func__);
    387 	}
    388 }
    389 
    390 int
    391 bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend,
    392 		bus_size_t size, bus_size_t alignment, bus_size_t boundary,
    393 		int flags, bus_addr_t *bpap, bus_space_handle_t *bshp)
    394 {
    395 	bus_space_reservation_t bsr;
    396 	bus_space_tag_t it;
    397 	int error;
    398 
    399 	if ((t->bst_exists & BUS_SPACE_OVERRIDE_ALLOC) == 0)
    400 		;	/* skip override */
    401 	else for (it = t; it != NULL; it = it->bst_super) {
    402 		if ((it->bst_present & BUS_SPACE_OVERRIDE_ALLOC) == 0)
    403 			continue;
    404 		return (*it->bst_ov->ov_space_alloc)(it->bst_ctx, t,
    405 		    rstart, rend, size, alignment, boundary, flags, bpap, bshp);
    406 	}
    407 
    408 	/*
    409 	 * Do the requested allocation.
    410 	 */
    411 	error = bus_space_reserve_subregion(t, rstart, rend, size, alignment,
    412 	    boundary, flags, &bsr);
    413 
    414 	if (error != 0)
    415 		return error;
    416 
    417 	error = bus_space_reservation_map(t, &bsr, flags, bshp);
    418 	if (error != 0)
    419 		bus_space_release(t, &bsr);
    420 
    421 	*bpap = bus_space_reservation_addr(&bsr);
    422 
    423 	return error;
    424 }
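         /*
          * Illustrative sketch (hypothetical names): a driver that only needs
          * some free bus memory, rather than a fixed address, lets
          * bus_space_alloc() choose the region and later returns it with
          * bus_space_free():
          *
          *	bus_addr_t addr;
          *	bus_space_handle_t memh;
          *	int error;
          *
          *	error = bus_space_alloc(sc->sc_memt, MYDEV_MEM_START,
          *	    MYDEV_MEM_END, MYDEV_MEM_SIZE, PAGE_SIZE, 0,
          *	    BUS_SPACE_MAP_LINEAR, &addr, &memh);
          *	if (error == 0) {
          *		...
          *		bus_space_free(sc->sc_memt, memh, MYDEV_MEM_SIZE);
          *	}
          */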
    425 
    426 int
    427 x86_mem_add_mapping(bus_addr_t bpa, bus_size_t size,
    428 		int flags, bus_space_handle_t *bshp)
    429 {
    430 	paddr_t pa, endpa;
    431 	vaddr_t va, sva;
    432 	u_int pmapflags;
    433 
    434 	pa = x86_trunc_page(bpa);
    435 	endpa = x86_round_page(bpa + size);
    436 
    437 	pmapflags = PMAP_NOCACHE;
    438 	if ((flags & BUS_SPACE_MAP_CACHEABLE) != 0)
    439 		pmapflags = 0;
    440 	else if (flags & BUS_SPACE_MAP_PREFETCHABLE)
    441 		pmapflags = PMAP_WRITE_COMBINE;
    442 
    443 #ifdef DIAGNOSTIC
    444 	if (endpa != 0 && endpa <= pa)
    445 		panic("x86_mem_add_mapping: overflow");
    446 #endif
    447 
    448 #ifdef XENPV
    449 	if (bpa >= IOM_BEGIN && (bpa + size) != 0 && (bpa + size) <= IOM_END) {
    450 		sva = (vaddr_t)ISA_HOLE_VADDR(pa);
    451 	} else
    452 #endif	/* XENPV */
    453 	{
    454 		sva = uvm_km_alloc(kernel_map, endpa - pa, 0,
    455 		    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
    456 		if (sva == 0)
    457 			return (ENOMEM);
    458 	}
    459 
    460 	*bshp = (bus_space_handle_t)(sva + (bpa & PGOFSET));
    461 
    462 	for (va = sva; pa != endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
    463 		pmap_kenter_ma(va, pa, VM_PROT_READ | VM_PROT_WRITE, pmapflags);
    464 	}
    465 	pmap_update(pmap_kernel());
    466 
    467 	return 0;
    468 }
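         /*
          * Worked example of the page arithmetic above, assuming 4 KiB pages:
          * mapping bpa = 0xfebc1004 with size = 0x10 gives pa = 0xfebc1000 and
          * endpa = 0xfebc2000, so exactly one page is entered into the pmap
          * and the returned handle is sva + 0x004, preserving the caller's
          * offset within the page.
          */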
    469 
    470 bool
    471 bus_space_is_equal(bus_space_tag_t t1, bus_space_tag_t t2)
    472 {
    473 	if (t1 == NULL || t2 == NULL)
    474 		return false;
    475 	return t1->bst_type == t2->bst_type;
    476 }
    477 
    478 /*
     479  * void _x86_memio_unmap(bus_space_tag_t bst, bus_space_handle_t bsh,
     480  *                        bus_size_t size, bus_addr_t *adrp)
     481  *
     482  *   This function unmaps memory- or I/O-space mapped by
     483  *   _x86_memio_map().  It works much like x86_memio_unmap(), but it
     484  *   does not consult the kernel's built-in extent maps; instead it
     485  *   returns the physical address of the bus space, for the
     486  *   convenience of an external extent manager.
    487  */
    488 void
    489 _x86_memio_unmap(bus_space_tag_t t, bus_space_handle_t bsh,
    490 		bus_size_t size, bus_addr_t *adrp)
    491 {
    492 	u_long va, endva;
    493 	bus_addr_t bpa;
    494 
    495 	/*
    496 	 * Find the correct extent and bus physical address.
    497 	 */
    498 	if (x86_bus_space_is_io(t)) {
    499 		bpa = bsh;
    500 	} else if (x86_bus_space_is_mem(t)) {
    501 		if (bsh >= atdevbase && (bsh + size) != 0 &&
    502 		    (bsh + size) <= (atdevbase + IOM_SIZE)) {
    503 			bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
    504 		} else {
    505 
    506 			va = x86_trunc_page(bsh);
    507 			endva = x86_round_page(bsh + size);
    508 
    509 #ifdef DIAGNOSTIC
    510 			if (endva <= va) {
    511 				panic("_x86_memio_unmap: overflow");
    512 			}
    513 #endif
    514 
    515 			if (pmap_extract_ma(pmap_kernel(), va, &bpa) == FALSE) {
    516 				panic("_x86_memio_unmap:"
    517 				    " wrong virtual address");
    518 			}
    519 			bpa += (bsh & PGOFSET);
    520 			pmap_kremove(va, endva - va);
    521 			pmap_update(pmap_kernel());
    522 
    523 			/*
    524 			 * Free the kernel virtual mapping.
    525 			 */
    526 			uvm_km_free(kernel_map, va, endva - va, UVM_KMF_VAONLY);
    527 		}
    528 	} else {
    529 		panic("_x86_memio_unmap: bad bus space tag");
    530 	}
    531 
    532 	if (adrp != NULL) {
    533 		*adrp = bpa;
    534 	}
    535 }
    536 
    537 static void
    538 bus_space_reservation_unmap1(bus_space_tag_t t, const bus_space_handle_t bsh,
    539     const bus_size_t size, bus_addr_t *bpap)
    540 {
    541 	u_long va, endva;
    542 	bus_addr_t bpa;
    543 
    544 	/*
    545 	 * Find the correct extent and bus physical address.
    546 	 */
    547 	if (x86_bus_space_is_io(t)) {
    548 		bpa = bsh;
    549 	} else if (x86_bus_space_is_mem(t)) {
    550 		if (bsh >= atdevbase && (bsh + size) != 0 &&
    551 		    (bsh + size) <= (atdevbase + IOM_SIZE)) {
    552 			bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
    553 			goto ok;
    554 		}
    555 
    556 		va = x86_trunc_page(bsh);
    557 		endva = x86_round_page(bsh + size);
    558 
    559 #ifdef DIAGNOSTIC
    560 		if (endva <= va)
     561 			panic("%s: overflow", __func__);
    562 #endif
    563 
    564 		(void) pmap_extract_ma(pmap_kernel(), va, &bpa);
    565 		bpa += (bsh & PGOFSET);
    566 
    567 		pmap_kremove(va, endva - va);
    568 		pmap_update(pmap_kernel());
    569 
    570 		/*
    571 		 * Free the kernel virtual mapping.
    572 		 */
    573 		uvm_km_free(kernel_map, va, endva - va, UVM_KMF_VAONLY);
    574 	} else
     575 		panic("%s: bad bus space tag", __func__);
    576 ok:
    577 	if (bpap != NULL)
    578 		*bpap = bpa;
    579 }
    580 
    581 void
    582 bus_space_reservation_unmap(bus_space_tag_t t, const bus_space_handle_t bsh,
    583     const bus_size_t size)
    584 {
    585 	bus_space_tag_t it;
    586 
    587 	if ((t->bst_exists & BUS_SPACE_OVERRIDE_RESERVATION_UNMAP) == 0)
    588 		;	/* skip override */
    589 	else for (it = t; it != NULL; it = it->bst_super) {
    590 		if ((it->bst_present & BUS_SPACE_OVERRIDE_RESERVATION_UNMAP) ==
    591 		    0)
    592 			continue;
    593 		(*it->bst_ov->ov_space_reservation_unmap)(it->bst_ctx,
    594 		    t, bsh, size);
    595 		return;
    596 	}
    597 
    598 	bus_space_reservation_unmap1(t, bsh, size, NULL);
    599 }
    600 
    601 void
    602 bus_space_unmap(bus_space_tag_t t, const bus_space_handle_t bsh,
    603     const bus_size_t size)
    604 {
    605 	bus_addr_t addr;
    606 	bus_space_reservation_t bsr;
    607 	bus_space_tag_t it;
    608 
    609 	if ((t->bst_exists & BUS_SPACE_OVERRIDE_UNMAP) == 0)
    610 		;	/* skip override */
    611 	else for (it = t; it != NULL; it = it->bst_super) {
    612 		if ((it->bst_present & BUS_SPACE_OVERRIDE_UNMAP) == 0)
    613 			continue;
    614 		(*it->bst_ov->ov_space_unmap)(it->bst_ctx, t, bsh, size);
    615 		return;
    616 	}
    617 
    618 	bus_space_reservation_unmap1(t, bsh, size, &addr);
    619 
    620 	bus_space_reservation_init(&bsr, addr, size);
    621 	bus_space_release(t, &bsr);
    622 }
    623 
    624 void
    625 bus_space_free(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
    626 {
    627 	bus_space_tag_t it;
    628 
    629 	if ((t->bst_exists & BUS_SPACE_OVERRIDE_FREE) == 0)
    630 		;	/* skip override */
    631 	else for (it = t; it != NULL; it = it->bst_super) {
    632 		if ((it->bst_present & BUS_SPACE_OVERRIDE_FREE) == 0)
    633 			continue;
    634 		(*it->bst_ov->ov_space_free)(it->bst_ctx, t, bsh, size);
    635 		return;
    636 	}
    637 	/* bus_space_unmap() does all that we need to do. */
    638 	bus_space_unmap(t, bsh, size);
    639 }
    640 
    641 int
    642 bus_space_subregion(bus_space_tag_t t, bus_space_handle_t bsh,
    643     bus_size_t offset, bus_size_t size, bus_space_handle_t *nbshp)
    644 {
    645 
    646 	*nbshp = bsh + offset;
    647 	return (0);
    648 }
    649 
    650 paddr_t
    651 bus_space_mmap(bus_space_tag_t t, bus_addr_t addr, off_t off, int prot,
    652     int flags)
    653 {
    654 	paddr_t pflags = 0;
    655 
    656 	/* Can't mmap I/O space. */
    657 	if (x86_bus_space_is_io(t))
    658 		return (-1);
    659 
    660 	/*
    661 	 * "addr" is the base address of the device we're mapping.
    662 	 * "off" is the offset into that device.
    663 	 *
    664 	 * Note we are called for each "page" in the device that
    665 	 * the upper layers want to map.
    666 	 */
    667 	if (flags & BUS_SPACE_MAP_PREFETCHABLE)
    668 		pflags |= X86_MMAP_FLAG_PREFETCH;
    669 
    670 	return x86_btop(addr + off) | (pflags << X86_MMAP_FLAG_SHIFT);
    671 }
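         /*
          * Illustrative sketch (hypothetical driver and softc): a character
          * device's mmap entry point is called once per page and would
          * typically just forward each offset here:
          *
          *	paddr_t
          *	mydev_mmap(dev_t dev, off_t off, int prot)
          *	{
          *		struct mydev_softc *sc = device_lookup_private(
          *		    &mydev_cd, minor(dev));
          *
          *		if (sc == NULL || off < 0 || off >= sc->sc_memsize)
          *			return -1;
          *		return bus_space_mmap(sc->sc_memt, sc->sc_membase, off,
          *		    prot, BUS_SPACE_MAP_PREFETCHABLE);
          *	}
          */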
    672 
    673 void
    674 bus_space_set_multi_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
    675 		      uint8_t v, size_t c)
    676 {
    677 	vaddr_t addr = h + o;
    678 
    679 	if (x86_bus_space_is_io(t))
    680 		while (c--)
    681 			outb(addr, v);
    682 	else
    683 		while (c--)
    684 			*(volatile uint8_t *)(addr) = v;
    685 }
    686 
    687 void
    688 bus_space_set_multi_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
    689 		      uint16_t v, size_t c)
    690 {
    691 	vaddr_t addr = h + o;
    692 
    693 	BUS_SPACE_ADDRESS_SANITY(addr, uint16_t, "bus addr");
    694 
    695 	if (x86_bus_space_is_io(t))
    696 		while (c--)
    697 			outw(addr, v);
    698 	else
    699 		while (c--)
    700 			*(volatile uint16_t *)(addr) = v;
    701 }
    702 
    703 void
    704 bus_space_set_multi_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
    705 		      uint32_t v, size_t c)
    706 {
    707 	vaddr_t addr = h + o;
    708 
    709 	BUS_SPACE_ADDRESS_SANITY(addr, uint32_t, "bus addr");
    710 
    711 	if (x86_bus_space_is_io(t))
    712 		while (c--)
    713 			outl(addr, v);
    714 	else
    715 		while (c--)
    716 			*(volatile uint32_t *)(addr) = v;
    717 }
    718 
    719 void
    720 bus_space_set_region_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
    721 		      uint8_t v, size_t c)
    722 {
    723 	vaddr_t addr = h + o;
    724 
    725 	if (x86_bus_space_is_io(t))
    726 		for (; c != 0; c--, addr++)
    727 			outb(addr, v);
    728 	else
    729 		for (; c != 0; c--, addr++)
    730 			*(volatile uint8_t *)(addr) = v;
    731 }
    732 
    733 void
    734 bus_space_set_region_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
    735 		       uint16_t v, size_t c)
    736 {
    737 	vaddr_t addr = h + o;
    738 
    739 	BUS_SPACE_ADDRESS_SANITY(addr, uint16_t, "bus addr");
    740 
    741 	if (x86_bus_space_is_io(t))
    742 		for (; c != 0; c--, addr += 2)
    743 			outw(addr, v);
    744 	else
    745 		for (; c != 0; c--, addr += 2)
    746 			*(volatile uint16_t *)(addr) = v;
    747 }
    748 
    749 void
    750 bus_space_set_region_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
    751 		       uint32_t v, size_t c)
    752 {
    753 	vaddr_t addr = h + o;
    754 
    755 	BUS_SPACE_ADDRESS_SANITY(addr, uint32_t, "bus addr");
    756 
    757 	if (x86_bus_space_is_io(t))
    758 		for (; c != 0; c--, addr += 4)
    759 			outl(addr, v);
    760 	else
    761 		for (; c != 0; c--, addr += 4)
    762 			*(volatile uint32_t *)(addr) = v;
    763 }
    764 
    765 void
    766 bus_space_copy_region_1(bus_space_tag_t t, bus_space_handle_t h1,
    767 			bus_size_t o1, bus_space_handle_t h2,
    768 			bus_size_t o2, size_t c)
    769 {
    770 	vaddr_t addr1 = h1 + o1;
    771 	vaddr_t addr2 = h2 + o2;
    772 
    773 	if (x86_bus_space_is_io(t)) {
    774 		if (addr1 >= addr2) {
    775 			/* src after dest: copy forward */
    776 			for (; c != 0; c--, addr1++, addr2++)
    777 				outb(addr2, inb(addr1));
    778 		} else {
    779 			/* dest after src: copy backwards */
    780 			for (addr1 += (c - 1), addr2 += (c - 1);
    781 			    c != 0; c--, addr1--, addr2--)
    782 				outb(addr2, inb(addr1));
    783 		}
    784 	} else {
    785 		if (addr1 >= addr2) {
    786 			/* src after dest: copy forward */
    787 			for (; c != 0; c--, addr1++, addr2++)
    788 				*(volatile uint8_t *)(addr2) =
    789 				    *(volatile uint8_t *)(addr1);
    790 		} else {
    791 			/* dest after src: copy backwards */
    792 			for (addr1 += (c - 1), addr2 += (c - 1);
    793 			    c != 0; c--, addr1--, addr2--)
    794 				*(volatile uint8_t *)(addr2) =
    795 				    *(volatile uint8_t *)(addr1);
    796 		}
    797 	}
    798 }
    799 
    800 void
    801 bus_space_copy_region_2(bus_space_tag_t t, bus_space_handle_t h1,
    802 			bus_size_t o1, bus_space_handle_t h2,
    803 			bus_size_t o2, size_t c)
    804 {
    805 	vaddr_t addr1 = h1 + o1;
    806 	vaddr_t addr2 = h2 + o2;
    807 
    808 	BUS_SPACE_ADDRESS_SANITY(addr1, uint16_t, "bus addr 1");
    809 	BUS_SPACE_ADDRESS_SANITY(addr2, uint16_t, "bus addr 2");
    810 
    811 	if (x86_bus_space_is_io(t)) {
    812 		if (addr1 >= addr2) {
    813 			/* src after dest: copy forward */
    814 			for (; c != 0; c--, addr1 += 2, addr2 += 2)
    815 				outw(addr2, inw(addr1));
    816 		} else {
    817 			/* dest after src: copy backwards */
    818 			for (addr1 += 2 * (c - 1), addr2 += 2 * (c - 1);
    819 			    c != 0; c--, addr1 -= 2, addr2 -= 2)
    820 				outw(addr2, inw(addr1));
    821 		}
    822 	} else {
    823 		if (addr1 >= addr2) {
    824 			/* src after dest: copy forward */
    825 			for (; c != 0; c--, addr1 += 2, addr2 += 2)
    826 				*(volatile uint16_t *)(addr2) =
    827 				    *(volatile uint16_t *)(addr1);
    828 		} else {
    829 			/* dest after src: copy backwards */
    830 			for (addr1 += 2 * (c - 1), addr2 += 2 * (c - 1);
    831 			    c != 0; c--, addr1 -= 2, addr2 -= 2)
    832 				*(volatile uint16_t *)(addr2) =
    833 				    *(volatile uint16_t *)(addr1);
    834 		}
    835 	}
    836 }
    837 
    838 void
    839 bus_space_copy_region_4(bus_space_tag_t t, bus_space_handle_t h1,
    840 			bus_size_t o1, bus_space_handle_t h2,
    841 			bus_size_t o2, size_t c)
    842 {
    843 	vaddr_t addr1 = h1 + o1;
    844 	vaddr_t addr2 = h2 + o2;
    845 
    846 	BUS_SPACE_ADDRESS_SANITY(addr1, uint32_t, "bus addr 1");
    847 	BUS_SPACE_ADDRESS_SANITY(addr2, uint32_t, "bus addr 2");
    848 
    849 	if (x86_bus_space_is_io(t)) {
    850 		if (addr1 >= addr2) {
    851 			/* src after dest: copy forward */
    852 			for (; c != 0; c--, addr1 += 4, addr2 += 4)
    853 				outl(addr2, inl(addr1));
    854 		} else {
    855 			/* dest after src: copy backwards */
    856 			for (addr1 += 4 * (c - 1), addr2 += 4 * (c - 1);
    857 			    c != 0; c--, addr1 -= 4, addr2 -= 4)
    858 				outl(addr2, inl(addr1));
    859 		}
    860 	} else {
    861 		if (addr1 >= addr2) {
    862 			/* src after dest: copy forward */
    863 			for (; c != 0; c--, addr1 += 4, addr2 += 4)
    864 				*(volatile uint32_t *)(addr2) =
    865 				    *(volatile uint32_t *)(addr1);
    866 		} else {
    867 			/* dest after src: copy backwards */
    868 			for (addr1 += 4 * (c - 1), addr2 += 4 * (c - 1);
    869 			    c != 0; c--, addr1 -= 4, addr2 -= 4)
    870 				*(volatile uint32_t *)(addr2) =
    871 				    *(volatile uint32_t *)(addr1);
    872 		}
    873 	}
    874 }
    875 
    876 void
    877 bus_space_barrier(bus_space_tag_t tag, bus_space_handle_t bsh,
    878 		  bus_size_t offset, bus_size_t len, int flags)
    879 {
    880 
    881 	/* I/O instructions always happen in program order.  */
    882 	if (x86_bus_space_is_io(tag))
    883 		return;
    884 
    885 	/*
    886 	 * For default mappings, which are mapped with UC-type memory
    887 	 * regions, all loads and stores are issued in program order.
    888 	 *
    889 	 * For BUS_SPACE_MAP_PREFETCHABLE mappings, which are mapped
    890 	 * with WC-type memory regions, loads and stores may be issued
    891 	 * out of order, potentially requiring any of the three x86
    892 	 * fences -- LFENCE, SFENCE, MFENCE.
    893 	 *
    894 	 * For BUS_SPACE_MAP_CACHEABLE mappings, which are mapped with
    895 	 * WB-type memory regions (like normal memory), store/load may
    896 	 * be reordered to load/store, potentially requiring MFENCE.
    897 	 *
    898 	 * We can't easily tell here how the region was mapped (without
    899 	 * consulting the page tables), so just issue the fence
    900 	 * unconditionally.  Chances are either it's necessary or the
    901 	 * cost is small in comparison to device register I/O.
    902 	 *
    903 	 * Reference:
    904 	 *
    905 	 *	AMD64 Architecture Programmer's Manual, Volume 2:
    906 	 *	System Programming, 24593--Rev. 3.38--November 2021,
    907 	 *	Sec. 7.4.2 Memory Barrier Interaction with Memory
    908 	 *	Types, Table 7-3, p. 196.
    909 	 *	https://web.archive.org/web/20220625040004/https://www.amd.com/system/files/TechDocs/24593.pdf#page=256
    910 	 */
    911 	switch (flags) {
    912 	case 0:
    913 		break;
    914 	case BUS_SPACE_BARRIER_READ:
    915 		x86_lfence();
    916 		break;
    917 	case BUS_SPACE_BARRIER_WRITE:
    918 		x86_sfence();
    919 		break;
    920 	case BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE:
    921 		x86_mfence();
    922 		break;
    923 	default:
    924 		panic("unknown bus space barrier: 0x%x", (unsigned)flags);
    925 	}
    926 }
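         /*
          * Typical use (illustrative; the handle and register offsets are
          * hypothetical): a driver writing a command register through a
          * prefetchable (write-combining) mapping and then polling a status
          * register would order the accesses like this:
          *
          *	bus_space_write_4(t, memh, MYDEV_CMD, cmd);
          *	bus_space_barrier(t, memh, MYDEV_CMD, 4,
          *	    BUS_SPACE_BARRIER_WRITE);
          *	while (bus_space_read_4(t, memh, MYDEV_STS) & MYDEV_STS_BUSY)
          *		delay(10);
          */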
    927 
    928 void *
    929 bus_space_vaddr(bus_space_tag_t tag, bus_space_handle_t bsh)
    930 {
    931 
    932 	return x86_bus_space_is_mem(tag) ? (void *)bsh : NULL;
    933 }
    934 
    935 static const void *
    936 bit_to_function_pointer(const struct bus_space_overrides *ov, uint64_t bit)
    937 {
    938 	switch (bit) {
    939 	case BUS_SPACE_OVERRIDE_MAP:
    940 		return ov->ov_space_map;
    941 	case BUS_SPACE_OVERRIDE_UNMAP:
    942 		return ov->ov_space_unmap;
    943 	case BUS_SPACE_OVERRIDE_ALLOC:
    944 		return ov->ov_space_alloc;
    945 	case BUS_SPACE_OVERRIDE_FREE:
    946 		return ov->ov_space_free;
    947 	case BUS_SPACE_OVERRIDE_RESERVE:
    948 		return ov->ov_space_reserve;
    949 	case BUS_SPACE_OVERRIDE_RELEASE:
    950 		return ov->ov_space_release;
    951 	case BUS_SPACE_OVERRIDE_RESERVATION_MAP:
    952 		return ov->ov_space_reservation_map;
    953 	case BUS_SPACE_OVERRIDE_RESERVATION_UNMAP:
    954 		return ov->ov_space_reservation_unmap;
    955 	case BUS_SPACE_OVERRIDE_RESERVE_SUBREGION:
    956 		return ov->ov_space_reserve_subregion;
    957 	default:
    958 		return NULL;
    959 	}
    960 }
    961 
    962 void
    963 bus_space_tag_destroy(bus_space_tag_t bst)
    964 {
    965 	kmem_free(bst, sizeof(struct bus_space_tag));
    966 }
    967 
    968 int
    969 bus_space_tag_create(bus_space_tag_t obst, const uint64_t present,
    970     const uint64_t extpresent, const struct bus_space_overrides *ov, void *ctx,
    971     bus_space_tag_t *bstp)
    972 {
    973 	uint64_t bit, bits, nbits;
    974 	bus_space_tag_t bst;
    975 	const void *fp;
    976 
    977 	if (ov == NULL || present == 0 || extpresent != 0)
    978 		return EINVAL;
    979 
    980 	bst = kmem_alloc(sizeof(struct bus_space_tag), KM_SLEEP);
    981 	bst->bst_super = obst;
    982 	bst->bst_type = obst->bst_type;
    983 
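         	/*
         	 * Walk the set bits of `present' one at a time: clearing the
         	 * lowest set bit with bits & (bits - 1) and XORing with the
         	 * old value isolates that bit.  Every requested override must
         	 * have a matching function pointer.
         	 */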
    984 	for (bits = present; bits != 0; bits = nbits) {
    985 		nbits = bits & (bits - 1);
    986 		bit = nbits ^ bits;
    987 		if ((fp = bit_to_function_pointer(ov, bit)) == NULL) {
    988 			printf("%s: missing bit %" PRIx64 "\n", __func__, bit);
    989 			goto einval;
    990 		}
    991 	}
    992 
    993 	bst->bst_ov = ov;
    994 	bst->bst_exists = obst->bst_exists | present;
    995 	bst->bst_present = present;
    996 	bst->bst_ctx = ctx;
    997 
    998 	*bstp = bst;
    999 
   1000 	return 0;
   1001 einval:
   1002 	kmem_free(bst, sizeof(struct bus_space_tag));
   1003 	return EINVAL;
   1004 }
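         /*
          * Illustrative sketch (hypothetical names): a bus front-end that only
          * wants to intercept bus_space_map() on top of the standard x86
          * memory tag could build an override tag like this, and dispose of it
          * with bus_space_tag_destroy() when done:
          *
          *	static const struct bus_space_overrides mybus_ov = {
          *		.ov_space_map = mybus_space_map,
          *	};
          *
          *	error = bus_space_tag_create(x86_bus_space_mem,
          *	    BUS_SPACE_OVERRIDE_MAP, 0, &mybus_ov, sc, &sc->sc_memt);
          */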
   1005