/*	$NetBSD: bus.c,v 1.71 2025/11/29 19:34:20 thorpej Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_m68k_arch.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus.c,v 1.71 2025/11/29 19:34:20 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/vmem_impl.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <m68k/cacheops.h>
#define	_ATARI_BUS_DMA_PRIVATE
#include <sys/bus.h>

/*
 * Vmem arena to manage all memory space, including I/O ranges.  Allocate
 * storage for 16 regions initially.
 *
 * This means that the fixed static storage is only used for registering
 * the memory regions found at boot and the bus-mapping of the console.
 */
#define	IOMEM_BTAG_COUNT	VMEM_EST_BTCOUNT(1, 16)
static struct vmem iomem_arena_store;
static struct vmem_btag iomem_btag_store[IOMEM_BTAG_COUNT];
static vmem_t *iomem_arena;

static int  _bus_dmamap_load_buffer(bus_dma_tag_t tag, bus_dmamap_t,
		void *, bus_size_t, struct vmspace *, int, paddr_t *,
		int *, int);
static int  bus_mem_add_mapping(bus_space_tag_t t, bus_addr_t bpa,
		bus_size_t size, int flags, bus_space_handle_t *bsph);

extern paddr_t avail_end;

/*
 * We need these for the early memory allocator.  The idea is this:
 * allocate VA-space through ptextra (atari_init.c:startc()).  When
 * the VA & size of this space are known, call bootm_init().
 * Until the VM-system is up, bus_mem_add_mapping() allocates its virtual
 * addresses from this arena.
 *
 * This allows the console code to use the bus_space interface at a
 * very early stage of system configuration.
 */
static pt_entry_t	*bootm_ptep;
static vaddr_t		 bootm_start;
static vaddr_t		 bootm_end;		/* inclusive */
#define	BOOTM_BTAG_COUNT	VMEM_EST_BTCOUNT(1, 32)
static struct vmem	 bootm_arena_store;
static struct vmem_btag	 bootm_btag_store[BOOTM_BTAG_COUNT];
static vmem_t *		 bootm_arena;

static vaddr_t	bootm_alloc(paddr_t pa, u_long size, int flags);
static int	bootm_free(vaddr_t va, u_long size);

void
bootm_init(vaddr_t va, void *ptep, vsize_t size)
{

	bootm_start = va;
	bootm_end = va + size - 1;
	bootm_ptep = (pt_entry_t *)ptep;

	bootm_arena = vmem_init(&bootm_arena_store,
				"bootmem",		/* name */
				0,			/* addr */
				0,			/* size */
				PAGE_SIZE,		/* quantum */
				NULL,			/* importfn */
				NULL,			/* releasefn */
				NULL,			/* source */
				0,			/* qcache_max */
				VM_NOSLEEP | VM_PRIVTAGS,
				IPL_NONE);

	vmem_add_bts(bootm_arena, bootm_btag_store, BOOTM_BTAG_COUNT);
	vmem_add(bootm_arena, va, size, VM_NOSLEEP);
}
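
/*
 * Illustrative sketch (hypothetical names and sizes, not actual startup
 * code): early MD startup sets aside a VA range plus the page-table
 * entries covering it, then hands both to bootm_init() so that
 * bus_space_map() works before the VM system is up:
 *
 *	vaddr_t va = ...;		   VA range reserved via ptextra
 *	pt_entry_t *pte = ...;		   PTEs covering that range
 *	bootm_init(va, pte, 32 * PAGE_SIZE);
 *	   ... early console attachment can now call bus_space_map() ...
 */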

vaddr_t
bootm_alloc(paddr_t pa, u_long size, int flags)
{
	pt_entry_t	*pg, *epg;
	pt_entry_t	pg_proto;
	vmem_addr_t	rva;
	vaddr_t		va;

	if (vmem_alloc(bootm_arena, size, VM_BESTFIT | VM_NOSLEEP, &rva) != 0) {
		printf("bootm_alloc fails! Not enough fixed boundary tags?\n");
		printf("Requested extent: pa=%lx, size=%lx\n",
						(u_long)pa, size);
		return 0;
	}

	pg  = &bootm_ptep[btoc(rva - bootm_start)];
	epg = &pg[btoc(size)];
	va  = rva;
	pg_proto = pa | PG_RW | PG_V;
	if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0)
		pg_proto |= PG_CI;
	while (pg < epg) {
		*pg++     = pg_proto;
		pg_proto += PAGE_SIZE;
#if defined(M68040) || defined(M68060)
		if (mmutype == MMU_68040) {
			DCFP(pa);
			pa += PAGE_SIZE;
		}
#endif
		TBIS(va);
		va += PAGE_SIZE;
	}
	return rva;
}

int
bootm_free(vaddr_t va, u_long size)
{

	if ((va < bootm_start) || ((va + size - 1) > bootm_end))
		return 0; /* Not for us! */
	vmem_free(bootm_arena, va, size);
	return 1;
}

void
atari_bus_space_arena_init(paddr_t startpa, paddr_t endpa)
{
	vmem_size_t size;

	/*
	 * Initialize the I/O mem vmem arena.
	 *
	 * Note: we don't have to check the return value since
	 * initializing an arena with statically allocated boundary
	 * tags never fails.
	 *
	 * N.B. The iomem arena manages _all_ physical addresses
	 * on the machine.  When the amount of RAM is found, all
	 * ranges of RAM are allocated from the arena.
	 */

	iomem_arena = vmem_init(&iomem_arena_store,
				"iomem",		/* name */
				0,			/* addr */
				0,			/* size */
				1,			/* quantum */
				NULL,			/* importfn */
				NULL,			/* releasefn */
				NULL,			/* source */
				0,			/* qcache_max */
				VM_NOSLEEP | VM_PRIVTAGS,
				IPL_NONE);

	vmem_add_bts(iomem_arena, iomem_btag_store, IOMEM_BTAG_COUNT);

	/* XXX kern/57748 */
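	/*
	 * If the arena covers the entire address space,
	 * endpa - startpa + 1 wraps around to 0; back off by one so
	 * vmem_add() is handed the largest representable size instead.
	 */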
	size = (vmem_size_t)(endpa - startpa) + 1;
	if (size == 0) {
		size--;
	}
	vmem_add(iomem_arena, startpa, size, VM_NOSLEEP);
}

int
atari_bus_space_alloc_physmem(paddr_t startpa, paddr_t endpa)
{

	return vmem_xalloc_addr(iomem_arena, startpa, endpa - startpa,
	    VM_NOSLEEP);
}
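
/*
 * Illustrative sketch (assumed values): at boot the arena is created to
 * span the whole physical address space, and every RAM segment found is
 * then reserved out of it, so later bus_space_map()/bus_space_alloc()
 * calls can only succeed on genuine I/O ranges:
 *
 *	atari_bus_space_arena_init(0, 0xffffffffUL);
 *	   for each RAM segment [start, end):
 *		atari_bus_space_alloc_physmem(start, end);
 */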

int
bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *mhp)
{
	int	error;

	/*
	 * Before we go any further, let's make sure that this
	 * region is available.
	 */
	error = vmem_xalloc_addr(iomem_arena, bpa + t->base, size,
	    VM_NOSLEEP);
	if (error != 0)
		return error;

	error = bus_mem_add_mapping(t, bpa, size, flags, mhp);
	if (error != 0) {
		vmem_xfree(iomem_arena, bpa + t->base, size);
	}
	return error;
}
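
/*
 * Illustrative sketch (hypothetical device address and size): a driver
 * maps a register window, accesses it, and unmaps it again.  The return
 * value must be checked, since the range may already be claimed in the
 * iomem arena:
 *
 *	bus_space_handle_t ioh;
 *	uint8_t v;
 *
 *	if (bus_space_map(iot, 0x00e00000, 0x100, 0, &ioh) != 0)
 *		return EIO;
 *	v = bus_space_read_1(iot, ioh, 0);
 *	bus_space_unmap(iot, ioh, 0x100);
 */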

int
bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend,
    bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags,
    bus_addr_t *bpap, bus_space_handle_t *bshp)
{
	vmem_addr_t bpa;
	int error;

	/*
	 * Do the requested allocation.
	 */
	error = vmem_xalloc(iomem_arena, size,
			    alignment,		/* align */
			    0,			/* phase */
			    boundary,		/* boundary */
			    rstart + t->base,	/* minaddr */
			    rend + t->base,	/* maxaddr */
			    VM_BESTFIT | VM_NOSLEEP,
			    &bpa);
	if (error != 0)
		return error;

	/*
	 * Map the bus physical address to a kernel virtual address.
	 */
	error = bus_mem_add_mapping(t, bpa, size, flags, bshp);
	if (error != 0) {
		vmem_xfree(iomem_arena, bpa, size);
	}

	*bpap = bpa;

	return error;
}
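
/*
 * Illustrative sketch (hypothetical range): when a driver needs some
 * suitably aligned space rather than a fixed address, bus_space_alloc()
 * picks a free range from the iomem arena and maps it in one step:
 *
 *	bus_addr_t bpa;
 *	bus_space_handle_t ioh;
 *
 *	error = bus_space_alloc(iot, 0x00d00000, 0x00dfffff, 0x10000,
 *	    0x10000, 0, 0, &bpa, &ioh);
 */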

static int
bus_mem_add_mapping(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	vaddr_t	va;
	paddr_t	pa, endpa;

	pa    = m68k_trunc_page(bpa + t->base);
	endpa = m68k_round_page((bpa + t->base + size) - 1);

#ifdef DIAGNOSTIC
	if (endpa <= pa)
		panic("%s: overflow", __func__);
#endif

	if (kernel_map == NULL) {
		/*
		 * The VM-system is not yet operational; allocate from
		 * a special pool.
		 */
		va = bootm_alloc(pa, endpa - pa, flags);
		if (va == 0)
			return ENOMEM;
		*bshp = va + (bpa & PGOFSET);
		return 0;
	}

	va = uvm_km_alloc(kernel_map, endpa - pa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0)
		return ENOMEM;

	*bshp = va + (bpa & PGOFSET);

	for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pt_entry_t *ptep, npte;

		pmap_enter(pmap_kernel(), (vaddr_t)va, pa,
		    VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE);

		ptep = kvtopte(va);
		npte = *ptep & ~PG_CMASK;

		if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0)
			npte |= PG_CI;
		else if (mmutype == MMU_68040)
			npte |= PG_CCB;

		*ptep = npte;
	}
	pmap_update(pmap_kernel());
	TBIAS();
	return 0;
}

void
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t	va, endva;
	paddr_t bpa;

	va = m68k_trunc_page(bsh);
	endva = m68k_round_page(((char *)bsh + size) - 1);
#ifdef DIAGNOSTIC
	if (endva < va)
		panic("%s: overflow", __func__);
#endif

	(void)pmap_extract(pmap_kernel(), va, &bpa);
	bpa += ((paddr_t)bsh & PGOFSET);

	/*
	 * Free the kernel virtual mapping.
	 */
	if (!bootm_free(va, endva - va)) {
		pmap_remove(pmap_kernel(), va, endva);
		pmap_update(pmap_kernel());
		uvm_km_free(kernel_map, va, endva - va, UVM_KMF_VAONLY);
	}

	/*
	 * Mark as free in the iomem arena.
	 */
	vmem_xfree(iomem_arena, bpa, size);
}

/*
 * Get a new handle for a subregion of an already-mapped area of bus space.
 */
int
bus_space_subregion(bus_space_tag_t t, bus_space_handle_t memh,
    bus_size_t off, bus_size_t sz, bus_space_handle_t *mhp)
{

	*mhp = memh + off;
	return 0;
}

paddr_t
bus_space_mmap(bus_space_tag_t t, bus_addr_t addr, off_t off, int prot,
    int flags)
{

	/*
	 * "addr" is the base address of the device we're mapping.
	 * "off" is the offset into that device.
	 *
	 * Note we are called for each "page" in the device that
	 * the upper layers want to map.
	 */
	return m68k_btop(addr + off);
}

static size_t
_bus_dmamap_mapsize(int const nsegments)
{

	KASSERT(nsegments > 0);
	return sizeof(struct atari_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct atari_bus_dmamap *map;
	void *mapstore;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
	    (flags & BUS_DMA_NOWAIT) != 0 ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return ENOMEM;

	map = (struct atari_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return 0;
}
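
/*
 * Illustrative sketch (hypothetical driver code): the usual pairing of
 * the map operations implemented in this file, following the bus_dma(9)
 * lifecycle:
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(t, len, 1, len, 0, BUS_DMA_NOWAIT, &map) != 0)
 *		return ENOMEM;
 *	if (bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *		   ... start the transfer and wait for completion ...
 *		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *		bus_dmamap_unload(t, map);
 *	}
 *	bus_dmamap_destroy(t, map);
 */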

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return EINVAL;

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("%s: no packet header", __func__);
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("%s: not implemented", __func__);
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
#if defined(M68040) || defined(M68060)
	bus_addr_t p, e, ps, pe;
	bus_size_t seglen;
	bus_dma_segment_t *seg;
	int i;
#endif

#if defined(M68020) || defined(M68030)
#if defined(M68040) || defined(M68060)
	if (cputype == CPU_68020 || cputype == CPU_68030)
#endif
		/* assume no L2 physical cache */
		return;
#endif

#if defined(M68040) || defined(M68060)
	/* If the whole DMA map is uncached, do nothing. */
	if ((map->_dm_flags & BUS_DMA_COHERENT) != 0)
		return;

	/* Short-circuit for unsupported `ops' */
	if ((ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) == 0)
		return;

	/*
	 * flush/purge the cache.
	 */
	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		seg = &map->dm_segs[i];
		if (seg->ds_len <= offset) {
			/* Segment irrelevant - before requested offset */
			offset -= seg->ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		seglen = seg->ds_len - offset;
		if (seglen > len)
			seglen = len;

		ps = seg->ds_addr + offset;
		pe = ps + seglen;

		if ((ops & BUS_DMASYNC_PREWRITE) != 0) {
			p = ps & ~CACHELINE_MASK;
			e = (pe + CACHELINE_MASK) & ~CACHELINE_MASK;

			/* flush cacheline */
			while ((p < e) && (p & (CACHELINE_SIZE * 8 - 1)) != 0) {
				DCFL(p);
				p += CACHELINE_SIZE;
			}

			/* flush cachelines, 128 bytes at a time */
			while ((p < e) && (p & PAGE_MASK) != 0) {
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
			}

			/* flush page */
			while (p + PAGE_SIZE <= e) {
				DCFP(p);
				p += PAGE_SIZE;
			}

			/* flush cachelines, 128 bytes at a time */
			while (p + CACHELINE_SIZE * 8 <= e) {
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
			}

			/* flush cacheline */
			while (p < e) {
				DCFL(p);
				p += CACHELINE_SIZE;
			}
		}

		/*
		 * Normally, the `PREREAD' flag instructs us to purge the
		 * cache for the specified offset and length. However, if
		 * the offset/length is not aligned to a cacheline boundary,
		 * we may end up purging some legitimate data from the
		 * start/end of the cache. In such a case, *flush* the
		 * cachelines at the start and end of the required region.
		 */
		else if ((ops & BUS_DMASYNC_PREREAD) != 0) {
			/* flush cacheline on start boundary */
			if ((ps & CACHELINE_MASK) != 0) {
				DCFL(ps & ~CACHELINE_MASK);
			}

			p = (ps + CACHELINE_MASK) & ~CACHELINE_MASK;
			e = pe & ~CACHELINE_MASK;

			/* purge cacheline */
			while ((p < e) && (p & (CACHELINE_SIZE * 8 - 1)) != 0) {
				DCPL(p);
				p += CACHELINE_SIZE;
			}

			/* purge cachelines, 128 bytes at a time */
			while ((p < e) && (p & PAGE_MASK) != 0) {
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
			}

			/* purge page */
			while (p + PAGE_SIZE <= e) {
				DCPP(p);
				p += PAGE_SIZE;
			}

			/* purge cachelines, 128 bytes at a time */
			while (p + CACHELINE_SIZE * 8 <= e) {
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
			}

			/* purge cacheline */
			while (p < e) {
				DCPL(p);
				p += CACHELINE_SIZE;
			}

			/* flush cacheline on end boundary */
			if (p < pe) {
				DCFL(p);
			}
		}
		offset = 0;
		len -= seglen;
	}
#endif	/* defined(M68040) || defined(M68060) */
}
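
/*
 * Illustrative sketch: for a device-to-memory transfer a driver brackets
 * the DMA with PREREAD/POSTREAD, so the 68040/68060 data cache never
 * holds stale lines for the buffer while the device owns it:
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	   ... device DMAs into the buffer ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 */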

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	return bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, trunc_page(avail_end));
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr, offset;
	struct pglist mlist;
	int curseg;

	offset = t->_displacement;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr - offset);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr, offset;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	offset = t->_displacement;

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("%s: size botch", __func__);
			pmap_enter(pmap_kernel(), va, addr - offset,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}
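
/*
 * Illustrative sketch (hypothetical driver code): how drivers typically
 * combine the two functions above to obtain a DMA-safe, kernel-mapped
 * buffer:
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg,
 *	    BUS_DMA_NOWAIT) != 0)
 *		return ENOMEM;
 *	if (bus_dmamem_map(t, &seg, rseg, size, &kva, BUS_DMA_NOWAIT) != 0) {
 *		bus_dmamem_free(t, &seg, rseg);
 *		return ENOMEM;
 *	}
 */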

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((vaddr_t)kva & PGOFSET)
		panic("%s", __func__);
#endif

	size = round_page(size);

	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
	int i, offset;

	offset = t->_displacement;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if ((off & PGOFSET) != 0)
			panic("%s: offset unaligned", __func__);
		if ((segs[i].ds_addr & PGOFSET) != 0)
			panic("%s: segment unaligned", __func__);
		if ((segs[i].ds_len & PGOFSET) != 0)
			panic("%s: segment size not multiple of page size",
			    __func__);
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return m68k_btop((char *)segs[i].ds_addr - offset + off);
	}

	/* Page not found. */
	return -1;
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, offset, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	offset = t->_displacement;

	pmap = vm_map_pmap(&vm->vm_map);

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		(void)pmap_extract(pmap, vaddr, &curaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((vaddr_t)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr + offset;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr + offset;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return EFBIG;		/* XXX better return value here? */
	return 0;
}
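
/*
 * Worked example of the boundary clipping above (hypothetical numbers):
 * with _dm_boundary = 0x10000 and curaddr = 0x1fc00, bmask = ~0xffff,
 * so baddr = (0x1fc00 + 0x10000) & bmask = 0x20000 and sgsize is clipped
 * to baddr - curaddr = 0x400 bytes, keeping the segment from crossing
 * the 64KB line.
 */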

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	bus_addr_t offset;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	offset = t->_displacement;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error != 0)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_addr = lastaddr + offset;
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("%s", __func__);
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr + offset;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}
   1093