/*	$NetBSD: rumpdev_bus_dma.c,v 1.3.10.2 2015/09/22 12:06:13 skrll Exp $	*/

/*-
 * Copyright (c) 2013 Antti Kantee
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * bus_dma(9) implementation which runs on top of rump kernel hypercalls.
 * It is essentially the same as the PowerPC implementation it is based on,
 * except with some indirection added and the PowerPC MD features removed.
 * It can be expected to run on x86 as-is; other archs may need
 * cache flushing hooks.
 *
 * From sys/arch/powerpc/powerpc/bus_dma.c:
 *	NetBSD: bus_dma.c,v 1.46 2012/02/01 09:54:03 matt Exp
 */
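
/*
 * Illustrative sketch of the usual driver-side lifecycle of a DMA map
 * built from the routines below; "sc->sc_dmat", "buf" and "buflen"
 * stand in for hypothetical driver state.
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 8, MAXPHYS, 0,
 *	    BUS_DMA_WAITOK, &map) != 0)
 *		return ENOMEM;
 *	if (bus_dmamap_load(sc->sc_dmat, map, buf, buflen, NULL,
 *	    BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, map, 0, buflen,
 *		    BUS_DMASYNC_PREWRITE);
 *		(start device DMA using map->dm_segs[] here)
 *		bus_dmamap_sync(sc->sc_dmat, map, 0, buflen,
 *		    BUS_DMASYNC_POSTWRITE);
 *		bus_dmamap_unload(sc->sc_dmat, map);
 *	}
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 */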

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <uvm/uvm.h>

#include "pci_user.h"

#define	EIEIO	membar_sync()

int	_bus_dmamap_load_buffer (bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int, paddr_t *, int *, int);

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
	bus_size_t maxsegsz, bus_size_t boundary, int flags,
	bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
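	/*
	 * For example, with nsegments == 8 the allocation below is
	 * sizeof(*map) plus room for 7 more bus_dma_segment_t entries,
	 * since dm_segs[0] is already part of the map structure.
	 */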
	mapsize = sizeof(*map) + sizeof(bus_dma_segment_t [nsegments - 1]);
	if ((mapstore = kmem_intr_alloc(mapsize,
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (void *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_bounce_thresh = 0;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	size_t mapsize = sizeof(*map)
	    + sizeof(bus_dma_segment_t [map->_dm_segcnt - 1]);
	kmem_intr_free(map, mapsize);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
	void *buf, bus_size_t buflen, struct vmspace *vm, int flags,
	paddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

//	printf("%s(%p,%p,%p,%u,%p,%#x,%p,%p,%u)\n", __func__,
//	    t, map, buf, buflen, vm, flags, lastaddrp, segp, first);

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, (void *)&curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the bounce threshold, notify
		 * the caller.
		 */
		if (map->_dm_bounce_thresh != 0 &&
		    curaddr >= map->_dm_bounce_thresh)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		sgsize = min(sgsize, map->dm_maxsegsz);

		/*
		 * Make sure we don't cross any boundaries.
		 */
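		/*
		 * For example, with a _dm_boundary of 0x1000 and
		 * curaddr == 0x5abc: bmask is ~0xfff, baddr becomes
		 * 0x6000, and sgsize is clamped to at most 0x544 so
		 * that the segment ends exactly on the boundary.
		 */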
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr
			    = rumpcomp_pci_virt_to_mach((void *)curaddr);
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (rumpcomp_pci_virt_to_mach((void*)curaddr)&bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr =
				    rumpcomp_pci_virt_to_mach((void *)curaddr);
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
	void *buf, bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
		&lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
	struct mbuf *m0, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = 0) {
		if (m->m_len == 0)
			continue;
#ifdef POOL_VTOPHYS
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_CLUSTER)) {
		case M_EXT|M_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr =
			    rumpcomp_pci_virt_to_mach((void *)lastaddr);
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;

		case 0:
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			break;
		}
#endif
		error = _bus_dmamap_load_buffer(t, map, m->m_data,
		    m->m_len, vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
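
/*
 * Illustrative sketch of how a network driver would typically feed a
 * transmit mbuf chain through the function above; "sc->sc_dmat" and
 * "txmap" stand in for hypothetical driver state.
 *
 *	if (bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0,
 *	    BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
 *		    BUS_DMASYNC_PREWRITE);
 *		(hand txmap->dm_segs[0 .. txmap->dm_nsegs - 1] to the device)
 *	}
 */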

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
	struct uio *uio, int flags)
{
	paddr_t lastaddr = 0;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
	bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

void
bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
	bus_addr_t offset, bus_size_t len, int ops)
{

	/* XXX: this might need some MD tweaks */
	membar_sync();
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
#ifdef RUMPCOMP_USERFEATURE_PCI_DMAFREE
	vaddr_t vacookie = segs[0]._ds_vacookie;
	bus_size_t sizecookie = segs[0]._ds_sizecookie;

	rumpcomp_pci_dmafree(vacookie, sizecookie);
#else
	panic("bus_dmamem_free not implemented");
#endif
}

/*
 * There is no dedicated hypercall for mapping scatter-gather memory;
 * mapping is delegated to rumpcomp_pci_dmamem_map(), which may simply
 * fail if there is more than one segment to map.
 */
int
bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	size_t size, void **kvap, int flags)
{
	struct rumpcomp_pci_dmaseg *dss;
	size_t allocsize = nsegs * sizeof(*dss);
	int rv, i;

	/*
	 * Though rumpcomp_pci_dmaseg "accidentally" matches the
	 * bus_dma segment descriptor (at least for now), act
	 * proper and actually translate it.
	 */
	dss = kmem_alloc(allocsize, KM_SLEEP);
	for (i = 0; i < nsegs; i++) {
		dss[i].ds_pa = segs[i].ds_addr;
		dss[i].ds_len = segs[i].ds_len;
		dss[i].ds_vacookie = segs[i]._ds_vacookie;
	}
	rv = rumpcomp_pci_dmamem_map(dss, nsegs, size, kvap);
	kmem_free(dss, allocsize);

	return rv;
}
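
/*
 * Illustrative sketch of allocating and mapping a DMA-safe buffer with
 * the routines in this file, e.g. for a descriptor ring; "sc->sc_dmat"
 * stands in for hypothetical driver state.
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_WAITOK) == 0 &&
 *	    bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE, &kva,
 *	    BUS_DMA_WAITOK) == 0) {
 *		(use kva; program the device with seg.ds_addr)
 *		bus_dmamem_unmap(sc->sc_dmat, kva, PAGE_SIZE);
 *		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 *	}
 */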

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/* nothing to do as long as bus_dmamem_map() is what it is */
}

paddr_t
bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	off_t off, int prot, int flags)
{

	panic("bus_dmamem_mmap not supported");
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags)
{
	paddr_t curaddr, lastaddr, pa;
	vaddr_t vacookie;
	size_t sizecookie;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	sizecookie = size;

	/*
	 * Allocate pages from the VM system.
	 */
#if 0
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#else
	/* XXX: ignores boundary, nsegs, etc. */
	//printf("dma allocation %lx %lx %d\n", alignment, boundary, nsegs);
	error = rumpcomp_pci_dmalloc(size, alignment, &pa, &vacookie);
#endif
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = pa;
	segs[curseg].ds_len = PAGE_SIZE;
	segs[curseg]._ds_vacookie = vacookie;
	segs[curseg]._ds_sizecookie = sizecookie;
	size -= PAGE_SIZE;
	pa += PAGE_SIZE;
	vacookie += PAGE_SIZE;

	for (; size;
	    pa += PAGE_SIZE, vacookie += PAGE_SIZE, size -= PAGE_SIZE) {
		curaddr = pa;
		if (curaddr == (lastaddr + PAGE_SIZE) &&
		    (lastaddr & boundary) == (curaddr & boundary)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			curseg++;
			if (curseg >= nsegs)
				return EFBIG;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
			segs[curseg]._ds_vacookie = vacookie;
			segs[curseg]._ds_sizecookie = sizecookie;
		}
		lastaddr = curaddr;
	}
	*rsegs = curseg + 1;

	return (0);
}