Lines Matching refs:map (pmax bus_dma DMA map routines)
100 * Common function for DMA map creation. May be called by bus-specific
101 * DMA map creation functions.
107 struct pmax_bus_dmamap *map;
111 * Allocate and initialize the DMA map.  The end of the map is a variable-sized array of segments, so we allocate enough room for them in one shot.
126 map = (struct pmax_bus_dmamap *)mapstore;
127 map->_dm_size = size;
128 map->_dm_segcnt = nsegments;
129 map->_dm_maxmaxsegsz = maxsegsz;
130 map->_dm_boundary = boundary;
131 map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
132 map->_dm_vmspace = NULL;
133 map->dm_maxsegsz = maxsegsz;
134 map->dm_mapsize = 0; /* no valid mappings */
135 map->dm_nsegs = 0;
137 *dmamp = map;
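/*
 * Hedged sketch, not among the matched lines: the map structure
 * ends in a variable-length segment array, so creation allocates
 * the header plus (nsegments - 1) extra segments in one shot.  The
 * helper name matches the _bus_dmamap_mapsize() call visible in
 * the destroy path below; this body is an assumption.
 */
static size_t
_bus_dmamap_mapsize(int nsegments)
{
	return sizeof(struct pmax_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
}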
142 * Common function for DMA map destruction. May be called by bus-specific
143 * DMA map destruction functions.
146 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
149 kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
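/*
 * Hedged driver-level usage sketch (not from this file): create a
 * map for up to 8 one-page segments, then tear it down.  "sc" and
 * its sc_dmat DMA tag are hypothetical.
 */
bus_dmamap_t map;
int error;

error = bus_dmamap_create(sc->sc_dmat, 8 * PAGE_SIZE, 8, PAGE_SIZE,
    0, BUS_DMA_NOWAIT, &map);
if (error == 0)
	bus_dmamap_destroy(sc->sc_dmat, map);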
159 _bus_dmamap_load_buffer(bus_dmamap_t map, void *buf, bus_size_t buflen,
169 bmask = ~(map->_dm_boundary - 1);
192 if (map->_dm_boundary > 0) {
193 baddr = (curaddr + map->_dm_boundary) & bmask;
203 map->dm_segs[seg].ds_addr = curaddr;
204 map->dm_segs[seg].ds_len = sgsize;
205 map->dm_segs[seg]._ds_vaddr = vaddr;
209 (map->dm_segs[seg].ds_len + sgsize) <=
210 map->dm_maxsegsz &&
211 (map->_dm_boundary == 0 ||
212 (map->dm_segs[seg].ds_addr & bmask) ==
214 map->dm_segs[seg].ds_len += sgsize;
216 if (++seg >= map->_dm_segcnt)
218 map->dm_segs[seg].ds_addr = curaddr;
219 map->dm_segs[seg].ds_len = sgsize;
220 map->dm_segs[seg]._ds_vaddr = vaddr;
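/*
 * Hedged user-space sketch of the boundary arithmetic above: a
 * segment may not cross a power-of-two boundary line, so its size
 * is clipped at the next boundary address (baddr).  The values are
 * illustrative only.
 */
#include <stdio.h>

int
main(void)
{
	unsigned boundary = 0x1000;		/* 4 KB DMA boundary */
	unsigned bmask = ~(boundary - 1);
	unsigned curaddr = 0xffc0;		/* 0x40 bytes below the line */
	unsigned sgsize = 0x100;
	unsigned baddr = (curaddr + boundary) & bmask;	/* 0x10000 */

	if (sgsize > (baddr - curaddr))
		sgsize = baddr - curaddr;
	printf("sgsize clipped to 0x%x\n", sgsize);	/* prints 0x40 */
	return 0;
}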
242 * Common function for loading a direct-mapped DMA map with a linear buffer.
246 _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
256 map->dm_mapsize = 0;
257 map->dm_nsegs = 0;
258 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
260 if (buflen > map->_dm_size)
270 error = _bus_dmamap_load_buffer(map, buf, buflen,
273 map->dm_mapsize = buflen;
274 map->dm_nsegs = seg + 1;
275 map->_dm_vmspace = vm;
285 map->_dm_flags |= PMAX_DMAMAP_COHERENT;
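/*
 * Hedged usage sketch: load a kernel buffer (a NULL proc pointer
 * means kernel) and hand the resulting physical segments to the
 * device.  hw_post_segment() is a hypothetical stand-in for
 * programming real hardware.
 */
error = bus_dmamap_load(sc->sc_dmat, map, buf, buflen, NULL,
    BUS_DMA_NOWAIT);
if (error == 0) {
	for (int i = 0; i < map->dm_nsegs; i++)
		hw_post_segment(sc, map->dm_segs[i].ds_addr,
		    map->dm_segs[i].ds_len);
}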
294 _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
304 map->dm_mapsize = 0;
305 map->dm_nsegs = 0;
306 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
313 if (m0->m_pkthdr.len > map->_dm_size)
322 error = _bus_dmamap_load_buffer(map, m->m_data, m->m_len,
327 map->dm_mapsize = m0->m_pkthdr.len;
328 map->dm_nsegs = seg + 1;
329 map->_dm_vmspace = vmspace_kernel(); /* always kernel */
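/*
 * Hedged sketch of the mbuf chain walk elided above: each mbuf in
 * the packet contributes its own (m_data, m_len) buffer, and
 * zero-length mbufs are skipped.  The trailing parameters of
 * _bus_dmamap_load_buffer() are assumptions inferred from its use
 * in this file.
 */
for (m = m0; m != NULL && error == 0; m = m->m_next) {
	if (m->m_len == 0)
		continue;
	error = _bus_dmamap_load_buffer(map, m->m_data, m->m_len,
	    vmspace_kernel(), flags, &lastaddr, &seg, first);
	first = 0;
}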
338 _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
350 map->dm_mapsize = 0;
351 map->dm_nsegs = 0;
352 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
368 error = _bus_dmamap_load_buffer(map, addr, minlen,
375 map->dm_mapsize = uio->uio_resid;
376 map->dm_nsegs = seg + 1;
377 map->_dm_vmspace = uio->uio_vmspace;
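/*
 * Hedged sketch of the iovec loop around the call above: each
 * iovec is loaded until the residual count is exhausted, with
 * minlen the smaller of the residual and the iovec length.
 */
for (i = 0, first = 1; i < uio->uio_iovcnt && resid != 0 &&
    error == 0; i++) {
	minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
	if (minlen == 0)
		continue;
	addr = (void *)iov[i].iov_base;
	error = _bus_dmamap_load_buffer(map, addr, minlen,
	    uio->uio_vmspace, flags, &lastaddr, &seg, first);
	first = 0;
	resid -= minlen;
}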
386 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
394 * Common function for unloading a DMA map. May be called by
395 * chipset-specific DMA map unload functions.
398 _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
405 map->dm_maxsegsz = map->_dm_maxmaxsegsz;
406 map->dm_mapsize = 0;
407 map->dm_nsegs = 0;
408 map->_dm_flags &= ~PMAX_DMAMAP_COHERENT;
409 map->_dm_vmspace = NULL;
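/*
 * Hedged teardown sketch: sync for the CPU, then unload (which, as
 * shown above, restores dm_maxsegsz and clears the coherency
 * flag), then destroy.
 */
bus_dmamap_sync(sc->sc_dmat, map, 0, buflen, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->sc_dmat, map);
bus_dmamap_destroy(sc->sc_dmat, map);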
414 * Common function for DMA map synchronization. May be called
415 * by chipset-specific DMA map synchronization functions.
420 _bus_dmamap_sync_r3k(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
435 if (offset >= map->dm_mapsize)
436 panic("_bus_dmamap_sync_r3k: bad offset %lu (map size is %lu)",
437 offset, map->dm_mapsize);
438 if (len == 0 || (offset + len) > map->dm_mapsize)
465 * No cache invalidation is necessary if the DMA map covers COHERENT (uncached) memory.
468 if (map->_dm_flags & PMAX_DMAMAP_COHERENT)
483 for (i = 0; i < map->dm_nsegs && len != 0; i++) {
485 if (offset >= map->dm_segs[i].ds_len) {
486 offset -= map->dm_segs[i].ds_len;
495 minlen = len < map->dm_segs[i].ds_len - offset ?
496 len : map->dm_segs[i].ds_len - offset;
498 addr = map->dm_segs[i].ds_addr;
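/*
 * Hedged user-space sketch of the windowing loop above: skip whole
 * segments until the sync offset falls inside one, clip each cache
 * operation to what remains of that segment, then continue from
 * offset 0 in the next.
 */
#include <stdio.h>

struct seg { unsigned long addr, len; };

static void
sync_window(const struct seg *segs, int nsegs, unsigned long offset,
    unsigned long len)
{
	for (int i = 0; i < nsegs && len != 0; i++) {
		if (offset >= segs[i].len) {
			offset -= segs[i].len;	/* window starts later */
			continue;
		}
		unsigned long minlen = len < segs[i].len - offset ?
		    len : segs[i].len - offset;
		printf("cache op at 0x%lx, len 0x%lx\n",
		    segs[i].addr + offset, minlen);
		offset = 0;
		len -= minlen;
	}
}

int
main(void)
{
	const struct seg segs[2] = { { 0x1000, 0x800 }, { 0x4000, 0x1000 } };
	sync_window(segs, 2, 0x600, 0x400);	/* spans both segments */
	return 0;
}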
518 * Common function for DMA map synchronization. May be called
519 * by chipset-specific DMA map synchronization functions.
524 _bus_dmamap_sync_r4k(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
539 if (offset >= map->dm_mapsize)
540 panic("_bus_dmamap_sync_r4k: bad offset %lu (map size is %lu)",
541 offset, map->dm_mapsize);
542 if (len == 0 || (offset + len) > map->dm_mapsize)
578 if (map->_dm_flags & PMAX_DMAMAP_COHERENT)
588 if (__predict_true(VMSPACE_IS_KERNEL_P(map->_dm_vmspace) ||
589 map->_dm_vmspace == curproc->p_vmspace))
594 for (i = 0; i < map->dm_nsegs && len != 0; i++) {
596 if (offset >= map->dm_segs[i].ds_len) {
597 offset -= map->dm_segs[i].ds_len;
606 minlen = len < map->dm_segs[i].ds_len - offset ?
607 len : map->dm_segs[i].ds_len - offset;
609 addr = map->dm_segs[i]._ds_vaddr;
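/*
 * Hedged sketch of the r4k-specific step: MIPS3 caches are
 * virtually indexed, so the operation uses the segment's virtual
 * address (_ds_vaddr), and only when that address is currently
 * mapped per the vmspace test above.  The index-op fallback for a
 * non-resident vmspace is an assumption about the elided branch.
 */
if (VMSPACE_IS_KERNEL_P(map->_dm_vmspace) ||
    map->_dm_vmspace == curproc->p_vmspace)
	mips_dcache_wbinv_range(addr + offset, minlen);
else
	mips_dcache_wbinv_range_index(addr + offset, minlen);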
685 * Common function for mapping DMA-safe memory.  May be called by bus-specific DMA memory map functions.
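/*
 * Hedged usage sketch for the bus_dmamem interface this comment
 * introduces: allocate DMA-safe memory, then map it into kernel
 * virtual address space.  Single-segment case shown; error paths
 * abbreviated.
 */
bus_dma_segment_t seg;
int rseg;
void *kva;

error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
    &seg, 1, &rseg, BUS_DMA_NOWAIT);
if (error == 0)
	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
	    &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);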