/*	$NetBSD: drm_memory.c,v 1.1.2.7 2013/07/24 03:14:15 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_memory.c,v 1.1.2.7 2013/07/24 03:14:15 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "agp_i810.h"
#include "genfb.h"
#else
#define	NAGP_I810	1	/* XXX Why assume agp_i810 is configured?  */
#define	NGENFB		0	/* XXX Why assume genfb is not?  */
#endif

#include <sys/bus.h>

#if NAGP_I810 > 0
/* XXX include order botch -- shouldn't need to include pcivar.h */
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>
#endif

#if NGENFB > 0
#include <dev/wsfb/genfbvar.h>
#endif

#include <drm/drmP.h>

/*
 * If some other driver (agp_i810 or genfb) already has the region at
 * base mapped, borrow its bus space handle instead of mapping the
 * region a second time.
 *
 * XXX drm_bus_borrow is a horrible kludge!
 */
static bool
drm_bus_borrow(bus_addr_t base, bus_space_handle_t *handlep)
{

#if NAGP_I810 > 0
	if (agp_i810_borrow(base, handlep))
		return true;
#endif

#if NGENFB > 0
	if (genfb_borrow(base, handlep))
		return true;
#endif

	return false;
}

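/*
 * Map the device memory described by map into kernel virtual address
 * space and return its kva, or NULL on failure.  First search dev's
 * bus maps for one containing the request and take a subregion of it;
 * if the bus map itself cannot be mapped, try borrowing an existing
 * mapping via drm_bus_borrow.  Otherwise fall back to the AGP map
 * cache and map the request directly.
 */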
void *
drm_ioremap(struct drm_device *dev, struct drm_local_map *map)
{
	const bus_space_tag_t bst = dev->bst;
	unsigned int unit;
	int error;

	/*
	 * Search dev's bus maps for a match.
	 */
	for (unit = 0; unit < dev->bus_nmaps; unit++) {
		struct drm_bus_map *const bm = &dev->bus_maps[unit];

		/* Reject maps starting after the request.  */
		if (map->offset < bm->bm_base)
			continue;

		/* Reject maps smaller than the request.  */
		if (bm->bm_size < map->size)
			continue;

		/*
		 * Reject maps that the request doesn't fit in, i.e.
		 * require map->offset + map->size <= bm->bm_base +
		 * bm->bm_size, rearranged so that neither sum can
		 * overflow.
		 */
		if ((bm->bm_size - map->size) <
		    (map->offset - bm->bm_base))
			continue;

		/* Has it been mapped yet?  If not, map it.  */
		if (bm->bm_mapped == 0) {
			KASSERT(ISSET(bm->bm_flags, BUS_SPACE_MAP_LINEAR));
			error = bus_space_map(bst, bm->bm_base,
			    bm->bm_size, bm->bm_flags, &bm->bm_bsh);
			if (error) {
				if (drm_bus_borrow(map->offset,
					&map->lm_data.bus_space.bsh)) {
					map->lm_data.bus_space.bus_map = NULL;
					goto win;
				}
				return NULL;
			}
		}

		/* Mark it used and make a subregion just for the request.  */
		if (bm->bm_mapped == UINT_MAX)
			return NULL;
		bm->bm_mapped++;
		error = bus_space_subregion(bst, bm->bm_bsh,
		    map->offset - bm->bm_base, map->size,
		    &map->lm_data.bus_space.bsh);
		if (error) {
			/*
			 * Back out: unmark it and, if nobody else was
			 * using it, unmap it.
			 */
			KASSERT(bm->bm_mapped > 0);
			if (--bm->bm_mapped == 0)
				bus_space_unmap(bst, bm->bm_bsh,
				    bm->bm_size);
			return NULL;
		}

		/* Got it!  */
		map->lm_data.bus_space.bus_map = bm;
		goto win;
	}

	/*
	 * No dice.  Fall back to the AGP map cache: reuse a matching
	 * entry if one exists, or claim a free slot and map the
	 * request directly ourselves.
	 *
	 * XXX Is this sensible?  What prevents us from clobbering some
	 * existing map?  And what does this have to do with agp?
	 */
	for (unit = 0; unit < dev->agp_nmaps; unit++) {
		struct drm_bus_map *const bm = &dev->agp_maps[unit];

		/* Is this one allocated? */
		if (bm->bm_mapped > 0) {
			/*
			 * Make sure it has the same base.
			 *
			 * XXX Why must it be the same base?  Can't we
			 * subregion here too?
			 */
			if (bm->bm_base != map->offset)
				continue;

			/* Make sure it's big enough.  */
			if (bm->bm_size < map->size)
				continue;

			/* Mark it used and return it.  */
			if (bm->bm_mapped == UINT_MAX)
				return NULL;
			bm->bm_mapped++;

			/* XXX size is an input/output parameter too...?  */
			map->size = bm->bm_size;

			map->lm_data.bus_space.bsh = bm->bm_bsh;
			map->lm_data.bus_space.bus_map = bm;
			goto win;
		} else {
			const int flags = BUS_SPACE_MAP_PREFETCHABLE |
			    BUS_SPACE_MAP_LINEAR;

			/* Try mapping the request.  */
			error = bus_space_map(bst, map->offset, map->size,
			    flags, &bm->bm_bsh);
			if (error)
				return NULL; /* XXX Why not continue?  */

			/* Got it.  Allocate this bus map.  */
			KASSERT(bm->bm_mapped == 0);
			bm->bm_mapped++;
			bm->bm_base = map->offset;
			bm->bm_size = map->size;
			bm->bm_flags = flags; /* XXX What for?  */

			map->lm_data.bus_space.bsh = bm->bm_bsh;
			map->lm_data.bus_space.bus_map = bm;
			goto win;
		}
	}

	return NULL;

win:
	map->lm_data.bus_space.bst = bst;
	return bus_space_vaddr(bst, map->lm_data.bus_space.bsh);
}

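/*
 * Undo drm_ioremap: drop our reference to the underlying bus map, if
 * any, and unmap it when the last reference goes away.  Handles
 * borrowed from agp_i810 or genfb (bus_map == NULL) are left to their
 * owners.
 */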
void
drm_iounmap(struct drm_device *dev, struct drm_local_map *map)
{
	const bus_space_tag_t bst = dev->bst;
	struct drm_bus_map *const bm = map->lm_data.bus_space.bus_map;

	/*
	 * bm may be null if we have committed the horrible deed of
	 * borrowing from agp_i810 or genfb.
	 */
	if (bm != NULL) {
		KASSERT(bm->bm_mapped > 0);
		if (--bm->bm_mapped == 0)
			bus_space_unmap(bst, bm->bm_bsh, bm->bm_size);
	}
}

/*
 * Allocate a drm dma handle, allocate memory fit for DMA, and map it.
 *
 * XXX This is called drm_pci_alloc for hysterical raisins; it is not
 * specific to PCI.
 *
 * XXX For now, we use non-blocking allocations because this is called
 * by ioctls with the drm global mutex held.
 *
 * XXX Error information is lost because this returns NULL on failure,
 * not even an error embedded in a pointer.
 */
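/*
 * Example usage (a sketch only, not taken from an actual caller):
 *
 *	struct drm_dma_handle *dmah;
 *
 *	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
 *	if (dmah == NULL)
 *		return -ENOMEM;
 *	...CPU access through dmah->vaddr, device access through
 *	dmah->busaddr...
 *	drm_pci_free(dev, dmah);
 */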
struct drm_dma_handle *
drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
{
	int nsegs;
	int error;

	/*
	 * Allocate a drm_dma_handle record.
	 */
	struct drm_dma_handle *const dmah = kmem_alloc(sizeof(*dmah),
	    KM_NOSLEEP);
	if (dmah == NULL) {
		error = -ENOMEM;
		goto out;
	}
	dmah->dmah_tag = dev->dmat;

	/*
	 * Allocate the requested amount of DMA-safe memory.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamem_alloc(dmah->dmah_tag, size, align, 0,
	    &dmah->dmah_seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail0;
	KASSERT(nsegs == 1);

	/*
	 * XXX The old drm code passed BUS_DMA_NOWAIT to bus_dmamem_map
	 * below but BUS_DMA_WAITOK to bus_dmamem_alloc above, for no
	 * obvious reason; we use BUS_DMA_NOWAIT throughout (see the
	 * comment above this function).
	 */

	/*
	 * Map the DMA-safe memory into kernel virtual address space.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamem_map(dmah->dmah_tag, &dmah->dmah_seg, 1, size,
	    &dmah->vaddr,
	    (BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_NOCACHE));
	if (error)
		goto fail1;
	dmah->size = size;

	/*
	 * Create a map for DMA transfers.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamap_create(dmah->dmah_tag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dmah->dmah_map);
	if (error)
		goto fail2;

	/*
	 * Load the kva buffer into the map for DMA transfers.
	 */
	/* XXX errno NetBSD->Linux */
	error = -bus_dmamap_load(dmah->dmah_tag, dmah->dmah_map, dmah->vaddr,
	    size, NULL, (BUS_DMA_NOWAIT | BUS_DMA_NOCACHE));
	if (error)
		goto fail3;

	/* Record the bus address for convenient reference.  */
	dmah->busaddr = dmah->dmah_map->dm_segs[0].ds_addr;

	/* Zero the DMA buffer.  XXX Yikes!  Is this necessary?  */
	memset(dmah->vaddr, 0, size);

	/* Success!  */
	return dmah;

fail3:	bus_dmamap_destroy(dmah->dmah_tag, dmah->dmah_map);
fail2:	bus_dmamem_unmap(dmah->dmah_tag, dmah->vaddr, dmah->size);
fail1:	bus_dmamem_free(dmah->dmah_tag, &dmah->dmah_seg, 1);
fail0:	dmah->dmah_tag = NULL;	/* XXX paranoia */
	kmem_free(dmah, sizeof(*dmah));
out:	DRM_DEBUG("drm_pci_alloc failed: %d\n", error);
	return NULL;
}

/*
 * Release the bus DMA mappings and memory in dmah, and deallocate it.
 */
void
drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
{

	bus_dmamap_unload(dmah->dmah_tag, dmah->dmah_map);
	bus_dmamap_destroy(dmah->dmah_tag, dmah->dmah_map);
	bus_dmamem_unmap(dmah->dmah_tag, dmah->vaddr, dmah->size);
	bus_dmamem_free(dmah->dmah_tag, &dmah->dmah_seg, 1);
	dmah->dmah_tag = NULL;	/* XXX paranoia */
	kmem_free(dmah, sizeof(*dmah));
}

/*
 * Restrict all DMA-safe memory subsequently allocated for dev to lie
 * between min_addr and max_addr.  Can be used multiple times to
 * restrict the bounds further, but never to expand the bounds again.
 *
 * XXX Caller must guarantee nobody has used the tag yet,
 * i.e. allocated any DMA memory.
 */
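/*
 * Example (a sketch only): a driver whose device can address only the
 * low 32 bits of bus address space might call, before any DMA
 * allocations:
 *
 *	error = drm_limit_dma_space(dev, 0, 0xffffffffULL);
 */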
int
drm_limit_dma_space(struct drm_device *dev, resource_size_t min_addr,
    resource_size_t max_addr)
{
	int error;

	KASSERT(min_addr <= max_addr);

	/*
	 * Limit it further if we have already limited it, and destroy
	 * the old subregion DMA tag.
	 */
	if (dev->dmat_subregion_p) {
		min_addr = MAX(min_addr, dev->dmat_subregion_min);
		max_addr = MIN(max_addr, dev->dmat_subregion_max);
		bus_dmatag_destroy(dev->dmat);
	}

	/*
	 * Create a DMA tag for a subregion from the bus's DMA tag.  If
	 * that fails, restore dev->dmat to the whole region so that we
	 * need not worry about dev->dmat being uninitialized (not that
	 * the caller should try to allocate DMA-safe memory on failure
	 * anyway, but...paranoia).
	 */
	error = bus_dmatag_subregion(dev->bus_dmat, min_addr, max_addr,
	    &dev->dmat, BUS_DMA_WAITOK);
	if (error) {
		dev->dmat = dev->bus_dmat;
		return error;
	}

	/*
	 * Remember that we have a subregion tag so that we know to
	 * destroy it later, and record the bounds in case we need to
	 * limit them again.
	 */
	dev->dmat_subregion_p = true;
	dev->dmat_subregion_min = min_addr;
	dev->dmat_subregion_max = max_addr;

	/* Success!  */
	return 0;
}