/*	$NetBSD: drm_vm.c,v 1.11 2020/02/14 04:36:56 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_vm.c,v 1.11 2020/02/14 04:36:56 riastradh Exp $");

#include <sys/types.h>
#include <sys/conf.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_device.h>

#include <drm/drmP.h>
#include <drm/drm_legacy.h>

static paddr_t	drm_legacy_mmap_paddr_locked(struct drm_device *, off_t, int);
static paddr_t	drm_legacy_mmap_dma_paddr(struct drm_device *, off_t, int);
static paddr_t	drm_legacy_mmap_map_paddr(struct drm_device *,
		    struct drm_local_map *, off_t, int);

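/*
 * Attach the uvm device pager for dev's character device and return it
 * in *uobjp, together with the offset (*uoffsetp) at which to map it.
 * offset must be page-aligned; offset and size are used by udv_attach
 * only for access checks, so the device offset is handed back unchanged
 * in *uoffsetp.  Returns 0 on success, or -EINVAL if udv_attach fails.
 */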
int
drm_legacy_mmap_object(struct drm_device *dev, off_t offset, size_t size,
    int prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file __unused)
{
	devmajor_t maj = cdevsw_lookup_major(&drm_cdevsw);
	dev_t devno = makedev(maj, dev->primary->index);
	struct uvm_object *uobj;

	KASSERT(offset == (offset & ~(PAGE_SIZE-1)));

	/*
	 * Attach the device.  The size and offset are used only for
	 * access checks; offset does not become a base address for the
	 * subsequent uvm_map, hence we set *uoffsetp to offset, not 0.
	 */
	uobj = udv_attach(devno, prot, offset, size);
	if (uobj == NULL)
		return -EINVAL;

	*uobjp = uobj;
	*uoffsetp = offset;
	return 0;
}

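/*
 * Translate a page-aligned byte offset into the drm device into the
 * physical address to map there, taking dev->struct_mutex around the
 * lookup.  Returns (paddr_t)-1 if the offset is misaligned or does not
 * correspond to any DMA buffer or local map.
 */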
paddr_t
drm_legacy_mmap_paddr(struct drm_device *dev, off_t byte_offset, int prot)
{
	paddr_t paddr;

	if (byte_offset != (byte_offset & ~(PAGE_SIZE-1)))
		return -1;

	mutex_lock(&dev->struct_mutex);
	paddr = drm_legacy_mmap_paddr_locked(dev, byte_offset, prot);
	mutex_unlock(&dev->struct_mutex);

	return paddr;
}

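/*
 * Same as drm_legacy_mmap_paddr, but with dev->struct_mutex already
 * held: try the DMA buffer pages first, then fall back to the local
 * map hash.  Restricted maps are refused unless the caller has
 * CAP_SYS_ADMIN, as are offsets outside the bounds of the map found.
 */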
static paddr_t
drm_legacy_mmap_paddr_locked(struct drm_device *dev, off_t byte_offset,
    int prot)
{
	const off_t page_offset = (byte_offset >> PAGE_SHIFT);
	struct drm_hash_item *hash;

	KASSERT(mutex_is_locked(&dev->struct_mutex));
	KASSERT(byte_offset == (byte_offset & ~(PAGE_SIZE-1)));

	if ((dev->dma != NULL) &&
	    (0 <= byte_offset) &&
	    (page_offset <= dev->dma->page_count))
		return drm_legacy_mmap_dma_paddr(dev, byte_offset, prot);

	if (drm_ht_find_item(&dev->map_hash, page_offset, &hash))
		return -1;

	struct drm_local_map *const map = drm_hash_entry(hash,
	    struct drm_map_list, hash)->map;
	if (map == NULL)
		return -1;

	/*
	 * XXX FreeBSD drops the mutex at this point, which would be
	 * nice, to allow sleeping in bus_dma cruft, but I don't know
	 * what guarantees the map will continue to exist.
	 */

	if (ISSET(map->flags, _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))
		return -1;

	if (!(map->offset <= byte_offset))
		return -1;
	if (map->size < (byte_offset - map->offset))
		return -1;

	return drm_legacy_mmap_map_paddr(dev, map, (byte_offset - map->offset),
	    prot);
}

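/*
 * Return the physical address of the DMA buffer page at the given
 * page-aligned byte offset, or (paddr_t)-1 if no page list has been
 * set up.  The caller must hold dev->struct_mutex and must already
 * have checked the offset against dev->dma->page_count.
 */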
static paddr_t
drm_legacy_mmap_dma_paddr(struct drm_device *dev, off_t byte_offset, int prot)
{
	const off_t page_offset = (byte_offset >> PAGE_SHIFT);

	KASSERT(mutex_is_locked(&dev->struct_mutex));
	KASSERT(byte_offset == (byte_offset & ~(PAGE_SIZE-1)));
	KASSERT(page_offset <= dev->dma->page_count);

	if (dev->dma->pagelist == NULL)
		return (paddr_t)-1;

	return dev->dma->pagelist[page_offset];
}

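/*
 * Return the physical address at byte_offset into the given local map,
 * dispatching on the map type: bus space for frame buffer, AGP, and
 * register maps; bus_dmamem for consistent and scatter/gather maps.
 * Shared-memory maps and unknown types yield (paddr_t)-1.
 */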
static paddr_t
drm_legacy_mmap_map_paddr(struct drm_device *dev, struct drm_local_map *map,
    off_t byte_offset, int prot)
{
	int flags = 0;

	KASSERT(byte_offset <= map->size);

	switch (map->type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_AGP:
		flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* Fall through.  */

	case _DRM_REGISTERS:
		flags |= BUS_SPACE_MAP_LINEAR; /* XXX Why?  */

		return bus_space_mmap(map->lm_data.bus_space.bst, map->offset,
		    byte_offset, prot, flags);

	case _DRM_CONSISTENT: {
		struct drm_dma_handle *const dmah = map->lm_data.dmah;

		return bus_dmamem_mmap(dev->dmat, &dmah->dmah_seg, 1,
		    byte_offset, prot,
		    /* XXX BUS_DMA_WAITOK?  We're holding a mutex...  */
		    /* XXX What else?  BUS_DMA_COHERENT?  */
		    (BUS_DMA_WAITOK | BUS_DMA_NOCACHE));
	}

	case _DRM_SCATTER_GATHER: {
		struct drm_sg_mem *const sg = dev->sg;

#if 0				/* XXX */
		KASSERT(sg == map->lm_data.sg);
#endif

		return bus_dmamem_mmap(dev->dmat, sg->sg_segs, sg->sg_nsegs,
		    byte_offset, prot,
		    /* XXX BUS_DMA_WAITOK?  We're holding a mutex...  */
		    /* XXX What else?  BUS_DMA_COHERENT?  */
		    (BUS_DMA_WAITOK | BUS_DMA_NOCACHE));
	}

	case _DRM_SHM:
	default:
		return (paddr_t)-1;
	}
}