/*	$NetBSD: drm_vm.c,v 1.13 2022/07/06 01:12:45 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_vm.c,v 1.13 2022/07/06 01:12:45 riastradh Exp $");

#include <sys/types.h>
#include <sys/conf.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_device.h>

#include <linux/capability.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_legacy.h>

static paddr_t	drm_legacy_mmap_paddr_locked(struct drm_device *, off_t, int);
static paddr_t	drm_legacy_mmap_dma_paddr(struct drm_device *, off_t, int);
static paddr_t	drm_legacy_mmap_map_paddr(struct drm_device *,
		    struct drm_local_map *, off_t, int);

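/*
 * drm_legacy_mmap_object(dev, offset, size, prot, uobjp, uoffsetp, file)
 *
 *	Look up the drm character device backing dev and attach a
 *	uvm_device object to it, checking access to the pages at
 *	[offset, offset + size) with protection prot.  On success,
 *	store the object in *uobjp and the offset to map it at in
 *	*uoffsetp, and return 0; otherwise return -EINVAL.
 *
 *	Illustrative sketch only (the real caller lives elsewhere in
 *	the drm code; vm, vaddr, and len here are hypothetical): the
 *	results are meant to be handed to uvm_map, roughly
 *
 *		uvm_map(&vm->vm_map, &vaddr, len, uobj, uoff, 0,
 *		    UVM_MAPFLAG(prot, prot, UVM_INH_COPY, UVM_ADV_RANDOM,
 *			0));
 *
 *	which is why *uoffsetp must carry the device offset.
 */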
int
drm_legacy_mmap_object(struct drm_device *dev, off_t offset, size_t size,
    int prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file __unused)
{
	devmajor_t maj = cdevsw_lookup_major(&drm_cdevsw);
	dev_t devno = makedev(maj, dev->primary->index);
	struct uvm_object *uobj;

	KASSERT(offset == (offset & ~(PAGE_SIZE-1)));
	KASSERT(size > 0);

	/*
	 * Attach the device.  The size and offset are used only for
	 * access checks; they do not select a base address within the
	 * resulting uvm object, so we hand the offset back via
	 * *uoffsetp for the subsequent uvm_map rather than returning 0.
	 */
	uobj = udv_attach(devno, prot, offset, size);
	if (uobj == NULL)
		return -EINVAL;

	*uobjp = uobj;
	*uoffsetp = offset;
	return 0;
}

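/*
 * drm_legacy_mmap_paddr(dev, byte_offset, prot)
 *
 *	Return the physical address backing byte_offset into the drm
 *	device dev for a user mapping with protection prot, or -1 if
 *	byte_offset is not page-aligned or nothing lives there.  Takes
 *	and releases dev->struct_mutex.
 */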
paddr_t
drm_legacy_mmap_paddr(struct drm_device *dev, off_t byte_offset, int prot)
{
	paddr_t paddr;

	if (byte_offset != (byte_offset & ~(PAGE_SIZE-1)))
		return -1;

	mutex_lock(&dev->struct_mutex);
	paddr = drm_legacy_mmap_paddr_locked(dev, byte_offset, prot);
	mutex_unlock(&dev->struct_mutex);

	return paddr;
}

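/*
 * drm_legacy_mmap_paddr_locked(dev, byte_offset, prot)
 *
 *	Same as drm_legacy_mmap_paddr, but the caller must hold
 *	dev->struct_mutex.  Try the DMA buffer pages first; otherwise
 *	look byte_offset up in the map hash and defer to the local map
 *	that covers it, if any.
 */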
static paddr_t
drm_legacy_mmap_paddr_locked(struct drm_device *dev, off_t byte_offset,
    int prot)
{
	const off_t page_offset = (byte_offset >> PAGE_SHIFT);
	struct drm_hash_item *hash;

	KASSERT(mutex_is_locked(&dev->struct_mutex));
	KASSERT(byte_offset == (byte_offset & ~(PAGE_SIZE-1)));

	if ((dev->dma != NULL) &&
	    (0 <= byte_offset) &&
	    (page_offset <= dev->dma->page_count))
		return drm_legacy_mmap_dma_paddr(dev, byte_offset, prot);

	if (drm_ht_find_item(&dev->map_hash, page_offset, &hash))
		return -1;

	struct drm_local_map *const map = drm_hash_entry(hash,
	    struct drm_map_list, hash)->map;
	if (map == NULL)
		return -1;

	/*
	 * XXX FreeBSD drops the mutex at this point, which would be
	 * nice, to allow sleeping in bus_dma cruft, but I don't know
	 * what guarantees the map will continue to exist.
	 */

	if (ISSET(map->flags, _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))
		return -1;

	if (!(map->offset <= byte_offset))
		return -1;
	if (map->size < (byte_offset - map->offset))
		return -1;

	return drm_legacy_mmap_map_paddr(dev, map, (byte_offset - map->offset),
	    prot);
}

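/*
 * drm_legacy_mmap_dma_paddr(dev, byte_offset, prot)
 *
 *	Return the physical address of the DMA buffer page backing
 *	byte_offset, or -1 if there is no page list.  The caller must
 *	hold dev->struct_mutex and have verified that byte_offset is
 *	page-aligned and within the DMA buffers.
 */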
static paddr_t
drm_legacy_mmap_dma_paddr(struct drm_device *dev, off_t byte_offset, int prot)
{
	const off_t page_offset = (byte_offset >> PAGE_SHIFT);

	KASSERT(mutex_is_locked(&dev->struct_mutex));
	KASSERT(byte_offset == (byte_offset & ~(PAGE_SIZE-1)));
	KASSERT(page_offset <= dev->dma->page_count);

	if (dev->dma->pagelist == NULL)
		return (paddr_t)-1;

	return dev->dma->pagelist[page_offset];
}

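/*
 * drm_legacy_mmap_map_paddr(dev, map, byte_offset, prot)
 *
 *	Return the physical address at byte_offset relative to the
 *	start of the local map: via bus_space_mmap for frame buffer,
 *	AGP, and register maps, via bus_dmamem_mmap for consistent and
 *	scatter/gather maps, and -1 for anything else, including shm.
 */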
static paddr_t
drm_legacy_mmap_map_paddr(struct drm_device *dev, struct drm_local_map *map,
    off_t byte_offset, int prot)
{
	int flags = 0;

	KASSERT(byte_offset <= map->size);

	switch (map->type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_AGP:
		flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* Fall through.  */

	case _DRM_REGISTERS:
		flags |= BUS_SPACE_MAP_LINEAR; /* XXX Why?  */

		return bus_space_mmap(map->lm_data.bus_space.bst, map->offset,
		    byte_offset, prot, flags);

	case _DRM_CONSISTENT: {
		struct drm_dma_handle *const dmah = map->lm_data.dmah;

		return bus_dmamem_mmap(dev->dmat, &dmah->dmah_seg, 1,
		    byte_offset, prot,
		    /* XXX BUS_DMA_WAITOK?  We're holding a mutex...  */
		    /* XXX What else?  BUS_DMA_COHERENT?  */
		    (BUS_DMA_WAITOK | BUS_DMA_NOCACHE));
	}

	case _DRM_SCATTER_GATHER: {
		struct drm_sg_mem *const sg = dev->sg;

#if 0				/* XXX */
		KASSERT(sg == map->lm_data.sg);
#endif

		return bus_dmamem_mmap(dev->dmat, sg->sg_segs, sg->sg_nsegs,
		    byte_offset, prot,
		    /* XXX BUS_DMA_WAITOK?  We're holding a mutex...  */
		    /* XXX What else?  BUS_DMA_COHERENT?  */
		    (BUS_DMA_WAITOK | BUS_DMA_NOCACHE));
	}

	case _DRM_SHM:
	default:
		return (paddr_t)-1;
	}
}