/* $NetBSD: drm_gem_cma_helper.c,v 1.13 2021/12/19 09:52:00 riastradh Exp $ */

/*-
 * Copyright (c) 2015-2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_gem_cma_helper.c,v 1.13 2021/12/19 09:52:00 riastradh Exp $");

#include <linux/err.h>

#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/bus_dma_hacks.h>

#include <uvm/uvm.h>

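/*
 * drm_gem_cma_create_internal(ddev, size, sgt)
 *
 *	Allocate a CMA GEM object of the given size and map it into
 *	kernel virtual address space.  The backing storage comes from
 *	the imported scatterlist sgt if one is supplied, from the
 *	device's CMA vmem pool if it has one, or from bus_dmamem_alloc
 *	otherwise.  Returns NULL on failure.
 */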
static struct drm_gem_cma_object *
drm_gem_cma_create_internal(struct drm_device *ddev, size_t size,
    struct sg_table *sgt)
{
	struct drm_gem_cma_object *obj;
	int error, nsegs;

	obj = kmem_zalloc(sizeof(*obj), KM_SLEEP);
	obj->dmat = ddev->dmat;
	obj->dmasize = size;

	if (sgt) {
		/*
		 * Keep hold of the imported scatterlist so that the
		 * error paths and drm_gem_cma_obj_free release it
		 * rather than the bus_dmamem segments.
		 */
		obj->sgt = sgt;
		error = -drm_prime_sg_to_bus_dmamem(obj->dmat, obj->dmasegs, 1,
		    &nsegs, sgt);
	} else {
		if (ddev->cma_pool != NULL) {
			error = vmem_xalloc(ddev->cma_pool, obj->dmasize,
			    PAGE_SIZE, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
			    VM_BESTFIT | VM_NOSLEEP, &obj->vmem_addr);
			if (!error) {
				obj->vmem_pool = ddev->cma_pool;
				obj->dmasegs[0].ds_addr =
				    PHYS_TO_BUS_MEM(obj->dmat, obj->vmem_addr);
				obj->dmasegs[0].ds_len =
				    roundup(obj->dmasize, PAGE_SIZE);
				nsegs = 1;
			}
		}
		if (obj->vmem_pool == NULL) {
			error = bus_dmamem_alloc(obj->dmat, obj->dmasize,
			    PAGE_SIZE, 0, obj->dmasegs, 1, &nsegs,
			    BUS_DMA_WAITOK);
		}
	}
	if (error)
		goto failed;
	error = bus_dmamem_map(obj->dmat, obj->dmasegs, nsegs,
	    obj->dmasize, &obj->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_PREFETCHABLE);
	if (error)
		goto free;
	error = bus_dmamap_create(obj->dmat, obj->dmasize, 1,
	    obj->dmasize, 0, BUS_DMA_WAITOK, &obj->dmamap);
	if (error)
		goto unmap;
	error = bus_dmamap_load(obj->dmat, obj->dmamap, obj->vaddr,
	    obj->dmasize, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;

	if (!sgt)
		memset(obj->vaddr, 0, obj->dmasize);

	drm_gem_private_object_init(ddev, &obj->base, size);

	return obj;

destroy:
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
unmap:
	bus_dmamem_unmap(obj->dmat, obj->vaddr, obj->dmasize);
free:
	if (obj->sgt)
		drm_prime_sg_free(obj->sgt);
	else if (obj->vmem_pool)
		vmem_xfree(obj->vmem_pool, obj->vmem_addr, obj->dmasize);
	else
		bus_dmamem_free(obj->dmat, obj->dmasegs, nsegs);
failed:
	kmem_free(obj, sizeof(*obj));

	return NULL;
}

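/*
 * drm_gem_cma_create(ddev, size)
 *
 *	Allocate a zero-filled CMA GEM object of the given size, backed
 *	by device-accessible memory rather than an imported scatterlist.
 */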
struct drm_gem_cma_object *
drm_gem_cma_create(struct drm_device *ddev, size_t size)
{

	return drm_gem_cma_create_internal(ddev, size, NULL);
}

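/*
 * drm_gem_cma_obj_free(obj)
 *
 *	Unload and destroy the DMA map, unmap the kernel virtual
 *	mapping, release the backing storage, and free obj itself.
 */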
static void
drm_gem_cma_obj_free(struct drm_gem_cma_object *obj)
{

	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	bus_dmamem_unmap(obj->dmat, obj->vaddr, obj->dmasize);
	if (obj->sgt)
		drm_prime_sg_free(obj->sgt);
	else if (obj->vmem_pool)
		vmem_xfree(obj->vmem_pool, obj->vmem_addr, obj->dmasize);
	else
		bus_dmamem_free(obj->dmat, obj->dmasegs, 1);
	kmem_free(obj, sizeof(*obj));
}

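/*
 * drm_gem_cma_free_object(gem_obj)
 *
 *	GEM object destructor: release the mmap offset and the GEM
 *	bookkeeping, then tear down the underlying CMA object.
 */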
void
drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	drm_gem_free_mmap_offset(gem_obj);
	drm_gem_object_release(gem_obj);
	drm_gem_cma_obj_free(obj);
}

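/*
 * drm_gem_cma_dumb_create(file_priv, ddev, args)
 *
 *	Dumb-buffer backend: derive the pitch from the width and bytes
 *	per pixel (bpp rounded up to whole bytes), round the total size
 *	up to a page, allocate a CMA object, and hand a handle back to
 *	userland.  For example, a 1920x1080 request at bpp = 32 yields
 *	pitch = 1920*4 = 7680 and size = roundup(7680*1080, PAGE_SIZE).
 */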
int
drm_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *ddev,
    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *obj;
	uint32_t handle;
	int error;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
	args->size = roundup(args->size, PAGE_SIZE);
	args->handle = 0;

	obj = drm_gem_cma_create(ddev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	error = drm_gem_handle_create(file_priv, &obj->base, &handle);
	/*
	 * Drop the creation reference.  If handle creation failed this
	 * is the last reference, so the put already destroys the
	 * object through the driver's GEM free callback; freeing it
	 * again here would be a double free.
	 */
	drm_gem_object_put_unlocked(&obj->base);
	if (error)
		return error;

	args->handle = handle;

	return 0;
}

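/*
 * drm_gem_cma_fault(ufi, vaddr, pps, npages, centeridx, access_type,
 *     flags)
 *
 *	Pager fault handler for mmapped CMA objects.  Each faulting
 *	offset is translated to a machine-dependent mmap cookie with
 *	bus_dmamem_mmap and entered directly with pmap_enter, so no
 *	struct vm_page is ever involved.
 */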
static int
drm_gem_cma_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *gem_obj =
	    container_of(uobj, struct drm_gem_object, gemo_uvmobj);
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);
	off_t curr_offset;
	vaddr_t curr_va;
	paddr_t paddr, mdpgno;
	u_int mmapflags;
	int lcv, retval;
	vm_prot_t mapprot;

	/* We are entered with everything locked, so unlock on failure.  */
	if (UVM_ET_ISCOPYONWRITE(entry)) {
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return EIO;
	}

	curr_offset = entry->offset + (vaddr - entry->start);
	curr_va = vaddr;

	retval = 0;
	for (lcv = 0; lcv < npages; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;
		if (pps[lcv] == PGO_DONTCARE)
			continue;

		mdpgno = bus_dmamem_mmap(obj->dmat, obj->dmasegs, 1,
		    curr_offset, access_type, BUS_DMA_PREFETCHABLE);
		if (mdpgno == -1) {
			retval = EIO;
			break;
		}
		paddr = pmap_phys_address(mdpgno);
		mmapflags = pmap_mmap_flags(mdpgno);
		mapprot = ufi->entry->protection;

		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr, mapprot,
		    PMAP_CANFAIL | mapprot | mmapflags) != 0) {
			pmap_update(ufi->orig_map->pmap);
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
			return ENOMEM;
		}
	}

	pmap_update(ufi->orig_map->pmap);
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);

	return retval;
}

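/*
 * Pager operations for CMA GEM objects.  Drivers point their GEM UVM
 * objects at this so that userland mmaps fault through
 * drm_gem_cma_fault above.
 */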
const struct uvm_pagerops drm_gem_cma_uvm_ops = {
	.pgo_reference = drm_gem_pager_reference,
	.pgo_detach = drm_gem_pager_detach,
	.pgo_fault = drm_gem_cma_fault,
};

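/*
 * drm_gem_cma_prime_get_sg_table(gem_obj)
 *
 *	PRIME export: wrap the object's single DMA segment in a
 *	scatterlist for another driver to consume.
 */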
struct sg_table *
drm_gem_cma_prime_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	return drm_prime_bus_dmamem_to_sg(obj->dmat, obj->dmasegs, 1);
}

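/*
 * drm_gem_cma_prime_import_sg_table(ddev, attach, sgt)
 *
 *	PRIME import: build a CMA GEM object around a scatterlist
 *	exported by another driver.  Returns ERR_PTR(-ENOMEM) on
 *	failure.
 */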
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *ddev,
    struct dma_buf_attachment *attach, struct sg_table *sgt)
{
	size_t size = drm_prime_sg_size(sgt);
	struct drm_gem_cma_object *obj;

	obj = drm_gem_cma_create_internal(ddev, size, sgt);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	return &obj->base;
}

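/*
 * drm_gem_cma_prime_vmap(gem_obj)
 *
 *	PRIME vmap: the object stays mapped for its whole lifetime, so
 *	just hand out the existing kernel virtual address.
 */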
void *
drm_gem_cma_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	return obj->vaddr;
}

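/*
 * drm_gem_cma_prime_vunmap(gem_obj, vaddr)
 *
 *	PRIME vunmap: nothing to undo, since the mapping persists until
 *	the object is freed; just sanity-check the address.
 */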
void
drm_gem_cma_prime_vunmap(struct drm_gem_object *gem_obj, void *vaddr)
{
	struct drm_gem_cma_object *obj __diagused =
	    to_drm_gem_cma_obj(gem_obj);

	KASSERT(vaddr == obj->vaddr);
}
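
/*
 * Example driver glue (a sketch only: "mydrm" is hypothetical, and the
 * hook names assume the Linux 5.x-era struct drm_driver used by this
 * port, gem_uvm_ops being the NetBSD-specific member; check the
 * headers for the exact fields your driver version provides):
 *
 *	static struct drm_driver mydrm_driver = {
 *		...
 *		.dumb_create = drm_gem_cma_dumb_create,
 *		.gem_free_object = drm_gem_cma_free_object,
 *		.gem_uvm_ops = &drm_gem_cma_uvm_ops,
 *		.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
 *		.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
 *		.gem_prime_vmap = drm_gem_cma_prime_vmap,
 *		.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
 *	};
 */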