/* $NetBSD: drm_gem_cma_helper.c,v 1.8 2019/11/05 09:59:16 jmcneill Exp $ */

/*-
 * Copyright (c) 2015-2017 Jared McNeill <jmcneill (at) invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_gem_cma_helper.c,v 1.8 2019/11/05 09:59:16 jmcneill Exp $");

#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>

#include <uvm/uvm.h>

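/*
 * drm_gem_cma_create_internal --
 *	Common allocation path for CMA GEM objects.  The backing store is
 *	either taken from an sg_table imported via PRIME or allocated as
 *	a single physically contiguous bus_dma segment, then mapped into
 *	kernel virtual address space and loaded into a DMA map.
 */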
static struct drm_gem_cma_object *
drm_gem_cma_create_internal(struct drm_device *ddev, size_t size,
    struct sg_table *sgt)
{
	struct drm_gem_cma_object *obj;
	int error, nsegs;

	obj = kmem_zalloc(sizeof(*obj), KM_SLEEP);
	obj->dmat = ddev->dmat;
	obj->dmasize = size;
	/*
	 * Remember the imported sg_table (if any) so the free path can
	 * release the backing store with the matching primitive.
	 */
	obj->sgt = sgt;

	if (sgt) {
		/*
		 * Imported buffer: translate the sg_table into a single
		 * bus_dma segment.  The helper returns a negative errno,
		 * hence the negation.
		 */
		error = -drm_prime_sg_to_bus_dmamem(obj->dmat, obj->dmasegs, 1,
		    &nsegs, sgt);
	} else {
		/*
		 * Local buffer: allocate one physically contiguous,
		 * page-aligned segment.
		 */
		error = bus_dmamem_alloc(obj->dmat, obj->dmasize, PAGE_SIZE, 0,
		    obj->dmasegs, 1, &nsegs, BUS_DMA_WAITOK);
	}
	if (error)
		goto failed;
	error = bus_dmamem_map(obj->dmat, obj->dmasegs, nsegs,
	    obj->dmasize, &obj->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_PREFETCHABLE);
	if (error)
		goto free;
	error = bus_dmamap_create(obj->dmat, obj->dmasize, 1,
	    obj->dmasize, 0, BUS_DMA_WAITOK, &obj->dmamap);
	if (error)
		goto unmap;
	error = bus_dmamap_load(obj->dmat, obj->dmamap, obj->vaddr,
	    obj->dmasize, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;

	/* Freshly allocated memory is handed to userland; zero it. */
	if (!sgt)
		memset(obj->vaddr, 0, obj->dmasize);

	drm_gem_private_object_init(ddev, &obj->base, size);

	return obj;

destroy:
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
unmap:
	bus_dmamem_unmap(obj->dmat, obj->vaddr, obj->dmasize);
free:
	/* Only free segments we allocated; imported pages belong to
	 * their exporter, mirroring drm_gem_cma_obj_free() below. */
	if (sgt == NULL)
		bus_dmamem_free(obj->dmat, obj->dmasegs, nsegs);
failed:
	kmem_free(obj, sizeof(*obj));

	return NULL;
}

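/*
 * drm_gem_cma_create --
 *	Allocate a new CMA GEM object with locally allocated, physically
 *	contiguous backing memory.
 */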
struct drm_gem_cma_object *
drm_gem_cma_create(struct drm_device *ddev, size_t size)
{

	return drm_gem_cma_create_internal(ddev, size, NULL);
}

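/*
 * drm_gem_cma_obj_free --
 *	Tear down the DMA resources backing a CMA GEM object and free
 *	the object itself.  Does not release the GEM base object.
 */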
static void
drm_gem_cma_obj_free(struct drm_gem_cma_object *obj)
{

	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	bus_dmamem_unmap(obj->dmat, obj->vaddr, obj->dmasize);
	if (obj->sgt) {
		/* Imported via PRIME: the pages belong to the exporter. */
		drm_prime_sg_free(obj->sgt);
	} else {
		bus_dmamem_free(obj->dmat, obj->dmasegs, 1);
	}
	kmem_free(obj, sizeof(*obj));
}

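/*
 * drm_gem_cma_free_object --
 *	Driver gem_free_object hook, called when the last reference to a
 *	CMA GEM object is dropped.
 */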
void
drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	drm_gem_free_mmap_offset(gem_obj);
	drm_gem_object_release(gem_obj);
	drm_gem_cma_obj_free(obj);
}

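/*
 * drm_gem_cma_dumb_create --
 *	Implement the DRM_IOCTL_MODE_CREATE_DUMB ioctl: allocate a CMA
 *	buffer sized for the requested geometry and return a handle to it.
 *
 *	A driver built on these helpers would typically wire them into
 *	its struct drm_driver, for example (illustrative sketch only):
 *
 *		.dumb_create		= drm_gem_cma_dumb_create,
 *		.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
 *		.gem_free_object	= drm_gem_cma_free_object,
 *		.gem_uvm_ops		= &drm_gem_cma_uvm_ops,
 */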
int
drm_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *ddev,
    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *obj;
	uint32_t handle;
	int error;

	/* Bytes per scanline, rounding bpp up to a whole byte. */
	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
	args->size = roundup(args->size, PAGE_SIZE);
	args->handle = 0;

	obj = drm_gem_cma_create(ddev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	error = drm_gem_handle_create(file_priv, &obj->base, &handle);
	/*
	 * Drop our reference.  On success the handle keeps the object
	 * alive; on failure this was the last reference and the object
	 * is released through the driver's gem_free_object hook, so it
	 * must not be freed again here.
	 */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (error)
		return error;

	args->handle = handle;

	return 0;
}

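/*
 * drm_gem_cma_dumb_map_offset --
 *	Implement the DRM_IOCTL_MODE_MAP_DUMB ioctl: look up the object
 *	by handle and return the fake offset userland should pass to
 *	mmap(2).
 */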
int
drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, struct drm_device *ddev,
    uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem_obj;
	struct drm_gem_cma_object *obj;
	int error;

	gem_obj = drm_gem_object_lookup(ddev, file_priv, handle);
	if (gem_obj == NULL)
		return -ENOENT;

	obj = to_drm_gem_cma_obj(gem_obj);

	/* Allocate the fake mmap offset on first use. */
	if (drm_vma_node_has_offset(&obj->base.vma_node) == 0) {
		error = drm_gem_create_mmap_offset(&obj->base);
		if (error)
			goto done;
	} else {
		error = 0;
	}

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

done:
	drm_gem_object_unreference_unlocked(&obj->base);

	return error;
}

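/*
 * drm_gem_cma_fault --
 *	UVM pager fault handler for mmap'ed CMA objects.  Resolves the
 *	faulting offsets within the DMA segment to physical pages with
 *	bus_dmamem_mmap() and enters the mappings directly; no vm_page
 *	structures are involved.
 */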
static int
drm_gem_cma_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *gem_obj =
	    container_of(uobj, struct drm_gem_object, gemo_uvmobj);
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);
	off_t curr_offset;
	vaddr_t curr_va;
	paddr_t paddr, mdpgno;
	u_int mmapflags;
	int lcv, retval;
	vm_prot_t mapprot;

	/* Copy-on-write mappings of device memory are not supported. */
	if (UVM_ET_ISCOPYONWRITE(entry))
		return EIO;

	curr_offset = entry->offset + (vaddr - entry->start);
	curr_va = vaddr;

	retval = 0;
	for (lcv = 0; lcv < npages; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		/* Unless asked for all pages, only map the faulting one. */
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;
		if (pps[lcv] == PGO_DONTCARE)
			continue;

		/*
		 * Translate the object offset into a machine-dependent
		 * mmap cookie for the backing segment.
		 */
		mdpgno = bus_dmamem_mmap(obj->dmat, obj->dmasegs, 1,
		    curr_offset, access_type, BUS_DMA_PREFETCHABLE);
		if (mdpgno == -1) {
			retval = EIO;
			break;
		}
		paddr = pmap_phys_address(mdpgno);
		mmapflags = pmap_mmap_flags(mdpgno);
		mapprot = ufi->entry->protection;

		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr, mapprot,
		    PMAP_CANFAIL | mapprot | mmapflags) != 0) {
			/* Out of resources: wait for memory and retry. */
			pmap_update(ufi->orig_map->pmap);
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
			uvm_wait("drm_gem_cma_fault");
			return ERESTART;
		}
	}

	pmap_update(ufi->orig_map->pmap);
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);

	return retval;
}

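/*
 * drm_gem_cma_uvm_ops --
 *	UVM pager operations used when a CMA GEM object is mmap'ed.
 */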
const struct uvm_pagerops drm_gem_cma_uvm_ops = {
	.pgo_reference = drm_gem_pager_reference,
	.pgo_detach = drm_gem_pager_detach,
	.pgo_fault = drm_gem_cma_fault,
};

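/*
 * drm_gem_cma_prime_get_sg_table --
 *	PRIME export hook: wrap the single DMA segment in an sg_table.
 */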
struct sg_table *
drm_gem_cma_prime_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	return drm_prime_bus_dmamem_to_sg(obj->dmat, obj->dmasegs, 1);
}

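/*
 * drm_gem_cma_prime_import_sg_table --
 *	PRIME import hook: wrap an sg_table exported by another driver
 *	in a CMA GEM object.
 */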
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *ddev,
    struct dma_buf_attachment *attach, struct sg_table *sgt)
{
	size_t size = drm_prime_sg_size(sgt);
	struct drm_gem_cma_object *obj;

	obj = drm_gem_cma_create_internal(ddev, size, sgt);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	return &obj->base;
}

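/*
 * drm_gem_cma_prime_vmap --
 *	PRIME vmap hook.  The buffer is permanently mapped at creation
 *	time, so simply return the existing kernel virtual address.
 */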
void *
drm_gem_cma_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	return obj->vaddr;
}

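/*
 * drm_gem_cma_prime_vunmap --
 *	PRIME vunmap hook.  The mapping persists until the object is
 *	freed, so this is a no-op beyond sanity checking.
 */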
void
drm_gem_cma_prime_vunmap(struct drm_gem_object *gem_obj, void *vaddr)
{
	struct drm_gem_cma_object *obj __diagused =
	    to_drm_gem_cma_obj(gem_obj);

	KASSERT(vaddr == obj->vaddr);
}