/* $NetBSD: drm_gem_cma_helper.c,v 1.15 2023/08/15 04:57:36 mrg Exp $ */

/*-
 * Copyright (c) 2015-2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_gem_cma_helper.c,v 1.15 2023/08/15 04:57:36 mrg Exp $");

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/kmem.h>
#include <sys/vmem.h>

#include <linux/err.h>

#include <drm/bus_dma_hacks.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

#include <uvm/uvm_extern.h>

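/*
 * drm_gem_cma_create_internal(ddev, size, sgt)
 *
 *	Common allocation path.  If sgt is non-null, wrap the imported
 *	scatter/gather list as a single bus_dma segment; otherwise carve
 *	the backing store out of the device's CMA vmem pool when one is
 *	configured, falling back to bus_dmamem_alloc.  The memory is
 *	then mapped into kernel virtual space, loaded into a DMA map,
 *	zeroed (for fresh allocations), and wrapped in a GEM object.
 *	Returns NULL on failure.
 */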
static struct drm_gem_cma_object *
drm_gem_cma_create_internal(struct drm_device *ddev, size_t size,
    struct sg_table *sgt)
{
	struct drm_gem_cma_object *obj;
	int error = EINVAL, nsegs;

	obj = kmem_zalloc(sizeof(*obj), KM_SLEEP);
	obj->dmat = ddev->dmat;
	obj->dmasize = size;

	if (sgt) {
		error = -drm_prime_sg_to_bus_dmamem(obj->dmat, obj->dmasegs, 1,
		    &nsegs, sgt);
		if (!error) {
			/*
			 * Remember the imported scatterlist so the
			 * teardown paths release it with
			 * drm_prime_sg_free instead of bus_dmamem_free.
			 */
			obj->sgt = sgt;
		}
	} else {
		if (ddev->cma_pool != NULL) {
			error = vmem_xalloc(ddev->cma_pool, obj->dmasize,
			    PAGE_SIZE, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
			    VM_BESTFIT | VM_NOSLEEP, &obj->vmem_addr);
			if (!error) {
				obj->vmem_pool = ddev->cma_pool;
				obj->dmasegs[0].ds_addr =
				    PHYS_TO_BUS_MEM(obj->dmat, obj->vmem_addr);
				obj->dmasegs[0].ds_len =
				    roundup(obj->dmasize, PAGE_SIZE);
				nsegs = 1;
			}
		}
		if (obj->vmem_pool == NULL) {
			error = bus_dmamem_alloc(obj->dmat, obj->dmasize,
			    PAGE_SIZE, 0, obj->dmasegs, 1, &nsegs,
			    BUS_DMA_WAITOK);
		}
	}
	if (error)
		goto failed;
	error = bus_dmamem_map(obj->dmat, obj->dmasegs, nsegs,
	    obj->dmasize, &obj->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_PREFETCHABLE);
	if (error)
		goto free;
	error = bus_dmamap_create(obj->dmat, obj->dmasize, 1,
	    obj->dmasize, 0, BUS_DMA_WAITOK, &obj->dmamap);
	if (error)
		goto unmap;
	error = bus_dmamap_load(obj->dmat, obj->dmamap, obj->vaddr,
	    obj->dmasize, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;

	if (!sgt)
		memset(obj->vaddr, 0, obj->dmasize);

	drm_gem_private_object_init(ddev, &obj->base, size);

	return obj;

destroy:
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
unmap:
	bus_dmamem_unmap(obj->dmat, obj->vaddr, obj->dmasize);
free:
	if (obj->sgt)
		drm_prime_sg_free(obj->sgt);
	else if (obj->vmem_pool)
		vmem_xfree(obj->vmem_pool, obj->vmem_addr, obj->dmasize);
	else
		bus_dmamem_free(obj->dmat, obj->dmasegs, nsegs);
failed:
	kmem_free(obj, sizeof(*obj));

	return NULL;
}

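/*
 * drm_gem_cma_create(ddev, size)
 *
 *	Allocate a new CMA GEM object of the given size, backed by
 *	freshly allocated, zeroed, DMA-safe memory that is contiguous in
 *	bus address space.  Returns NULL on failure.
 *
 *	Typical driver usage is roughly the following sketch (the
 *	surrounding framebuffer code and fb_size are hypothetical):
 *
 *		struct drm_gem_cma_object *obj;
 *
 *		obj = drm_gem_cma_create(ddev, roundup(fb_size, PAGE_SIZE));
 *		if (obj == NULL)
 *			return -ENOMEM;
 *
 *	CPU access then goes through obj->vaddr, while the device sees
 *	the buffer at obj->dmasegs[0].ds_addr.
 */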
struct drm_gem_cma_object *
drm_gem_cma_create(struct drm_device *ddev, size_t size)
{

	return drm_gem_cma_create_internal(ddev, size, NULL);
}

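/*
 * drm_gem_cma_obj_free(obj)
 *
 *	Release the DMA resources backing obj -- unload and destroy the
 *	DMA map, unmap the kernel virtual mapping, and return the pages
 *	to wherever they came from (imported scatterlist, CMA vmem pool,
 *	or bus_dmamem) -- then free the object itself.
 */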
static void
drm_gem_cma_obj_free(struct drm_gem_cma_object *obj)
{

	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	bus_dmamem_unmap(obj->dmat, obj->vaddr, obj->dmasize);
	if (obj->sgt)
		drm_prime_sg_free(obj->sgt);
	else if (obj->vmem_pool)
		vmem_xfree(obj->vmem_pool, obj->vmem_addr, obj->dmasize);
	else
		bus_dmamem_free(obj->dmat, obj->dmasegs, 1);
	kmem_free(obj, sizeof(*obj));
}

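/*
 * drm_gem_cma_free_object(gem_obj)
 *
 *	GEM free callback, invoked by the drm core when the last
 *	reference to the object is dropped.  Tears down the mmap offset
 *	and GEM bookkeeping, then releases the backing memory.
 */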
void
drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	drm_gem_free_mmap_offset(gem_obj);
	drm_gem_object_release(gem_obj);
	drm_gem_cma_obj_free(obj);
}

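/*
 * drm_gem_cma_dumb_create(file_priv, ddev, args)
 *
 *	DRM_IOCTL_MODE_CREATE_DUMB backend.  Computes a tightly packed
 *	pitch from width and bpp, rounds the total size up to a page
 *	boundary, allocates a CMA object of that size, and hands a GEM
 *	handle back to userland in args->handle.
 *
 *	For example, a 1920x1080 buffer at 32 bpp gets
 *	pitch = 1920 * 4 = 7680 bytes and
 *	size = roundup(7680 * 1080, PAGE_SIZE) = 8294400 bytes
 *	(already page-aligned with 4 KiB pages).
 */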
int
drm_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *ddev,
    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *obj;
	uint32_t handle;
	int error;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
	args->size = roundup(args->size, PAGE_SIZE);
	args->handle = 0;

	obj = drm_gem_cma_create(ddev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	error = drm_gem_handle_create(file_priv, &obj->base, &handle);
	/*
	 * Drop the creation reference.  On success the new handle keeps
	 * the object alive; on failure this drop is the last reference
	 * and frees the object through the driver's GEM free callback,
	 * so it must not be freed again here.
	 */
	drm_gem_object_put_unlocked(&obj->base);
	if (error)
		return error;

	args->handle = handle;

	return 0;
}

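/*
 * drm_gem_cma_fault(ufi, vaddr, pps, npages, centeridx, access_type, flags)
 *
 *	UVM pager fault handler for mmapped CMA objects.  The backing
 *	memory has no struct vm_page array to hand back, so instead of
 *	returning pages we translate each faulting offset with
 *	bus_dmamem_mmap and enter the resulting physical address
 *	directly into the faulting pmap, using prefetchable mappings
 *	where the platform supports them.
 */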
static int
drm_gem_cma_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *gem_obj =
	    container_of(uobj, struct drm_gem_object, gemo_uvmobj);
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);
	off_t curr_offset;
	vaddr_t curr_va;
	paddr_t paddr, mdpgno;
	u_int mmapflags;
	int lcv, retval;
	vm_prot_t mapprot;

	if (UVM_ET_ISCOPYONWRITE(entry))
		return EIO;

	curr_offset = entry->offset + (vaddr - entry->start);
	curr_va = vaddr;

	retval = 0;
	for (lcv = 0; lcv < npages; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;
		if (pps[lcv] == PGO_DONTCARE)
			continue;

		mdpgno = bus_dmamem_mmap(obj->dmat, obj->dmasegs, 1,
		    curr_offset, access_type, BUS_DMA_PREFETCHABLE);
		if (mdpgno == -1) {
			retval = EIO;
			break;
		}
		paddr = pmap_phys_address(mdpgno);
		mmapflags = pmap_mmap_flags(mdpgno);
		mapprot = ufi->entry->protection;

		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr, mapprot,
		    PMAP_CANFAIL | mapprot | mmapflags) != 0) {
			pmap_update(ufi->orig_map->pmap);
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
			return ENOMEM;
		}
	}

	pmap_update(ufi->orig_map->pmap);
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);

	return retval;
}

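/*
 * drm_gem_cma_uvm_ops
 *
 *	UVM pager operations for CMA GEM objects.  A driver using these
 *	helpers points its struct drm_driver at them, roughly like the
 *	following sketch (the exact member names depend on the drm core
 *	version in this tree, so treat it as illustrative only):
 *
 *		.dumb_create = drm_gem_cma_dumb_create,
 *		.gem_free_object = drm_gem_cma_free_object,
 *		.gem_uvm_ops = &drm_gem_cma_uvm_ops,
 *		.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
 *		.gem_prime_import_sg_table =
 *		    drm_gem_cma_prime_import_sg_table,
 *		.gem_prime_vmap = drm_gem_cma_prime_vmap,
 *		.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
 */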
const struct uvm_pagerops drm_gem_cma_uvm_ops = {
	.pgo_reference = drm_gem_pager_reference,
	.pgo_detach = drm_gem_pager_detach,
	.pgo_fault = drm_gem_cma_fault,
};

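/*
 * drm_gem_cma_prime_get_sg_table(gem_obj)
 *
 *	PRIME export hook: describe the object's single bus_dma segment
 *	as a scatter/gather table for sharing with another device.
 */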
struct sg_table *
drm_gem_cma_prime_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	return drm_prime_bus_dmamem_to_sg(obj->dmat, obj->dmasegs, 1);
}

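/*
 * drm_gem_cma_prime_import_sg_table(ddev, attach, sgt)
 *
 *	PRIME import hook: wrap an externally provided scatter/gather
 *	table in a CMA GEM object without allocating new memory.
 *	Returns ERR_PTR(-ENOMEM) on failure, as the drm core expects.
 */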
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *ddev,
    struct dma_buf_attachment *attach, struct sg_table *sgt)
{
	size_t size = drm_prime_sg_size(sgt);
	struct drm_gem_cma_object *obj;

	obj = drm_gem_cma_create_internal(ddev, size, sgt);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	return &obj->base;
}

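/*
 * drm_gem_cma_prime_vmap(gem_obj)
 *
 *	PRIME vmap hook.  The buffer is already mapped into kernel
 *	virtual space at creation time, so this simply returns the
 *	existing mapping.
 */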
void *
drm_gem_cma_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	return obj->vaddr;
}

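/*
 * drm_gem_cma_prime_vunmap(gem_obj, vaddr)
 *
 *	PRIME vunmap hook.  Nothing to undo because vmap did not create
 *	a new mapping; just assert we were handed back the address we
 *	gave out.
 */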
void
drm_gem_cma_prime_vunmap(struct drm_gem_object *gem_obj, void *vaddr)
{
	struct drm_gem_cma_object *obj __diagused =
	    to_drm_gem_cma_obj(gem_obj);

	KASSERT(vaddr == obj->vaddr);
}
    277