/*	$NetBSD: i915_gem_dmabuf.c,v 1.7 2024/05/20 11:34:45 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_dmabuf.c,v 1.7 2024/05/20 11:34:45 riastradh Exp $");

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

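/*
 * Recover the exporting drm_i915_gem_object from a dma-buf.  Valid only
 * for buffers exported through i915_dmabuf_ops below, where ->priv holds
 * the GEM object.
 */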
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	struct drm_gem_object *obj = buf->priv;

	return to_intel_bo(obj);
}

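/*
 * map_dma_buf callback: pin the object's backing pages, copy its
 * scatterlist so the importer gets an independent mapping, and map that
 * copy for DMA on the importer's device.  The NetBSD branch copies the
 * page array wholesale instead of walking sg entries.  Errors unwind in
 * reverse order through the goto labels at the bottom.
 */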
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

#ifdef __NetBSD__
	__USE(i);
	__USE(src);
	__USE(dst);
	memcpy(st->sgl->sg_pgs, obj->mm.pages->sgl->sg_pgs,
	    obj->mm.pages->nents * sizeof(st->sgl->sg_pgs[0]));
#else

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}
#endif

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

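/*
 * unmap_dma_buf callback: undo i915_gem_map_dma_buf -- unmap the DMA
 * addresses, free the copied scatterlist, and drop the page pin taken
 * at map time.
 */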
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

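/*
 * vmap callback: return a writeback-cached kernel mapping of the whole
 * object.  The pin taken here is released by the vunmap callback.
 */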
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

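/*
 * vunmap callback: flush any CPU writes made through the kernel mapping
 * and release the pin taken by i915_gem_dmabuf_vmap.
 */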
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}

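/*
 * mmap callback: let userspace map the exported object directly.  Only
 * objects with shmem/aobj backing (base.filp set) can be mapped.  The
 * NetBSD variant hands back the object's uvm_object with an extra
 * reference; the Linux variant delegates to the backing file's mmap and
 * swaps vma->vm_file over to that file.
 */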
#ifdef __NetBSD__
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, off_t *offp,
    size_t size, int prot, int *flagsp, int *advicep,
    struct uvm_object **uobjp, int *maxprotp)
#else
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
#endif
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

#ifdef __NetBSD__
	__USE(ret);
	if (obj->base.size < size)
		return -EINVAL;
	if (!obj->base.filp)
		return -ENODEV;
	uao_reference(obj->base.filp);
	*advicep = UVM_ADV_RANDOM;
	*uobjp = obj->base.filp;
	*maxprotp = prot;
#else
	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);
#endif

	return 0;
}

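/*
 * begin_cpu_access callback: move the object to the CPU domain (for
 * writing if the importer may write) so that CPU access through the
 * dma-buf sees coherent data.  The pages are pinned only for the
 * duration of the domain change.
 */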
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

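/*
 * end_cpu_access callback: move the object back to the (read-only) GTT
 * domain, flushing any CPU-domain writes before the device touches the
 * buffer again.
 */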
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

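/*
 * dma_buf_ops vtable used for every dma-buf exported by i915.  release
 * is the generic drm_gem_dmabuf_release; everything else is implemented
 * above.
 */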
static const struct dma_buf_ops i915_dmabuf_ops =  {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

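/*
 * PRIME export: wrap a GEM object in a dma-buf using i915_dmabuf_ops,
 * sharing the object's size and reservation object.  Backends that need
 * to prepare for (or veto) export hook in through obj->ops->dmabuf_export.
 */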
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->base.resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

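/*
 * get_pages backend for imported dma-bufs: ask the exporter to map the
 * buffer for us and adopt the resulting sg_table as the object's backing
 * store.
 */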
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

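/*
 * put_pages backend for imported dma-bufs: hand the sg_table back to the
 * exporter.
 */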
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

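/*
 * Object ops for GEM objects whose backing store comes from a foreign
 * dma-buf attachment.
 */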
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

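/*
 * PRIME import: turn a dma-buf into a GEM object.  Self-imports of our
 * own dma-bufs short-circuit to a reference on the original object;
 * anything else gets a new GEM wrapper whose pages are fetched lazily
 * through the attachment (see i915_gem_object_get_pages_dmabuf).
 */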
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	static struct lock_class_key lock_class;
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dmat);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc();
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
	obj->base.import_attach = attach;
	obj->base.resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif