/*	$NetBSD: linux_dma_buf.c,v 1.15.4.1 2023/02/24 14:14:16 martin Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_buf.c,v 1.15.4.1 2023/02/24 14:14:16 martin Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kmem.h>
#include <sys/mutex.h>

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/dma-resv.h>

static int	dmabuf_fop_poll(struct file *, int);
static int	dmabuf_fop_close(struct file *);
static int	dmabuf_fop_kqfilter(struct file *, struct knote *);
static int	dmabuf_fop_mmap(struct file *, off_t *, size_t, int, int *,
		    int *, struct uvm_object **, int *);
static int	dmabuf_fop_seek(struct file *fp, off_t delta, int whence,
		    off_t *newoffp, int flags);

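/*
 * File operations for dma-buf file descriptors: read, write, ioctl,
 * and stat are rejected; poll, kqfilter, mmap, and seek are handled by
 * the dmabuf_fop_* routines below.
 */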
static const struct fileops dmabuf_fileops = {
	.fo_name = "dmabuf",
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = fbadop_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = dmabuf_fop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = dmabuf_fop_close,
	.fo_kqfilter = dmabuf_fop_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = dmabuf_fop_mmap,
	.fo_seek = dmabuf_fop_seek,
};

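/*
 * dma_buf_export(info)
 *
 *	Create a dma-buf with the ops, size, priv, and optional
 *	reservation object given in info.  If info->resv is NULL, a
 *	reservation object is allocated along with the dma-buf and
 *	initialized internally.  The caller receives the sole
 *	reference; cannot fail (sleeps for allocation).
 */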
struct dma_buf *
dma_buf_export(struct dma_buf_export_info *info)
{
	struct dma_buf *dmabuf;

	if (info->resv == NULL) {
		dmabuf = kmem_zalloc(offsetof(struct dma_buf, db_resv_int[1]),
		    KM_SLEEP);
	} else {
		dmabuf = kmem_zalloc(sizeof(*dmabuf), KM_SLEEP);
	}

	dmabuf->priv = info->priv;
	dmabuf->ops = info->ops;
	dmabuf->size = info->size;
	dmabuf->resv = info->resv;

	mutex_init(&dmabuf->db_lock, MUTEX_DEFAULT, IPL_NONE);
	dmabuf->db_refcnt = 1;
	dma_resv_poll_init(&dmabuf->db_resv_poll);

	if (dmabuf->resv == NULL) {
		dmabuf->resv = &dmabuf->db_resv_int[0];
		dma_resv_init(dmabuf->resv);
	}

	return dmabuf;
}

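/*
 * dma_buf_fd(dmabuf, flags)
 *
 *	Allocate a file descriptor referencing dmabuf, which holds its
 *	own reference to dmabuf.  flags may include O_CLOEXEC.  Returns
 *	the new file descriptor on success, or a negative (Linux-style)
 *	error number on failure.
 */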
int
dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	int fd;
	unsigned refcnt __diagused;
	int ret;

	/* XXX errno NetBSD->Linux */
	ret = -fd_allocfile(&file, &fd);
	if (ret)
		goto out0;

	refcnt = atomic_inc_uint_nv(&dmabuf->db_refcnt);
	KASSERT(refcnt > 1);

	file->f_type = DTYPE_MISC;
	file->f_flag = 0;	/* XXX DRM code allows only O_CLOEXEC.  */
	file->f_ops = &dmabuf_fileops;
	file->f_data = dmabuf;
	fd_set_exclose(curlwp, fd, (flags & O_CLOEXEC) != 0);
	fd_affix(curproc, file, fd);

	ret = fd;
out0:	return ret;
}

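/*
 * dma_buf_get(fd)
 *
 *	If fd is a dma-buf file descriptor, acquire and return a
 *	reference to the underlying dma-buf, which the caller must
 *	release with dma_buf_put.  Otherwise return ERR_PTR(-EBADF) if
 *	fd is not open, or ERR_PTR(-EINVAL) if it is not a dma-buf.
 */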
struct dma_buf *
dma_buf_get(int fd)
{
	struct file *file;
	struct dma_buf *dmabuf;
	unsigned refcnt __diagused;
	int error;

	if ((file = fd_getfile(fd)) == NULL) {
		error = EBADF;
		goto fail0;
	}
	if (file->f_type != DTYPE_MISC || file->f_ops != &dmabuf_fileops) {
		error = EINVAL;
		goto fail1;
	}

	dmabuf = file->f_data;
	refcnt = atomic_inc_uint_nv(&dmabuf->db_refcnt);
	KASSERT(refcnt > 1);
	fd_putfile(fd);
	return dmabuf;

fail1:	fd_putfile(fd);
fail0:	KASSERT(error);
	return ERR_PTR(-error);
}

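/*
 * get_dma_buf(dmabuf)
 *
 *	Acquire an additional reference to dmabuf.  The caller must
 *	already hold a reference.
 */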
void
get_dma_buf(struct dma_buf *dmabuf)
{
	unsigned refcnt __diagused;

	refcnt = atomic_inc_uint_nv(&dmabuf->db_refcnt);
	KASSERT(refcnt > 1);
}

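/*
 * dma_buf_put(dmabuf)
 *
 *	Release a reference to dmabuf.  On the last reference, destroy
 *	it, finalizing the internally allocated reservation object if
 *	one was used, and free its memory.  The release/acquire
 *	barriers order all prior use of the dma-buf before it is
 *	destroyed.
 */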
void
dma_buf_put(struct dma_buf *dmabuf)
{

	membar_release();
	if (atomic_dec_uint_nv(&dmabuf->db_refcnt) != 0)
		return;
	membar_acquire();

	dma_resv_poll_fini(&dmabuf->db_resv_poll);
	mutex_destroy(&dmabuf->db_lock);
	if (dmabuf->resv == &dmabuf->db_resv_int[0]) {
		dma_resv_fini(dmabuf->resv);
		kmem_free(dmabuf, offsetof(struct dma_buf, db_resv_int[1]));
	} else {
		kmem_free(dmabuf, sizeof(*dmabuf));
	}
}

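/*
 * dma_buf_dynamic_attach(dmabuf, dmat, dynamic_mapping)
 *
 *	Attach dmabuf for access through the bus DMA tag dmat, invoking
 *	the exporter's attach operation, if any, under the dma-buf
 *	lock.  A mismatch between dynamic_mapping and the exporter's
 *	dynamic_mapping setting is not yet implemented and panics.
 *	Returns the attachment on success, or an ERR_PTR on failure;
 *	release with dma_buf_detach.
 */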
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, bus_dma_tag_t dmat,
    bool dynamic_mapping)
{
	struct dma_buf_attachment *attach;
	int ret = 0;

	attach = kmem_zalloc(sizeof(*attach), KM_SLEEP);
	attach->dmabuf = dmabuf;
	attach->dev = dmat;
	attach->dynamic_mapping = dynamic_mapping;

	mutex_enter(&dmabuf->db_lock);
	if (dmabuf->ops->attach)
		ret = dmabuf->ops->attach(dmabuf, attach);
	mutex_exit(&dmabuf->db_lock);
	if (ret)
		goto fail0;

	if (attach->dynamic_mapping != dmabuf->ops->dynamic_mapping)
		panic("%s: NYI", __func__);

	return attach;

fail0:	kmem_free(attach, sizeof(*attach));
	return ERR_PTR(ret);
}

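/*
 * dma_buf_attach(dmabuf, dmat)
 *
 *	Like dma_buf_dynamic_attach, but without dynamic mapping.
 */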
struct dma_buf_attachment *
dma_buf_attach(struct dma_buf *dmabuf, bus_dma_tag_t dmat)
{

	return dma_buf_dynamic_attach(dmabuf, dmat, /*dynamic_mapping*/false);
}

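/*
 * dma_buf_detach(dmabuf, attach)
 *
 *	Destroy an attachment created with dma_buf_attach or
 *	dma_buf_dynamic_attach, invoking the exporter's detach
 *	operation, if any, under the dma-buf lock.
 */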
void
dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{

	mutex_enter(&dmabuf->db_lock);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);
	mutex_exit(&dmabuf->db_lock);

	kmem_free(attach, sizeof(*attach));
}

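/*
 * dma_buf_map_attachment(attach, dir)
 *
 *	Map the attached dma-buf for DMA in the direction dir via the
 *	exporter's map_dma_buf operation, holding the reservation lock
 *	around the call if the exporter uses dynamic mapping.  Returns
 *	the resulting scatter/gather table.
 */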
struct sg_table *
dma_buf_map_attachment(struct dma_buf_attachment *attach,
    enum dma_data_direction dir)
{
	struct sg_table *sg;

	if (attach->dmabuf->ops->dynamic_mapping)
		dma_resv_lock(attach->dmabuf->resv, NULL);
	sg = attach->dmabuf->ops->map_dma_buf(attach, dir);
	if (attach->dmabuf->ops->dynamic_mapping)
		dma_resv_unlock(attach->dmabuf->resv);

	return sg;
}

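/*
 * dma_buf_unmap_attachment(attach, sg, dir)
 *
 *	Undo a mapping established by dma_buf_map_attachment, again
 *	holding the reservation lock around the exporter's
 *	unmap_dma_buf operation if it uses dynamic mapping.
 */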
void
dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
    struct sg_table *sg, enum dma_data_direction dir)
{

	if (attach->dmabuf->ops->dynamic_mapping)
		dma_resv_lock(attach->dmabuf->resv, NULL);
	attach->dmabuf->ops->unmap_dma_buf(attach, sg, dir);
	if (attach->dmabuf->ops->dynamic_mapping)
		dma_resv_unlock(attach->dmabuf->resv);
}

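/*
 * dmabuf_fop_close(file)
 *
 *	Release the file's reference to the dma-buf when its descriptor
 *	is closed.
 */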
static int
dmabuf_fop_close(struct file *file)
{
	struct dma_buf *dmabuf = file->f_data;

	dma_buf_put(dmabuf);
	return 0;
}

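/*
 * dmabuf_fop_poll(file, events)
 *
 *	Poll for the requested events on the dma-buf's reservation
 *	object.
 */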
static int
dmabuf_fop_poll(struct file *file, int events)
{
	struct dma_buf *dmabuf = file->f_data;
	struct dma_resv_poll *rpoll = &dmabuf->db_resv_poll;

	return dma_resv_do_poll(dmabuf->resv, events, rpoll);
}

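/*
 * dmabuf_fop_kqfilter(file, kn)
 *
 *	Attach the knote kn to the dma-buf's reservation object.
 */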
static int
dmabuf_fop_kqfilter(struct file *file, struct knote *kn)
{
	struct dma_buf *dmabuf = file->f_data;
	struct dma_resv_poll *rpoll = &dmabuf->db_resv_poll;

	return dma_resv_kqfilter(dmabuf->resv, kn, rpoll);
}

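/*
 * dmabuf_fop_mmap(file, offp, size, prot, flagsp, advicep, uobjp, maxprotp)
 *
 *	Delegate mmap of the dma-buf to the exporter's mmap operation,
 *	after rejecting requests larger than the buffer.
 */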
static int
dmabuf_fop_mmap(struct file *file, off_t *offp, size_t size, int prot,
    int *flagsp, int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	struct dma_buf *dmabuf = file->f_data;

	if (size > dmabuf->size)
		return EINVAL;

	return dmabuf->ops->mmap(dmabuf, offp, size, prot, flagsp, advicep,
	    uobjp, maxprotp);
}

/*
 * We don't actually do anything with the file offset; this is just how
 * libdrm_amdgpu expects to find the size of the DMA buf.  (Why it
 * doesn't use fstat is unclear, but it doesn't really matter.)
 */
static int
dmabuf_fop_seek(struct file *fp, off_t delta, int whence, off_t *newoffp,
    int flags)
{
	const off_t OFF_MAX = __type_max(off_t);
	struct dma_buf *dmabuf = fp->f_data;
	off_t base, newoff;
	int error;

	mutex_enter(&fp->f_lock);

	switch (whence) {
	case SEEK_CUR:
		base = fp->f_offset;
		break;
	case SEEK_END:
		base = dmabuf->size;
		break;
	case SEEK_SET:
		base = 0;
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Check for arithmetic overflow and reject negative offsets.  */
	if (base < 0 || delta > OFF_MAX - base || base + delta < 0) {
		error = EINVAL;
		goto out;
	}

	/* Compute the new offset.  */
	newoff = base + delta;

	/* Success!  */
	if (newoffp)
		*newoffp = newoff;
	if (flags & FOF_UPDATE_OFFSET)
		fp->f_offset = newoff;
	error = 0;

out:	mutex_exit(&fp->f_lock);
	return error;
}