1/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
2
3/*
4 * Copyright (C) 2011 Texas Instruments, Inc
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 * Authors:
26 *    Rob Clark <rob@ti.com>
27 */
28
29#include <stdlib.h>
30#include <linux/types.h>
31#include <errno.h>
32#include <sys/mman.h>
33#include <fcntl.h>
34#include <unistd.h>
35#include <pthread.h>
36
37#include <libdrm_macros.h>
38#include <xf86drm.h>
39#include <xf86atomic.h>
40
41#include "omap_drm.h"
42#include "omap_drmif.h"
43
/* Helpers to round x up to the next multiple of y; y must be a power of two. */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
/* NOTE(review): hard-codes 4KiB pages — TODO confirm for all target kernels */
#define PAGE_SIZE 4096

/* Protects dev_table and every device's handle_table. */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
/* Hash mapping DRM fd -> struct omap_device, so one fd yields one device. */
static void * dev_table;
50
/* Refcounted wrapper for a DRM device fd. */
struct omap_device {
	int fd;			/* the underlying DRM device fd */
	atomic_t refcnt;	/* dropped by omap_device_del(), taken by omap_device_ref() */

	/* The handle_table is used to track GEM bo handles associated w/
	 * this fd.  This is needed, in particular, when importing
	 * dmabuf's because we don't want multiple 'struct omap_bo's
	 * floating around with the same handle.  Otherwise, when the
	 * first one is omap_bo_del()'d the handle becomes no longer
	 * valid, and the remaining 'struct omap_bo's are left pointing
	 * to an invalid handle (and possible a GEM bo that is already
	 * free'd).
	 */
	void *handle_table;
};
66
/* a GEM buffer object allocated from the DRM device */
struct omap_bo {
	struct omap_bo {
	struct omap_device	*dev;	/* owning device; holds a device ref */
	void		*map;		/* userspace mmap'ing (if there is one) */
	uint32_t	size;		/* size in bytes; 0 until known (queried lazily) */
	uint32_t	handle;		/* GEM handle */
	uint32_t	name;		/* flink global handle (DRI2 name) */
	uint64_t	offset;		/* offset to mmap() */
	int		fd;		/* dmabuf handle; -1 when not yet exported */
	atomic_t	refcnt;		/* dropped by omap_bo_del(), taken by omap_bo_ref() */
};
78
79static struct omap_device * omap_device_new_impl(int fd)
80{
81	struct omap_device *dev = calloc(sizeof(*dev), 1);
82	if (!dev)
83		return NULL;
84	dev->fd = fd;
85	atomic_set(&dev->refcnt, 1);
86	dev->handle_table = drmHashCreate();
87	return dev;
88}
89
90drm_public struct omap_device * omap_device_new(int fd)
91{
92	struct omap_device *dev = NULL;
93
94	pthread_mutex_lock(&table_lock);
95
96	if (!dev_table)
97		dev_table = drmHashCreate();
98
99	if (drmHashLookup(dev_table, fd, (void **)&dev)) {
100		/* not found, create new device */
101		dev = omap_device_new_impl(fd);
102		drmHashInsert(dev_table, fd, dev);
103	} else {
104		/* found, just incr refcnt */
105		dev = omap_device_ref(dev);
106	}
107
108	pthread_mutex_unlock(&table_lock);
109
110	return dev;
111}
112
113drm_public struct omap_device * omap_device_ref(struct omap_device *dev)
114{
115	atomic_inc(&dev->refcnt);
116	return dev;
117}
118
119drm_public void omap_device_del(struct omap_device *dev)
120{
121	if (!atomic_dec_and_test(&dev->refcnt))
122		return;
123	pthread_mutex_lock(&table_lock);
124	drmHashDestroy(dev->handle_table);
125	drmHashDelete(dev_table, dev->fd);
126	pthread_mutex_unlock(&table_lock);
127	free(dev);
128}
129
130drm_public int
131omap_get_param(struct omap_device *dev, uint64_t param, uint64_t *value)
132{
133	struct drm_omap_param req = {
134			.param = param,
135	};
136	int ret;
137
138	ret = drmCommandWriteRead(dev->fd, DRM_OMAP_GET_PARAM, &req, sizeof(req));
139	if (ret) {
140		return ret;
141	}
142
143	*value = req.value;
144
145	return 0;
146}
147
148drm_public int
149omap_set_param(struct omap_device *dev, uint64_t param, uint64_t value)
150{
151	struct drm_omap_param req = {
152			.param = param,
153			.value = value,
154	};
155	return drmCommandWrite(dev->fd, DRM_OMAP_SET_PARAM, &req, sizeof(req));
156}
157
158/* lookup a buffer from it's handle, call w/ table_lock held: */
159static struct omap_bo * lookup_bo(struct omap_device *dev,
160		uint32_t handle)
161{
162	struct omap_bo *bo = NULL;
163	if (!drmHashLookup(dev->handle_table, handle, (void **)&bo)) {
164		/* found, incr refcnt and return: */
165		bo = omap_bo_ref(bo);
166	}
167	return bo;
168}
169
170/* allocate a new buffer object, call w/ table_lock held */
171static struct omap_bo * bo_from_handle(struct omap_device *dev,
172		uint32_t handle)
173{
174	struct omap_bo *bo = calloc(sizeof(*bo), 1);
175	if (!bo) {
176		drmCloseBufferHandle(dev->fd, handle);
177		return NULL;
178	}
179	bo->dev = omap_device_ref(dev);
180	bo->handle = handle;
181	bo->fd = -1;
182	atomic_set(&bo->refcnt, 1);
183	/* add ourselves to the handle table: */
184	drmHashInsert(dev->handle_table, handle, bo);
185	return bo;
186}
187
188/* allocate a new buffer object */
189static struct omap_bo * omap_bo_new_impl(struct omap_device *dev,
190		union omap_gem_size size, uint32_t flags)
191{
192	struct omap_bo *bo = NULL;
193	struct drm_omap_gem_new req = {
194			.size = size,
195			.flags = flags,
196	};
197
198	if (size.bytes == 0) {
199		goto fail;
200	}
201
202	if (drmCommandWriteRead(dev->fd, DRM_OMAP_GEM_NEW, &req, sizeof(req))) {
203		goto fail;
204	}
205
206	pthread_mutex_lock(&table_lock);
207	bo = bo_from_handle(dev, req.handle);
208	pthread_mutex_unlock(&table_lock);
209
210	if (flags & OMAP_BO_TILED) {
211		bo->size = round_up(size.tiled.width, PAGE_SIZE) * size.tiled.height;
212	} else {
213		bo->size = size.bytes;
214	}
215
216	return bo;
217
218fail:
219	free(bo);
220	return NULL;
221}
222
223
224/* allocate a new (un-tiled) buffer object */
225drm_public struct omap_bo *
226omap_bo_new(struct omap_device *dev, uint32_t size, uint32_t flags)
227{
228	union omap_gem_size gsize = {
229			.bytes = size,
230	};
231	if (flags & OMAP_BO_TILED) {
232		return NULL;
233	}
234	return omap_bo_new_impl(dev, gsize, flags);
235}
236
237/* allocate a new buffer object */
238drm_public struct omap_bo *
239omap_bo_new_tiled(struct omap_device *dev, uint32_t width,
240		  uint32_t height, uint32_t flags)
241{
242	union omap_gem_size gsize = {
243			.tiled = {
244				.width = width,
245				.height = height,
246			},
247	};
248	if (!(flags & OMAP_BO_TILED)) {
249		return NULL;
250	}
251	return omap_bo_new_impl(dev, gsize, flags);
252}
253
254drm_public struct omap_bo *omap_bo_ref(struct omap_bo *bo)
255{
256	atomic_inc(&bo->refcnt);
257	return bo;
258}
259
260/* get buffer info */
261static int get_buffer_info(struct omap_bo *bo)
262{
263	struct drm_omap_gem_info req = {
264			.handle = bo->handle,
265	};
266	int ret = drmCommandWriteRead(bo->dev->fd, DRM_OMAP_GEM_INFO,
267			&req, sizeof(req));
268	if (ret) {
269		return ret;
270	}
271
272	/* really all we need for now is mmap offset */
273	bo->offset = req.offset;
274	bo->size = req.size;
275
276	return 0;
277}
278
279/* import a buffer object from DRI2 name */
280drm_public struct omap_bo *
281omap_bo_from_name(struct omap_device *dev, uint32_t name)
282{
283	struct omap_bo *bo = NULL;
284	struct drm_gem_open req = {
285			.name = name,
286	};
287
288	pthread_mutex_lock(&table_lock);
289
290	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
291		goto fail;
292	}
293
294	bo = lookup_bo(dev, req.handle);
295	if (!bo) {
296		bo = bo_from_handle(dev, req.handle);
297		bo->name = name;
298	}
299
300	pthread_mutex_unlock(&table_lock);
301
302	return bo;
303
304fail:
305	pthread_mutex_unlock(&table_lock);
306	free(bo);
307	return NULL;
308}
309
310/* import a buffer from dmabuf fd, does not take ownership of the
311 * fd so caller should close() the fd when it is otherwise done
312 * with it (even if it is still using the 'struct omap_bo *')
313 */
314drm_public struct omap_bo *
315omap_bo_from_dmabuf(struct omap_device *dev, int fd)
316{
317	struct omap_bo *bo = NULL;
318	struct drm_prime_handle req = {
319			.fd = fd,
320	};
321	int ret;
322
323	pthread_mutex_lock(&table_lock);
324
325	ret = drmIoctl(dev->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req);
326	if (ret) {
327		goto fail;
328	}
329
330	bo = lookup_bo(dev, req.handle);
331	if (!bo) {
332		bo = bo_from_handle(dev, req.handle);
333	}
334
335	pthread_mutex_unlock(&table_lock);
336
337	return bo;
338
339fail:
340	pthread_mutex_unlock(&table_lock);
341	free(bo);
342	return NULL;
343}
344
345/* destroy a buffer object */
346drm_public void omap_bo_del(struct omap_bo *bo)
347{
348	if (!bo) {
349		return;
350	}
351
352	if (!atomic_dec_and_test(&bo->refcnt))
353		return;
354
355	if (bo->map) {
356		munmap(bo->map, bo->size);
357	}
358
359	if (bo->fd >= 0) {
360		close(bo->fd);
361	}
362
363	if (bo->handle) {
364		pthread_mutex_lock(&table_lock);
365		drmHashDelete(bo->dev->handle_table, bo->handle);
366		drmCloseBufferHandle(bo->dev->fd, bo->handle);
367		pthread_mutex_unlock(&table_lock);
368	}
369
370	omap_device_del(bo->dev);
371
372	free(bo);
373}
374
375/* get the global flink/DRI2 buffer name */
376drm_public int omap_bo_get_name(struct omap_bo *bo, uint32_t *name)
377{
378	if (!bo->name) {
379		struct drm_gem_flink req = {
380				.handle = bo->handle,
381		};
382		int ret;
383
384		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
385		if (ret) {
386			return ret;
387		}
388
389		bo->name = req.name;
390	}
391
392	*name = bo->name;
393
394	return 0;
395}
396
397drm_public uint32_t omap_bo_handle(struct omap_bo *bo)
398{
399	return bo->handle;
400}
401
402/* caller owns the dmabuf fd that is returned and is responsible
403 * to close() it when done
404 */
405drm_public int omap_bo_dmabuf(struct omap_bo *bo)
406{
407	if (bo->fd < 0) {
408		struct drm_prime_handle req = {
409				.handle = bo->handle,
410				.flags = DRM_CLOEXEC | DRM_RDWR,
411		};
412		int ret;
413
414		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
415		if (ret) {
416			return ret;
417		}
418
419		bo->fd = req.fd;
420	}
421	return dup(bo->fd);
422}
423
424drm_public uint32_t omap_bo_size(struct omap_bo *bo)
425{
426	if (!bo->size) {
427		get_buffer_info(bo);
428	}
429	return bo->size;
430}
431
432drm_public void *omap_bo_map(struct omap_bo *bo)
433{
434	if (!bo->map) {
435		if (!bo->offset) {
436			get_buffer_info(bo);
437		}
438
439		bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
440				MAP_SHARED, bo->dev->fd, bo->offset);
441		if (bo->map == MAP_FAILED) {
442			bo->map = NULL;
443		}
444	}
445	return bo->map;
446}
447
448drm_public int omap_bo_cpu_prep(struct omap_bo *bo, enum omap_gem_op op)
449{
450	struct drm_omap_gem_cpu_prep req = {
451			.handle = bo->handle,
452			.op = op,
453	};
454	return drmCommandWrite(bo->dev->fd,
455			DRM_OMAP_GEM_CPU_PREP, &req, sizeof(req));
456}
457
458drm_public int omap_bo_cpu_fini(struct omap_bo *bo, enum omap_gem_op op)
459{
460	struct drm_omap_gem_cpu_fini req = {
461			.handle = bo->handle,
462			.op = op,
463			.nregions = 0,
464	};
465	return drmCommandWrite(bo->dev->fd,
466			DRM_OMAP_GEM_CPU_FINI, &req, sizeof(req));
467}
468