/*
 * Copyright (C) 2014 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */

#include "os/os_mman.h"
#include "util/hash_table.h"

#include "etnaviv_priv.h"
#include "etnaviv_drmif.h"

simple_mtx_t etna_drm_table_lock = _SIMPLE_MTX_INITIALIZER_NP;
void _etna_bo_del(struct etna_bo *bo);

/* set buffer name, and add to table, call w/ etna_drm_table_lock held: */
static void set_name(struct etna_bo *bo, uint32_t name)
{
	simple_mtx_assert_locked(&etna_drm_table_lock);

	bo->name = name;
	/* add ourselves into the name table: */
	_mesa_hash_table_insert(bo->dev->name_table, &bo->name, bo);
}

/* Called under etna_drm_table_lock */
void _etna_bo_del(struct etna_bo *bo)
{
	VG_BO_FREE(bo);

	simple_mtx_assert_locked(&etna_drm_table_lock);

	if (bo->va)
		util_vma_heap_free(&bo->dev->address_space, bo->va, bo->size);

	if (bo->map)
		os_munmap(bo->map, bo->size);

	if (bo->handle) {
		struct drm_gem_close req = {
			.handle = bo->handle,
		};

		if (bo->name)
			_mesa_hash_table_remove_key(bo->dev->name_table, &bo->name);

		_mesa_hash_table_remove_key(bo->dev->handle_table, &bo->handle);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	free(bo);
}

/* lookup a buffer from its handle, call w/ etna_drm_table_lock held: */
static struct etna_bo *lookup_bo(void *tbl, uint32_t handle)
{
	struct etna_bo *bo = NULL;
	struct hash_entry *entry;

	simple_mtx_assert_locked(&etna_drm_table_lock);

	entry = _mesa_hash_table_search(tbl, &handle);

	if (entry) {
		/* found, incr refcnt and return: */
		bo = etna_bo_ref(entry->data);

		/* don't break the bucket if this bo was found in one */
		if (list_is_linked(&bo->list)) {
			VG_BO_OBTAIN(bo);
			etna_device_ref(bo->dev);
			list_delinit(&bo->list);
		}
	}

	return bo;
}

/* allocate a new buffer object, call w/ etna_drm_table_lock held */
static struct etna_bo *bo_from_handle(struct etna_device *dev,
		uint32_t size, uint32_t handle, uint32_t flags)
{
	struct etna_bo *bo = calloc(sizeof(*bo), 1);

	simple_mtx_assert_locked(&etna_drm_table_lock);

	if (!bo) {
		struct drm_gem_close req = {
			.handle = handle,
		};

		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);

		return NULL;
	}

	bo->dev = etna_device_ref(dev);
	bo->size = size;
	bo->handle = handle;
	bo->flags = flags;
	p_atomic_set(&bo->refcnt, 1);
	list_inithead(&bo->list);
	/* add ourselves to the handle table: */
	_mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);

	if (dev->use_softpin)
		bo->va = util_vma_heap_alloc(&dev->address_space, bo->size, 4096);

	return bo;
}

/* allocate a new (un-tiled) buffer object */
struct etna_bo *etna_bo_new(struct etna_device *dev, uint32_t size,
		uint32_t flags)
{
	struct etna_bo *bo;
	int ret;
	struct drm_etnaviv_gem_new req = {
		.flags = flags,
	};

	bo = etna_bo_cache_alloc(&dev->bo_cache, &size, flags);
	if (bo)
		return bo;

	req.size = size;
	ret = drmCommandWriteRead(dev->fd, DRM_ETNAVIV_GEM_NEW,
			&req, sizeof(req));
	if (ret)
		return NULL;

	simple_mtx_lock(&etna_drm_table_lock);
	bo = bo_from_handle(dev, size, req.handle, flags);
	if (bo)
		bo->reuse = 1;
	simple_mtx_unlock(&etna_drm_table_lock);

	if (!bo)
		return NULL;

	VG_BO_ALLOC(bo);

	return bo;
}

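/* take an additional reference on the buffer object */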
struct etna_bo *etna_bo_ref(struct etna_bo *bo)
{
	p_atomic_inc(&bo->refcnt);

	return bo;
}

/* get buffer info */
static int get_buffer_info(struct etna_bo *bo)
{
	int ret;
	struct drm_etnaviv_gem_info req = {
		.handle = bo->handle,
	};

	ret = drmCommandWriteRead(bo->dev->fd, DRM_ETNAVIV_GEM_INFO,
			&req, sizeof(req));
	if (ret) {
		return ret;
	}

	/* really all we need for now is mmap offset */
	bo->offset = req.offset;

	return 0;
}

/* import a buffer object from DRI2 name */
struct etna_bo *etna_bo_from_name(struct etna_device *dev,
		uint32_t name)
{
	struct etna_bo *bo;
	struct drm_gem_open req = {
		.name = name,
	};

	simple_mtx_lock(&etna_drm_table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle, 0);
	if (bo) {
		set_name(bo, name);
		VG_BO_ALLOC(bo);
	}

out_unlock:
	simple_mtx_unlock(&etna_drm_table_lock);

	return bo;
}

/* import a buffer from a dmabuf fd; this does not take ownership of the
 * fd, so the caller should close() the fd when it is otherwise done
 * with it (even if it is still using the 'struct etna_bo *')
 */
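/*
 * Typical usage (illustrative sketch; error handling omitted):
 *
 *    struct etna_bo *bo = etna_bo_from_dmabuf(dev, fd);
 *    close(fd);    // the import does not consume the fd
 *    ... use bo ...
 *    etna_bo_del(bo);
 */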
struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd)
{
	struct etna_bo *bo;
	int ret, size;
	uint32_t handle;

	/* take the lock before calling drmPrimeFDToHandle to avoid
	 * racing against etna_bo_del, which might invalidate the
	 * returned handle.
	 */
	simple_mtx_lock(&etna_drm_table_lock);

	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		simple_mtx_unlock(&etna_drm_table_lock);
		return NULL;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_CUR);

	bo = bo_from_handle(dev, size, handle, 0);

	VG_BO_ALLOC(bo);

out_unlock:
	simple_mtx_unlock(&etna_drm_table_lock);

	return bo;
}

/* destroy a buffer object */
void etna_bo_del(struct etna_bo *bo)
{
	if (!bo)
		return;

	struct etna_device *dev = bo->dev;

	simple_mtx_lock(&etna_drm_table_lock);

	/* Must test under table lock to avoid racing with the from_dmabuf/name
	 * paths, which rely on the BO refcount to be stable over the lookup, so
	 * they can grab a reference when the BO is found in the hash.
	 */
	if (!p_atomic_dec_zero(&bo->refcnt))
		goto out;

	if (bo->reuse && (etna_bo_cache_free(&dev->bo_cache, bo) == 0))
		goto out;

	_etna_bo_del(bo);
	etna_device_del_locked(dev);
out:
	simple_mtx_unlock(&etna_drm_table_lock);
}

/* get the global flink/DRI2 buffer name */
int etna_bo_get_name(struct etna_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
			.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		simple_mtx_lock(&etna_drm_table_lock);
		set_name(bo, req.name);
		simple_mtx_unlock(&etna_drm_table_lock);
		bo->reuse = 0;
	}

	*name = bo->name;

	return 0;
}

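/* get the DRM GEM handle of the buffer object */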
uint32_t etna_bo_handle(struct etna_bo *bo)
{
	return bo->handle;
}

/* the caller owns the dmabuf fd that is returned and should close() it
 * when done with it
 */
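/*
 * Typical usage (illustrative sketch; error handling omitted):
 *
 *    int fd = etna_bo_dmabuf(bo);
 *    ... pass fd to the importing API ...
 *    close(fd);    // the exported fd is owned by the caller
 */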
int etna_bo_dmabuf(struct etna_bo *bo)
{
	int ret, prime_fd;

	ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				&prime_fd);
	if (ret) {
		ERROR_MSG("failed to get dmabuf fd: %d", ret);
		return ret;
	}

	bo->reuse = 0;

	return prime_fd;
}

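/* get the size of the buffer object in bytes */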
uint32_t etna_bo_size(struct etna_bo *bo)
{
	return bo->size;
}

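/* get the GPU virtual address of the buffer object (non-zero only with softpin) */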
uint32_t etna_bo_gpu_va(struct etna_bo *bo)
{
	return bo->va;
}

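/* map the buffer object into CPU address space, caching the mapping for later calls */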
void *etna_bo_map(struct etna_bo *bo)
{
	if (!bo->map) {
		if (!bo->offset) {
			get_buffer_info(bo);
		}

		bo->map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE,
				  MAP_SHARED, bo->dev->fd, bo->offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}

	return bo->map;
}

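/* prepare the buffer object for CPU access, waiting for the GPU when needed */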
int etna_bo_cpu_prep(struct etna_bo *bo, uint32_t op)
{
	struct drm_etnaviv_gem_cpu_prep req = {
		.handle = bo->handle,
		.op = op,
	};

	/* allow the GPU up to 5 seconds (in ns) to finish with the buffer */
	get_abs_timeout(&req.timeout, 5000000000);

	return drmCommandWrite(bo->dev->fd, DRM_ETNAVIV_GEM_CPU_PREP,
			&req, sizeof(req));
}

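/* signal that CPU access to the buffer object is finished */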
void etna_bo_cpu_fini(struct etna_bo *bo)
{
	struct drm_etnaviv_gem_cpu_fini req = {
		.handle = bo->handle,
	};

	drmCommandWrite(bo->dev->fd, DRM_ETNAVIV_GEM_CPU_FINI,
			&req, sizeof(req));
}
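/*
 * Typical CPU access pattern using the helpers above (illustrative sketch;
 * flag/op names are taken from etnaviv_drmif.h and error handling is omitted):
 *
 *    struct etna_bo *bo = etna_bo_new(dev, size, DRM_ETNA_GEM_CACHE_WC);
 *    etna_bo_cpu_prep(bo, DRM_ETNA_PREP_WRITE);
 *    memcpy(etna_bo_map(bo), data, size);
 *    etna_bo_cpu_fini(bo);
 *    etna_bo_del(bo);
 */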