1/*
2 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 *    Rob Clark <robclark@freedesktop.org>
25 */
26
27#include "os/os_mman.h"
28
29#include "freedreno_drmif.h"
30#include "freedreno_priv.h"
31
/* Global lock serializing access to the device handle/name hash tables
 * and the bo caches (see fd_bo_del()).  Non-static, so presumably also
 * taken by other translation units of the library -- the "_locked"
 * helpers (e.g. fd_device_del_locked()) are called with it held.
 */
pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
/* forward declaration; defined below, must be called w/ table_lock held */
void bo_del(struct fd_bo *bo);
34
/* set buffer name, and add to table, call w/ table_lock held: */
static void set_name(struct fd_bo *bo, uint32_t name)
{
	bo->name = name;
	/* add ourself into the name table: note the key is the address of
	 * bo->name, which stays valid for the lifetime of the bo:
	 */
	_mesa_hash_table_insert(bo->dev->name_table, &bo->name, bo);
}
42
/* lookup a buffer by key, call w/ table_lock held: returns the bo with
 * an added reference, or NULL if not found:
 */
static struct fd_bo * lookup_bo(struct hash_table *tbl, uint32_t key)
{
	struct fd_bo *bo = NULL;
	struct hash_entry *entry = _mesa_hash_table_search(tbl, &key);
	if (entry) {
		/* found, incr refcnt and return: */
		bo = fd_bo_ref(entry->data);

		/* don't break the bucket if this bo was found in one */
		list_delinit(&bo->list);
	}
	return bo;
}
57
/* allocate a new buffer object, call w/ table_lock held */
static struct fd_bo * bo_from_handle(struct fd_device *dev,
		uint32_t size, uint32_t handle)
{
	struct fd_bo *bo;

	bo = dev->funcs->bo_from_handle(dev, size, handle);
	if (!bo) {
		/* this function takes ownership of the GEM handle, so on
		 * failure we must close it ourselves to avoid leaking it:
		 */
		struct drm_gem_close req = {
				.handle = handle,
		};
		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		return NULL;
	}
	bo->dev = fd_device_ref(dev);
	bo->size = size;
	bo->handle = handle;
	/* new bo starts with a single reference, owned by the caller: */
	p_atomic_set(&bo->refcnt, 1);
	list_inithead(&bo->list);
	/* add ourself into the handle table: */
	_mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);
	return bo;
}
81
/* Allocate a bo of at least `size` bytes, preferring a recycled buffer
 * from `cache` over a fresh kernel allocation (fd_bo_cache_alloc() may
 * adjust `size`, which is passed by pointer).  Returns NULL on failure.
 */
static struct fd_bo *
bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
		struct fd_bo_cache *cache)
{
	struct fd_bo *bo = NULL;
	uint32_t handle;
	int ret;

	/* fast path: reuse a cached bo if a suitable one is available: */
	bo = fd_bo_cache_alloc(cache, &size, flags);
	if (bo)
		return bo;

	ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
	if (ret)
		return NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, handle);
	pthread_mutex_unlock(&table_lock);

	/* NOTE(review): bo_from_handle() can return NULL here; presumably
	 * VG_BO_ALLOC() tolerates a NULL bo -- confirm its definition.
	 */
	VG_BO_ALLOC(bo);

	return bo;
}
106
107struct fd_bo *
108_fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
109{
110	struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache);
111	if (bo)
112		bo->bo_reuse = BO_CACHE;
113	return bo;
114}
115
/* Internal va_list entry point for bo naming (presumably the core of a
 * printf-style fd_bo_set_name() wrapper); forwards fmt + ap to the
 * backend.  bo must be non-NULL: bo->funcs is dereferenced directly.
 */
void
_fd_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
{
	bo->funcs->set_name(bo, fmt, ap);
}
121
122/* internal function to allocate bo's that use the ringbuffer cache
123 * instead of the normal bo_cache.  The purpose is, because cmdstream
124 * bo's get vmap'd on the kernel side, and that is expensive, we want
125 * to re-use cmdstream bo's for cmdstream and not unrelated purposes.
126 */
127struct fd_bo *
128fd_bo_new_ring(struct fd_device *dev, uint32_t size, uint32_t flags)
129{
130	struct fd_bo *bo = bo_new(dev, size, flags, &dev->ring_cache);
131	if (bo)
132		bo->bo_reuse = RING_CACHE;
133	fd_bo_set_name(bo, "cmdstream");
134	return bo;
135}
136
/* Return a bo wrapping the given GEM handle: either an existing bo
 * from the device's handle table (with an added reference) or a newly
 * created one.  Returns NULL if creation fails.
 */
struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
	struct fd_bo *bo = NULL;

	pthread_mutex_lock(&table_lock);

	/* check if we already track this handle: */
	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, size, handle);

	/* NOTE(review): bo may be NULL here; presumably VG_BO_ALLOC()
	 * tolerates that -- confirm its definition.
	 */
	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
157
/* Import a dmabuf fd as a bo.  Returns an existing bo (with an added
 * reference) if the underlying buffer was already imported, otherwise
 * wraps the PRIME handle in a new bo.  Returns NULL on error.
 */
struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
	int ret, size;
	uint32_t handle;
	struct fd_bo *bo;

	/* hold the lock across the fd->handle conversion, so a concurrent
	 * import of the same buffer can't race between obtaining the handle
	 * and finding/inserting it in the handle table:
	 */
	pthread_mutex_lock(&table_lock);
	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		pthread_mutex_unlock(&table_lock);
		return NULL;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	/* NOTE(review): lseek(fd, 0, SEEK_CUR) does not move the file
	 * offset, so this does NOT undo the SEEK_END above -- possibly
	 * SEEK_SET was intended.  Also a failed lseek() leaves size == -1,
	 * which then converts to a huge uint32_t -- confirm callers never
	 * pass an fd where lseek can fail.
	 */
	lseek(fd, 0, SEEK_CUR);

	bo = bo_from_handle(dev, size, handle);

	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
189
/* Open a bo by its global flink name.  Checks the name table first,
 * then (after DRM_IOCTL_GEM_OPEN) the handle table, so repeated opens
 * of the same buffer share one fd_bo.  Returns NULL on error.
 */
struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
	struct drm_gem_open req = {
			.name = name,
	};
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	/* the open may return a handle we already track via another import
	 * path, so check the handle table too before creating a new bo:
	 */
	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle);
	if (bo) {
		/* record the flink name so the next open finds this bo: */
		set_name(bo, name);
		VG_BO_ALLOC(bo);
	}

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
224
225uint64_t fd_bo_get_iova(struct fd_bo *bo)
226{
227	if (!bo->iova)
228		bo->iova = bo->funcs->iova(bo);
229	return bo->iova;
230}
231
/* Counterpart to fd_bo_get_iova(), kept for API symmetry. */
void fd_bo_put_iova(struct fd_bo *bo)
{
	/* currently a no-op */
}
236
237struct fd_bo * fd_bo_ref(struct fd_bo *bo)
238{
239	p_atomic_inc(&bo->refcnt);
240	return bo;
241}
242
/* Drop a reference; when the last reference is gone the bo is either
 * stashed back into the cache it was allocated from (per bo->bo_reuse)
 * or torn down via bo_del().
 */
void fd_bo_del(struct fd_bo *bo)
{
	struct fd_device *dev = bo->dev;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	pthread_mutex_lock(&table_lock);

	/* prefer recycling over freeing; fd_bo_cache_free() returning 0
	 * means the cache took ownership and we must not delete the bo:
	 */
	if ((bo->bo_reuse == BO_CACHE) && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
		goto out;
	if ((bo->bo_reuse == RING_CACHE) && (fd_bo_cache_free(&dev->ring_cache, bo) == 0))
		goto out;

	bo_del(bo);
	/* drop the device reference taken in bo_from_handle(); the _locked
	 * variant is used because table_lock is already held here:
	 */
	fd_device_del_locked(dev);
out:
	pthread_mutex_unlock(&table_lock);
}
262
/* Final teardown of a bo: unmap, drop it from the lookup tables, close
 * the GEM handle, and let the backend destroy the rest.
 * Called under table_lock */
void bo_del(struct fd_bo *bo)
{
	VG_BO_FREE(bo);

	/* tear down any cached cpu mapping (see fd_bo_map()): */
	if (bo->map)
		os_munmap(bo->map, bo->size);

	/* TODO probably bo's in bucket list get removed from
	 * handle table??
	 */

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};
		/* remove from the lookup tables before closing the handle, so
		 * a lookup cannot hand out a dead bo:
		 */
		_mesa_hash_table_remove_key(bo->dev->handle_table, &bo->handle);
		if (bo->name)
			_mesa_hash_table_remove_key(bo->dev->name_table, &bo->name);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	/* NOTE(review): the backend destroy presumably frees the fd_bo
	 * struct itself -- confirm against the backend implementations.
	 */
	bo->funcs->destroy(bo);
}
287
/* Get (creating on first call, via flink) the global name of the bo.
 * Returns 0 on success with *name filled in, or the drmIoctl() error.
 * A flink'd bo becomes globally shareable, so it is excluded from the
 * bo caches.
 */
int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		/* record the name so fd_bo_from_name() can find this bo: */
		pthread_mutex_lock(&table_lock);
		set_name(bo, req.name);
		pthread_mutex_unlock(&table_lock);
		/* shared buffers must never be recycled through the caches: */
		bo->bo_reuse = NO_CACHE;
	}

	*name = bo->name;

	return 0;
}
311
312uint32_t fd_bo_handle(struct fd_bo *bo)
313{
314	return bo->handle;
315}
316
317int fd_bo_dmabuf(struct fd_bo *bo)
318{
319	int ret, prime_fd;
320
321	ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
322			&prime_fd);
323	if (ret) {
324		ERROR_MSG("failed to get dmabuf fd: %d", ret);
325		return ret;
326	}
327
328	bo->bo_reuse = NO_CACHE;
329
330	return prime_fd;
331}
332
333uint32_t fd_bo_size(struct fd_bo *bo)
334{
335	return bo->size;
336}
337
/* Return a cpu mapping of the buffer, creating it on first use (the
 * mapping is cached in bo->map and unmapped in bo_del()).  Returns
 * NULL if the backend offset query or the mmap fails.
 *
 * NOTE(review): the lazy init of bo->map is not protected by any lock;
 * presumably concurrent first-time maps of the same bo don't happen --
 * confirm with callers.
 */
void * fd_bo_map(struct fd_bo *bo)
{
	if (!bo->map) {
		uint64_t offset;
		int ret;

		/* the backend supplies the mmap offset to use on dev->fd: */
		ret = bo->funcs->offset(bo, &offset);
		if (ret) {
			return NULL;
		}

		bo->map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
				bo->dev->fd, offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}
	return bo->map;
}
358
/* Prepare the bo for cpu access (op is presumably a DRM_FREEDRENO_PREP_*
 * style flag -- defined elsewhere); synchronization is delegated to the
 * backend.  Returns the backend's status code.
 *
 * a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
	return bo->funcs->cpu_prep(bo, pipe, op);
}
364
/* Counterpart to fd_bo_cpu_prep(): signal that cpu access is finished. */
void fd_bo_cpu_fini(struct fd_bo *bo)
{
	bo->funcs->cpu_fini(bo);
}
369