/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "os/os_mman.h"

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
void bo_del(struct fd_bo *bo);

/* set buffer name, and add to table, call w/ table_lock held: */
static void set_name(struct fd_bo *bo, uint32_t name)
{
	bo->name = name;
	/* add ourself into the name table: */
	_mesa_hash_table_insert(bo->dev->name_table, &bo->name, bo);
}

/* lookup a buffer, call w/ table_lock held: */
static struct fd_bo * lookup_bo(struct hash_table *tbl, uint32_t key)
{
	struct fd_bo *bo = NULL;
	struct hash_entry *entry = _mesa_hash_table_search(tbl, &key);
	if (entry) {
		/* found, incr refcnt and return: */
		bo = fd_bo_ref(entry->data);

		/* don't break the bucket if this bo was found in one */
		list_delinit(&bo->list);
	}
	return bo;
}

/* allocate a new buffer object, call w/ table_lock held */
static struct fd_bo * bo_from_handle(struct fd_device *dev,
		uint32_t size, uint32_t handle)
{
	struct fd_bo *bo;

	bo = dev->funcs->bo_from_handle(dev, size, handle);
	if (!bo) {
		struct drm_gem_close req = {
				.handle = handle,
		};
		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		return NULL;
	}
	bo->dev = fd_device_ref(dev);
	bo->size = size;
	bo->handle = handle;
	p_atomic_set(&bo->refcnt, 1);
	list_inithead(&bo->list);
	/* add ourself into the handle table: */
	_mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);
	return bo;
}

static struct fd_bo *
bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
		struct fd_bo_cache *cache)
{
	struct fd_bo *bo = NULL;
	uint32_t handle;
	int ret;

	bo = fd_bo_cache_alloc(cache, &size, flags);
	if (bo)
		return bo;

	ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
	if (ret)
		return NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, handle);
	pthread_mutex_unlock(&table_lock);

	VG_BO_ALLOC(bo);

	return bo;
}

struct fd_bo *
_fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
	struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache);
	if (bo)
		bo->bo_reuse = BO_CACHE;
	return bo;
}
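
/*
 * Minimal usage sketch (assumes the public fd_bo_new() wrapper declared in
 * freedreno_drmif.h, which forwards to _fd_bo_new() above and attaches a
 * debug name; the size and flags shown are placeholders):
 *
 *	struct fd_bo *bo = fd_bo_new(dev, 0x1000, 0, "scratch");
 *	if (bo) {
 *		... use the buffer ...
 *		fd_bo_del(bo);	// drop the ref; bo may return to dev->bo_cache
 *	}
 */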

void
_fd_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
{
	bo->funcs->set_name(bo, fmt, ap);
}

/* internal function to allocate bo's that use the ringbuffer cache
 * instead of the normal bo_cache.  Cmdstream bo's get vmap'd on the
 * kernel side, which is expensive, so we want to re-use cmdstream bo's
 * for cmdstream and not for unrelated purposes.
 */
struct fd_bo *
fd_bo_new_ring(struct fd_device *dev, uint32_t size, uint32_t flags)
{
	struct fd_bo *bo = bo_new(dev, size, flags, &dev->ring_cache);
	if (bo) {
		bo->bo_reuse = RING_CACHE;
		fd_bo_set_name(bo, "cmdstream");
	}
	return bo;
}

struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
	struct fd_bo *bo = NULL;

	pthread_mutex_lock(&table_lock);

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, size, handle);

	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
	int ret, size;
	uint32_t handle;
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);
	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		pthread_mutex_unlock(&table_lock);
		return NULL;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_CUR);

	bo = bo_from_handle(dev, size, handle);

	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
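
/*
 * Minimal import sketch: given a dma-buf fd obtained elsewhere (another
 * process or driver), the bo is created (or re-used, if the prime handle is
 * already known) with its size taken from lseek(SEEK_END) above:
 *
 *	struct fd_bo *imported = fd_bo_from_dmabuf(dev, dmabuf_fd);
 *	if (imported) {
 *		void *ptr = fd_bo_map(imported);
 *		... access up to fd_bo_size(imported) bytes at ptr ...
 *		fd_bo_del(imported);
 *	}
 */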

struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
	struct drm_gem_open req = {
			.name = name,
	};
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle);
	if (bo) {
		set_name(bo, name);
		VG_BO_ALLOC(bo);
	}

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

uint64_t fd_bo_get_iova(struct fd_bo *bo)
{
	if (!bo->iova)
		bo->iova = bo->funcs->iova(bo);
	return bo->iova;
}

void fd_bo_put_iova(struct fd_bo *bo)
{
	/* currently a no-op */
}

struct fd_bo * fd_bo_ref(struct fd_bo *bo)
{
	p_atomic_inc(&bo->refcnt);
	return bo;
}

void fd_bo_del(struct fd_bo *bo)
{
	struct fd_device *dev = bo->dev;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	pthread_mutex_lock(&table_lock);

	if ((bo->bo_reuse == BO_CACHE) && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
		goto out;
	if ((bo->bo_reuse == RING_CACHE) && (fd_bo_cache_free(&dev->ring_cache, bo) == 0))
		goto out;

	bo_del(bo);
	fd_device_del_locked(dev);
out:
	pthread_mutex_unlock(&table_lock);
}

/* Called under table_lock */
void bo_del(struct fd_bo *bo)
{
	VG_BO_FREE(bo);

	if (bo->map)
		os_munmap(bo->map, bo->size);

	/* TODO probably bo's in bucket list get removed from
	 * handle table??
	 */

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};
		_mesa_hash_table_remove_key(bo->dev->handle_table, &bo->handle);
		if (bo->name)
			_mesa_hash_table_remove_key(bo->dev->name_table, &bo->name);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	bo->funcs->destroy(bo);
}

int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		pthread_mutex_lock(&table_lock);
		set_name(bo, req.name);
		pthread_mutex_unlock(&table_lock);
		bo->bo_reuse = NO_CACHE;
	}

	*name = bo->name;

	return 0;
}
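
/*
 * Minimal flink sharing sketch: the exporter hands the 32-bit global name to
 * another process, which re-opens the buffer with fd_bo_from_name() above;
 * send_name_to_peer() is a hypothetical transport (socket, wire protocol,
 * ...) and not part of this library:
 *
 *	uint32_t name;
 *	if (fd_bo_get_name(bo, &name) == 0)
 *		send_name_to_peer(name);
 *
 *	... in the importing process:
 *	struct fd_bo *shared = fd_bo_from_name(dev, name);
 */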

uint32_t fd_bo_handle(struct fd_bo *bo)
{
	return bo->handle;
}

int fd_bo_dmabuf(struct fd_bo *bo)
{
	int ret, prime_fd;

	ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
			&prime_fd);
	if (ret) {
		ERROR_MSG("failed to get dmabuf fd: %d", ret);
		return ret;
	}

	bo->bo_reuse = NO_CACHE;

	return prime_fd;
}
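
/*
 * Minimal export sketch: fd_bo_dmabuf() returns a prime fd and marks the bo
 * NO_CACHE so a shared buffer is never recycled through the bo caches; the
 * fd can be passed over a unix socket (SCM_RIGHTS) and re-imported with
 * fd_bo_from_dmabuf().  Closing the fd when done is the caller's job:
 *
 *	int prime_fd = fd_bo_dmabuf(bo);
 *	if (prime_fd >= 0) {
 *		... share prime_fd ...
 *		close(prime_fd);
 *	}
 */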

uint32_t fd_bo_size(struct fd_bo *bo)
{
	return bo->size;
}

void * fd_bo_map(struct fd_bo *bo)
{
	if (!bo->map) {
		uint64_t offset;
		int ret;

		ret = bo->funcs->offset(bo, &offset);
		if (ret) {
			return NULL;
		}

		bo->map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
				bo->dev->fd, offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}
	return bo->map;
}

/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
	return bo->funcs->cpu_prep(bo, pipe, op);
}

void fd_bo_cpu_fini(struct fd_bo *bo)
{
	bo->funcs->cpu_fini(bo);
}
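
/*
 * Minimal CPU-access sketch (assumes the DRM_FREEDRENO_PREP_* op flags from
 * freedreno_drmif.h and an already-created fd_pipe; 'data' and 'len' are
 * placeholders).  The prep/fini pair brackets CPU access so it is ordered
 * against pending GPU work:
 *
 *	if (fd_bo_cpu_prep(bo, pipe, DRM_FREEDRENO_PREP_WRITE) == 0) {
 *		void *ptr = fd_bo_map(bo);	// mmap'd lazily, cached in bo->map
 *		if (ptr)
 *			memcpy(ptr, data, len);
 *		fd_bo_cpu_fini(bo);
 *	}
 */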