1/*
2 * Copyright 2012  Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifdef HAVE_CONFIG_H
24#include "config.h"
25#endif
26#include <sys/mman.h>
27#include <gbm.h>
28#include "amdgpu_drv.h"
29#include "amdgpu_bo_helper.h"
30#include "amdgpu_glamor.h"
31#include "amdgpu_pixmap.h"
32
33static uint32_t
34amdgpu_get_gbm_format(int depth, int bitsPerPixel)
35{
36	switch (depth) {
37#ifdef GBM_FORMAT_R8
38	case 8:
39		return GBM_FORMAT_R8;
40#endif
41	case 15:
42		return GBM_FORMAT_ARGB1555;
43	case 16:
44		return GBM_FORMAT_RGB565;
45	case 32:
46		return GBM_FORMAT_ARGB8888;
47	case 30:
48		return GBM_FORMAT_XRGB2101010;
49	case 24:
50		if (bitsPerPixel == 32)
51			return GBM_FORMAT_XRGB8888;
52		/* fall through */
53	default:
54		ErrorF("%s: Unsupported depth/bpp %d/%d\n", __func__,
55		       depth, bitsPerPixel);
56		return ~0U;
57	}
58}
59
60/* Calculate appropriate pitch for a pixmap and allocate a BO that can hold it.
61 */
62struct amdgpu_buffer *amdgpu_alloc_pixmap_bo(ScrnInfoPtr pScrn, int width,
63					      int height, int depth, int usage_hint,
64					      int bitsPerPixel, int *new_pitch)
65{
66	AMDGPUInfoPtr info = AMDGPUPTR(pScrn);
67	struct amdgpu_buffer *pixmap_buffer;
68
69	if (!(usage_hint & AMDGPU_CREATE_PIXMAP_GTT) && info->gbm) {
70		uint32_t bo_use = GBM_BO_USE_RENDERING;
71		uint32_t gbm_format = amdgpu_get_gbm_format(depth, bitsPerPixel);
72
73		if (gbm_format == ~0U)
74			return NULL;
75
76		pixmap_buffer = (struct amdgpu_buffer *)calloc(1, sizeof(struct amdgpu_buffer));
77		if (!pixmap_buffer) {
78			return NULL;
79		}
80		pixmap_buffer->ref_count = 1;
81
82		if (usage_hint & AMDGPU_CREATE_PIXMAP_SCANOUT)
83			bo_use |= GBM_BO_USE_SCANOUT;
84
85#ifdef HAVE_GBM_BO_USE_LINEAR
86		if (usage_hint == CREATE_PIXMAP_USAGE_SHARED ||
87		    (usage_hint & AMDGPU_CREATE_PIXMAP_LINEAR)) {
88			bo_use |= GBM_BO_USE_LINEAR;
89		}
90#endif
91
92		pixmap_buffer->bo.gbm = gbm_bo_create(info->gbm, width, height,
93						      gbm_format,
94						      bo_use);
95		if (!pixmap_buffer->bo.gbm) {
96			free(pixmap_buffer);
97			return NULL;
98		}
99
100		pixmap_buffer->flags |= AMDGPU_BO_FLAGS_GBM;
101
102		if (new_pitch)
103			*new_pitch = gbm_bo_get_stride(pixmap_buffer->bo.gbm);
104	} else {
105		AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(pScrn);
106		unsigned cpp = (bitsPerPixel + 7) / 8;
107		unsigned pitch = cpp *
108			AMDGPU_ALIGN(width, drmmode_get_pitch_align(pScrn, cpp));
109		uint32_t domain = (usage_hint & AMDGPU_CREATE_PIXMAP_GTT) ?
110			AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
111
112		pixmap_buffer = amdgpu_bo_open(pAMDGPUEnt->pDev, pitch * height,
113					       4096, domain);
114
115		if (new_pitch)
116			*new_pitch = pitch;
117	}
118
119	return pixmap_buffer;
120}
121
122/* Clear the pixmap contents to black */
123void
124amdgpu_pixmap_clear(PixmapPtr pixmap)
125{
126	ScreenPtr screen = pixmap->drawable.pScreen;
127	AMDGPUInfoPtr info = AMDGPUPTR(xf86ScreenToScrn(screen));
128	GCPtr gc = GetScratchGC(pixmap->drawable.depth, screen);
129	xRectangle rect;
130
131	ValidateGC(&pixmap->drawable, gc);
132	rect.x = 0;
133	rect.y = 0;
134	rect.width = pixmap->drawable.width;
135	rect.height = pixmap->drawable.height;
136	info->force_accel = TRUE;
137	gc->ops->PolyFillRect(&pixmap->drawable, gc, 1, &rect);
138	info->force_accel = FALSE;
139	FreeScratchGC(gc);
140}
141
142Bool amdgpu_bo_get_handle(struct amdgpu_buffer *bo, uint32_t *handle)
143{
144	if (bo->flags & AMDGPU_BO_FLAGS_GBM) {
145		*handle = gbm_bo_get_handle(bo->bo.gbm).u32;
146		return TRUE;
147	}
148
149	return amdgpu_bo_export(bo->bo.amdgpu, amdgpu_bo_handle_type_kms,
150				handle) == 0;
151}
152
153#ifdef USE_GLAMOR
154
155static void amdgpu_pixmap_do_get_tiling_info(PixmapPtr pixmap)
156{
157	struct amdgpu_pixmap *priv = amdgpu_get_pixmap_private(pixmap);
158	ScreenPtr screen = pixmap->drawable.pScreen;
159	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
160	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(scrn);
161	struct drm_amdgpu_gem_metadata gem_metadata;
162
163	gem_metadata.handle = priv->handle;
164	gem_metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;
165
166	if (drmCommandWriteRead(pAMDGPUEnt->fd, DRM_AMDGPU_GEM_METADATA,
167				&gem_metadata, sizeof(gem_metadata)) == 0)
168		priv->tiling_info = gem_metadata.data.tiling_info;
169}
170
171#endif
172
173uint64_t amdgpu_pixmap_get_tiling_info(PixmapPtr pixmap)
174{
175	struct amdgpu_pixmap *priv = amdgpu_get_pixmap_private(pixmap);
176	uint32_t handle;
177
178	if (!priv || !priv->handle_valid) {
179		amdgpu_pixmap_get_handle(pixmap, &handle);
180		priv = amdgpu_get_pixmap_private(pixmap);
181	}
182
183	return priv->tiling_info;
184}
185
186Bool amdgpu_pixmap_get_handle(PixmapPtr pixmap, uint32_t *handle)
187{
188	ScreenPtr screen = pixmap->drawable.pScreen;
189	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
190	AMDGPUInfoPtr info = AMDGPUPTR(scrn);
191	struct amdgpu_pixmap *priv;
192
193	if (info->shadow_fb)
194		return FALSE;
195
196	priv = amdgpu_get_pixmap_private(pixmap);
197	if (!priv) {
198		priv = calloc(1, sizeof(*priv));
199		amdgpu_set_pixmap_private(pixmap, priv);
200	}
201
202	if (priv->handle_valid)
203		goto success;
204
205#ifdef USE_GLAMOR
206	if (info->use_glamor) {
207		AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(scrn);
208		CARD16 stride;
209		CARD32 size;
210		int fd, r;
211
212		fd = glamor_fd_from_pixmap(screen, pixmap, &stride, &size);
213		if (fd < 0)
214			return FALSE;
215
216		r = drmPrimeFDToHandle(pAMDGPUEnt->fd, fd, &priv->handle);
217		close(fd);
218		if (r)
219			return FALSE;
220
221		amdgpu_pixmap_do_get_tiling_info(pixmap);
222		goto success;
223	}
224#endif
225
226	if (!priv->bo || !amdgpu_bo_get_handle(priv->bo, &priv->handle))
227		return FALSE;
228
229 success:
230	priv->handle_valid = TRUE;
231	*handle = priv->handle;
232	return TRUE;
233}
234
235int amdgpu_bo_map(ScrnInfoPtr pScrn, struct amdgpu_buffer *bo)
236{
237	int ret = 0;
238
239	if (bo->flags & AMDGPU_BO_FLAGS_GBM) {
240		AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(pScrn);
241		uint32_t handle, stride, height;
242		union drm_amdgpu_gem_mmap args;
243		int fd = pAMDGPUEnt->fd;
244		void *ptr;
245
246		handle = gbm_bo_get_handle(bo->bo.gbm).u32;
247		stride = gbm_bo_get_stride(bo->bo.gbm);
248		height = gbm_bo_get_height(bo->bo.gbm);
249
250		memset(&args, 0, sizeof(union drm_amdgpu_gem_mmap));
251		args.in.handle = handle;
252
253		ret = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_MMAP,
254					&args, sizeof(args));
255		if (ret) {
256			ErrorF("Failed to get the mmap offset\n");
257			return ret;
258		}
259
260		ptr = mmap(NULL, stride * height,
261			PROT_READ | PROT_WRITE, MAP_SHARED,
262			fd, args.out.addr_ptr);
263
264		if (!ptr) {
265			ErrorF("Failed to mmap the bo\n");
266			return -1;
267		}
268
269		bo->cpu_ptr = ptr;
270	} else
271		ret = amdgpu_bo_cpu_map(bo->bo.amdgpu, &bo->cpu_ptr);
272
273	return ret;
274}
275
276void amdgpu_bo_unmap(struct amdgpu_buffer *bo)
277{
278	if (!bo->cpu_ptr)
279		return;
280
281	if (bo->flags & AMDGPU_BO_FLAGS_GBM) {
282		uint32_t stride, height;
283		stride = gbm_bo_get_stride(bo->bo.gbm);
284		height = gbm_bo_get_height(bo->bo.gbm);
285		munmap(bo->cpu_ptr, stride * height);
286	} else
287		amdgpu_bo_cpu_unmap(bo->bo.amdgpu);
288}
289
290struct amdgpu_buffer *amdgpu_bo_open(amdgpu_device_handle pDev,
291				       uint32_t alloc_size,
292				       uint32_t phys_alignment,
293				       uint32_t domains)
294{
295	struct amdgpu_bo_alloc_request alloc_request;
296	struct amdgpu_buffer *bo = NULL;
297
298	memset(&alloc_request, 0, sizeof(struct amdgpu_bo_alloc_request));
299
300	bo = (struct amdgpu_buffer *)calloc(1, sizeof(struct amdgpu_buffer));
301	if (!bo)
302		return NULL;
303
304	alloc_request.alloc_size = alloc_size;
305	alloc_request.phys_alignment = phys_alignment;
306	alloc_request.preferred_heap = domains;
307
308	if (amdgpu_bo_alloc(pDev, &alloc_request, &bo->bo.amdgpu)) {
309		free(bo);
310		return NULL;
311	}
312
313	bo->ref_count = 1;
314
315	return bo;
316}
317
318void amdgpu_bo_ref(struct amdgpu_buffer *buffer)
319{
320	buffer->ref_count++;
321}
322
323void amdgpu_bo_unref(struct amdgpu_buffer **buffer)
324{
325	struct amdgpu_buffer *buf = *buffer;
326
327	buf->ref_count--;
328	if (buf->ref_count) {
329		return;
330	}
331
332	amdgpu_bo_unmap(buf);
333
334	if (buf->flags & AMDGPU_BO_FLAGS_GBM) {
335		gbm_bo_destroy(buf->bo.gbm);
336	} else {
337		amdgpu_bo_free(buf->bo.amdgpu);
338	}
339	free(buf);
340	*buffer = NULL;
341}
342
343int amdgpu_query_bo_size(amdgpu_bo_handle buf_handle, uint32_t *size)
344{
345	struct amdgpu_bo_info buffer_info;
346	int ret;
347
348	memset(&buffer_info, 0, sizeof(struct amdgpu_bo_info));
349	ret = amdgpu_bo_query_info(buf_handle, &buffer_info);
350	if (ret)
351		*size = 0;
352	else
353		*size = (uint32_t)(buffer_info.alloc_size);
354
355	return ret;
356}
357
358int amdgpu_query_heap_size(amdgpu_device_handle pDev,
359			    uint32_t heap,
360			    uint64_t *heap_size,
361			    uint64_t *max_allocation)
362{
363	struct amdgpu_heap_info heap_info;
364	int ret;
365
366	memset(&heap_info, 0, sizeof(struct amdgpu_heap_info));
367	ret = amdgpu_query_heap_info(pDev, heap, 0, &heap_info);
368	if (ret) {
369		*heap_size = 0;
370		*max_allocation = 0;
371	} else {
372		*heap_size = heap_info.heap_size;
373		*max_allocation = heap_info.max_allocation;
374	}
375
376	return ret;
377}
378
379struct amdgpu_buffer *amdgpu_gem_bo_open_prime(amdgpu_device_handle pDev,
380						 int fd_handle,
381						 uint32_t size)
382{
383	struct amdgpu_buffer *bo = NULL;
384	struct amdgpu_bo_import_result buffer = {0};
385
386	bo = (struct amdgpu_buffer *)calloc(1, sizeof(struct amdgpu_buffer));
387	if (!bo)
388		return NULL;
389
390	if (amdgpu_bo_import(pDev, amdgpu_bo_handle_type_dma_buf_fd,
391			     (uint32_t)fd_handle, &buffer)) {
392		free(bo);
393		return FALSE;
394	}
395	bo->bo.amdgpu = buffer.buf_handle;
396	bo->ref_count = 1;
397
398	return bo;
399}
400
401
/* Attach a shared (PRIME) buffer, passed as a dma-buf fd packed into a
 * void pointer, as the backing storage of ppix.  An fd of -1 detaches the
 * pixmap's current backing BO instead.  Returns TRUE on success.
 */
Bool amdgpu_set_shared_pixmap_backing(PixmapPtr ppix, void *fd_handle)
{
	ScrnInfoPtr pScrn = xf86ScreenToScrn(ppix->drawable.pScreen);
	AMDGPUInfoPtr info = AMDGPUPTR(pScrn);
	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(pScrn);
	struct amdgpu_buffer *pixmap_buffer = NULL;
	int ihandle = (int)(long)fd_handle;
	/* Size in bytes of the incoming buffer, derived from the pixmap's
	 * pitch (devKind) and height; only used on the non-GBM path.
	 */
	uint32_t size = ppix->devKind * ppix->drawable.height;
	Bool ret;

	/* -1 means "detach": drop the pixmap's current backing BO */
	if (ihandle == -1)
		return amdgpu_set_pixmap_bo(ppix, NULL);

	if (info->gbm) {
		struct amdgpu_buffer *bo;
		struct gbm_import_fd_data data;
		uint32_t bo_use = GBM_BO_USE_RENDERING;

		data.format = amdgpu_get_gbm_format(ppix->drawable.depth,
						    ppix->drawable.bitsPerPixel);
		if (data.format == ~0U)
			return FALSE;

		bo = calloc(1, sizeof(struct amdgpu_buffer));
		if (!bo)
			return FALSE;
		bo->ref_count = 1;

		/* Describe the incoming dma-buf using the pixmap's geometry */
		data.fd = ihandle;
		data.width = ppix->drawable.width;
		data.height = ppix->drawable.height;
		data.stride = ppix->devKind;

		/* Only request scanout capability when the pixmap's bpp
		 * matches the screen's.
		 */
		if (ppix->drawable.bitsPerPixel == pScrn->bitsPerPixel)
			bo_use |= GBM_BO_USE_SCANOUT;

		bo->bo.gbm = gbm_bo_import(info->gbm, GBM_BO_IMPORT_FD, &data,
					   bo_use);
		if (!bo->bo.gbm) {
			free(bo);
			return FALSE;
		}

		bo->flags |= AMDGPU_BO_FLAGS_GBM;

#ifdef USE_GLAMOR
		if (info->use_glamor &&
		    !amdgpu_glamor_create_textured_pixmap(ppix, bo)) {
			amdgpu_bo_unref(&bo);
			return FALSE;
		}
#endif

		ret = amdgpu_set_pixmap_bo(ppix, bo);
		/* amdgpu_set_pixmap_bo increments ref_count if it succeeds */
		amdgpu_bo_unref(&bo);
		return ret;
	}

	/* No GBM device: import the fd directly through libdrm_amdgpu */
	pixmap_buffer = amdgpu_gem_bo_open_prime(pAMDGPUEnt->pDev, ihandle, size);
	if (!pixmap_buffer) {
		return FALSE;
	}

	/* The import is complete, so our copy of the fd is closed here.
	 * NOTE(review): the GBM path above does not close ihandle — confirm
	 * whether the caller owns the fd there.
	 */
	close(ihandle);

	ret = amdgpu_set_pixmap_bo(ppix, pixmap_buffer);

	/* we have a reference from the alloc and one from set pixmap bo,
	   drop one */
	amdgpu_bo_unref(&pixmap_buffer);

	return ret;
}
476