amdgpu_bo_helper.c revision 35d5b7c7
1/*
2 * Copyright 2012  Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifdef HAVE_CONFIG_H
24#include "config.h"
25#endif
26#include <sys/mman.h>
27#include <gbm.h>
28#include "amdgpu_drv.h"
29#include "amdgpu_bo_helper.h"
30#include "amdgpu_glamor.h"
31#include "amdgpu_pixmap.h"
32
33static uint32_t
34amdgpu_get_gbm_format(int depth, int bitsPerPixel)
35{
36	switch (depth) {
37#ifdef GBM_FORMAT_R8
38	case 8:
39		return GBM_FORMAT_R8;
40#endif
41	case 16:
42		return GBM_FORMAT_RGB565;
43	case 32:
44		return GBM_FORMAT_ARGB8888;
45	case 30:
46		return GBM_FORMAT_XRGB2101010;
47	case 24:
48		if (bitsPerPixel == 32)
49			return GBM_FORMAT_XRGB8888;
50		/* fall through */
51	default:
52		ErrorF("%s: Unsupported depth/bpp %d/%d\n", __func__,
53		       depth, bitsPerPixel);
54		return ~0U;
55	}
56}
57
58/* Calculate appropriate pitch for a pixmap and allocate a BO that can hold it.
59 */
/* Calculate appropriate pitch for a pixmap and allocate a BO that can hold it.
 *
 * Two allocation paths:
 *  - GBM path (default when a gbm device exists and the pixmap is not
 *    explicitly requested in GTT): lets GBM pick the pitch/tiling.
 *  - Raw libdrm_amdgpu path: used for GTT pixmaps or when no gbm device
 *    is available; computes the pitch manually.
 *
 * Returns a new buffer with ref_count == 1, or NULL on failure.  If
 * new_pitch is non-NULL it receives the pitch in bytes.
 */
struct amdgpu_buffer *amdgpu_alloc_pixmap_bo(ScrnInfoPtr pScrn, int width,
					      int height, int depth, int usage_hint,
					      int bitsPerPixel, int *new_pitch)
{
	AMDGPUInfoPtr info = AMDGPUPTR(pScrn);
	struct amdgpu_buffer *pixmap_buffer;

	if (!(usage_hint & AMDGPU_CREATE_PIXMAP_GTT) && info->gbm) {
		uint32_t bo_use = GBM_BO_USE_RENDERING;
		uint32_t gbm_format = amdgpu_get_gbm_format(depth, bitsPerPixel);

		/* ~0U signals an unsupported depth/bpp combination */
		if (gbm_format == ~0U)
			return NULL;

		pixmap_buffer = (struct amdgpu_buffer *)calloc(1, sizeof(struct amdgpu_buffer));
		if (!pixmap_buffer) {
			return NULL;
		}
		pixmap_buffer->ref_count = 1;

		/* Only pixmaps matching the screen format can ever be
		 * scanned out directly */
		if ( bitsPerPixel == pScrn->bitsPerPixel)
			bo_use |= GBM_BO_USE_SCANOUT;

#ifdef HAVE_GBM_BO_USE_LINEAR
		/* Shared (prime) pixmaps and explicitly linear requests
		 * must not be tiled, so the other GPU / consumer can read
		 * them */
		if (usage_hint == CREATE_PIXMAP_USAGE_SHARED ||
		    (usage_hint & AMDGPU_CREATE_PIXMAP_LINEAR)) {
			bo_use |= GBM_BO_USE_LINEAR;
		}
#endif

		pixmap_buffer->bo.gbm = gbm_bo_create(info->gbm, width, height,
						      gbm_format,
						      bo_use);
		if (!pixmap_buffer->bo.gbm) {
			free(pixmap_buffer);
			return NULL;
		}

		pixmap_buffer->flags |= AMDGPU_BO_FLAGS_GBM;

		/* GBM chose the pitch; report it back to the caller */
		if (new_pitch)
			*new_pitch = gbm_bo_get_stride(pixmap_buffer->bo.gbm);
	} else {
		AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(pScrn);
		/* bytes per pixel, rounded up for sub-byte depths */
		unsigned cpp = (bitsPerPixel + 7) / 8;
		unsigned pitch = cpp *
			AMDGPU_ALIGN(width, drmmode_get_pitch_align(pScrn, cpp));
		uint32_t domain = (usage_hint & AMDGPU_CREATE_PIXMAP_GTT) ?
			AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;

		/* 4096-byte (page) alignment for the BO itself */
		pixmap_buffer = amdgpu_bo_open(pAMDGPUEnt->pDev, pitch * height,
					       4096, domain);

		if (new_pitch)
			*new_pitch = pitch;
	}

	return pixmap_buffer;
}
119
120/* Clear the pixmap contents to black */
121void
122amdgpu_pixmap_clear(PixmapPtr pixmap)
123{
124	ScreenPtr screen = pixmap->drawable.pScreen;
125	AMDGPUInfoPtr info = AMDGPUPTR(xf86ScreenToScrn(screen));
126	GCPtr gc = GetScratchGC(pixmap->drawable.depth, screen);
127	xRectangle rect;
128
129	ValidateGC(&pixmap->drawable, gc);
130	rect.x = 0;
131	rect.y = 0;
132	rect.width = pixmap->drawable.width;
133	rect.height = pixmap->drawable.height;
134	info->force_accel = TRUE;
135	gc->ops->PolyFillRect(&pixmap->drawable, gc, 1, &rect);
136	info->force_accel = FALSE;
137	FreeScratchGC(gc);
138}
139
140Bool amdgpu_bo_get_handle(struct amdgpu_buffer *bo, uint32_t *handle)
141{
142	if (bo->flags & AMDGPU_BO_FLAGS_GBM) {
143		*handle = gbm_bo_get_handle(bo->bo.gbm).u32;
144		return TRUE;
145	}
146
147	return amdgpu_bo_export(bo->bo.amdgpu, amdgpu_bo_handle_type_kms,
148				handle) == 0;
149}
150
151static void amdgpu_pixmap_do_get_tiling_info(PixmapPtr pixmap)
152{
153	struct amdgpu_pixmap *priv = amdgpu_get_pixmap_private(pixmap);
154	ScreenPtr screen = pixmap->drawable.pScreen;
155	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
156	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(scrn);
157	struct drm_amdgpu_gem_metadata gem_metadata;
158
159	gem_metadata.handle = priv->handle;
160	gem_metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;
161
162	if (drmCommandWriteRead(pAMDGPUEnt->fd, DRM_AMDGPU_GEM_METADATA,
163				&gem_metadata, sizeof(gem_metadata)) == 0)
164		priv->tiling_info = gem_metadata.data.tiling_info;
165}
166
167uint64_t amdgpu_pixmap_get_tiling_info(PixmapPtr pixmap)
168{
169	struct amdgpu_pixmap *priv = amdgpu_get_pixmap_private(pixmap);
170	uint32_t handle;
171
172	if (!priv || !priv->handle_valid) {
173		amdgpu_pixmap_get_handle(pixmap, &handle);
174		priv = amdgpu_get_pixmap_private(pixmap);
175	}
176
177	return priv->tiling_info;
178}
179
180Bool amdgpu_pixmap_get_handle(PixmapPtr pixmap, uint32_t *handle)
181{
182#ifdef USE_GLAMOR
183	ScreenPtr screen = pixmap->drawable.pScreen;
184	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
185	AMDGPUInfoPtr info = AMDGPUPTR(scrn);
186#endif
187	struct amdgpu_pixmap *priv = amdgpu_get_pixmap_private(pixmap);
188
189	if (!priv) {
190		priv = calloc(1, sizeof(*priv));
191		amdgpu_set_pixmap_private(pixmap, priv);
192	}
193
194	if (priv->handle_valid)
195		goto success;
196
197#ifdef USE_GLAMOR
198	if (info->use_glamor) {
199		AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(scrn);
200		CARD16 stride;
201		CARD32 size;
202		int fd, r;
203
204		fd = glamor_fd_from_pixmap(screen, pixmap, &stride, &size);
205		if (fd < 0)
206			return FALSE;
207
208		r = drmPrimeFDToHandle(pAMDGPUEnt->fd, fd, &priv->handle);
209		close(fd);
210		if (r == 0)
211			goto get_tiling_info;
212	}
213#endif
214
215	if (!priv->bo || !amdgpu_bo_get_handle(priv->bo, &priv->handle))
216		return FALSE;
217
218 get_tiling_info:
219	amdgpu_pixmap_do_get_tiling_info(pixmap);
220 success:
221	priv->handle_valid = TRUE;
222	*handle = priv->handle;
223	return TRUE;
224}
225
226int amdgpu_bo_map(ScrnInfoPtr pScrn, struct amdgpu_buffer *bo)
227{
228	int ret = 0;
229
230	if (bo->flags & AMDGPU_BO_FLAGS_GBM) {
231		AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(pScrn);
232		uint32_t handle, stride, height;
233		union drm_amdgpu_gem_mmap args;
234		int fd = pAMDGPUEnt->fd;
235		void *ptr;
236
237		handle = gbm_bo_get_handle(bo->bo.gbm).u32;
238		stride = gbm_bo_get_stride(bo->bo.gbm);
239		height = gbm_bo_get_height(bo->bo.gbm);
240
241		memset(&args, 0, sizeof(union drm_amdgpu_gem_mmap));
242		args.in.handle = handle;
243
244		ret = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_MMAP,
245					&args, sizeof(args));
246		if (ret) {
247			ErrorF("Failed to get the mmap offset\n");
248			return ret;
249		}
250
251		ptr = mmap(NULL, stride * height,
252			PROT_READ | PROT_WRITE, MAP_SHARED,
253			fd, args.out.addr_ptr);
254
255		if (!ptr) {
256			ErrorF("Failed to mmap the bo\n");
257			return -1;
258		}
259
260		bo->cpu_ptr = ptr;
261	} else
262		ret = amdgpu_bo_cpu_map(bo->bo.amdgpu, &bo->cpu_ptr);
263
264	return ret;
265}
266
267void amdgpu_bo_unmap(struct amdgpu_buffer *bo)
268{
269	if (!bo->cpu_ptr)
270		return;
271
272	if (bo->flags & AMDGPU_BO_FLAGS_GBM) {
273		uint32_t stride, height;
274		stride = gbm_bo_get_stride(bo->bo.gbm);
275		height = gbm_bo_get_height(bo->bo.gbm);
276		munmap(bo->cpu_ptr, stride * height);
277	} else
278		amdgpu_bo_cpu_unmap(bo->bo.amdgpu);
279}
280
281struct amdgpu_buffer *amdgpu_bo_open(amdgpu_device_handle pDev,
282				       uint32_t alloc_size,
283				       uint32_t phys_alignment,
284				       uint32_t domains)
285{
286	struct amdgpu_bo_alloc_request alloc_request;
287	struct amdgpu_buffer *bo = NULL;
288
289	memset(&alloc_request, 0, sizeof(struct amdgpu_bo_alloc_request));
290
291	bo = (struct amdgpu_buffer *)calloc(1, sizeof(struct amdgpu_buffer));
292	if (!bo)
293		return NULL;
294
295	alloc_request.alloc_size = alloc_size;
296	alloc_request.phys_alignment = phys_alignment;
297	alloc_request.preferred_heap = domains;
298
299	if (amdgpu_bo_alloc(pDev, &alloc_request, &bo->bo.amdgpu)) {
300		free(bo);
301		return NULL;
302	}
303
304	bo->ref_count = 1;
305
306	return bo;
307}
308
309void amdgpu_bo_ref(struct amdgpu_buffer *buffer)
310{
311	buffer->ref_count++;
312}
313
314void amdgpu_bo_unref(struct amdgpu_buffer **buffer)
315{
316	struct amdgpu_buffer *buf = *buffer;
317
318	buf->ref_count--;
319	if (buf->ref_count) {
320		return;
321	}
322
323	amdgpu_bo_unmap(buf);
324
325	if (buf->flags & AMDGPU_BO_FLAGS_GBM) {
326		gbm_bo_destroy(buf->bo.gbm);
327	} else {
328		amdgpu_bo_free(buf->bo.amdgpu);
329	}
330	free(buf);
331	*buffer = NULL;
332}
333
334int amdgpu_query_bo_size(amdgpu_bo_handle buf_handle, uint32_t *size)
335{
336	struct amdgpu_bo_info buffer_info;
337	int ret;
338
339	memset(&buffer_info, 0, sizeof(struct amdgpu_bo_info));
340	ret = amdgpu_bo_query_info(buf_handle, &buffer_info);
341	if (ret)
342		*size = 0;
343	else
344		*size = (uint32_t)(buffer_info.alloc_size);
345
346	return ret;
347}
348
349int amdgpu_query_heap_size(amdgpu_device_handle pDev,
350			    uint32_t heap,
351			    uint64_t *heap_size,
352			    uint64_t *max_allocation)
353{
354	struct amdgpu_heap_info heap_info;
355	int ret;
356
357	memset(&heap_info, 0, sizeof(struct amdgpu_heap_info));
358	ret = amdgpu_query_heap_info(pDev, heap, 0, &heap_info);
359	if (ret) {
360		*heap_size = 0;
361		*max_allocation = 0;
362	} else {
363		*heap_size = heap_info.heap_size;
364		*max_allocation = heap_info.max_allocation;
365	}
366
367	return ret;
368}
369
370struct amdgpu_buffer *amdgpu_gem_bo_open_prime(amdgpu_device_handle pDev,
371						 int fd_handle,
372						 uint32_t size)
373{
374	struct amdgpu_buffer *bo = NULL;
375	struct amdgpu_bo_import_result buffer = {0};
376
377	bo = (struct amdgpu_buffer *)calloc(1, sizeof(struct amdgpu_buffer));
378	if (!bo)
379		return NULL;
380
381	if (amdgpu_bo_import(pDev, amdgpu_bo_handle_type_dma_buf_fd,
382			     (uint32_t)fd_handle, &buffer)) {
383		free(bo);
384		return FALSE;
385	}
386	bo->bo.amdgpu = buffer.buf_handle;
387	bo->ref_count = 1;
388
389	return bo;
390}
391
392
/* Attach shared (prime) backing storage to a pixmap.
 *
 * fd_handle encodes a dma-buf fd as a pointer; -1 means "detach the
 * current backing BO".  When a gbm device is available the fd is
 * imported through GBM (so glamor can texture from it), otherwise it is
 * imported directly via libdrm_amdgpu.
 *
 * Returns TRUE on success.
 */
Bool amdgpu_set_shared_pixmap_backing(PixmapPtr ppix, void *fd_handle)
{
	ScrnInfoPtr pScrn = xf86ScreenToScrn(ppix->drawable.pScreen);
	AMDGPUInfoPtr info = AMDGPUPTR(pScrn);
	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(pScrn);
	struct amdgpu_buffer *pixmap_buffer = NULL;
	/* The fd arrives smuggled through a void pointer */
	int ihandle = (int)(long)fd_handle;
	uint32_t size = ppix->devKind * ppix->drawable.height;
	Bool ret;

	/* -1 detaches the backing BO */
	if (ihandle == -1)
		return amdgpu_set_pixmap_bo(ppix, NULL);

	if (info->gbm) {
		struct amdgpu_buffer *bo;
		struct gbm_import_fd_data data;
		uint32_t bo_use = GBM_BO_USE_RENDERING;

		data.format = amdgpu_get_gbm_format(ppix->drawable.depth,
						    ppix->drawable.bitsPerPixel);
		if (data.format == ~0U)
			return FALSE;

		bo = calloc(1, sizeof(struct amdgpu_buffer));
		if (!bo)
			return FALSE;
		bo->ref_count = 1;

		data.fd = ihandle;
		data.width = ppix->drawable.width;
		data.height = ppix->drawable.height;
		data.stride = ppix->devKind;

		/* Matching the screen format makes direct scanout possible */
		if (ppix->drawable.bitsPerPixel == pScrn->bitsPerPixel)
			bo_use |= GBM_BO_USE_SCANOUT;

		bo->bo.gbm = gbm_bo_import(info->gbm, GBM_BO_IMPORT_FD, &data,
					   bo_use);
		if (!bo->bo.gbm) {
			free(bo);
			return FALSE;
		}

		bo->flags |= AMDGPU_BO_FLAGS_GBM;

		/* NOTE(review): unlike the non-GBM path below, ihandle is
		 * not closed here — presumably gbm_bo_import dups the fd
		 * and the caller keeps ownership; confirm against the
		 * callers' fd lifecycle. */

#ifdef USE_GLAMOR
		if (info->use_glamor &&
		    !amdgpu_glamor_create_textured_pixmap(ppix, bo)) {
			amdgpu_bo_unref(&bo);
			return FALSE;
		}
#endif

		ret = amdgpu_set_pixmap_bo(ppix, bo);
		/* amdgpu_set_pixmap_bo increments ref_count if it succeeds */
		amdgpu_bo_unref(&bo);
		return ret;
	}

	pixmap_buffer = amdgpu_gem_bo_open_prime(pAMDGPUEnt->pDev, ihandle, size);
	if (!pixmap_buffer) {
		return FALSE;
	}

	/* The import duplicated the underlying buffer reference; the fd
	 * itself is no longer needed */
	close(ihandle);

	ret = amdgpu_set_pixmap_bo(ppix, pixmap_buffer);

	/* we have a reference from the alloc and one from set pixmap bo,
	   drop one */
	amdgpu_bo_unref(&pixmap_buffer);

	return ret;
}
467