amdgpu_bo_helper.c revision 90f2b693
/*
 * Copyright 2012  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <sys/mman.h>
#include <gbm.h>
#include "amdgpu_drv.h"
#include "amdgpu_bo_helper.h"
#include "amdgpu_glamor.h"
#include "amdgpu_pixmap.h"

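/* Map an X depth/bpp combination to the corresponding GBM format,
 * or ~0U if the combination isn't supported.
 */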
static uint32_t
amdgpu_get_gbm_format(int depth, int bitsPerPixel)
{
	switch (depth) {
#ifdef GBM_FORMAT_R8
	case 8:
		return GBM_FORMAT_R8;
#endif
	case 15:
		return GBM_FORMAT_ARGB1555;
	case 16:
		return GBM_FORMAT_RGB565;
	case 32:
		return GBM_FORMAT_ARGB8888;
	case 30:
		return GBM_FORMAT_XRGB2101010;
	case 24:
		if (bitsPerPixel == 32)
			return GBM_FORMAT_XRGB8888;
		/* fall through */
	default:
		ErrorF("%s: Unsupported depth/bpp %d/%d\n", __func__,
		       depth, bitsPerPixel);
		return ~0U;
	}
}

/* Calculate appropriate pitch for a pixmap and allocate a BO that can hold it.
 */
struct amdgpu_buffer *amdgpu_alloc_pixmap_bo(ScrnInfoPtr pScrn, int width,
					     int height, int depth, int usage_hint,
					     int bitsPerPixel, int *new_pitch)
{
	AMDGPUInfoPtr info = AMDGPUPTR(pScrn);
	struct amdgpu_buffer *pixmap_buffer;

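	/* Allocate through GBM unless the pixmap was explicitly requested in
	 * GTT memory or GBM isn't available.
	 */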
	if (!(usage_hint & AMDGPU_CREATE_PIXMAP_GTT) && info->gbm) {
		uint32_t bo_use = GBM_BO_USE_RENDERING;
		uint32_t gbm_format = amdgpu_get_gbm_format(depth, bitsPerPixel);

		if (gbm_format == ~0U)
			return NULL;

		pixmap_buffer = (struct amdgpu_buffer *)calloc(1, sizeof(struct amdgpu_buffer));
		if (!pixmap_buffer) {
			return NULL;
		}
		pixmap_buffer->ref_count = 1;

		if (bitsPerPixel == pScrn->bitsPerPixel)
			bo_use |= GBM_BO_USE_SCANOUT;

#ifdef HAVE_GBM_BO_USE_LINEAR
		if (usage_hint == CREATE_PIXMAP_USAGE_SHARED ||
		    (usage_hint & AMDGPU_CREATE_PIXMAP_LINEAR)) {
			bo_use |= GBM_BO_USE_LINEAR;
		}
#endif

		pixmap_buffer->bo.gbm = gbm_bo_create(info->gbm, width, height,
						      gbm_format,
						      bo_use);
		if (!pixmap_buffer->bo.gbm) {
			free(pixmap_buffer);
			return NULL;
		}

		pixmap_buffer->flags |= AMDGPU_BO_FLAGS_GBM;

		if (new_pitch)
			*new_pitch = gbm_bo_get_stride(pixmap_buffer->bo.gbm);
	} else {
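		/* No GBM (or GTT explicitly requested): compute the pitch
		 * ourselves and allocate directly through libdrm_amdgpu.
		 */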
		AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(pScrn);
		unsigned cpp = (bitsPerPixel + 7) / 8;
		unsigned pitch = cpp *
			AMDGPU_ALIGN(width, drmmode_get_pitch_align(pScrn, cpp));
		uint32_t domain = (usage_hint & AMDGPU_CREATE_PIXMAP_GTT) ?
			AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;

		pixmap_buffer = amdgpu_bo_open(pAMDGPUEnt->pDev, pitch * height,
					       4096, domain);

		if (new_pitch)
			*new_pitch = pitch;
	}

	return pixmap_buffer;
}

/* Clear the pixmap contents to black */
void
amdgpu_pixmap_clear(PixmapPtr pixmap)
{
	ScreenPtr screen = pixmap->drawable.pScreen;
	AMDGPUInfoPtr info = AMDGPUPTR(xf86ScreenToScrn(screen));
	GCPtr gc = GetScratchGC(pixmap->drawable.depth, screen);
	xRectangle rect;

	ValidateGC(&pixmap->drawable, gc);
	rect.x = 0;
	rect.y = 0;
	rect.width = pixmap->drawable.width;
	rect.height = pixmap->drawable.height;
	info->force_accel = TRUE;
	gc->ops->PolyFillRect(&pixmap->drawable, gc, 1, &rect);
	info->force_accel = FALSE;
	FreeScratchGC(gc);
}

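/* Return the KMS/GEM handle of a buffer, from GBM or libdrm_amdgpu as
 * appropriate.
 */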
Bool amdgpu_bo_get_handle(struct amdgpu_buffer *bo, uint32_t *handle)
{
	if (bo->flags & AMDGPU_BO_FLAGS_GBM) {
		*handle = gbm_bo_get_handle(bo->bo.gbm).u32;
		return TRUE;
	}

	return amdgpu_bo_export(bo->bo.amdgpu, amdgpu_bo_handle_type_kms,
				handle) == 0;
}

#ifdef USE_GLAMOR

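/* Query the kernel for the tiling metadata of the pixmap's GEM handle and
 * cache it in the pixmap private.
 */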
static void amdgpu_pixmap_do_get_tiling_info(PixmapPtr pixmap)
{
	struct amdgpu_pixmap *priv = amdgpu_get_pixmap_private(pixmap);
	ScreenPtr screen = pixmap->drawable.pScreen;
	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(scrn);
	struct drm_amdgpu_gem_metadata gem_metadata;

	gem_metadata.handle = priv->handle;
	gem_metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

	if (drmCommandWriteRead(pAMDGPUEnt->fd, DRM_AMDGPU_GEM_METADATA,
				&gem_metadata, sizeof(gem_metadata)) == 0)
		priv->tiling_info = gem_metadata.data.tiling_info;
}

#endif

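/* Return the cached tiling info for a pixmap, resolving its GEM handle first
 * if that hasn't happened yet.
 */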
uint64_t amdgpu_pixmap_get_tiling_info(PixmapPtr pixmap)
{
	struct amdgpu_pixmap *priv = amdgpu_get_pixmap_private(pixmap);
	uint32_t handle;

	if (!priv || !priv->handle_valid) {
		amdgpu_pixmap_get_handle(pixmap, &handle);
		priv = amdgpu_get_pixmap_private(pixmap);
	}

	return priv->tiling_info;
}

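/* Resolve and cache the GEM handle backing a pixmap. With glamor the pixmap
 * is exported as a dma-buf fd and re-imported to get a handle; otherwise the
 * handle comes straight from the pixmap's BO.
 */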
Bool amdgpu_pixmap_get_handle(PixmapPtr pixmap, uint32_t *handle)
{
#ifdef USE_GLAMOR
	ScreenPtr screen = pixmap->drawable.pScreen;
	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
	AMDGPUInfoPtr info = AMDGPUPTR(scrn);
#endif
	struct amdgpu_pixmap *priv = amdgpu_get_pixmap_private(pixmap);

	if (!priv) {
		priv = calloc(1, sizeof(*priv));
		if (!priv)
			return FALSE;
		amdgpu_set_pixmap_private(pixmap, priv);
	}

	if (priv->handle_valid)
		goto success;

#ifdef USE_GLAMOR
	if (info->use_glamor) {
		AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(scrn);
		CARD16 stride;
		CARD32 size;
		int fd, r;

		fd = glamor_fd_from_pixmap(screen, pixmap, &stride, &size);
		if (fd < 0)
			return FALSE;

		r = drmPrimeFDToHandle(pAMDGPUEnt->fd, fd, &priv->handle);
		close(fd);
		if (r)
			return FALSE;

		amdgpu_pixmap_do_get_tiling_info(pixmap);
		goto success;
	}
#endif

	if (!priv->bo || !amdgpu_bo_get_handle(priv->bo, &priv->handle))
		return FALSE;

 success:
	priv->handle_valid = TRUE;
	*handle = priv->handle;
	return TRUE;
}

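/* Map a buffer for CPU access. GBM BOs are mapped via the
 * DRM_AMDGPU_GEM_MMAP ioctl and mmap(); native BOs use amdgpu_bo_cpu_map().
 */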
int amdgpu_bo_map(ScrnInfoPtr pScrn, struct amdgpu_buffer *bo)
{
	int ret = 0;

	if (bo->flags & AMDGPU_BO_FLAGS_GBM) {
		AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(pScrn);
		uint32_t handle, stride, height;
		union drm_amdgpu_gem_mmap args;
		int fd = pAMDGPUEnt->fd;
		void *ptr;

		handle = gbm_bo_get_handle(bo->bo.gbm).u32;
		stride = gbm_bo_get_stride(bo->bo.gbm);
		height = gbm_bo_get_height(bo->bo.gbm);

		memset(&args, 0, sizeof(union drm_amdgpu_gem_mmap));
		args.in.handle = handle;

		ret = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_MMAP,
					  &args, sizeof(args));
		if (ret) {
			ErrorF("Failed to get the mmap offset\n");
			return ret;
		}

		ptr = mmap(NULL, stride * height,
			   PROT_READ | PROT_WRITE, MAP_SHARED,
			   fd, args.out.addr_ptr);

		/* mmap() returns MAP_FAILED, not NULL, on failure */
		if (ptr == MAP_FAILED) {
			ErrorF("Failed to mmap the bo\n");
			return -1;
		}

		bo->cpu_ptr = ptr;
	} else
		ret = amdgpu_bo_cpu_map(bo->bo.amdgpu, &bo->cpu_ptr);

	return ret;
}

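/* Release a CPU mapping previously set up by amdgpu_bo_map(). */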
void amdgpu_bo_unmap(struct amdgpu_buffer *bo)
{
	if (!bo->cpu_ptr)
		return;

	if (bo->flags & AMDGPU_BO_FLAGS_GBM) {
		uint32_t stride, height;
		stride = gbm_bo_get_stride(bo->bo.gbm);
		height = gbm_bo_get_height(bo->bo.gbm);
		munmap(bo->cpu_ptr, stride * height);
	} else
		amdgpu_bo_cpu_unmap(bo->bo.amdgpu);
}

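/* Allocate a buffer object of the given size, alignment and memory domain
 * through libdrm_amdgpu.
 */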
struct amdgpu_buffer *amdgpu_bo_open(amdgpu_device_handle pDev,
				     uint32_t alloc_size,
				     uint32_t phys_alignment,
				     uint32_t domains)
{
	struct amdgpu_bo_alloc_request alloc_request;
	struct amdgpu_buffer *bo = NULL;

	memset(&alloc_request, 0, sizeof(struct amdgpu_bo_alloc_request));

	bo = (struct amdgpu_buffer *)calloc(1, sizeof(struct amdgpu_buffer));
	if (!bo)
		return NULL;

	alloc_request.alloc_size = alloc_size;
	alloc_request.phys_alignment = phys_alignment;
	alloc_request.preferred_heap = domains;

	if (amdgpu_bo_alloc(pDev, &alloc_request, &bo->bo.amdgpu)) {
		free(bo);
		return NULL;
	}

	bo->ref_count = 1;

	return bo;
}

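/* Take an additional reference on a buffer. */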
void amdgpu_bo_ref(struct amdgpu_buffer *buffer)
{
	buffer->ref_count++;
}

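/* Drop a reference. When the last reference goes away, the buffer is
 * unmapped, the underlying GBM or amdgpu BO is destroyed, and the caller's
 * pointer is cleared.
 */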
void amdgpu_bo_unref(struct amdgpu_buffer **buffer)
{
	struct amdgpu_buffer *buf = *buffer;

	buf->ref_count--;
	if (buf->ref_count) {
		return;
	}

	amdgpu_bo_unmap(buf);

	if (buf->flags & AMDGPU_BO_FLAGS_GBM) {
		gbm_bo_destroy(buf->bo.gbm);
	} else {
		amdgpu_bo_free(buf->bo.amdgpu);
	}
	free(buf);
	*buffer = NULL;
}

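/* Query the allocated size of a BO; *size is set to 0 on failure. */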
int amdgpu_query_bo_size(amdgpu_bo_handle buf_handle, uint32_t *size)
{
	struct amdgpu_bo_info buffer_info;
	int ret;

	memset(&buffer_info, 0, sizeof(struct amdgpu_bo_info));
	ret = amdgpu_bo_query_info(buf_handle, &buffer_info);
	if (ret)
		*size = 0;
	else
		*size = (uint32_t)(buffer_info.alloc_size);

	return ret;
}

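/* Query the total size and maximum single allocation of a memory heap
 * (VRAM or GTT); both are set to 0 on failure.
 */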
int amdgpu_query_heap_size(amdgpu_device_handle pDev,
			   uint32_t heap,
			   uint64_t *heap_size,
			   uint64_t *max_allocation)
{
	struct amdgpu_heap_info heap_info;
	int ret;

	memset(&heap_info, 0, sizeof(struct amdgpu_heap_info));
	ret = amdgpu_query_heap_info(pDev, heap, 0, &heap_info);
	if (ret) {
		*heap_size = 0;
		*max_allocation = 0;
	} else {
		*heap_size = heap_info.heap_size;
		*max_allocation = heap_info.max_allocation;
	}

	return ret;
}

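/* Wrap a dma-buf file descriptor in a new amdgpu_buffer by importing it
 * through libdrm_amdgpu. The size parameter is currently unused.
 */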
struct amdgpu_buffer *amdgpu_gem_bo_open_prime(amdgpu_device_handle pDev,
					       int fd_handle,
					       uint32_t size)
{
	struct amdgpu_buffer *bo = NULL;
	struct amdgpu_bo_import_result buffer = {0};

	bo = (struct amdgpu_buffer *)calloc(1, sizeof(struct amdgpu_buffer));
	if (!bo)
		return NULL;

	if (amdgpu_bo_import(pDev, amdgpu_bo_handle_type_dma_buf_fd,
			     (uint32_t)fd_handle, &buffer)) {
		free(bo);
		return NULL;
	}
	bo->bo.amdgpu = buffer.buf_handle;
	bo->ref_count = 1;

	return bo;
}

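/* Attach shared (PRIME) backing to a pixmap. fd_handle carries the dma-buf
 * file descriptor (or -1 to detach the current BO). With GBM the fd is
 * imported as a GBM BO; otherwise it is opened directly through
 * libdrm_amdgpu.
 */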
Bool amdgpu_set_shared_pixmap_backing(PixmapPtr ppix, void *fd_handle)
{
	ScrnInfoPtr pScrn = xf86ScreenToScrn(ppix->drawable.pScreen);
	AMDGPUInfoPtr info = AMDGPUPTR(pScrn);
	AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(pScrn);
	struct amdgpu_buffer *pixmap_buffer = NULL;
	int ihandle = (int)(long)fd_handle;
	uint32_t size = ppix->devKind * ppix->drawable.height;
	Bool ret;

	if (ihandle == -1)
		return amdgpu_set_pixmap_bo(ppix, NULL);

	if (info->gbm) {
		struct amdgpu_buffer *bo;
		struct gbm_import_fd_data data;
		uint32_t bo_use = GBM_BO_USE_RENDERING;

		data.format = amdgpu_get_gbm_format(ppix->drawable.depth,
						    ppix->drawable.bitsPerPixel);
		if (data.format == ~0U)
			return FALSE;

		bo = calloc(1, sizeof(struct amdgpu_buffer));
		if (!bo)
			return FALSE;
		bo->ref_count = 1;

		data.fd = ihandle;
		data.width = ppix->drawable.width;
		data.height = ppix->drawable.height;
		data.stride = ppix->devKind;

		if (ppix->drawable.bitsPerPixel == pScrn->bitsPerPixel)
			bo_use |= GBM_BO_USE_SCANOUT;

		bo->bo.gbm = gbm_bo_import(info->gbm, GBM_BO_IMPORT_FD, &data,
					   bo_use);
		if (!bo->bo.gbm) {
			free(bo);
			return FALSE;
		}

		bo->flags |= AMDGPU_BO_FLAGS_GBM;

#ifdef USE_GLAMOR
		if (info->use_glamor &&
		    !amdgpu_glamor_create_textured_pixmap(ppix, bo)) {
			amdgpu_bo_unref(&bo);
			return FALSE;
		}
#endif

		ret = amdgpu_set_pixmap_bo(ppix, bo);
		/* amdgpu_set_pixmap_bo increments ref_count if it succeeds */
		amdgpu_bo_unref(&bo);
		return ret;
	}

	pixmap_buffer = amdgpu_gem_bo_open_prime(pAMDGPUEnt->pDev, ihandle, size);
	if (!pixmap_buffer) {
		return FALSE;
	}

	close(ihandle);

	ret = amdgpu_set_pixmap_bo(ppix, pixmap_buffer);

	/* We have a reference from the alloc and one from set pixmap bo;
	 * drop one.
	 */
	amdgpu_bo_unref(&pixmap_buffer);

	return ret;
}