amdgpu_bo.c revision 7e21dcc5
/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
				     uint32_t handle)
{
	struct drm_gem_close args = {};

	args.handle = handle;
	drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

static int amdgpu_bo_create(amdgpu_device_handle dev,
			    uint64_t size,
			    uint32_t handle,
			    amdgpu_bo_handle *buf_handle)
{
	struct amdgpu_bo *bo;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo)
		return -ENOMEM;

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = size;
	bo->handle = handle;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	*buf_handle = bo;
	return 0;
}

drm_public int amdgpu_bo_alloc(amdgpu_device_handle dev,
			       struct amdgpu_bo_alloc_request *alloc_buffer,
			       amdgpu_bo_handle *buf_handle)
{
	union drm_amdgpu_gem_create args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = alloc_buffer->alloc_size;
	args.in.alignment = alloc_buffer->phys_alignment;

	/* Set the placement. */
	args.in.domains = alloc_buffer->preferred_heap;
	args.in.domain_flags = alloc_buffer->flags;

	/* Allocate the buffer with the preferred heap. */
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
				&args, sizeof(args));
	if (r)
		goto out;

	r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
			     buf_handle);
	if (r) {
		amdgpu_close_kms_handle(dev, args.out.handle);
		goto out;
	}

	pthread_mutex_lock(&dev->bo_table_mutex);
	r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
				*buf_handle);
	pthread_mutex_unlock(&dev->bo_table_mutex);
	if (r)
		amdgpu_bo_free(*buf_handle);
out:
	return r;
}
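
/*
 * Usage sketch (illustrative only, not part of the library): allocating a
 * page-sized GTT buffer with amdgpu_bo_alloc() and releasing it again.
 * Assumes "dev" was obtained from a prior amdgpu_device_initialize() call.
 *
 *	struct amdgpu_bo_alloc_request req = {};
 *	amdgpu_bo_handle bo;
 *	int r;
 *
 *	req.alloc_size = 4096;
 *	req.phys_alignment = 4096;
 *	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
 *	req.flags = 0;
 *
 *	r = amdgpu_bo_alloc(dev, &req, &bo);
 *	if (r == 0)
 *		amdgpu_bo_free(bo);
 */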

drm_public int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
				      struct amdgpu_bo_metadata *info)
{
	struct drm_amdgpu_gem_metadata args = {};

	args.handle = bo->handle;
	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
	args.data.flags = info->flags;
	args.data.tiling_info = info->tiling_info;

	if (info->size_metadata > sizeof(args.data.data))
		return -EINVAL;

	if (info->size_metadata) {
		args.data.data_size_bytes = info->size_metadata;
		memcpy(args.data.data, info->umd_metadata, info->size_metadata);
	}

	return drmCommandWriteRead(bo->dev->fd,
				   DRM_AMDGPU_GEM_METADATA,
				   &args, sizeof(args));
}

drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
				    struct amdgpu_bo_info *info)
{
	struct drm_amdgpu_gem_metadata metadata = {};
	struct drm_amdgpu_gem_create_in bo_info = {};
	struct drm_amdgpu_gem_op gem_op = {};
	int r;

	/* Validate the BO passed in */
	if (!bo->handle)
		return -EINVAL;

	/* Query metadata. */
	metadata.handle = bo->handle;
	metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
				&metadata, sizeof(metadata));
	if (r)
		return r;

	if (metadata.data.data_size_bytes >
	    sizeof(info->metadata.umd_metadata))
		return -EINVAL;

	/* Query buffer info. */
	gem_op.handle = bo->handle;
	gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
	gem_op.value = (uintptr_t)&bo_info;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
				&gem_op, sizeof(gem_op));
	if (r)
		return r;

	memset(info, 0, sizeof(*info));
	info->alloc_size = bo_info.bo_size;
	info->phys_alignment = bo_info.alignment;
	info->preferred_heap = bo_info.domains;
	info->alloc_flags = bo_info.domain_flags;
	info->metadata.flags = metadata.data.flags;
	info->metadata.tiling_info = metadata.data.tiling_info;

	info->metadata.size_metadata = metadata.data.data_size_bytes;
	if (metadata.data.data_size_bytes > 0)
		memcpy(info->metadata.umd_metadata, metadata.data.data,
		       metadata.data.data_size_bytes);

	return 0;
}

static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
	struct drm_gem_flink flink;
	int fd, dma_fd;
	uint32_t handle;
	int r;

	fd = bo->dev->fd;
	handle = bo->handle;
	if (bo->flink_name)
		return 0;

	if (bo->dev->flink_fd != bo->dev->fd) {
		r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				       &dma_fd);
		if (!r) {
			r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
			close(dma_fd);
		}
		if (r)
			return r;
		fd = bo->dev->flink_fd;
	}
	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;

	r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	if (r)
		return r;

	bo->flink_name = flink.name;

	if (bo->dev->flink_fd != bo->dev->fd) {
		struct drm_gem_close args = {};
		args.handle = handle;
		drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
	}

	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	return r;
}

drm_public int amdgpu_bo_export(amdgpu_bo_handle bo,
				enum amdgpu_bo_handle_type type,
				uint32_t *shared_handle)
{
	int r;

	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		r = amdgpu_bo_export_flink(bo);
		if (r)
			return r;

		*shared_handle = bo->flink_name;
		return 0;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		*shared_handle = bo->handle;
		return 0;

	case amdgpu_bo_handle_type_dma_buf_fd:
		return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
					  DRM_CLOEXEC | DRM_RDWR,
					  (int*)shared_handle);
	}
	return -EINVAL;
}
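
/*
 * Usage sketch (illustrative only, not part of the library): exporting an
 * already allocated BO as a DMA-buf file descriptor so another process or
 * API can import it.  The returned fd belongs to the caller and should be
 * closed once it has been handed over.
 *
 *	uint32_t shared;
 *	int dma_buf_fd, r;
 *
 *	r = amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd, &shared);
 *	if (r == 0) {
 *		dma_buf_fd = (int)shared;
 *		... hand dma_buf_fd to the consumer, then close(dma_buf_fd) ...
 *	}
 */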

drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
				enum amdgpu_bo_handle_type type,
				uint32_t shared_handle,
				struct amdgpu_bo_import_result *output)
{
	struct drm_gem_open open_arg = {};
	struct drm_gem_close close_arg = {};
	struct amdgpu_bo *bo = NULL;
	uint32_t handle = 0, flink_name = 0;
	uint64_t alloc_size = 0;
	int r = 0;
	int dma_fd;
	uint64_t dma_buf_size = 0;

	/* We must maintain a list of pairs <handle, bo>, so that we always
	 * return the same amdgpu_bo instance for the same handle. */
	pthread_mutex_lock(&dev->bo_table_mutex);

	/* Convert a DMA buf handle to a KMS handle now. */
	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
		off_t size;

		/* Get a KMS handle. */
		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
		if (r)
			goto unlock;

		/* Query the buffer size. */
		size = lseek(shared_handle, 0, SEEK_END);
		if (size == (off_t)-1) {
			r = -errno;
			goto free_bo_handle;
		}
		lseek(shared_handle, 0, SEEK_SET);

		dma_buf_size = size;
		shared_handle = handle;
	}

	/* If we have already created a buffer with this handle, find it. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo = handle_table_lookup(&dev->bo_handles, shared_handle);
		break;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		/* Importing a KMS handle is not allowed. */
		r = -EPERM;
		goto unlock;

	default:
		r = -EINVAL;
		goto unlock;
	}

	if (bo) {
		/* The buffer already exists, just bump the refcount. */
		atomic_inc(&bo->refcount);
		pthread_mutex_unlock(&dev->bo_table_mutex);

		output->buf_handle = bo;
		output->alloc_size = bo->alloc_size;
		return 0;
	}

	/* Open the handle. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		open_arg.name = shared_handle;
		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
		if (r)
			goto unlock;

		flink_name = shared_handle;
		handle = open_arg.handle;
		alloc_size = open_arg.size;
		if (dev->flink_fd != dev->fd) {
			r = drmPrimeHandleToFD(dev->flink_fd, handle,
					       DRM_CLOEXEC, &dma_fd);
			if (r)
				goto free_bo_handle;
			r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
			close(dma_fd);
			if (r)
				goto free_bo_handle;
			close_arg.handle = open_arg.handle;
			r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE,
				     &close_arg);
			if (r)
				goto free_bo_handle;
		}
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		handle = shared_handle;
		alloc_size = dma_buf_size;
		break;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		assert(0); /* unreachable */
	}

	/* Initialize it. */
	r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
	if (r)
		goto free_bo_handle;

	r = handle_table_insert(&dev->bo_handles, bo->handle, bo);
	if (r)
		goto free_bo_handle;
	if (flink_name) {
		bo->flink_name = flink_name;
		r = handle_table_insert(&dev->bo_flink_names, flink_name,
					bo);
		if (r)
			goto remove_handle;
	}

	output->buf_handle = bo;
	output->alloc_size = bo->alloc_size;
	pthread_mutex_unlock(&dev->bo_table_mutex);
	return 0;

remove_handle:
	handle_table_remove(&dev->bo_handles, bo->handle);
free_bo_handle:
	if (flink_name && !close_arg.handle && open_arg.handle) {
		close_arg.handle = open_arg.handle;
		drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
	}
	if (bo)
		amdgpu_bo_free(bo);
	else
		amdgpu_close_kms_handle(dev, handle);
unlock:
	pthread_mutex_unlock(&dev->bo_table_mutex);
	return r;
}
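
/*
 * Usage sketch (illustrative only, not part of the library): importing a
 * DMA-buf fd received from an exporting process.  "dma_buf_fd" is an
 * assumed variable holding that fd; on success the result carries a
 * reference that must be dropped with amdgpu_bo_free().
 *
 *	struct amdgpu_bo_import_result res;
 *	int r;
 *
 *	r = amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
 *			     (uint32_t)dma_buf_fd, &res);
 *	if (r == 0) {
 *		... use res.buf_handle and res.alloc_size ...
 *		amdgpu_bo_free(res.buf_handle);
 *	}
 */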

drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
	struct amdgpu_device *dev;
	struct amdgpu_bo *bo = buf_handle;

	assert(bo != NULL);
	dev = bo->dev;
	pthread_mutex_lock(&dev->bo_table_mutex);

	if (update_references(&bo->refcount, NULL)) {
		/* Remove the buffer from the hash tables. */
		handle_table_remove(&dev->bo_handles, bo->handle);

		if (bo->flink_name)
			handle_table_remove(&dev->bo_flink_names,
					    bo->flink_name);

		/* Release CPU access. */
		if (bo->cpu_map_count > 0) {
			bo->cpu_map_count = 1;
			amdgpu_bo_cpu_unmap(bo);
		}

		amdgpu_close_kms_handle(dev, bo->handle);
		pthread_mutex_destroy(&bo->cpu_access_mutex);
		free(bo);
	}

	pthread_mutex_unlock(&dev->bo_table_mutex);
	return 0;
}

drm_public void amdgpu_bo_inc_ref(amdgpu_bo_handle bo)
{
	atomic_inc(&bo->refcount);
}

drm_public int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
	union drm_amdgpu_gem_mmap args;
	void *ptr;
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);

	if (bo->cpu_ptr) {
		/* already mapped */
		assert(bo->cpu_map_count > 0);
		bo->cpu_map_count++;
		*cpu = bo->cpu_ptr;
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	assert(bo->cpu_map_count == 0);

	memset(&args, 0, sizeof(args));

	/* Query the buffer address (args.addr_ptr).
	 * The kernel driver ignores the offset and size parameters. */
	args.in.handle = bo->handle;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
				sizeof(args));
	if (r) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return r;
	}

	/* Map the buffer. */
	ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       bo->dev->fd, args.out.addr_ptr);
	if (ptr == MAP_FAILED) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -errno;
	}

	bo->cpu_ptr = ptr;
	bo->cpu_map_count = 1;
	pthread_mutex_unlock(&bo->cpu_access_mutex);

	*cpu = ptr;
	return 0;
}

drm_public int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);
	assert(bo->cpu_map_count >= 0);

	if (bo->cpu_map_count == 0) {
		/* not mapped */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -EINVAL;
	}

	bo->cpu_map_count--;
	if (bo->cpu_map_count > 0) {
		/* mapped multiple times */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
	bo->cpu_ptr = NULL;
	pthread_mutex_unlock(&bo->cpu_access_mutex);
	return r;
}
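
/*
 * Usage sketch (illustrative only, not part of the library): mapping a BO
 * for CPU access, clearing it, and unmapping it again.  Assumes the BO was
 * allocated with an alloc_size of 4096.  Mappings are reference counted,
 * so every amdgpu_bo_cpu_map() needs a matching amdgpu_bo_cpu_unmap().
 *
 *	void *cpu;
 *	int r;
 *
 *	r = amdgpu_bo_cpu_map(bo, &cpu);
 *	if (r == 0) {
 *		memset(cpu, 0, 4096);
 *		amdgpu_bo_cpu_unmap(bo);
 *	}
 */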

drm_public int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
				struct amdgpu_buffer_size_alignments *info)
{
	info->size_local = dev->dev_info.pte_fragment_size;
	info->size_remote = dev->dev_info.gart_page_size;
	return 0;
}

drm_public int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
				       uint64_t timeout_ns,
				       bool *busy)
{
	union drm_amdgpu_gem_wait_idle args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = bo->handle;
	args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
				&args, sizeof(args));

	if (r == 0) {
		*busy = args.out.status;
		return 0;
	} else {
		fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
		return r;
	}
}
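
/*
 * Usage sketch (illustrative only, not part of the library): waiting up to
 * one second (timeout given in nanoseconds) for GPU work on a BO to finish.
 * "busy" is set when the buffer was still in use after the timeout expired.
 *
 *	bool busy;
 *	int r;
 *
 *	r = amdgpu_bo_wait_for_idle(bo, 1000000000, &busy);
 *	if (r == 0 && busy)
 *		... the buffer is still busy after one second ...
 */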

drm_public int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
					     void *cpu,
					     uint64_t size,
					     amdgpu_bo_handle *buf_handle,
					     uint64_t *offset_in_bo)
{
	struct amdgpu_bo *bo;
	uint32_t i;
	int r = 0;

	if (cpu == NULL || size == 0)
		return -EINVAL;

	/*
	 * Workaround for a buggy application which tries to import previously
	 * exposed CPU pointers. If we find a real world use case we should
	 * improve that by asking the kernel for the right handle.
	 */
	pthread_mutex_lock(&dev->bo_table_mutex);
	for (i = 0; i < dev->bo_handles.max_key; i++) {
		bo = handle_table_lookup(&dev->bo_handles, i);
		if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
			continue;
		if (cpu >= bo->cpu_ptr &&
		    cpu < (void*)((char *)bo->cpu_ptr + bo->alloc_size))
			break;
	}

	if (i < dev->bo_handles.max_key) {
		atomic_inc(&bo->refcount);
		*buf_handle = bo;
		*offset_in_bo = (uintptr_t)cpu - (uintptr_t)bo->cpu_ptr;
	} else {
		*buf_handle = NULL;
		*offset_in_bo = 0;
		r = -ENXIO;
	}
	pthread_mutex_unlock(&dev->bo_table_mutex);

	return r;
}

drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
					      void *cpu,
					      uint64_t size,
					      amdgpu_bo_handle *buf_handle)
{
	int r;
	struct drm_amdgpu_gem_userptr args;

	args.addr = (uintptr_t)cpu;
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
		AMDGPU_GEM_USERPTR_VALIDATE;
	args.size = size;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
				&args, sizeof(args));
	if (r)
		goto out;

	r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
	if (r) {
		amdgpu_close_kms_handle(dev, args.handle);
		goto out;
	}

	pthread_mutex_lock(&dev->bo_table_mutex);
	r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
				*buf_handle);
	pthread_mutex_unlock(&dev->bo_table_mutex);
	if (r)
		amdgpu_bo_free(*buf_handle);
out:
	return r;
}
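
/*
 * Usage sketch (illustrative only, not part of the library): wrapping
 * anonymous, page-aligned application memory in a userptr BO.  The memory
 * must stay valid and pinned in place for the lifetime of the BO, and the
 * BO must be freed before the memory is released.
 *
 *	uint64_t size = 2 * 4096;
 *	void *mem = NULL;
 *	amdgpu_bo_handle bo;
 *	int r;
 *
 *	r = posix_memalign(&mem, 4096, size);
 *	if (r == 0) {
 *		r = amdgpu_create_bo_from_user_mem(dev, mem, size, &bo);
 *		if (r == 0)
 *			amdgpu_bo_free(bo);
 *		free(mem);
 *	}
 */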

drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
				     uint32_t number_of_resources,
				     amdgpu_bo_handle *resources,
				     uint8_t *resource_prios,
				     amdgpu_bo_list_handle *result)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	*result = malloc(sizeof(struct amdgpu_bo_list));
	if (!*result) {
		free(list);
		return -ENOMEM;
	}

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	if (r) {
		free(*result);
		return r;
	}

	(*result)->dev = dev;
	(*result)->handle = args.out.list_handle;
	return 0;
}

drm_public int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
	union drm_amdgpu_bo_list args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = list->handle;

	r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));

	if (!r)
		free(list);

	return r;
}
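
/*
 * Usage sketch (illustrative only, not part of the library): building a BO
 * list from two already allocated buffers ("bo_a" and "bo_b" are assumed)
 * with default priorities, as a command submission would reference it, and
 * destroying the list afterwards.
 *
 *	amdgpu_bo_handle resources[2] = { bo_a, bo_b };
 *	amdgpu_bo_list_handle list;
 *	int r;
 *
 *	r = amdgpu_bo_list_create(dev, 2, resources, NULL, &list);
 *	if (r == 0)
 *		amdgpu_bo_list_destroy(list);
 */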

drm_public int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
				     uint32_t number_of_resources,
				     amdgpu_bo_handle *resources,
				     uint8_t *resource_prios)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
	args.in.list_handle = handle->handle;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	return r;
}

drm_public int amdgpu_bo_va_op(amdgpu_bo_handle bo,
			       uint64_t offset,
			       uint64_t size,
			       uint64_t addr,
			       uint64_t flags,
			       uint32_t ops)
{
	amdgpu_device_handle dev = bo->dev;

	size = ALIGN(size, getpagesize());

	return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
				   AMDGPU_VM_PAGE_READABLE |
				   AMDGPU_VM_PAGE_WRITEABLE |
				   AMDGPU_VM_PAGE_EXECUTABLE, ops);
}

drm_public int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
				   amdgpu_bo_handle bo,
				   uint64_t offset,
				   uint64_t size,
				   uint64_t addr,
				   uint64_t flags,
				   uint32_t ops)
{
	struct drm_amdgpu_gem_va va;
	int r;

	if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
	    ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
		return -EINVAL;

	memset(&va, 0, sizeof(va));
	va.handle = bo ? bo->handle : 0;
	va.operation = ops;
	va.flags = flags;
	va.va_address = addr;
	va.offset_in_bo = offset;
	va.map_size = size;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

	return r;
}
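
/*
 * Usage sketch (illustrative only, not part of the library): mapping a BO
 * into the GPU virtual address space and unmapping it again.  The VA range
 * comes from amdgpu_va_range_alloc() (implemented in amdgpu_vamgr.c), and
 * "bo_size" is assumed to be the BO's allocation size.
 *
 *	amdgpu_va_handle va_handle;
 *	uint64_t gpu_addr;
 *	int r;
 *
 *	r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
 *				  bo_size, 4096, 0, &gpu_addr,
 *				  &va_handle, 0);
 *	if (r == 0) {
 *		r = amdgpu_bo_va_op(bo, 0, bo_size, gpu_addr, 0,
 *				    AMDGPU_VA_OP_MAP);
 *		if (r == 0)
 *			amdgpu_bo_va_op(bo, 0, bo_size, gpu_addr, 0,
 *					AMDGPU_VA_OP_UNMAP);
 *		amdgpu_va_range_free(va_handle);
 *	}
 */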