/* amdgpu_bo.c (revision d8807b2f) */
/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"
#include "util_math.h"
static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
				     uint32_t handle)
{
	struct drm_gem_close args = {};

	args.handle = handle;
	drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

int amdgpu_bo_alloc(amdgpu_device_handle dev,
		    struct amdgpu_bo_alloc_request *alloc_buffer,
		    amdgpu_bo_handle *buf_handle)
{
	struct amdgpu_bo *bo;
	union drm_amdgpu_gem_create args;
	unsigned heap = alloc_buffer->preferred_heap;
	int r = 0;

	/* It's an error if the heap is not specified */
	if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
		return -EINVAL;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo)
		return -ENOMEM;

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = alloc_buffer->alloc_size;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = alloc_buffer->alloc_size;
	args.in.alignment = alloc_buffer->phys_alignment;

	/* Set the placement. */
	args.in.domains = heap;
	args.in.domain_flags = alloc_buffer->flags;

	/* Allocate the buffer with the preferred heap. */
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
				&args, sizeof(args));
	if (r) {
		free(bo);
		return r;
	}

	bo->handle = args.out.handle;

	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	*buf_handle = bo;
	return 0;
}
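
/* Usage sketch (illustrative only, not part of this file): allocate a 4 KiB
 * buffer in GTT. "dev" is assumed to come from a prior
 * amdgpu_device_initialize() call; error handling is minimal.
 *
 *	struct amdgpu_bo_alloc_request req = {0};
 *	amdgpu_bo_handle buf;
 *	int r;
 *
 *	req.alloc_size = 4096;
 *	req.phys_alignment = 4096;
 *	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
 *	r = amdgpu_bo_alloc(dev, &req, &buf);
 *	if (r)
 *		fprintf(stderr, "amdgpu: bo_alloc failed (%i)\n", r);
 */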

int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
			   struct amdgpu_bo_metadata *info)
{
	struct drm_amdgpu_gem_metadata args = {};

	args.handle = bo->handle;
	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
	args.data.flags = info->flags;
	args.data.tiling_info = info->tiling_info;

	if (info->size_metadata > sizeof(args.data.data))
		return -EINVAL;

	if (info->size_metadata) {
		args.data.data_size_bytes = info->size_metadata;
		memcpy(args.data.data, info->umd_metadata, info->size_metadata);
	}

	return drmCommandWriteRead(bo->dev->fd,
				   DRM_AMDGPU_GEM_METADATA,
				   &args, sizeof(args));
}
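
/* Usage sketch (illustrative only): attach a small, opaque UMD metadata blob
 * to a buffer. The payload here is made up; real drivers store their own
 * tiling/format description in it.
 *
 *	struct amdgpu_bo_metadata meta = {0};
 *	uint32_t blob = 0x12345678;
 *
 *	meta.size_metadata = sizeof(blob);
 *	memcpy(meta.umd_metadata, &blob, sizeof(blob));
 *	r = amdgpu_bo_set_metadata(buf, &meta);
 */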

int amdgpu_bo_query_info(amdgpu_bo_handle bo,
			 struct amdgpu_bo_info *info)
{
	struct drm_amdgpu_gem_metadata metadata = {};
	struct drm_amdgpu_gem_create_in bo_info = {};
	struct drm_amdgpu_gem_op gem_op = {};
	int r;

	/* Validate the BO passed in */
	if (!bo->handle)
		return -EINVAL;

	/* Query metadata. */
	metadata.handle = bo->handle;
	metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
				&metadata, sizeof(metadata));
	if (r)
		return r;

	if (metadata.data.data_size_bytes >
	    sizeof(info->metadata.umd_metadata))
		return -EINVAL;

	/* Query buffer info. */
	gem_op.handle = bo->handle;
	gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
	gem_op.value = (uintptr_t)&bo_info;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
				&gem_op, sizeof(gem_op));
	if (r)
		return r;

	memset(info, 0, sizeof(*info));
	info->alloc_size = bo_info.bo_size;
	info->phys_alignment = bo_info.alignment;
	info->preferred_heap = bo_info.domains;
	info->alloc_flags = bo_info.domain_flags;
	info->metadata.flags = metadata.data.flags;
	info->metadata.tiling_info = metadata.data.tiling_info;

	info->metadata.size_metadata = metadata.data.data_size_bytes;
	if (metadata.data.data_size_bytes > 0)
		memcpy(info->metadata.umd_metadata, metadata.data.data,
		       metadata.data.data_size_bytes);

	return 0;
}
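
/* Usage sketch (illustrative only): read back the allocation parameters and
 * metadata of a buffer, e.g. one imported from another process.
 *
 *	struct amdgpu_bo_info info;
 *
 *	r = amdgpu_bo_query_info(buf, &info);
 *	if (!r)
 *		printf("size %llu, domains 0x%x\n",
 *		       (unsigned long long)info.alloc_size,
 *		       info.preferred_heap);
 */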

static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
{
	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_set(bo->dev->bo_handles,
			    (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);
}

static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
	struct drm_gem_flink flink;
	int fd, dma_fd;
	uint32_t handle;
	int r;

	fd = bo->dev->fd;
	handle = bo->handle;
	if (bo->flink_name)
		return 0;

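	/* Flink names cannot be created on a render node, so when the device
	 * fd and the flink fd differ, convert the handle to a dma-buf fd and
	 * re-import it on the flink (primary node) fd first. */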
	if (bo->dev->flink_fd != bo->dev->fd) {
		r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				       &dma_fd);
		if (!r) {
			r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
			close(dma_fd);
		}
		if (r)
			return r;
		fd = bo->dev->flink_fd;
	}
	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;

	r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	if (r)
		return r;

	bo->flink_name = flink.name;

	if (bo->dev->flink_fd != bo->dev->fd) {
		struct drm_gem_close args = {};
		args.handle = handle;
		drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
	}

	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_set(bo->dev->bo_flink_names,
			    (void*)(uintptr_t)bo->flink_name,
			    bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	return 0;
}

int amdgpu_bo_export(amdgpu_bo_handle bo,
		     enum amdgpu_bo_handle_type type,
		     uint32_t *shared_handle)
{
	int r;

	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		r = amdgpu_bo_export_flink(bo);
		if (r)
			return r;

		*shared_handle = bo->flink_name;
		return 0;

	case amdgpu_bo_handle_type_kms:
		amdgpu_add_handle_to_table(bo);
		*shared_handle = bo->handle;
		return 0;

	case amdgpu_bo_handle_type_dma_buf_fd:
		amdgpu_add_handle_to_table(bo);
		return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
					  (int*)shared_handle);
	}
	return -EINVAL;
}
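
/* Usage sketch (illustrative only): export a buffer as a dma-buf fd so that
 * another process or API can import it. Note the fd is returned through a
 * uint32_t out-parameter.
 *
 *	uint32_t shared_fd;
 *
 *	r = amdgpu_bo_export(buf, amdgpu_bo_handle_type_dma_buf_fd,
 *			     &shared_fd);
 */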

int amdgpu_bo_import(amdgpu_device_handle dev,
		     enum amdgpu_bo_handle_type type,
		     uint32_t shared_handle,
		     struct amdgpu_bo_import_result *output)
{
	struct drm_gem_open open_arg = {};
	struct amdgpu_bo *bo = NULL;
	int r;
	int dma_fd;
	uint64_t dma_buf_size = 0;

	/* We must maintain a list of pairs <handle, bo>, so that we always
	 * return the same amdgpu_bo instance for the same handle. */
	pthread_mutex_lock(&dev->bo_table_mutex);

	/* Convert a DMA buf handle to a KMS handle now. */
	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
		uint32_t handle;
		off_t size;

		/* Get a KMS handle. */
		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
		if (r) {
			pthread_mutex_unlock(&dev->bo_table_mutex);
			return r;
		}

		/* Query the buffer size. */
		size = lseek(shared_handle, 0, SEEK_END);
		if (size == (off_t)-1) {
			pthread_mutex_unlock(&dev->bo_table_mutex);
			amdgpu_close_kms_handle(dev, handle);
			return -errno;
		}
		lseek(shared_handle, 0, SEEK_SET);

		dma_buf_size = size;
		shared_handle = handle;
	}

	/* If we have already created a buffer with this handle, find it. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		bo = util_hash_table_get(dev->bo_flink_names,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo = util_hash_table_get(dev->bo_handles,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_kms:
		/* Importing a KMS handle is not allowed. */
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EPERM;

	default:
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EINVAL;
	}

	if (bo) {
		/* The buffer already exists, just bump the refcount. */
		atomic_inc(&bo->refcount);
		pthread_mutex_unlock(&dev->bo_table_mutex);

		output->buf_handle = bo;
		output->alloc_size = bo->alloc_size;
		return 0;
	}

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);
		if (type == amdgpu_bo_handle_type_dma_buf_fd) {
			amdgpu_close_kms_handle(dev, shared_handle);
		}
		return -ENOMEM;
	}

	/* Open the handle. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		open_arg.name = shared_handle;
		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
		if (r) {
			free(bo);
			pthread_mutex_unlock(&dev->bo_table_mutex);
			return r;
		}

		bo->handle = open_arg.handle;
		if (dev->flink_fd != dev->fd) {
			r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
			r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);

			close(dma_fd);

			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
		}
		bo->flink_name = shared_handle;
		bo->alloc_size = open_arg.size;
		util_hash_table_set(dev->bo_flink_names,
				    (void*)(uintptr_t)bo->flink_name, bo);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo->handle = shared_handle;
		bo->alloc_size = dma_buf_size;
		break;

	case amdgpu_bo_handle_type_kms:
		assert(0); /* unreachable */
	}

	/* Initialize it. */
	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&dev->bo_table_mutex);

	output->buf_handle = bo;
	output->alloc_size = bo->alloc_size;
	return 0;
}
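
/* Usage sketch (illustrative only): import a dma-buf fd received from
 * another process (e.g. over a Unix socket) as an amdgpu buffer.
 *
 *	struct amdgpu_bo_import_result res;
 *
 *	r = amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
 *			     shared_fd, &res);
 *	if (!r)
 *		printf("imported %llu bytes\n",
 *		       (unsigned long long)res.alloc_size);
 */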

int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
	struct amdgpu_device *dev;
	struct amdgpu_bo *bo = buf_handle;

	assert(bo != NULL);
	dev = bo->dev;
	pthread_mutex_lock(&dev->bo_table_mutex);

	if (update_references(&bo->refcount, NULL)) {
		/* Remove the buffer from the hash tables. */
		util_hash_table_remove(dev->bo_handles,
					(void*)(uintptr_t)bo->handle);

		if (bo->flink_name) {
			util_hash_table_remove(dev->bo_flink_names,
						(void*)(uintptr_t)bo->flink_name);
		}

		/* Release CPU access. */
		if (bo->cpu_map_count > 0) {
			bo->cpu_map_count = 1;
			amdgpu_bo_cpu_unmap(bo);
		}

		amdgpu_close_kms_handle(dev, bo->handle);
		pthread_mutex_destroy(&bo->cpu_access_mutex);
		free(bo);
	}

	pthread_mutex_unlock(&dev->bo_table_mutex);
	return 0;
}

int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
	union drm_amdgpu_gem_mmap args;
	void *ptr;
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);

	if (bo->cpu_ptr) {
		/* already mapped */
		assert(bo->cpu_map_count > 0);
		bo->cpu_map_count++;
		*cpu = bo->cpu_ptr;
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	assert(bo->cpu_map_count == 0);

	memset(&args, 0, sizeof(args));

	/* Query the buffer address (args.addr_ptr).
	 * The kernel driver ignores the offset and size parameters. */
	args.in.handle = bo->handle;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
				sizeof(args));
	if (r) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return r;
	}

	/* Map the buffer. */
	ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       bo->dev->fd, args.out.addr_ptr);
	if (ptr == MAP_FAILED) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -errno;
	}

	bo->cpu_ptr = ptr;
	bo->cpu_map_count = 1;
	pthread_mutex_unlock(&bo->cpu_access_mutex);

	*cpu = ptr;
	return 0;
}
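
/* Usage sketch (illustrative only): map a buffer, fill it from the CPU, then
 * drop the mapping. Mappings are reference-counted, so nested map/unmap
 * pairs are fine.
 *
 *	void *cpu;
 *
 *	r = amdgpu_bo_cpu_map(buf, &cpu);
 *	if (!r) {
 *		memset(cpu, 0, 4096);
 *		amdgpu_bo_cpu_unmap(buf);
 *	}
 */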

int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);
	assert(bo->cpu_map_count >= 0);

	if (bo->cpu_map_count == 0) {
		/* not mapped */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -EINVAL;
	}

	bo->cpu_map_count--;
	if (bo->cpu_map_count > 0) {
		/* mapped multiple times */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
	bo->cpu_ptr = NULL;
	pthread_mutex_unlock(&bo->cpu_access_mutex);
	return r;
}

int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
				struct amdgpu_buffer_size_alignments *info)
{
	info->size_local = dev->dev_info.pte_fragment_size;
	info->size_remote = dev->dev_info.gart_page_size;
	return 0;
}

int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
			    uint64_t timeout_ns,
			    bool *busy)
{
	union drm_amdgpu_gem_wait_idle args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = bo->handle;
	args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
				&args, sizeof(args));

	if (r == 0) {
		*busy = args.out.status;
		return 0;
	} else {
		fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
		return r;
	}
}
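
/* Usage sketch (illustrative only): block for up to one second waiting for
 * pending GPU work on the buffer to finish.
 *
 *	bool busy;
 *
 *	r = amdgpu_bo_wait_for_idle(buf, 1000000000ull, &busy);
 *	if (!r && busy)
 *		fprintf(stderr, "amdgpu: buffer still busy after 1s\n");
 */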

int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
				    void *cpu,
				    uint64_t size,
				    amdgpu_bo_handle *buf_handle)
{
	int r;
	struct amdgpu_bo *bo;
	struct drm_amdgpu_gem_userptr args;

	args.addr = (uintptr_t)cpu;
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
		AMDGPU_GEM_USERPTR_VALIDATE;
	args.size = size;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
				&args, sizeof(args));
	if (r)
		return r;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo) {
		/* Don't leak the kernel handle if allocation fails. */
		amdgpu_close_kms_handle(dev, args.handle);
		return -ENOMEM;
	}

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = size;
	bo->handle = args.handle;

	*buf_handle = bo;

	return r;
}
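
/* Usage sketch (illustrative only): wrap an existing CPU allocation so the
 * GPU can access it. The kernel expects the address and size to be
 * page-aligned, and the memory must stay valid for the buffer's lifetime.
 *
 *	uint64_t size = 4096;
 *	void *mem = NULL;
 *	amdgpu_bo_handle ubuf;
 *
 *	posix_memalign(&mem, getpagesize(), size);
 *	r = amdgpu_create_bo_from_user_mem(dev, mem, size, &ubuf);
 */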

int amdgpu_bo_list_create(amdgpu_device_handle dev,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios,
			  amdgpu_bo_list_handle *result)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	*result = malloc(sizeof(struct amdgpu_bo_list));
	if (!*result) {
		free(list);
		return -ENOMEM;
	}

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	if (r) {
		free(*result);
		return r;
	}

	(*result)->dev = dev;
	(*result)->handle = args.out.list_handle;
	return 0;
}
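
/* Usage sketch (illustrative only): build a BO list for command submission
 * from two buffers with default priorities, then destroy it once no
 * submission references it anymore.
 *
 *	amdgpu_bo_handle bos[2] = { buf_a, buf_b };
 *	amdgpu_bo_list_handle bo_list;
 *
 *	r = amdgpu_bo_list_create(dev, 2, bos, NULL, &bo_list);
 *	(command submission would reference bo_list here)
 *	amdgpu_bo_list_destroy(bo_list);
 */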

int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
	union drm_amdgpu_bo_list args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = list->handle;

	r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));

	if (!r)
		free(list);

	return r;
}

int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
	args.in.list_handle = handle->handle;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	return r;
}

int amdgpu_bo_va_op(amdgpu_bo_handle bo,
		     uint64_t offset,
		     uint64_t size,
		     uint64_t addr,
		     uint64_t flags,
		     uint32_t ops)
{
	amdgpu_device_handle dev = bo->dev;

	size = ALIGN(size, getpagesize());

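	/* Note: the "flags" argument is ignored here; this legacy entry point
	 * always maps with fixed RWX permissions via amdgpu_bo_va_op_raw(). */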
	return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
				   AMDGPU_VM_PAGE_READABLE |
				   AMDGPU_VM_PAGE_WRITEABLE |
				   AMDGPU_VM_PAGE_EXECUTABLE, ops);
}

int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
			amdgpu_bo_handle bo,
			uint64_t offset,
			uint64_t size,
			uint64_t addr,
			uint64_t flags,
			uint32_t ops)
{
	struct drm_amdgpu_gem_va va;
	int r;

	if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
	    ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
		return -EINVAL;

	memset(&va, 0, sizeof(va));
	va.handle = bo ? bo->handle : 0;
	va.operation = ops;
	va.flags = flags;
	va.va_address = addr;
	va.offset_in_bo = offset;
	va.map_size = size;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

	return r;
}
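
/* Usage sketch (illustrative only): reserve a GPU virtual address range with
 * amdgpu_va_range_alloc() and map the buffer there. "alloc_size" mirrors the
 * size used when the buffer was allocated.
 *
 *	uint64_t va_addr, alloc_size = 4096;
 *	amdgpu_va_handle va_handle;
 *
 *	r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
 *				  alloc_size, 4096, 0, &va_addr,
 *				  &va_handle, 0);
 *	if (!r)
 *		r = amdgpu_bo_va_op(buf, 0, alloc_size, va_addr, 0,
 *				    AMDGPU_VA_OP_MAP);
 */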