/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <sys/ioctl.h>
#if HAVE_ALLOCA_H
# include <alloca.h>
#endif

#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"

static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem);
static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem);

/**
 * Create command submission context
 *
 * \param   dev      - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   priority - \c [in] Context priority. See AMDGPU_CTX_PRIORITY_*
 * \param   context  - \c [out] GPU Context handle
 *
 * \return  0 on success otherwise POSIX Error code
*/
drm_public int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
				     uint32_t priority,
				     amdgpu_context_handle *context)
{
	struct amdgpu_context *gpu_context;
	union drm_amdgpu_ctx args;
	int i, j, k;
	int r;
	char *override_priority;

	if (!dev || !context)
		return -EINVAL;

	override_priority = getenv("AMD_PRIORITY");
	if (override_priority) {
		/* The priority is a signed integer, so parse it with %i even
		 * though the parameter is declared unsigned. If parsing
		 * fails, priority is unchanged.
		 */
		if (sscanf(override_priority, "%i", &priority) == 1) {
			printf("amdgpu: context priority changed to %i\n",
			       priority);
		}
	}

	gpu_context = calloc(1, sizeof(struct amdgpu_context));
	if (!gpu_context)
		return -ENOMEM;

	gpu_context->dev = dev;

	r = pthread_mutex_init(&gpu_context->sequence_mutex, NULL);
	if (r)
		goto error;

	/* Create the context */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	args.in.priority = priority;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
	if (r)
		goto error;

	gpu_context->id = args.out.alloc.ctx_id;
	for (i = 0; i < AMDGPU_HW_IP_NUM; i++)
		for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++)
			for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++)
				list_inithead(&gpu_context->sem_list[i][j][k]);
	*context = (amdgpu_context_handle)gpu_context;

	return 0;

error:
	pthread_mutex_destroy(&gpu_context->sequence_mutex);
	free(gpu_context);
	return r;
}

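/**
 * Create command submission context with normal priority
 *
 * \param   dev     - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   context - \c [out] GPU Context handle
 *
 * \return  0 on success otherwise POSIX Error code
*/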
drm_public int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
				    amdgpu_context_handle *context)
{
	return amdgpu_cs_ctx_create2(dev, AMDGPU_CTX_PRIORITY_NORMAL, context);
}

/**
 * Release command submission context
 *
 * \param   context - \c [in] amdgpu context handle
 *
 * \return  0 on success otherwise POSIX Error code
*/
drm_public int amdgpu_cs_ctx_free(amdgpu_context_handle context)
{
	union drm_amdgpu_ctx args;
	int i, j, k;
	int r;

	if (!context)
		return -EINVAL;

	pthread_mutex_destroy(&context->sequence_mutex);

	/* now deal with kernel side */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_FREE_CTX;
	args.in.ctx_id = context->id;
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
				&args, sizeof(args));
	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
		for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++) {
			for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++) {
				amdgpu_semaphore_handle sem, tmp;
				LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, &context->sem_list[i][j][k], list) {
					list_del(&sem->list);
					amdgpu_cs_reset_sem(sem);
					amdgpu_cs_unreference_sem(sem);
				}
			}
		}
	}
	free(context);

	return r;
}

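/**
 * Override the priority of an existing context
 *
 * \param   dev       - \c [in] Device handle
 * \param   context   - \c [in] Context whose priority is changed
 * \param   master_fd - \c [in] DRM master fd used to authorize the override
 * \param   priority  - \c [in] New priority. See AMDGPU_CTX_PRIORITY_*
 *
 * \return  0 on success otherwise POSIX Error code
*/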
drm_public int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
					       amdgpu_context_handle context,
					       int master_fd,
					       unsigned priority)
{
	union drm_amdgpu_sched args;
	int r;

	if (!dev || !context || master_fd < 0)
		return -EINVAL;

	memset(&args, 0, sizeof(args));

	args.in.op = AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE;
	args.in.fd = dev->fd;
	args.in.priority = priority;
	args.in.ctx_id = context->id;

	r = drmCommandWrite(master_fd, DRM_AMDGPU_SCHED, &args, sizeof(args));
	if (r)
		return r;

	return 0;
}

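/**
 * Query or set the stable pstate of a context
 *
 * \param   context   - \c [in] GPU Context
 * \param   op        - \c [in] AMDGPU_CTX_OP_{GET,SET}_STABLE_PSTATE
 * \param   flags     - \c [in] New pstate flags (for the SET op)
 * \param   out_flags - \c [out] Current pstate flags, may be NULL
 *
 * \return  0 on success otherwise POSIX Error code
*/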
drm_public int amdgpu_cs_ctx_stable_pstate(amdgpu_context_handle context,
					   uint32_t op,
					   uint32_t flags,
					   uint32_t *out_flags)
{
	union drm_amdgpu_ctx args;
	int r;

	if (!context)
		return -EINVAL;

	memset(&args, 0, sizeof(args));
	args.in.op = op;
	args.in.ctx_id = context->id;
	args.in.flags = flags;
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
				&args, sizeof(args));
	if (!r && out_flags)
		*out_flags = args.out.pstate.flags;
	return r;
}

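/**
 * Query whether the context has been reset by a GPU hang
 *
 * \param   context - \c [in] GPU Context
 * \param   state   - \c [out] One of the AMDGPU_CTX_*_RESET status codes
 * \param   hangs   - \c [out] Number of hangs caused by this context
 *
 * \return  0 on success otherwise POSIX Error code
*/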
drm_public int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
					   uint32_t *state, uint32_t *hangs)
{
	union drm_amdgpu_ctx args;
	int r;

	if (!context)
		return -EINVAL;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_QUERY_STATE;
	args.in.ctx_id = context->id;
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
				&args, sizeof(args));
	if (!r) {
		*state = args.out.state.reset_status;
		*hangs = args.out.state.hangs;
	}
	return r;
}

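/**
 * Query the reset state of a context as a flag word
 *
 * \param   context - \c [in] GPU Context
 * \param   flags   - \c [out] AMDGPU_CTX_QUERY2_FLAGS_* bits
 *
 * \return  0 on success otherwise POSIX Error code
*/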
drm_public int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
					    uint64_t *flags)
{
	union drm_amdgpu_ctx args;
	int r;

	if (!context)
		return -EINVAL;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
	args.in.ctx_id = context->id;
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
				&args, sizeof(args));
	if (!r)
		*flags = args.out.state.flags;
	return r;
}

/**
 * Submit one command submission request to kernel DRM
 *
 * \param   context     - \c [in] GPU Context
 * \param   ibs_request - \c [in] Pointer to the submission request; on
 *                        success its seq_no field receives the fence
 *                        sequence number
 *
 * \return  0 on success otherwise POSIX Error code
 * \sa amdgpu_cs_submit()
*/
static int amdgpu_cs_submit_one(amdgpu_context_handle context,
				struct amdgpu_cs_request *ibs_request)
{
	struct drm_amdgpu_cs_chunk *chunks;
	struct drm_amdgpu_cs_chunk_data *chunk_data;
	struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
	struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
	amdgpu_device_handle dev = context->dev;
	struct list_head *sem_list;
	amdgpu_semaphore_handle sem, tmp;
	uint32_t i, size, num_chunks, bo_list_handle = 0, sem_count = 0;
	uint64_t seq_no;
	bool user_fence;
	int r = 0;

	if (ibs_request->ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (ibs_request->ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	if (ibs_request->number_of_ibs == 0) {
		ibs_request->seq_no = AMDGPU_NULL_SUBMIT_SEQ;
		return 0;
	}
	user_fence = (ibs_request->fence_info.handle != NULL);

	/* IB chunks, optional fence chunk, plus one chunk each for the
	 * explicit dependencies and the semaphore dependencies.
	 */
	size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1;

	chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);

	size = ibs_request->number_of_ibs + (user_fence ? 1 : 0);

	chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);

	if (ibs_request->resources)
		bo_list_handle = ibs_request->resources->handle;
	num_chunks = ibs_request->number_of_ibs;
	/* IB chunks */
	for (i = 0; i < ibs_request->number_of_ibs; i++) {
		struct amdgpu_cs_ib_info *ib;
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		ib = &ibs_request->ibs[i];

		chunk_data[i].ib_data._pad = 0;
		chunk_data[i].ib_data.va_start = ib->ib_mc_address;
		chunk_data[i].ib_data.ib_bytes = ib->size * 4;
		chunk_data[i].ib_data.ip_type = ibs_request->ip_type;
		chunk_data[i].ib_data.ip_instance = ibs_request->ip_instance;
		chunk_data[i].ib_data.ring = ibs_request->ring;
		chunk_data[i].ib_data.flags = ib->flags;
	}

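	/* The mutex serializes submissions on this context: it protects the
	 * per-ring semaphore lists consumed below and keeps last_seq
	 * consistent with the sequence number the kernel hands back.
	 */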
	pthread_mutex_lock(&context->sequence_mutex);

	if (user_fence) {
		i = num_chunks++;

		/* fence chunk */
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		/* fence bo handle */
		chunk_data[i].fence_data.handle = ibs_request->fence_info.handle->handle;
		/* offset */
		chunk_data[i].fence_data.offset =
			ibs_request->fence_info.offset * sizeof(uint64_t);
	}

	if (ibs_request->number_of_dependencies) {
		dependencies = alloca(sizeof(struct drm_amdgpu_cs_chunk_dep) *
			ibs_request->number_of_dependencies);
		if (!dependencies) {
			r = -ENOMEM;
			goto error_unlock;
		}

		for (i = 0; i < ibs_request->number_of_dependencies; ++i) {
			struct amdgpu_cs_fence *info = &ibs_request->dependencies[i];
			struct drm_amdgpu_cs_chunk_dep *dep = &dependencies[i];
			dep->ip_type = info->ip_type;
			dep->ip_instance = info->ip_instance;
			dep->ring = info->ring;
			dep->ctx_id = info->context->id;
			dep->handle = info->fence;
		}

		i = num_chunks++;

		/* dependencies chunk */
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4
			* ibs_request->number_of_dependencies;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)dependencies;
	}

	sem_list = &context->sem_list[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring];
	LIST_FOR_EACH_ENTRY(sem, sem_list, list)
		sem_count++;
	if (sem_count) {
		sem_dependencies = alloca(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_count);
		if (!sem_dependencies) {
			r = -ENOMEM;
			goto error_unlock;
		}
		sem_count = 0;
		LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, sem_list, list) {
			struct amdgpu_cs_fence *info = &sem->signal_fence;
			struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
			dep->ip_type = info->ip_type;
			dep->ip_instance = info->ip_instance;
			dep->ring = info->ring;
			dep->ctx_id = info->context->id;
			dep->handle = info->fence;

			list_del(&sem->list);
			amdgpu_cs_reset_sem(sem);
			amdgpu_cs_unreference_sem(sem);
		}
		i = num_chunks++;

		/* semaphore dependencies chunk */
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
	}

	r = amdgpu_cs_submit_raw2(dev, context, bo_list_handle, num_chunks,
				  chunks, &seq_no);
	if (r)
		goto error_unlock;

	ibs_request->seq_no = seq_no;
	context->last_seq[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring] = ibs_request->seq_no;
error_unlock:
	pthread_mutex_unlock(&context->sequence_mutex);
	return r;
}

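/**
 * Submit an array of command submission requests
 *
 * \param   context            - \c [in] GPU Context
 * \param   flags              - \c [in] Global submission flags (currently
 *                               unused)
 * \param   ibs_request        - \c [in] Array of submission requests; each
 *                               request's seq_no field receives the resulting
 *                               fence sequence number
 * \param   number_of_requests - \c [in] Number of entries in ibs_request
 *
 * \return  0 on success otherwise POSIX Error code
*/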
drm_public int amdgpu_cs_submit(amdgpu_context_handle context,
				uint64_t flags,
				struct amdgpu_cs_request *ibs_request,
				uint32_t number_of_requests)
{
	uint32_t i;
	int r;

	if (!context || !ibs_request)
		return -EINVAL;

	r = 0;
	for (i = 0; i < number_of_requests; i++) {
		r = amdgpu_cs_submit_one(context, ibs_request);
		if (r)
			break;
		ibs_request++;
	}

	return r;
}

/**
 * Calculate absolute timeout.
 *
 * \param   timeout - \c [in] timeout in nanoseconds.
 *
 * \return  absolute timeout in nanoseconds
*/
drm_private uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout)
{
	int r;

	if (timeout != AMDGPU_TIMEOUT_INFINITE) {
		struct timespec current;
		uint64_t current_ns;
		r = clock_gettime(CLOCK_MONOTONIC, &current);
		if (r) {
			fprintf(stderr, "clock_gettime() returned error (%d)!\n",
				errno);
			return AMDGPU_TIMEOUT_INFINITE;
		}

		current_ns = ((uint64_t)current.tv_sec) * 1000000000ull;
		current_ns += current.tv_nsec;
		timeout += current_ns;
		/* Overflow means the timeout is effectively infinite. */
		if (timeout < current_ns)
			timeout = AMDGPU_TIMEOUT_INFINITE;
	}
	return timeout;
}

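/**
 * Wrapper for the WAIT_CS ioctl: wait up to timeout_ns for the fence
 * identified by (ip, ip_instance, ring, handle) and report in *busy
 * whether it was still pending when the wait returned.
 */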
static int amdgpu_ioctl_wait_cs(amdgpu_context_handle context,
				unsigned ip,
				unsigned ip_instance,
				uint32_t ring,
				uint64_t handle,
				uint64_t timeout_ns,
				uint64_t flags,
				bool *busy)
{
	amdgpu_device_handle dev = context->dev;
	union drm_amdgpu_wait_cs args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = handle;
	args.in.ip_type = ip;
	args.in.ip_instance = ip_instance;
	args.in.ring = ring;
	args.in.ctx_id = context->id;

	if (flags & AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE)
		args.in.timeout = timeout_ns;
	else
		args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args);
	if (r)
		return -errno;

	*busy = args.out.status;
	return 0;
}

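/**
 * Query the status of a command submission fence
 *
 * \param   fence      - \c [in] Fence to query
 * \param   timeout_ns - \c [in] How long to wait before returning
 * \param   flags      - \c [in] AMDGPU_QUERY_FENCE_* flags
 * \param   expired    - \c [out] Set to true if the fence has signaled
 *
 * \return  0 on success otherwise POSIX Error code
*/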
drm_public int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
					    uint64_t timeout_ns,
					    uint64_t flags,
					    uint32_t *expired)
{
	bool busy = true;
	int r;

	if (!fence || !expired || !fence->context)
		return -EINVAL;
	if (fence->ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (fence->ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	if (fence->fence == AMDGPU_NULL_SUBMIT_SEQ) {
		*expired = true;
		return 0;
	}

	*expired = false;

	r = amdgpu_ioctl_wait_cs(fence->context, fence->ip_type,
				 fence->ip_instance, fence->ring,
				 fence->fence, timeout_ns, flags, &busy);

	if (!r && !busy)
		*expired = true;

	return r;
}

static int amdgpu_ioctl_wait_fences(struct amdgpu_cs_fence *fences,
				    uint32_t fence_count,
				    bool wait_all,
				    uint64_t timeout_ns,
				    uint32_t *status,
				    uint32_t *first)
{
	struct drm_amdgpu_fence *drm_fences;
	amdgpu_device_handle dev = fences[0].context->dev;
	union drm_amdgpu_wait_fences args;
	int r;
	uint32_t i;

	drm_fences = alloca(sizeof(struct drm_amdgpu_fence) * fence_count);
	for (i = 0; i < fence_count; i++) {
		drm_fences[i].ctx_id = fences[i].context->id;
		drm_fences[i].ip_type = fences[i].ip_type;
		drm_fences[i].ip_instance = fences[i].ip_instance;
		drm_fences[i].ring = fences[i].ring;
		drm_fences[i].seq_no = fences[i].fence;
	}

	memset(&args, 0, sizeof(args));
	args.in.fences = (uint64_t)(uintptr_t)drm_fences;
	args.in.fence_count = fence_count;
	args.in.wait_all = wait_all;
	args.in.timeout_ns = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args);
	if (r)
		return -errno;

	*status = args.out.status;

	if (first)
		*first = args.out.first_signaled;

	return 0;
}

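/**
 * Wait for one or all of an array of fences to signal
 *
 * \param   fences      - \c [in] Array of fences to wait on
 * \param   fence_count - \c [in] Number of entries in fences
 * \param   wait_all    - \c [in] If true wait for all fences, else for any
 * \param   timeout_ns  - \c [in] Relative timeout in nanoseconds
 * \param   status      - \c [out] Nonzero if the wait condition was met
 *                        before the timeout expired
 * \param   first       - \c [out] Index of the first signaled fence,
 *                        may be NULL
 *
 * \return  0 on success otherwise POSIX Error code
*/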
drm_public int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
				     uint32_t fence_count,
				     bool wait_all,
				     uint64_t timeout_ns,
				     uint32_t *status,
				     uint32_t *first)
{
	uint32_t i;

	/* Sanity check */
	if (!fences || !status || !fence_count)
		return -EINVAL;

	for (i = 0; i < fence_count; i++) {
		if (NULL == fences[i].context)
			return -EINVAL;
		if (fences[i].ip_type >= AMDGPU_HW_IP_NUM)
			return -EINVAL;
		if (fences[i].ring >= AMDGPU_CS_MAX_RINGS)
			return -EINVAL;
	}

	*status = 0;

	return amdgpu_ioctl_wait_fences(fences, fence_count, wait_all,
					timeout_ns, status, first);
}

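/**
 * Create a semaphore for cross-ring synchronization within this process
 *
 * \param   sem - \c [out] Semaphore handle, initialized with a refcount of 1
 *
 * \return  0 on success otherwise POSIX Error code
*/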
drm_public int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
{
	struct amdgpu_semaphore *gpu_semaphore;

	if (!sem)
		return -EINVAL;

	gpu_semaphore = calloc(1, sizeof(struct amdgpu_semaphore));
	if (!gpu_semaphore)
		return -ENOMEM;

	atomic_set(&gpu_semaphore->refcount, 1);
	*sem = gpu_semaphore;

	return 0;
}

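/**
 * Attach the last fence submitted on the given ring to the semaphore
 *
 * \param   ctx         - \c [in] Context that performed the last submission
 * \param   ip_type     - \c [in] Hardware IP block type
 * \param   ip_instance - \c [in] Index of the IP block of the same type
 * \param   ring        - \c [in] Ring index
 * \param   sem         - \c [in] Semaphore; must not be signaled already
 *
 * \return  0 on success otherwise POSIX Error code
*/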
drm_public int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
					  uint32_t ip_type,
					  uint32_t ip_instance,
					  uint32_t ring,
					  amdgpu_semaphore_handle sem)
{
	int ret;

	if (!ctx || !sem)
		return -EINVAL;
	if (ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;

	pthread_mutex_lock(&ctx->sequence_mutex);
	/* the semaphore has already been signaled */
	if (sem->signal_fence.context) {
		ret = -EINVAL;
		goto unlock;
	}
	sem->signal_fence.context = ctx;
	sem->signal_fence.ip_type = ip_type;
	sem->signal_fence.ip_instance = ip_instance;
	sem->signal_fence.ring = ring;
	sem->signal_fence.fence = ctx->last_seq[ip_type][ip_instance][ring];
	update_references(NULL, &sem->refcount);
	ret = 0;
unlock:
	pthread_mutex_unlock(&ctx->sequence_mutex);
	return ret;
}

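/**
 * Queue the semaphore as a dependency of the next submission on a ring
 *
 * \param   ctx         - \c [in] Context that will wait
 * \param   ip_type     - \c [in] Hardware IP block type
 * \param   ip_instance - \c [in] Index of the IP block of the same type
 * \param   ring        - \c [in] Ring index
 * \param   sem         - \c [in] Semaphore; must be signaled first
 *
 * \return  0 on success otherwise POSIX Error code
*/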
drm_public int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
					uint32_t ip_type,
					uint32_t ip_instance,
					uint32_t ring,
					amdgpu_semaphore_handle sem)
{
	if (!ctx || !sem)
		return -EINVAL;
	if (ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	/* must signal first */
	if (!sem->signal_fence.context)
		return -EINVAL;

	pthread_mutex_lock(&ctx->sequence_mutex);
	list_add(&sem->list, &ctx->sem_list[ip_type][ip_instance][ring]);
	pthread_mutex_unlock(&ctx->sequence_mutex);
	return 0;
}

static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
{
	if (!sem || !sem->signal_fence.context)
		return -EINVAL;

	sem->signal_fence.context = NULL;
	sem->signal_fence.ip_type = 0;
	sem->signal_fence.ip_instance = 0;
	sem->signal_fence.ring = 0;
	sem->signal_fence.fence = 0;

	return 0;
}

static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
{
	if (!sem)
		return -EINVAL;

	if (update_references(&sem->refcount, NULL))
		free(sem);
	return 0;
}

drm_public int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
{
	return amdgpu_cs_unreference_sem(sem);
}

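/*
 * The functions below are thin wrappers around the generic drmSyncobj*
 * helpers: they only validate the device handle and forward to the DRM fd.
 */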
drm_public int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
					 uint32_t flags,
					 uint32_t *handle)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjCreate(dev->fd, flags, handle);
}

drm_public int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
					uint32_t *handle)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjCreate(dev->fd, 0, handle);
}

drm_public int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
					 uint32_t handle)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjDestroy(dev->fd, handle);
}

drm_public int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
				       const uint32_t *syncobjs,
				       uint32_t syncobj_count)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjReset(dev->fd, syncobjs, syncobj_count);
}

drm_public int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
					const uint32_t *syncobjs,
					uint32_t syncobj_count)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjSignal(dev->fd, syncobjs, syncobj_count);
}

drm_public int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
						 const uint32_t *syncobjs,
						 uint64_t *points,
						 uint32_t syncobj_count)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjTimelineSignal(dev->fd, syncobjs,
					points, syncobj_count);
}

drm_public int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
				      uint32_t *handles, unsigned num_handles,
				      int64_t timeout_nsec, unsigned flags,
				      uint32_t *first_signaled)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjWait(dev->fd, handles, num_handles, timeout_nsec,
			      flags, first_signaled);
}

drm_public int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
					       uint32_t *handles, uint64_t *points,
					       unsigned num_handles,
					       int64_t timeout_nsec, unsigned flags,
					       uint32_t *first_signaled)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjTimelineWait(dev->fd, handles, points, num_handles,
				      timeout_nsec, flags, first_signaled);
}

drm_public int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
				       uint32_t *handles, uint64_t *points,
				       unsigned num_handles)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjQuery(dev->fd, handles, points, num_handles);
}

drm_public int amdgpu_cs_syncobj_query2(amdgpu_device_handle dev,
					uint32_t *handles, uint64_t *points,
					unsigned num_handles, uint32_t flags)
{
	if (!dev)
		return -EINVAL;

	return drmSyncobjQuery2(dev->fd, handles, points, num_handles, flags);
}

drm_public int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
					uint32_t handle,
					int *shared_fd)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjHandleToFD(dev->fd, handle, shared_fd);
}

drm_public int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
					int shared_fd,
					uint32_t *handle)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjFDToHandle(dev->fd, shared_fd, handle);
}

drm_public int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
						  uint32_t syncobj,
						  int *sync_file_fd)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);
}

drm_public int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
						  uint32_t syncobj,
						  int sync_file_fd)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
}

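/**
 * Export a timeline syncobj point as a sync_file fd
 *
 * For a binary syncobj (point == 0) this is a plain export. For a timeline
 * point, the point's fence is first transferred into a temporary binary
 * syncobj, which is then exported and destroyed.
 */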
drm_public int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
						   uint32_t syncobj,
						   uint64_t point,
						   uint32_t flags,
						   int *sync_file_fd)
{
	uint32_t binary_handle;
	int ret;

	if (NULL == dev)
		return -EINVAL;

	if (!point)
		return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);

	ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
	if (ret)
		return ret;

	ret = drmSyncobjTransfer(dev->fd, binary_handle, 0,
				 syncobj, point, flags);
	if (ret)
		goto out;
	ret = drmSyncobjExportSyncFile(dev->fd, binary_handle, sync_file_fd);
out:
	drmSyncobjDestroy(dev->fd, binary_handle);
	return ret;
}

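/**
 * Import a sync_file fd into a timeline syncobj point
 *
 * The inverse of amdgpu_cs_syncobj_export_sync_file2(): for point != 0 the
 * sync_file is imported into a temporary binary syncobj and then transferred
 * to the requested timeline point.
 */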
drm_public int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
						   uint32_t syncobj,
						   uint64_t point,
						   int sync_file_fd)
{
	uint32_t binary_handle;
	int ret;

	if (NULL == dev)
		return -EINVAL;

	if (!point)
		return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);

	ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
	if (ret)
		return ret;
	ret = drmSyncobjImportSyncFile(dev->fd, binary_handle, sync_file_fd);
	if (ret)
		goto out;
	ret = drmSyncobjTransfer(dev->fd, syncobj, point,
				 binary_handle, 0, 0);
out:
	drmSyncobjDestroy(dev->fd, binary_handle);
	return ret;
}

drm_public int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
					  uint32_t dst_handle,
					  uint64_t dst_point,
					  uint32_t src_handle,
					  uint64_t src_point,
					  uint32_t flags)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjTransfer(dev->fd,
				  dst_handle, dst_point,
				  src_handle, src_point,
				  flags);
}

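/**
 * Submit raw CS chunks to the kernel
 *
 * Unlike amdgpu_cs_submit_raw2(), this variant takes an
 * amdgpu_bo_list_handle rather than a raw kernel handle and rejects
 * submissions with no chunks.
 */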
drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
				    amdgpu_context_handle context,
				    amdgpu_bo_list_handle bo_list_handle,
				    int num_chunks,
				    struct drm_amdgpu_cs_chunk *chunks,
				    uint64_t *seq_no)
{
	union drm_amdgpu_cs cs;
	uint64_t *chunk_array;
	int i, r;

	if (num_chunks == 0)
		return -EINVAL;

	memset(&cs, 0, sizeof(cs));
	chunk_array = alloca(sizeof(uint64_t) * num_chunks);
	for (i = 0; i < num_chunks; i++)
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
	cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
	cs.in.ctx_id = context->id;
	cs.in.bo_list_handle = bo_list_handle ? bo_list_handle->handle : 0;
	cs.in.num_chunks = num_chunks;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
				&cs, sizeof(cs));
	if (r)
		return r;

	if (seq_no)
		*seq_no = cs.out.handle;
	return 0;
}

drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
				     amdgpu_context_handle context,
				     uint32_t bo_list_handle,
				     int num_chunks,
				     struct drm_amdgpu_cs_chunk *chunks,
				     uint64_t *seq_no)
{
	union drm_amdgpu_cs cs;
	uint64_t *chunk_array;
	int i, r;

	memset(&cs, 0, sizeof(cs));
	chunk_array = alloca(sizeof(uint64_t) * num_chunks);
	for (i = 0; i < num_chunks; i++)
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
	cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
	cs.in.ctx_id = context->id;
	cs.in.bo_list_handle = bo_list_handle;
	cs.in.num_chunks = num_chunks;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
				&cs, sizeof(cs));
	if (!r && seq_no)
		*seq_no = cs.out.handle;
	return r;
}

drm_public void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
						   struct drm_amdgpu_cs_chunk_data *data)
{
	data->fence_data.handle = fence_info->handle->handle;
	data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
}

drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
					     struct drm_amdgpu_cs_chunk_dep *dep)
{
	dep->ip_type = fence->ip_type;
	dep->ip_instance = fence->ip_instance;
	dep->ring = fence->ring;
	dep->ctx_id = fence->context->id;
	dep->handle = fence->fence;
}

drm_public int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
					 struct amdgpu_cs_fence *fence,
					 uint32_t what,
					 uint32_t *out_handle)
{
	union drm_amdgpu_fence_to_handle fth;
	int r;

	memset(&fth, 0, sizeof(fth));
	fth.in.fence.ctx_id = fence->context->id;
	fth.in.fence.ip_type = fence->ip_type;
	fth.in.fence.ip_instance = fence->ip_instance;
	fth.in.fence.ring = fence->ring;
	fth.in.fence.seq_no = fence->fence;
	fth.in.what = what;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_FENCE_TO_HANDLE,
				&fth, sizeof(fth));
	if (r == 0)
		*out_handle = fth.out.handle;
	return r;
}