vcn_tests.c revision 6532f28e
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22*/
23
24#include <stdio.h>
25#include <inttypes.h>
26
27#include "CUnit/Basic.h"
28
29#include "util_math.h"
30
31#include "amdgpu_test.h"
32#include "amdgpu_drm.h"
33#include "amdgpu_internal.h"
34#include "decode_messages.h"
35
#define IB_SIZE		4096	/* indirect buffer size in bytes */
#define MAX_RESOURCES	16	/* max BOs attached to one submission */

/*
 * Bundle describing one test buffer object: the GEM handle, its VA-range
 * handle, the GPU virtual address it is mapped at, the (page-aligned)
 * allocation size, and the CPU pointer from amdgpu_bo_cpu_map().
 */
struct amdgpu_vcn_bo {
	amdgpu_bo_handle handle;	/* GEM buffer object handle */
	amdgpu_va_handle va_handle;	/* VA range backing the GPU mapping */
	uint64_t addr;			/* GPU virtual address of the mapping */
	uint64_t size;			/* allocation size, aligned to 4096 */
	uint8_t *ptr;			/* CPU mapping of the BO contents */
};
46
/* Device state shared by every test in this suite (set up in suite init). */
static amdgpu_device_handle device_handle;
static uint32_t major_version;	/* DRM interface major version */
static uint32_t minor_version;	/* DRM interface minor version */
static uint32_t family_id;	/* ASIC family; gates suite enablement */

static amdgpu_context_handle context_handle;	/* CS context for submissions */
static amdgpu_bo_handle ib_handle;		/* indirect buffer BO */
static amdgpu_va_handle ib_va_handle;		/* VA range of the IB */
static uint64_t ib_mc_address;			/* GPU address of the IB */
static uint32_t *ib_cpu;			/* CPU mapping of the IB */

/* BOs referenced by the next submission; each test resets num_resources. */
static amdgpu_bo_handle resources[MAX_RESOURCES];
static unsigned num_resources;

static void amdgpu_cs_vcn_dec_create(void);
static void amdgpu_cs_vcn_dec_decode(void);
static void amdgpu_cs_vcn_dec_destroy(void);

static void amdgpu_cs_vcn_enc_create(void);
static void amdgpu_cs_vcn_enc_encode(void);
static void amdgpu_cs_vcn_enc_destroy(void);
68
69CU_TestInfo vcn_tests[] = {
70
71	{ "VCN DEC create",  amdgpu_cs_vcn_dec_create },
72	{ "VCN DEC decode",  amdgpu_cs_vcn_dec_decode },
73	{ "VCN DEC destroy",  amdgpu_cs_vcn_dec_destroy },
74
75	{ "VCN ENC create",  amdgpu_cs_vcn_enc_create },
76	{ "VCN ENC decode",  amdgpu_cs_vcn_enc_encode },
77	{ "VCN ENC destroy",  amdgpu_cs_vcn_enc_destroy },
78	CU_TEST_INFO_NULL,
79};
80
81CU_BOOL suite_vcn_tests_enable(void)
82{
83
84	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
85				   &minor_version, &device_handle))
86		return CU_FALSE;
87
88	family_id = device_handle->info.family_id;
89
90	if (amdgpu_device_deinitialize(device_handle))
91			return CU_FALSE;
92
93
94	if (family_id < AMDGPU_FAMILY_RV) {
95		printf("\n\nThe ASIC NOT support VCN, suite disabled\n");
96		return CU_FALSE;
97	}
98
99	return CU_TRUE;
100}
101
102int suite_vcn_tests_init(void)
103{
104	int r;
105
106	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
107				     &minor_version, &device_handle);
108	if (r)
109		return CUE_SINIT_FAILED;
110
111	family_id = device_handle->info.family_id;
112
113	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
114	if (r)
115		return CUE_SINIT_FAILED;
116
117	r = amdgpu_bo_alloc_and_map(device_handle, IB_SIZE, 4096,
118				    AMDGPU_GEM_DOMAIN_GTT, 0,
119				    &ib_handle, (void**)&ib_cpu,
120				    &ib_mc_address, &ib_va_handle);
121	if (r)
122		return CUE_SINIT_FAILED;
123
124	return CUE_SUCCESS;
125}
126
127int suite_vcn_tests_clean(void)
128{
129	int r;
130
131	r = amdgpu_bo_unmap_and_free(ib_handle, ib_va_handle,
132			     ib_mc_address, IB_SIZE);
133	if (r)
134		return CUE_SCLEAN_FAILED;
135
136	r = amdgpu_cs_ctx_free(context_handle);
137	if (r)
138		return CUE_SCLEAN_FAILED;
139
140	r = amdgpu_device_deinitialize(device_handle);
141	if (r)
142		return CUE_SCLEAN_FAILED;
143
144	return CUE_SUCCESS;
145}
146
147static int submit(unsigned ndw, unsigned ip)
148{
149	struct amdgpu_cs_request ibs_request = {0};
150	struct amdgpu_cs_ib_info ib_info = {0};
151	struct amdgpu_cs_fence fence_status = {0};
152	uint32_t expired;
153	int r;
154
155	ib_info.ib_mc_address = ib_mc_address;
156	ib_info.size = ndw;
157
158	ibs_request.ip_type = ip;
159
160	r = amdgpu_bo_list_create(device_handle, num_resources, resources,
161				  NULL, &ibs_request.resources);
162	if (r)
163		return r;
164
165	ibs_request.number_of_ibs = 1;
166	ibs_request.ibs = &ib_info;
167	ibs_request.fence_info.handle = NULL;
168
169	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
170	if (r)
171		return r;
172
173	r = amdgpu_bo_list_destroy(ibs_request.resources);
174	if (r)
175		return r;
176
177	fence_status.context = context_handle;
178	fence_status.ip_type = ip;
179	fence_status.fence = ibs_request.seq_no;
180
181	r = amdgpu_cs_query_fence_status(&fence_status,
182					 AMDGPU_TIMEOUT_INFINITE,
183					 0, &expired);
184	if (r)
185		return r;
186
187	return 0;
188}
189
190static void alloc_resource(struct amdgpu_vcn_bo *vcn_bo,
191			unsigned size, unsigned domain)
192{
193	struct amdgpu_bo_alloc_request req = {0};
194	amdgpu_bo_handle buf_handle;
195	amdgpu_va_handle va_handle;
196	uint64_t va = 0;
197	int r;
198
199	req.alloc_size = ALIGN(size, 4096);
200	req.preferred_heap = domain;
201	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
202	CU_ASSERT_EQUAL(r, 0);
203	r = amdgpu_va_range_alloc(device_handle,
204				  amdgpu_gpu_va_range_general,
205				  req.alloc_size, 1, 0, &va,
206				  &va_handle, 0);
207	CU_ASSERT_EQUAL(r, 0);
208	r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0,
209			    AMDGPU_VA_OP_MAP);
210	CU_ASSERT_EQUAL(r, 0);
211	vcn_bo->addr = va;
212	vcn_bo->handle = buf_handle;
213	vcn_bo->size = req.alloc_size;
214	vcn_bo->va_handle = va_handle;
215	r = amdgpu_bo_cpu_map(vcn_bo->handle, (void **)&vcn_bo->ptr);
216	CU_ASSERT_EQUAL(r, 0);
217	memset(vcn_bo->ptr, 0, size);
218	r = amdgpu_bo_cpu_unmap(vcn_bo->handle);
219	CU_ASSERT_EQUAL(r, 0);
220}
221
222static void free_resource(struct amdgpu_vcn_bo *vcn_bo)
223{
224	int r;
225
226	r = amdgpu_bo_va_op(vcn_bo->handle, 0, vcn_bo->size,
227			    vcn_bo->addr, 0, AMDGPU_VA_OP_UNMAP);
228	CU_ASSERT_EQUAL(r, 0);
229
230	r = amdgpu_va_range_free(vcn_bo->va_handle);
231	CU_ASSERT_EQUAL(r, 0);
232
233	r = amdgpu_bo_free(vcn_bo->handle);
234	CU_ASSERT_EQUAL(r, 0);
235	memset(vcn_bo, 0, sizeof(*vcn_bo));
236}
237
238static void vcn_dec_cmd(uint64_t addr, unsigned cmd, int *idx)
239{
240	ib_cpu[(*idx)++] = 0x81C4;
241	ib_cpu[(*idx)++] = addr;
242	ib_cpu[(*idx)++] = 0x81C5;
243	ib_cpu[(*idx)++] = addr >> 32;
244	ib_cpu[(*idx)++] = 0x81C3;
245	ib_cpu[(*idx)++] = cmd << 1;
246}
247
248static void amdgpu_cs_vcn_dec_create(void)
249{
250	struct amdgpu_vcn_bo msg_buf;
251	int len, r;
252
253	num_resources  = 0;
254	alloc_resource(&msg_buf, 4096, AMDGPU_GEM_DOMAIN_GTT);
255	resources[num_resources++] = msg_buf.handle;
256	resources[num_resources++] = ib_handle;
257
258	r = amdgpu_bo_cpu_map(msg_buf.handle, (void **)&msg_buf.ptr);
259	CU_ASSERT_EQUAL(r, 0);
260
261	memset(msg_buf.ptr, 0, 4096);
262	memcpy(msg_buf.ptr, vcn_dec_create_msg, sizeof(vcn_dec_create_msg));
263
264	len = 0;
265	ib_cpu[len++] = 0x81C4;
266	ib_cpu[len++] = msg_buf.addr;
267	ib_cpu[len++] = 0x81C5;
268	ib_cpu[len++] = msg_buf.addr >> 32;
269	ib_cpu[len++] = 0x81C3;
270	ib_cpu[len++] = 0;
271	for (; len % 16; ) {
272		ib_cpu[len++] = 0x81ff;
273		ib_cpu[len++] = 0;
274	}
275
276	r = submit(len, AMDGPU_HW_IP_VCN_DEC);
277	CU_ASSERT_EQUAL(r, 0);
278
279	free_resource(&msg_buf);
280}
281
/*
 * Decode one canned H.264 frame. A single GTT buffer holds every decode
 * resource back to back: message, feedback, IT scaling table, bitstream,
 * DPB and the decode target. The same layout is walked twice — once with
 * the CPU pointer `dec` to place the data, once with GPU addresses to
 * build the command stream. Finally the decoded target is checksummed
 * against the known-good SUM_DECODE value.
 */
static void amdgpu_cs_vcn_dec_decode(void)
{
	const unsigned dpb_size = 15923584, dt_size = 737280;
	uint64_t msg_addr, fb_addr, bs_addr, dpb_addr, ctx_addr, dt_addr, it_addr, sum;
	struct amdgpu_vcn_bo dec_buf;
	int size, len, i, r;
	uint8_t *dec;

	/* Total allocation: each region padded to a 4 KiB boundary. */
	size = 4*1024; /* msg */
	size += 4*1024; /* fb */
	size += 4096; /*it_scaling_table*/
	size += ALIGN(sizeof(uvd_bitstream), 4*1024);
	size += ALIGN(dpb_size, 4*1024);
	size += ALIGN(dt_size, 4*1024);

	num_resources  = 0;
	alloc_resource(&dec_buf, size, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = dec_buf.handle;
	resources[num_resources++] = ib_handle;

	r = amdgpu_bo_cpu_map(dec_buf.handle, (void **)&dec_buf.ptr);
	dec = dec_buf.ptr;

	CU_ASSERT_EQUAL(r, 0);
	memset(dec_buf.ptr, 0, size);
	/* Decode message followed immediately by the AVC-specific part. */
	memcpy(dec_buf.ptr, vcn_dec_decode_msg, sizeof(vcn_dec_decode_msg));
	memcpy(dec_buf.ptr + sizeof(vcn_dec_decode_msg),
			avc_decode_msg, sizeof(avc_decode_msg));

	/* Skip past the msg and fb regions, then place the scaling table. */
	dec += 4*1024;
	dec += 4*1024;
	memcpy(dec, uvd_it_scaling_table, sizeof(uvd_it_scaling_table));

	/* Bitstream follows the scaling-table page. */
	dec += 4*1024;
	memcpy(dec, uvd_bitstream, sizeof(uvd_bitstream));

	dec += ALIGN(sizeof(uvd_bitstream), 4*1024);

	/* Skip the DPB; `dec` now points at the decode target region,
	 * which is checksummed after the submission completes. */
	dec += ALIGN(dpb_size, 4*1024);

	/* GPU-side view of the same layout. */
	msg_addr = dec_buf.addr;
	fb_addr = msg_addr + 4*1024;
	it_addr = fb_addr + 4*1024;
	bs_addr = it_addr + 4*1024;
	dpb_addr = ALIGN(bs_addr + sizeof(uvd_bitstream), 4*1024);
	/* NOTE(review): 0x006B9400 (7050240) differs from dpb_size; this
	 * offset appears to locate the context inside the DPB — confirm. */
	ctx_addr = ALIGN(dpb_addr + 0x006B9400, 4*1024);
	dt_addr = ALIGN(dpb_addr + dpb_size, 4*1024);

	/* Build the command stream: one address/cmd triple per resource. */
	len = 0;
	vcn_dec_cmd(msg_addr, 0x0, &len);
	vcn_dec_cmd(dpb_addr, 0x1, &len);
	vcn_dec_cmd(dt_addr, 0x2, &len);
	vcn_dec_cmd(fb_addr, 0x3, &len);
	vcn_dec_cmd(bs_addr, 0x100, &len);
	vcn_dec_cmd(it_addr, 0x204, &len);
	vcn_dec_cmd(ctx_addr, 0x206, &len);

	ib_cpu[len++] = 0x81C6;
	ib_cpu[len++] = 0x1;
	/* Pad the IB to a 16-dword boundary. */
	for (; len % 16; ) {
		ib_cpu[len++] = 0x81ff;
		ib_cpu[len++] = 0;
	}

	r = submit(len, AMDGPU_HW_IP_VCN_DEC);
	CU_ASSERT_EQUAL(r, 0);

	/* Checksum the decoded target and compare with the golden value. */
	for (i = 0, sum = 0; i < dt_size; ++i)
		sum += dec[i];

	CU_ASSERT_EQUAL(sum, SUM_DECODE);

	free_resource(&dec_buf);
}
356
357static void amdgpu_cs_vcn_dec_destroy(void)
358{
359	struct amdgpu_vcn_bo msg_buf;
360	int len, r;
361
362	num_resources  = 0;
363	alloc_resource(&msg_buf, 1024, AMDGPU_GEM_DOMAIN_GTT);
364	resources[num_resources++] = msg_buf.handle;
365	resources[num_resources++] = ib_handle;
366
367	r = amdgpu_bo_cpu_map(msg_buf.handle, (void **)&msg_buf.ptr);
368	CU_ASSERT_EQUAL(r, 0);
369
370	memset(msg_buf.ptr, 0, 1024);
371	memcpy(msg_buf.ptr, vcn_dec_destroy_msg, sizeof(vcn_dec_destroy_msg));
372
373	len = 0;
374	ib_cpu[len++] = 0x81C4;
375	ib_cpu[len++] = msg_buf.addr;
376	ib_cpu[len++] = 0x81C5;
377	ib_cpu[len++] = msg_buf.addr >> 32;
378	ib_cpu[len++] = 0x81C3;
379	ib_cpu[len++] = 0;
380	for (; len % 16; ) {
381		ib_cpu[len++] = 0x81ff;
382		ib_cpu[len++] = 0;
383	}
384
385	r = submit(len, AMDGPU_HW_IP_VCN_DEC);
386	CU_ASSERT_EQUAL(r, 0);
387
388	free_resource(&msg_buf);
389}
390
/* Placeholder: VCN encoder session creation is not implemented yet. */
static void amdgpu_cs_vcn_enc_create(void)
{
	/* TODO */
}
395
/* Placeholder: VCN encode of a test frame is not implemented yet. */
static void amdgpu_cs_vcn_enc_encode(void)
{
	/* TODO */
}
400
/* Placeholder: VCN encoder session teardown is not implemented yet. */
static void amdgpu_cs_vcn_enc_destroy(void)
{
	/* TODO */
}
405