/* vce_tests.c (revision 7cdc0497) */
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
*/

#include <stdio.h>
#include <inttypes.h>
#include <string.h>	/* memcpy(), memset(), strerror() */
#include <errno.h>	/* errno, EACCES */

#include "CUnit/Basic.h"

#include "util_math.h"

#include "amdgpu_test.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"

#include "vce_ib.h"
#include "frame.h"

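/*
 * IB_SIZE is the size of the single indirect buffer reused by every test;
 * MAX_RESOURCES bounds the bo_list passed to each submission.  FW_53_0_03
 * packs VCE firmware version 53.0.03 in the same layout as the version
 * value returned by amdgpu_query_firmware_version(); it is the minimum
 * firmware the MV dump test checks for below.
 */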
#define IB_SIZE		4096
#define MAX_RESOURCES	16
#define FW_53_0_03 ((53 << 24) | (0 << 16) | (03 << 8))

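/* One GPU buffer object together with its GPU virtual address mapping and,
 * while CPU-mapped, a CPU pointer to its contents. */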
struct amdgpu_vce_bo {
	amdgpu_bo_handle handle;
	amdgpu_va_handle va_handle;
	uint64_t addr;
	uint64_t size;
	uint8_t *ptr;
};

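/* State for one encode session: frame dimensions, the input video buffer,
 * bitstream and feedback buffers for the IDR and P frame, the context (CPB)
 * buffer, and the extra buffers used by the motion-vector dump test. */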
struct amdgpu_vce_encode {
	unsigned width;
	unsigned height;
	struct amdgpu_vce_bo vbuf;
	struct amdgpu_vce_bo bs[2];
	struct amdgpu_vce_bo fb[2];
	struct amdgpu_vce_bo cpb;
	unsigned ib_len;
	bool two_instance;
	struct amdgpu_vce_bo mvrefbuf;
	struct amdgpu_vce_bo mvb;
	unsigned mvbuf_size;
};

static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;
static uint32_t family_id;
static uint32_t vce_harvest_config;
static uint32_t chip_rev;
static uint32_t chip_id;
static uint32_t ids_flags;
static bool is_mv_supported = true;

static amdgpu_context_handle context_handle;
static amdgpu_bo_handle ib_handle;
static amdgpu_va_handle ib_va_handle;
static uint64_t ib_mc_address;
static uint32_t *ib_cpu;

static struct amdgpu_vce_encode enc;
static amdgpu_bo_handle resources[MAX_RESOURCES];
static unsigned num_resources;

static void amdgpu_cs_vce_create(void);
static void amdgpu_cs_vce_encode(void);
static void amdgpu_cs_vce_encode_mv(void);
static void amdgpu_cs_vce_destroy(void);

CU_TestInfo vce_tests[] = {
	{ "VCE create",  amdgpu_cs_vce_create },
	{ "VCE encode",  amdgpu_cs_vce_encode },
	{ "VCE MV dump",  amdgpu_cs_vce_encode_mv },
	{ "VCE destroy",  amdgpu_cs_vce_destroy },
	CU_TEST_INFO_NULL,
};

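/*
 * Decide whether the suite (and the MV dump test in particular) can run:
 * probe the device once, reject ASICs without VCE, and deactivate the
 * "VCE MV dump" test unless the chip is Fiji/Polaris or a dGPU newer than
 * Polaris and the VCE firmware is at least 53.0.03.
 */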
CU_BOOL suite_vce_tests_enable(void)
{
	uint32_t version, feature;
	CU_BOOL ret_mv = CU_FALSE;

	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle))
		return CU_FALSE;

	family_id = device_handle->info.family_id;
	chip_rev = device_handle->info.chip_rev;
	chip_id = device_handle->info.chip_external_rev;
	ids_flags = device_handle->info.ids_flags;

	amdgpu_query_firmware_version(device_handle, AMDGPU_INFO_FW_VCE, 0,
				      0, &version, &feature);

	if (amdgpu_device_deinitialize(device_handle))
		return CU_FALSE;

	if (family_id >= AMDGPU_FAMILY_RV || family_id == AMDGPU_FAMILY_SI) {
		printf("\n\nThe ASIC does NOT support VCE, suite disabled\n");
		return CU_FALSE;
	}

	if (!(chip_id == (chip_rev + 0x3C) || /* FIJI */
	      chip_id == (chip_rev + 0x50) || /* Polaris 10 */
	      chip_id == (chip_rev + 0x5A) || /* Polaris 11 */
	      chip_id == (chip_rev + 0x64) || /* Polaris 12 */
	      (family_id >= AMDGPU_FAMILY_AI && !ids_flags))) /* dGPU > Polaris */
		printf("\n\nThe ASIC does NOT support VCE MV, MV dump test disabled\n");
	else if (FW_53_0_03 > version)
		printf("\n\nThe VCE firmware does NOT support MV dump, test disabled\n");
	else
		ret_mv = CU_TRUE;

	if (ret_mv == CU_FALSE) {
		amdgpu_set_test_active("VCE Tests", "VCE MV dump", ret_mv);
		is_mv_supported = false;
	}

	return CU_TRUE;
}

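/*
 * Suite setup: open the device, create a GPU context and allocate a single
 * CPU-mapped 4 KiB indirect buffer that every test reuses through ib_cpu /
 * ib_mc_address.
 */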
int suite_vce_tests_init(void)
{
	int r;

	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle);
	if (r) {
		if ((r == -EACCES) && (errno == EACCES))
			printf("\n\nError: %s. "
				"Hint: try to run this test program as root.",
				strerror(errno));

		return CUE_SINIT_FAILED;
	}

	family_id = device_handle->info.family_id;
	vce_harvest_config = device_handle->info.vce_harvest_config;

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	if (r)
		return CUE_SINIT_FAILED;

	r = amdgpu_bo_alloc_and_map(device_handle, IB_SIZE, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_handle, (void**)&ib_cpu,
				    &ib_mc_address, &ib_va_handle);
	if (r)
		return CUE_SINIT_FAILED;

	memset(&enc, 0, sizeof(struct amdgpu_vce_encode));

	return CUE_SUCCESS;
}

int suite_vce_tests_clean(void)
{
	int r;

	r = amdgpu_bo_unmap_and_free(ib_handle, ib_va_handle,
				     ib_mc_address, IB_SIZE);
	if (r)
		return CUE_SCLEAN_FAILED;

	r = amdgpu_cs_ctx_free(context_handle);
	if (r)
		return CUE_SCLEAN_FAILED;

	r = amdgpu_device_deinitialize(device_handle);
	if (r)
		return CUE_SCLEAN_FAILED;

	return CUE_SUCCESS;
}

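/*
 * Submit the first `ndw` dwords of the shared IB on the given IP (here
 * always AMDGPU_HW_IP_VCE): build a bo_list from `resources`, submit a
 * single-IB request and block until its fence signals.
 */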
static int submit(unsigned ndw, unsigned ip)
{
	struct amdgpu_cs_request ibs_request = {0};
	struct amdgpu_cs_ib_info ib_info = {0};
	struct amdgpu_cs_fence fence_status = {0};
	uint32_t expired;
	int r;

	ib_info.ib_mc_address = ib_mc_address;
	ib_info.size = ndw;

	ibs_request.ip_type = ip;

	r = amdgpu_bo_list_create(device_handle, num_resources, resources,
				  NULL, &ibs_request.resources);
	if (r)
		return r;

	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
	if (r)
		return r;

	r = amdgpu_bo_list_destroy(ibs_request.resources);
	if (r)
		return r;

	fence_status.context = context_handle;
	fence_status.ip_type = ip;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE,
					 0, &expired);
	if (r)
		return r;

	return 0;
}

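/*
 * Allocate a buffer object of at least `size` bytes in the requested heap,
 * map it into the GPU virtual address space and zero it through a temporary
 * CPU mapping.  The result is returned in *vce_bo.
 */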
static void alloc_resource(struct amdgpu_vce_bo *vce_bo, unsigned size, unsigned domain)
{
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle buf_handle;
	amdgpu_va_handle va_handle;
	uint64_t va = 0;
	int r;

	req.alloc_size = ALIGN(size, 4096);
	req.preferred_heap = domain;
	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_va_range_alloc(device_handle,
				  amdgpu_gpu_va_range_general,
				  req.alloc_size, 1, 0, &va,
				  &va_handle, 0);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0,
			    AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);
	vce_bo->addr = va;
	vce_bo->handle = buf_handle;
	vce_bo->size = req.alloc_size;
	vce_bo->va_handle = va_handle;
	r = amdgpu_bo_cpu_map(vce_bo->handle, (void **)&vce_bo->ptr);
	CU_ASSERT_EQUAL(r, 0);
	memset(vce_bo->ptr, 0, size);
	r = amdgpu_bo_cpu_unmap(vce_bo->handle);
	CU_ASSERT_EQUAL(r, 0);
}

static void free_resource(struct amdgpu_vce_bo *vce_bo)
{
	int r;

	r = amdgpu_bo_va_op(vce_bo->handle, 0, vce_bo->size,
			    vce_bo->addr, 0, AMDGPU_VA_OP_UNMAP);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_va_range_free(vce_bo->va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_free(vce_bo->handle);
	CU_ASSERT_EQUAL(r, 0);
	memset(vce_bo, 0, sizeof(*vce_bo));
}

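/*
 * "VCE create" test: build a minimal command stream from the vce_session,
 * vce_taskinfo, vce_create and vce_feedback templates, patch in the frame
 * dimensions and the feedback buffer address, and submit it on the VCE ring.
 */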
static void amdgpu_cs_vce_create(void)
{
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	int len, r;

	enc.width = vce_create[6];
	enc.height = vce_create[7];

	num_resources = 0;
	alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[0].handle;
	resources[num_resources++] = ib_handle;

	len = 0;
	memcpy(ib_cpu, vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_create, sizeof(vce_create));
	ib_cpu[len + 8] = ALIGN(enc.width, align);
	ib_cpu[len + 9] = ALIGN(enc.width, align);
	if (is_mv_supported == true) { /* disableTwoInstance */
		if (family_id >= AMDGPU_FAMILY_AI)
			ib_cpu[len + 11] = 0x01000001;
		else
			ib_cpu[len + 11] = 0x01000201;
	}
	len += sizeof(vce_create) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc.fb[0].addr >> 32;
	ib_cpu[len + 3] = enc.fb[0].addr;
	len += sizeof(vce_feedback) / 4;

	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);

	free_resource(&enc.fb[0]);
}

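/*
 * Program the encoder session: rate control, extended config, motion
 * estimation, RDO and picture control, reusing the same session/taskinfo
 * preamble.  Called once per encode test before any frame is submitted.
 */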
static void amdgpu_cs_vce_config(void)
{
	int len = 0, r;

	memcpy((ib_cpu + len), vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	ib_cpu[len + 3] = 2;
	ib_cpu[len + 6] = 0xffffffff;
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_rate_ctrl, sizeof(vce_rate_ctrl));
	len += sizeof(vce_rate_ctrl) / 4;
	memcpy((ib_cpu + len), vce_config_ext, sizeof(vce_config_ext));
	len += sizeof(vce_config_ext) / 4;
	memcpy((ib_cpu + len), vce_motion_est, sizeof(vce_motion_est));
	len += sizeof(vce_motion_est) / 4;
	memcpy((ib_cpu + len), vce_rdo, sizeof(vce_rdo));
	len += sizeof(vce_rdo) / 4;
	memcpy((ib_cpu + len), vce_pic_ctrl, sizeof(vce_pic_ctrl));
	if (is_mv_supported == true)
		ib_cpu[len + 27] = 0x00000001; /* encSliceMode */
	len += sizeof(vce_pic_ctrl) / 4;

	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);
}

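/*
 * Encode the first frame as an IDR picture: point the hardware at the
 * bitstream, context (CPB), auxiliary and feedback buffers, then at the
 * luma and chroma planes inside the input video buffer.  When the
 * two-instance path is active the IB is only assembled here and is
 * submitted together with the P frame by amdgpu_cs_vce_encode_p().
 */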
static void amdgpu_cs_vce_encode_idr(struct amdgpu_vce_encode *enc)
{
	uint64_t luma_offset, chroma_offset;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	unsigned luma_size = ALIGN(enc->width, align) * ALIGN(enc->height, 16);
	int len = 0, i, r;

	luma_offset = enc->vbuf.addr;
	chroma_offset = luma_offset + luma_size;

	memcpy((ib_cpu + len), vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_bs_buffer, sizeof(vce_bs_buffer));
	ib_cpu[len + 2] = enc->bs[0].addr >> 32;
	ib_cpu[len + 3] = enc->bs[0].addr;
	len += sizeof(vce_bs_buffer) / 4;
	memcpy((ib_cpu + len), vce_context_buffer, sizeof(vce_context_buffer));
	ib_cpu[len + 2] = enc->cpb.addr >> 32;
	ib_cpu[len + 3] = enc->cpb.addr;
	len += sizeof(vce_context_buffer) / 4;
	memcpy((ib_cpu + len), vce_aux_buffer, sizeof(vce_aux_buffer));
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 2 + i] = luma_size * 1.5 * (i + 2);
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 10 + i] = luma_size * 1.5;
	len += sizeof(vce_aux_buffer) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc->fb[0].addr >> 32;
	ib_cpu[len + 3] = enc->fb[0].addr;
	len += sizeof(vce_feedback) / 4;
	memcpy((ib_cpu + len), vce_encode, sizeof(vce_encode));
	ib_cpu[len + 9] = luma_offset >> 32;
	ib_cpu[len + 10] = luma_offset;
	ib_cpu[len + 11] = chroma_offset >> 32;
	ib_cpu[len + 12] = chroma_offset;
	ib_cpu[len + 14] = ALIGN(enc->width, align);
	ib_cpu[len + 15] = ALIGN(enc->width, align);
	ib_cpu[len + 73] = luma_size * 1.5;
	ib_cpu[len + 74] = luma_size * 2.5;
	len += sizeof(vce_encode) / 4;
	enc->ib_len = len;
	if (!enc->two_instance) {
		r = submit(len, AMDGPU_HW_IP_VCE);
		CU_ASSERT_EQUAL(r, 0);
	}
}

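/*
 * Encode the second frame as a P picture, referencing the previously encoded
 * frame held in the CPB.  In two-instance mode this appends to the IB
 * already built by amdgpu_cs_vce_encode_idr() (starting at enc->ib_len) so
 * both frames go out in a single submission.
 */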
static void amdgpu_cs_vce_encode_p(struct amdgpu_vce_encode *enc)
{
	uint64_t luma_offset, chroma_offset;
	int len, i, r;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	unsigned luma_size = ALIGN(enc->width, align) * ALIGN(enc->height, 16);

	len = (enc->two_instance) ? enc->ib_len : 0;
	luma_offset = enc->vbuf.addr;
	chroma_offset = luma_offset + luma_size;

	if (!enc->two_instance) {
		memcpy((ib_cpu + len), vce_session, sizeof(vce_session));
		len += sizeof(vce_session) / 4;
	}
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_bs_buffer, sizeof(vce_bs_buffer));
	ib_cpu[len + 2] = enc->bs[1].addr >> 32;
	ib_cpu[len + 3] = enc->bs[1].addr;
	len += sizeof(vce_bs_buffer) / 4;
	memcpy((ib_cpu + len), vce_context_buffer, sizeof(vce_context_buffer));
	ib_cpu[len + 2] = enc->cpb.addr >> 32;
	ib_cpu[len + 3] = enc->cpb.addr;
	len += sizeof(vce_context_buffer) / 4;
	memcpy((ib_cpu + len), vce_aux_buffer, sizeof(vce_aux_buffer));
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 2 + i] = luma_size * 1.5 * (i + 2);
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 10 + i] = luma_size * 1.5;
	len += sizeof(vce_aux_buffer) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc->fb[1].addr >> 32;
	ib_cpu[len + 3] = enc->fb[1].addr;
	len += sizeof(vce_feedback) / 4;
	memcpy((ib_cpu + len), vce_encode, sizeof(vce_encode));
	ib_cpu[len + 2] = 0;
	ib_cpu[len + 9] = luma_offset >> 32;
	ib_cpu[len + 10] = luma_offset;
	ib_cpu[len + 11] = chroma_offset >> 32;
	ib_cpu[len + 12] = chroma_offset;
	ib_cpu[len + 14] = ALIGN(enc->width, align);
	ib_cpu[len + 15] = ALIGN(enc->width, align);
	ib_cpu[len + 18] = 0;
	ib_cpu[len + 19] = 0;
	ib_cpu[len + 56] = 3;
	ib_cpu[len + 57] = 0;
	ib_cpu[len + 58] = 0;
	ib_cpu[len + 59] = luma_size * 1.5;
	ib_cpu[len + 60] = luma_size * 2.5;
	ib_cpu[len + 73] = 0;
	ib_cpu[len + 74] = luma_size;
	ib_cpu[len + 81] = 1;
	ib_cpu[len + 82] = 1;
	len += sizeof(vce_encode) / 4;

	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);
}

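/*
 * Validate both encoded frames: derive the encoded size of each frame from
 * its feedback buffer, then byte-sum that many bytes of the bitstream and
 * compare against known-good checksums for the test frame.
 */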
static void check_result(struct amdgpu_vce_encode *enc)
{
	uint64_t sum;
	uint32_t s[2] = {180325, 15946};
	uint32_t *ptr, size;
	int i, j, r;

	for (i = 0; i < 2; ++i) {
		r = amdgpu_bo_cpu_map(enc->fb[i].handle, (void **)&enc->fb[i].ptr);
		CU_ASSERT_EQUAL(r, 0);
		ptr = (uint32_t *)enc->fb[i].ptr;
		size = ptr[4] - ptr[9];
		r = amdgpu_bo_cpu_unmap(enc->fb[i].handle);
		CU_ASSERT_EQUAL(r, 0);
		r = amdgpu_bo_cpu_map(enc->bs[i].handle, (void **)&enc->bs[i].ptr);
		CU_ASSERT_EQUAL(r, 0);
		for (j = 0, sum = 0; j < size; ++j)
			sum += enc->bs[i].ptr[j];
		CU_ASSERT_EQUAL(sum, s[i]);
		r = amdgpu_bo_cpu_unmap(enc->bs[i].handle);
		CU_ASSERT_EQUAL(r, 0);
	}
}

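/*
 * "VCE encode" test: allocate feedback, bitstream, video and CPB buffers,
 * upload the reference frame, program the session, then encode an IDR/P
 * pair.  On VI and newer the pair is encoded again with two pipes and, if
 * no VCE instance is harvested, once more with the two frames split across
 * two instances in a single submission; every variant is checksummed.
 */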
static void amdgpu_cs_vce_encode(void)
{
	uint32_t vbuf_size, bs_size = 0x154000, cpb_size;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	int i, r;

	vbuf_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16) * 1.5;
	cpb_size = vbuf_size * 10;
	num_resources = 0;
	alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[0].handle;
	alloc_resource(&enc.fb[1], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[1].handle;
	alloc_resource(&enc.bs[0], bs_size, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.bs[0].handle;
	alloc_resource(&enc.bs[1], bs_size, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.bs[1].handle;
	alloc_resource(&enc.vbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.vbuf.handle;
	alloc_resource(&enc.cpb, cpb_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.cpb.handle;
	resources[num_resources++] = ib_handle;

	r = amdgpu_bo_cpu_map(enc.vbuf.handle, (void **)&enc.vbuf.ptr);
	CU_ASSERT_EQUAL(r, 0);

	memset(enc.vbuf.ptr, 0, vbuf_size);
	for (i = 0; i < enc.height; ++i) {
		memcpy(enc.vbuf.ptr, (frame + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}
	for (i = 0; i < enc.height / 2; ++i) {
		memcpy(enc.vbuf.ptr, ((frame + enc.height * enc.width) + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}

	r = amdgpu_bo_cpu_unmap(enc.vbuf.handle);
	CU_ASSERT_EQUAL(r, 0);

	amdgpu_cs_vce_config();

	if (family_id >= AMDGPU_FAMILY_VI) {
		vce_taskinfo[3] = 3;
		amdgpu_cs_vce_encode_idr(&enc);
		amdgpu_cs_vce_encode_p(&enc);
		check_result(&enc);

		/* two pipes */
		vce_encode[16] = 0;
		amdgpu_cs_vce_encode_idr(&enc);
		amdgpu_cs_vce_encode_p(&enc);
		check_result(&enc);

		/* two instances */
		if (vce_harvest_config == 0) {
			enc.two_instance = true;
			vce_taskinfo[2] = 0x83;
			vce_taskinfo[4] = 1;
			amdgpu_cs_vce_encode_idr(&enc);
			vce_taskinfo[2] = 0xffffffff;
			vce_taskinfo[4] = 2;
			amdgpu_cs_vce_encode_p(&enc);
			check_result(&enc);
		}
	} else {
		vce_taskinfo[3] = 3;
		vce_encode[16] = 0;
		amdgpu_cs_vce_encode_idr(&enc);
		amdgpu_cs_vce_encode_p(&enc);
		check_result(&enc);
	}

	free_resource(&enc.fb[0]);
	free_resource(&enc.fb[1]);
	free_resource(&enc.bs[0]);
	free_resource(&enc.bs[1]);
	free_resource(&enc.vbuf);
	free_resource(&enc.cpb);
}

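/*
 * Build and submit the motion-vector dump command stream.  On top of the
 * buffers used by a normal encode it programs the MV reference picture and
 * the MV output buffer via vce_mv_buffer, and overrides the encode
 * parameters that are annotated field by field below.
 */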
static void amdgpu_cs_vce_mv(struct amdgpu_vce_encode *enc)
{
	uint64_t luma_offset, chroma_offset;
	uint64_t mv_ref_luma_offset;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	unsigned luma_size = ALIGN(enc->width, align) * ALIGN(enc->height, 16);
	int len = 0, i, r;

	luma_offset = enc->vbuf.addr;
	chroma_offset = luma_offset + luma_size;
	mv_ref_luma_offset = enc->mvrefbuf.addr;

	memcpy((ib_cpu + len), vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_bs_buffer, sizeof(vce_bs_buffer));
	ib_cpu[len + 2] = enc->bs[0].addr >> 32;
	ib_cpu[len + 3] = enc->bs[0].addr;
	len += sizeof(vce_bs_buffer) / 4;
	memcpy((ib_cpu + len), vce_context_buffer, sizeof(vce_context_buffer));
	ib_cpu[len + 2] = enc->cpb.addr >> 32;
	ib_cpu[len + 3] = enc->cpb.addr;
	len += sizeof(vce_context_buffer) / 4;
	memcpy((ib_cpu + len), vce_aux_buffer, sizeof(vce_aux_buffer));
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 2 + i] = luma_size * 1.5 * (i + 2);
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 10 + i] = luma_size * 1.5;
	len += sizeof(vce_aux_buffer) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc->fb[0].addr >> 32;
	ib_cpu[len + 3] = enc->fb[0].addr;
	len += sizeof(vce_feedback) / 4;
	memcpy((ib_cpu + len), vce_mv_buffer, sizeof(vce_mv_buffer));
	ib_cpu[len + 2] = mv_ref_luma_offset >> 32;
	ib_cpu[len + 3] = mv_ref_luma_offset;
	ib_cpu[len + 4] = ALIGN(enc->width, align);
	ib_cpu[len + 5] = ALIGN(enc->width, align);
	ib_cpu[len + 6] = luma_size;
	ib_cpu[len + 7] = enc->mvb.addr >> 32;
	ib_cpu[len + 8] = enc->mvb.addr;
	len += sizeof(vce_mv_buffer) / 4;
	memcpy((ib_cpu + len), vce_encode, sizeof(vce_encode));
	ib_cpu[len + 2] = 0;
	ib_cpu[len + 3] = 0;
	ib_cpu[len + 4] = 0x154000;
	ib_cpu[len + 9] = luma_offset >> 32;
	ib_cpu[len + 10] = luma_offset;
	ib_cpu[len + 11] = chroma_offset >> 32;
	ib_cpu[len + 12] = chroma_offset;
	ib_cpu[len + 13] = ALIGN(enc->height, 16);
	ib_cpu[len + 14] = ALIGN(enc->width, align);
	ib_cpu[len + 15] = ALIGN(enc->width, align);
	/* encDisableMBOffloading-encDisableTwoPipeMode-encInputPicArrayMode-encInputPicAddrMode */
	ib_cpu[len + 16] = 0x01010000;
	ib_cpu[len + 18] = 0; /* encPicType */
	ib_cpu[len + 19] = 0; /* encIdrFlag */
	ib_cpu[len + 20] = 0; /* encIdrPicId */
	ib_cpu[len + 21] = 0; /* encMGSKeyPic */
	ib_cpu[len + 22] = 0; /* encReferenceFlag */
	ib_cpu[len + 23] = 0; /* encTemporalLayerIndex */
	ib_cpu[len + 55] = 0; /* pictureStructure */
	ib_cpu[len + 56] = 0; /* encPicType -ref[0] */
	ib_cpu[len + 61] = 0; /* pictureStructure */
	ib_cpu[len + 62] = 0; /* encPicType -ref[1] */
	ib_cpu[len + 67] = 0; /* pictureStructure */
	ib_cpu[len + 68] = 0; /* encPicType -ref1 */
	ib_cpu[len + 81] = 1; /* frameNumber */
	ib_cpu[len + 82] = 2; /* pictureOrderCount */
	ib_cpu[len + 83] = 0xffffffff; /* numIPicRemainInRCGOP */
	ib_cpu[len + 84] = 0xffffffff; /* numPPicRemainInRCGOP */
	ib_cpu[len + 85] = 0xffffffff; /* numBPicRemainInRCGOP */
	ib_cpu[len + 86] = 0xffffffff; /* numIRPicRemainInRCGOP */
	ib_cpu[len + 87] = 0; /* remainedIntraRefreshPictures */
	len += sizeof(vce_encode) / 4;

	enc->ib_len = len;
	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);
}

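/* Byte-sum the dumped motion-vector buffer and compare it against the
 * known-good checksum for the test frame pair. */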
static void check_mv_result(struct amdgpu_vce_encode *enc)
{
	uint64_t sum;
	uint32_t s = 140790;
	uint32_t *ptr, size;
	int i, j, r;

	r = amdgpu_bo_cpu_map(enc->fb[0].handle, (void **)&enc->fb[0].ptr);
	CU_ASSERT_EQUAL(r, 0);
	ptr = (uint32_t *)enc->fb[0].ptr;
	r = amdgpu_bo_cpu_unmap(enc->fb[0].handle);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_bo_cpu_map(enc->mvb.handle, (void **)&enc->mvb.ptr);
	CU_ASSERT_EQUAL(r, 0);
	for (j = 0, sum = 0; j < enc->mvbuf_size; ++j)
		sum += enc->mvb.ptr[j];
	CU_ASSERT_EQUAL(sum, s);
	r = amdgpu_bo_cpu_unmap(enc->mvb.handle);
	CU_ASSERT_EQUAL(r, 0);
}

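/*
 * "VCE MV dump" test: set up the same buffers as the encode test plus an MV
 * output buffer and an MV reference picture buffer.  The reference picture
 * is filled with a vertically mirrored copy of the input frame, which gives
 * the encoder motion to estimate; a single MV-dump encode is then submitted
 * and the resulting vector buffer is checksummed.
 */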
static void amdgpu_cs_vce_encode_mv(void)
{
	uint32_t vbuf_size, bs_size = 0x154000, cpb_size;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	int i, r;

	vbuf_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16) * 1.5;
	enc.mvbuf_size = ALIGN(enc.width, 16) * ALIGN(enc.height, 16) / 8;
	cpb_size = vbuf_size * 10;
	num_resources = 0;
	alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[0].handle;
	alloc_resource(&enc.bs[0], bs_size, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.bs[0].handle;
	alloc_resource(&enc.mvb, enc.mvbuf_size, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.mvb.handle;
	alloc_resource(&enc.vbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.vbuf.handle;
	alloc_resource(&enc.mvrefbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.mvrefbuf.handle;
	alloc_resource(&enc.cpb, cpb_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.cpb.handle;
	resources[num_resources++] = ib_handle;

	r = amdgpu_bo_cpu_map(enc.vbuf.handle, (void **)&enc.vbuf.ptr);
	CU_ASSERT_EQUAL(r, 0);

	memset(enc.vbuf.ptr, 0, vbuf_size);
	for (i = 0; i < enc.height; ++i) {
		memcpy(enc.vbuf.ptr, (frame + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}
	for (i = 0; i < enc.height / 2; ++i) {
		memcpy(enc.vbuf.ptr, ((frame + enc.height * enc.width) + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}

	r = amdgpu_bo_cpu_unmap(enc.vbuf.handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_cpu_map(enc.mvrefbuf.handle, (void **)&enc.mvrefbuf.ptr);
	CU_ASSERT_EQUAL(r, 0);

	memset(enc.mvrefbuf.ptr, 0, vbuf_size);
	for (i = 0; i < enc.height; ++i) {
		memcpy(enc.mvrefbuf.ptr, (frame + (enc.height - i - 1) * enc.width), enc.width);
		enc.mvrefbuf.ptr += ALIGN(enc.width, align);
	}
	for (i = 0; i < enc.height / 2; ++i) {
		memcpy(enc.mvrefbuf.ptr,
		       ((frame + enc.height * enc.width) + (enc.height / 2 - i - 1) * enc.width), enc.width);
		enc.mvrefbuf.ptr += ALIGN(enc.width, align);
	}

	r = amdgpu_bo_cpu_unmap(enc.mvrefbuf.handle);
	CU_ASSERT_EQUAL(r, 0);

	amdgpu_cs_vce_config();

	vce_taskinfo[3] = 3;
	amdgpu_cs_vce_mv(&enc);
	check_mv_result(&enc);

	free_resource(&enc.fb[0]);
	free_resource(&enc.bs[0]);
	free_resource(&enc.vbuf);
	free_resource(&enc.cpb);
	free_resource(&enc.mvrefbuf);
	free_resource(&enc.mvb);
}

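/*
 * "VCE destroy" test: close the session with a session/taskinfo/feedback/
 * destroy command stream and assert that the submission completes.
 */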
static void amdgpu_cs_vce_destroy(void)
{
	int len, r;

	num_resources = 0;
	alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[0].handle;
	resources[num_resources++] = ib_handle;

	len = 0;
	memcpy(ib_cpu, vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	ib_cpu[len + 3] = 1;
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc.fb[0].addr >> 32;
	ib_cpu[len + 3] = enc.fb[0].addr;
	len += sizeof(vce_feedback) / 4;
	memcpy((ib_cpu + len), vce_destroy, sizeof(vce_destroy));
	len += sizeof(vce_destroy) / 4;

	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);

	free_resource(&enc.fb[0]);
}