vce_tests.c revision 41687f09
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
*/

#include <stdio.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>

#include "CUnit/Basic.h"

#include "util_math.h"

#include "amdgpu_test.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"

#include "vce_ib.h"
#include "frame.h"

#define IB_SIZE		4096
#define MAX_RESOURCES	16
/* Minimum VCE firmware version required by the "VCE MV dump" test,
 * packed as major << 24 | minor << 16 | revision << 8 (i.e. 53.0.03). */
#define FW_53_0_03 ((53 << 24) | (0 << 16) | (03 << 8))

struct amdgpu_vce_bo {
	amdgpu_bo_handle handle;
	amdgpu_va_handle va_handle;
	uint64_t addr;
	uint64_t size;
	uint8_t *ptr;
};

struct amdgpu_vce_encode {
	unsigned width;
	unsigned height;
	struct amdgpu_vce_bo vbuf;
	struct amdgpu_vce_bo bs[2];
	struct amdgpu_vce_bo fb[2];
	struct amdgpu_vce_bo cpb;
	unsigned ib_len;
	bool two_instance;
	struct amdgpu_vce_bo mvrefbuf;
	struct amdgpu_vce_bo mvb;
	unsigned mvbuf_size;
};

static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;
static uint32_t family_id;
static uint32_t vce_harvest_config;
static uint32_t chip_rev;
static uint32_t chip_id;
static uint32_t ids_flags;
static bool is_mv_supported = true;

static amdgpu_context_handle context_handle;
static amdgpu_bo_handle ib_handle;
static amdgpu_va_handle ib_va_handle;
static uint64_t ib_mc_address;
static uint32_t *ib_cpu;

static struct amdgpu_vce_encode enc;
static amdgpu_bo_handle resources[MAX_RESOURCES];
static unsigned num_resources;

static void amdgpu_cs_vce_create(void);
static void amdgpu_cs_vce_encode(void);
static void amdgpu_cs_vce_encode_mv(void);
static void amdgpu_cs_vce_destroy(void);

CU_TestInfo vce_tests[] = {
	{ "VCE create",  amdgpu_cs_vce_create },
	{ "VCE encode",  amdgpu_cs_vce_encode },
	{ "VCE MV dump",  amdgpu_cs_vce_encode_mv },
	{ "VCE destroy",  amdgpu_cs_vce_destroy },
	CU_TEST_INFO_NULL,
};

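/*
 * Gate the suite on hardware/firmware support: the VCE block is absent on
 * SI, on Arcturus and on Raven-and-newer families, so the whole suite is
 * disabled there. The "VCE MV dump" test is additionally restricted to
 * Fiji/Polaris, or Vega-and-newer dGPUs, with VCE firmware 53.0.03 or newer.
 */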
CU_BOOL suite_vce_tests_enable(void)
{
	uint32_t version, feature, asic_id;
	CU_BOOL ret_mv = CU_FALSE;

	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
					     &minor_version, &device_handle))
		return CU_FALSE;

	family_id = device_handle->info.family_id;
	chip_rev = device_handle->info.chip_rev;
	chip_id = device_handle->info.chip_external_rev;
	ids_flags = device_handle->info.ids_flags;
	asic_id = device_handle->info.asic_id;

	amdgpu_query_firmware_version(device_handle, AMDGPU_INFO_FW_VCE, 0,
					  0, &version, &feature);

	if (amdgpu_device_deinitialize(device_handle))
		return CU_FALSE;

	if (family_id >= AMDGPU_FAMILY_RV || family_id == AMDGPU_FAMILY_SI ||
		asic_is_arcturus(asic_id)) {
		printf("\n\nThe ASIC does not support VCE, suite disabled\n");
		return CU_FALSE;
	}

	if (!(chip_id == (chip_rev + 0x3C) || /* FIJI */
			chip_id == (chip_rev + 0x50) || /* Polaris 10 */
			chip_id == (chip_rev + 0x5A) || /* Polaris 11 */
			chip_id == (chip_rev + 0x64) || /* Polaris 12 */
			(family_id >= AMDGPU_FAMILY_AI && !ids_flags))) /* dGPU > Polaris */
		printf("\n\nThe ASIC does not support VCE MV, test disabled\n");
	else if (FW_53_0_03 > version)
		printf("\n\nThe VCE firmware version does not support VCE MV, test disabled\n");
	else
		ret_mv = CU_TRUE;

	if (ret_mv == CU_FALSE) {
		amdgpu_set_test_active("VCE Tests", "VCE MV dump", ret_mv);
		is_mv_supported = false;
	}

	return CU_TRUE;
}

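/*
 * Suite setup: open the device, create a GPU context and allocate/map the
 * single indirect buffer that every test below builds its VCE messages in.
 */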
int suite_vce_tests_init(void)
{
	int r;

	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle);
	if (r) {
		if ((r == -EACCES) && (errno == EACCES))
			printf("\n\nError: %s. "
				"Hint: try to run this test program as root.\n",
				strerror(errno));

		return CUE_SINIT_FAILED;
	}

	family_id = device_handle->info.family_id;
	vce_harvest_config = device_handle->info.vce_harvest_config;

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	if (r)
		return CUE_SINIT_FAILED;

	r = amdgpu_bo_alloc_and_map(device_handle, IB_SIZE, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_handle, (void**)&ib_cpu,
				    &ib_mc_address, &ib_va_handle);
	if (r)
		return CUE_SINIT_FAILED;

	memset(&enc, 0, sizeof(struct amdgpu_vce_encode));

	return CUE_SUCCESS;
}

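/* Suite teardown: release the indirect buffer, the context and the device. */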
int suite_vce_tests_clean(void)
{
	int r;

	r = amdgpu_bo_unmap_and_free(ib_handle, ib_va_handle,
				     ib_mc_address, IB_SIZE);
	if (r)
		return CUE_SCLEAN_FAILED;

	r = amdgpu_cs_ctx_free(context_handle);
	if (r)
		return CUE_SCLEAN_FAILED;

	r = amdgpu_device_deinitialize(device_handle);
	if (r)
		return CUE_SCLEAN_FAILED;

	return CUE_SUCCESS;
}

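/*
 * Wrap an IB submission: build a buffer list from the global resources[]
 * array, submit the first 'ndw' dwords of the shared IB on the given IP
 * type and block until the resulting fence signals.
 */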
static int submit(unsigned ndw, unsigned ip)
{
	struct amdgpu_cs_request ibs_request = {0};
	struct amdgpu_cs_ib_info ib_info = {0};
	struct amdgpu_cs_fence fence_status = {0};
	uint32_t expired;
	int r;

	ib_info.ib_mc_address = ib_mc_address;
	ib_info.size = ndw;

	ibs_request.ip_type = ip;

	r = amdgpu_bo_list_create(device_handle, num_resources, resources,
				  NULL, &ibs_request.resources);
	if (r)
		return r;

	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
	if (r)
		return r;

	r = amdgpu_bo_list_destroy(ibs_request.resources);
	if (r)
		return r;

	fence_status.context = context_handle;
	fence_status.ip_type = ip;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE,
					 0, &expired);
	if (r)
		return r;

	return 0;
}

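/*
 * Allocate a buffer of at least 'size' bytes in the requested domain, map
 * it into the GPU VA space and zero it through a temporary CPU mapping;
 * the handles, VA and size are returned in *vce_bo.
 */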
static void alloc_resource(struct amdgpu_vce_bo *vce_bo, unsigned size, unsigned domain)
{
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle buf_handle;
	amdgpu_va_handle va_handle;
	uint64_t va = 0;
	int r;

	req.alloc_size = ALIGN(size, 4096);
	req.preferred_heap = domain;
	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_va_range_alloc(device_handle,
				  amdgpu_gpu_va_range_general,
				  req.alloc_size, 1, 0, &va,
				  &va_handle, 0);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0,
			    AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);
	vce_bo->addr = va;
	vce_bo->handle = buf_handle;
	vce_bo->size = req.alloc_size;
	vce_bo->va_handle = va_handle;
	r = amdgpu_bo_cpu_map(vce_bo->handle, (void **)&vce_bo->ptr);
	CU_ASSERT_EQUAL(r, 0);
	memset(vce_bo->ptr, 0, size);
	r = amdgpu_bo_cpu_unmap(vce_bo->handle);
	CU_ASSERT_EQUAL(r, 0);
}

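/* Undo alloc_resource(): unmap the VA, free the VA range and the buffer. */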
static void free_resource(struct amdgpu_vce_bo *vce_bo)
{
	int r;

	r = amdgpu_bo_va_op(vce_bo->handle, 0, vce_bo->size,
			    vce_bo->addr, 0, AMDGPU_VA_OP_UNMAP);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_va_range_free(vce_bo->va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_free(vce_bo->handle);
	CU_ASSERT_EQUAL(r, 0);
	memset(vce_bo, 0, sizeof(*vce_bo));
}

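/*
 * "VCE create" test: chain the session, task info, create and feedback
 * messages (templates presumably provided by vce_ib.h) into the IB and
 * submit them on the VCE ring to open an encode session. The encode
 * dimensions are taken from the vce_create template.
 */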
static void amdgpu_cs_vce_create(void)
{
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	int len, r;

	enc.width = vce_create[6];
	enc.height = vce_create[7];

	num_resources = 0;
	alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[0].handle;
	resources[num_resources++] = ib_handle;

	len = 0;
	memcpy(ib_cpu, vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_create, sizeof(vce_create));
	ib_cpu[len + 8] = ALIGN(enc.width, align);
	ib_cpu[len + 9] = ALIGN(enc.width, align);
	if (is_mv_supported == true) { /* disableTwoInstance */
		if (family_id >= AMDGPU_FAMILY_AI)
			ib_cpu[len + 11] = 0x01000001;
		else
			ib_cpu[len + 11] = 0x01000201;
	}
	len += sizeof(vce_create) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc.fb[0].addr >> 32;
	ib_cpu[len + 3] = enc.fb[0].addr;
	len += sizeof(vce_feedback) / 4;

	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);

	free_resource(&enc.fb[0]);
}

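/*
 * Send the per-session configuration: rate control, config extension,
 * motion estimation, RDO and picture control messages.
 */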
static void amdgpu_cs_vce_config(void)
{
	int len = 0, r;

	memcpy((ib_cpu + len), vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	ib_cpu[len + 3] = 2;
	ib_cpu[len + 6] = 0xffffffff;
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_rate_ctrl, sizeof(vce_rate_ctrl));
	len += sizeof(vce_rate_ctrl) / 4;
	memcpy((ib_cpu + len), vce_config_ext, sizeof(vce_config_ext));
	len += sizeof(vce_config_ext) / 4;
	memcpy((ib_cpu + len), vce_motion_est, sizeof(vce_motion_est));
	len += sizeof(vce_motion_est) / 4;
	memcpy((ib_cpu + len), vce_rdo, sizeof(vce_rdo));
	len += sizeof(vce_rdo) / 4;
	memcpy((ib_cpu + len), vce_pic_ctrl, sizeof(vce_pic_ctrl));
	if (is_mv_supported == true)
		ib_cpu[len + 27] = 0x00000001; /* encSliceMode */
	len += sizeof(vce_pic_ctrl) / 4;

	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);
}

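/*
 * Encode the test picture as an IDR frame: patch the bitstream, context
 * (CPB), auxiliary and feedback buffer addresses and the luma/chroma source
 * offsets into the encode message, then submit. In two-instance mode the IB
 * is not submitted here; its length is kept in enc->ib_len so the P-frame
 * messages can be appended to the same IB.
 */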
static void amdgpu_cs_vce_encode_idr(struct amdgpu_vce_encode *enc)
{
	uint64_t luma_offset, chroma_offset;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	unsigned luma_size = ALIGN(enc->width, align) * ALIGN(enc->height, 16);
	int len = 0, i, r;

	luma_offset = enc->vbuf.addr;
	chroma_offset = luma_offset + luma_size;

	memcpy((ib_cpu + len), vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_bs_buffer, sizeof(vce_bs_buffer));
	ib_cpu[len + 2] = enc->bs[0].addr >> 32;
	ib_cpu[len + 3] = enc->bs[0].addr;
	len += sizeof(vce_bs_buffer) / 4;
	memcpy((ib_cpu + len), vce_context_buffer, sizeof(vce_context_buffer));
	ib_cpu[len + 2] = enc->cpb.addr >> 32;
	ib_cpu[len + 3] = enc->cpb.addr;
	len += sizeof(vce_context_buffer) / 4;
	memcpy((ib_cpu + len), vce_aux_buffer, sizeof(vce_aux_buffer));
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 2 + i] = luma_size * 1.5 * (i + 2);
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 10 + i] = luma_size * 1.5;
	len += sizeof(vce_aux_buffer) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc->fb[0].addr >> 32;
	ib_cpu[len + 3] = enc->fb[0].addr;
	len += sizeof(vce_feedback) / 4;
	memcpy((ib_cpu + len), vce_encode, sizeof(vce_encode));
	ib_cpu[len + 9] = luma_offset >> 32;
	ib_cpu[len + 10] = luma_offset;
	ib_cpu[len + 11] = chroma_offset >> 32;
	ib_cpu[len + 12] = chroma_offset;
	ib_cpu[len + 14] = ALIGN(enc->width, align);
	ib_cpu[len + 15] = ALIGN(enc->width, align);
	ib_cpu[len + 73] = luma_size * 1.5;
	ib_cpu[len + 74] = luma_size * 2.5;
	len += sizeof(vce_encode) / 4;
	enc->ib_len = len;
	if (!enc->two_instance) {
		r = submit(len, AMDGPU_HW_IP_VCE);
		CU_ASSERT_EQUAL(r, 0);
	}
}

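/*
 * Encode the same picture as a P frame referencing the previously encoded
 * frame. In two-instance mode the messages are appended at enc->ib_len so
 * both frames go down in a single submission.
 */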
static void amdgpu_cs_vce_encode_p(struct amdgpu_vce_encode *enc)
{
	uint64_t luma_offset, chroma_offset;
	int len, i, r;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	unsigned luma_size = ALIGN(enc->width, align) * ALIGN(enc->height, 16);

	len = (enc->two_instance) ? enc->ib_len : 0;
	luma_offset = enc->vbuf.addr;
	chroma_offset = luma_offset + luma_size;

	if (!enc->two_instance) {
		memcpy((ib_cpu + len), vce_session, sizeof(vce_session));
		len += sizeof(vce_session) / 4;
	}
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_bs_buffer, sizeof(vce_bs_buffer));
	ib_cpu[len + 2] = enc->bs[1].addr >> 32;
	ib_cpu[len + 3] = enc->bs[1].addr;
	len += sizeof(vce_bs_buffer) / 4;
	memcpy((ib_cpu + len), vce_context_buffer, sizeof(vce_context_buffer));
	ib_cpu[len + 2] = enc->cpb.addr >> 32;
	ib_cpu[len + 3] = enc->cpb.addr;
	len += sizeof(vce_context_buffer) / 4;
	memcpy((ib_cpu + len), vce_aux_buffer, sizeof(vce_aux_buffer));
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 2 + i] = luma_size * 1.5 * (i + 2);
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 10 + i] = luma_size * 1.5;
	len += sizeof(vce_aux_buffer) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc->fb[1].addr >> 32;
	ib_cpu[len + 3] = enc->fb[1].addr;
	len += sizeof(vce_feedback) / 4;
	memcpy((ib_cpu + len), vce_encode, sizeof(vce_encode));
	ib_cpu[len + 2] = 0;
	ib_cpu[len + 9] = luma_offset >> 32;
	ib_cpu[len + 10] = luma_offset;
	ib_cpu[len + 11] = chroma_offset >> 32;
	ib_cpu[len + 12] = chroma_offset;
	ib_cpu[len + 14] = ALIGN(enc->width, align);
	ib_cpu[len + 15] = ALIGN(enc->width, align);
	ib_cpu[len + 18] = 0;
	ib_cpu[len + 19] = 0;
	ib_cpu[len + 56] = 3;
	ib_cpu[len + 57] = 0;
	ib_cpu[len + 58] = 0;
	ib_cpu[len + 59] = luma_size * 1.5;
	ib_cpu[len + 60] = luma_size * 2.5;
	ib_cpu[len + 73] = 0;
	ib_cpu[len + 74] = luma_size;
	ib_cpu[len + 81] = 1;
	ib_cpu[len + 82] = 1;
	len += sizeof(vce_encode) / 4;

	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);
}

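/*
 * Read the encoded size of each frame back from its feedback buffer, then
 * byte-sum the corresponding bitstream and compare against hard-coded
 * checksums, presumably captured from a known-good run.
 */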
static void check_result(struct amdgpu_vce_encode *enc)
{
	uint64_t sum;
	uint32_t s[2] = {180325, 15946};
	uint32_t *ptr, size;
	int i, j, r;

	for (i = 0; i < 2; ++i) {
		r = amdgpu_bo_cpu_map(enc->fb[i].handle, (void **)&enc->fb[i].ptr);
		CU_ASSERT_EQUAL(r, 0);
		ptr = (uint32_t *)enc->fb[i].ptr;
		size = ptr[4] - ptr[9];
		r = amdgpu_bo_cpu_unmap(enc->fb[i].handle);
		CU_ASSERT_EQUAL(r, 0);
		r = amdgpu_bo_cpu_map(enc->bs[i].handle, (void **)&enc->bs[i].ptr);
		CU_ASSERT_EQUAL(r, 0);
		for (j = 0, sum = 0; j < size; ++j)
			sum += enc->bs[i].ptr[j];
		CU_ASSERT_EQUAL(sum, s[i]);
		r = amdgpu_bo_cpu_unmap(enc->bs[i].handle);
		CU_ASSERT_EQUAL(r, 0);
	}
}

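/*
 * "VCE encode" test: allocate feedback, bitstream, source and CPB buffers,
 * upload the raw test frame (luma plane followed by the chroma plane, each
 * row padded to the HW alignment), then run IDR + P encodes. On VI and
 * newer this is repeated in the default, two-pipe and, when no VCE instance
 * is harvested, dual-instance configurations, checking the bitstream
 * checksums after each pass.
 */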
static void amdgpu_cs_vce_encode(void)
{
	uint32_t vbuf_size, bs_size = 0x154000, cpb_size;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	int i, r;

	vbuf_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16) * 1.5;
	cpb_size = vbuf_size * 10;
	num_resources = 0;
	alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[0].handle;
	alloc_resource(&enc.fb[1], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[1].handle;
	alloc_resource(&enc.bs[0], bs_size, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.bs[0].handle;
	alloc_resource(&enc.bs[1], bs_size, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.bs[1].handle;
	alloc_resource(&enc.vbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.vbuf.handle;
	alloc_resource(&enc.cpb, cpb_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.cpb.handle;
	resources[num_resources++] = ib_handle;

	r = amdgpu_bo_cpu_map(enc.vbuf.handle, (void **)&enc.vbuf.ptr);
	CU_ASSERT_EQUAL(r, 0);

	memset(enc.vbuf.ptr, 0, vbuf_size);
	for (i = 0; i < enc.height; ++i) {
		memcpy(enc.vbuf.ptr, (frame + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}
	for (i = 0; i < enc.height / 2; ++i) {
		memcpy(enc.vbuf.ptr, ((frame + enc.height * enc.width) + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}

	r = amdgpu_bo_cpu_unmap(enc.vbuf.handle);
	CU_ASSERT_EQUAL(r, 0);

	amdgpu_cs_vce_config();

	if (family_id >= AMDGPU_FAMILY_VI) {
		vce_taskinfo[3] = 3;
		amdgpu_cs_vce_encode_idr(&enc);
		amdgpu_cs_vce_encode_p(&enc);
		check_result(&enc);

		/* two pipes */
		vce_encode[16] = 0;
		amdgpu_cs_vce_encode_idr(&enc);
		amdgpu_cs_vce_encode_p(&enc);
		check_result(&enc);

		/* two instances */
		if (vce_harvest_config == 0) {
			enc.two_instance = true;
			vce_taskinfo[2] = 0x83;
			vce_taskinfo[4] = 1;
			amdgpu_cs_vce_encode_idr(&enc);
			vce_taskinfo[2] = 0xffffffff;
			vce_taskinfo[4] = 2;
			amdgpu_cs_vce_encode_p(&enc);
			check_result(&enc);
		}
	} else {
		vce_taskinfo[3] = 3;
		vce_encode[16] = 0;
		amdgpu_cs_vce_encode_idr(&enc);
		amdgpu_cs_vce_encode_p(&enc);
		check_result(&enc);
	}

	free_resource(&enc.fb[0]);
	free_resource(&enc.fb[1]);
	free_resource(&enc.bs[0]);
	free_resource(&enc.bs[1]);
	free_resource(&enc.vbuf);
	free_resource(&enc.cpb);
}

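/*
 * Build and submit the encode IB for the MV dump case: besides the usual
 * bitstream/context/aux/feedback buffers it programs the MV reference
 * picture and the motion vector output buffer through the vce_mv_buffer
 * message and fills in the per-picture encode parameters explicitly.
 */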
static void amdgpu_cs_vce_mv(struct amdgpu_vce_encode *enc)
{
	uint64_t luma_offset, chroma_offset;
	uint64_t mv_ref_luma_offset;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	unsigned luma_size = ALIGN(enc->width, align) * ALIGN(enc->height, 16);
	int len = 0, i, r;

	luma_offset = enc->vbuf.addr;
	chroma_offset = luma_offset + luma_size;
	mv_ref_luma_offset = enc->mvrefbuf.addr;

	memcpy((ib_cpu + len), vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_bs_buffer, sizeof(vce_bs_buffer));
	ib_cpu[len + 2] = enc->bs[0].addr >> 32;
	ib_cpu[len + 3] = enc->bs[0].addr;
	len += sizeof(vce_bs_buffer) / 4;
	memcpy((ib_cpu + len), vce_context_buffer, sizeof(vce_context_buffer));
	ib_cpu[len + 2] = enc->cpb.addr >> 32;
	ib_cpu[len + 3] = enc->cpb.addr;
	len += sizeof(vce_context_buffer) / 4;
	memcpy((ib_cpu + len), vce_aux_buffer, sizeof(vce_aux_buffer));
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 2 + i] = luma_size * 1.5 * (i + 2);
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 10 + i] = luma_size * 1.5;
	len += sizeof(vce_aux_buffer) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc->fb[0].addr >> 32;
	ib_cpu[len + 3] = enc->fb[0].addr;
	len += sizeof(vce_feedback) / 4;
	memcpy((ib_cpu + len), vce_mv_buffer, sizeof(vce_mv_buffer));
	ib_cpu[len + 2] = mv_ref_luma_offset >> 32;
	ib_cpu[len + 3] = mv_ref_luma_offset;
	ib_cpu[len + 4] = ALIGN(enc->width, align);
	ib_cpu[len + 5] = ALIGN(enc->width, align);
	ib_cpu[len + 6] = luma_size;
	ib_cpu[len + 7] = enc->mvb.addr >> 32;
	ib_cpu[len + 8] = enc->mvb.addr;
	len += sizeof(vce_mv_buffer) / 4;
	memcpy((ib_cpu + len), vce_encode, sizeof(vce_encode));
	ib_cpu[len + 2] = 0;
	ib_cpu[len + 3] = 0;
	ib_cpu[len + 4] = 0x154000;
	ib_cpu[len + 9] = luma_offset >> 32;
	ib_cpu[len + 10] = luma_offset;
	ib_cpu[len + 11] = chroma_offset >> 32;
	ib_cpu[len + 12] = chroma_offset;
	ib_cpu[len + 13] = ALIGN(enc->height, 16);
	ib_cpu[len + 14] = ALIGN(enc->width, align);
	ib_cpu[len + 15] = ALIGN(enc->width, align);
	/* encDisableMBOffloading-encDisableTwoPipeMode-encInputPicArrayMode-encInputPicAddrMode */
	ib_cpu[len + 16] = 0x01010000;
	ib_cpu[len + 18] = 0; /* encPicType */
	ib_cpu[len + 19] = 0; /* encIdrFlag */
	ib_cpu[len + 20] = 0; /* encIdrPicId */
	ib_cpu[len + 21] = 0; /* encMGSKeyPic */
	ib_cpu[len + 22] = 0; /* encReferenceFlag */
	ib_cpu[len + 23] = 0; /* encTemporalLayerIndex */
	ib_cpu[len + 55] = 0; /* pictureStructure */
	ib_cpu[len + 56] = 0; /* encPicType -ref[0] */
	ib_cpu[len + 61] = 0; /* pictureStructure */
	ib_cpu[len + 62] = 0; /* encPicType -ref[1] */
	ib_cpu[len + 67] = 0; /* pictureStructure */
	ib_cpu[len + 68] = 0; /* encPicType -ref1 */
	ib_cpu[len + 81] = 1; /* frameNumber */
	ib_cpu[len + 82] = 2; /* pictureOrderCount */
	ib_cpu[len + 83] = 0xffffffff; /* numIPicRemainInRCGOP */
	ib_cpu[len + 84] = 0xffffffff; /* numPPicRemainInRCGOP */
	ib_cpu[len + 85] = 0xffffffff; /* numBPicRemainInRCGOP */
	ib_cpu[len + 86] = 0xffffffff; /* numIRPicRemainInRCGOP */
	ib_cpu[len + 87] = 0; /* remainedIntraRefreshPictures */
	len += sizeof(vce_encode) / 4;

	enc->ib_len = len;
	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);
}

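/*
 * Byte-sum the motion vector output buffer and compare it against a
 * hard-coded checksum, presumably captured from a known-good run.
 */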
static void check_mv_result(struct amdgpu_vce_encode *enc)
{
	uint64_t sum;
	uint32_t s = 140790;
	int j, r;

	r = amdgpu_bo_cpu_map(enc->fb[0].handle, (void **)&enc->fb[0].ptr);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_bo_cpu_unmap(enc->fb[0].handle);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_bo_cpu_map(enc->mvb.handle, (void **)&enc->mvb.ptr);
	CU_ASSERT_EQUAL(r, 0);
	for (j = 0, sum = 0; j < enc->mvbuf_size; ++j)
		sum += enc->mvb.ptr[j];
	CU_ASSERT_EQUAL(sum, s);
	r = amdgpu_bo_cpu_unmap(enc->mvb.handle);
	CU_ASSERT_EQUAL(r, 0);
}

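/*
 * "VCE MV dump" test: upload the test frame as the input picture and a
 * vertically mirrored copy of it as the MV reference picture, allocate the
 * motion vector output buffer, encode once with MV dump enabled and verify
 * the resulting motion vector data.
 */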
static void amdgpu_cs_vce_encode_mv(void)
{
	uint32_t vbuf_size, bs_size = 0x154000, cpb_size;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	int i, r;

	vbuf_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16) * 1.5;
	enc.mvbuf_size = ALIGN(enc.width, 16) * ALIGN(enc.height, 16) / 8;
	cpb_size = vbuf_size * 10;
	num_resources = 0;
	alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[0].handle;
	alloc_resource(&enc.bs[0], bs_size, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.bs[0].handle;
	alloc_resource(&enc.mvb, enc.mvbuf_size, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.mvb.handle;
	alloc_resource(&enc.vbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.vbuf.handle;
	alloc_resource(&enc.mvrefbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.mvrefbuf.handle;
	alloc_resource(&enc.cpb, cpb_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.cpb.handle;
	resources[num_resources++] = ib_handle;

	r = amdgpu_bo_cpu_map(enc.vbuf.handle, (void **)&enc.vbuf.ptr);
	CU_ASSERT_EQUAL(r, 0);

	memset(enc.vbuf.ptr, 0, vbuf_size);
	for (i = 0; i < enc.height; ++i) {
		memcpy(enc.vbuf.ptr, (frame + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}
	for (i = 0; i < enc.height / 2; ++i) {
		memcpy(enc.vbuf.ptr, ((frame + enc.height * enc.width) + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}

	r = amdgpu_bo_cpu_unmap(enc.vbuf.handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_cpu_map(enc.mvrefbuf.handle, (void **)&enc.mvrefbuf.ptr);
	CU_ASSERT_EQUAL(r, 0);

	memset(enc.mvrefbuf.ptr, 0, vbuf_size);
	for (i = 0; i < enc.height; ++i) {
		memcpy(enc.mvrefbuf.ptr, (frame + (enc.height - i - 1) * enc.width), enc.width);
		enc.mvrefbuf.ptr += ALIGN(enc.width, align);
	}
	for (i = 0; i < enc.height / 2; ++i) {
		memcpy(enc.mvrefbuf.ptr,
		((frame + enc.height * enc.width) + (enc.height / 2 - i - 1) * enc.width), enc.width);
		enc.mvrefbuf.ptr += ALIGN(enc.width, align);
	}

	r = amdgpu_bo_cpu_unmap(enc.mvrefbuf.handle);
	CU_ASSERT_EQUAL(r, 0);

	amdgpu_cs_vce_config();

	vce_taskinfo[3] = 3;
	amdgpu_cs_vce_mv(&enc);
	check_mv_result(&enc);

	free_resource(&enc.fb[0]);
	free_resource(&enc.bs[0]);
	free_resource(&enc.vbuf);
	free_resource(&enc.cpb);
	free_resource(&enc.mvrefbuf);
	free_resource(&enc.mvb);
}

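/*
 * "VCE destroy" test: submit the session/task info/feedback/destroy
 * messages to close the encode session opened by the create test.
 */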
static void amdgpu_cs_vce_destroy(void)
{
	int len, r;

	num_resources = 0;
	alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[0].handle;
	resources[num_resources++] = ib_handle;

	len = 0;
	memcpy(ib_cpu, vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	ib_cpu[len + 3] = 1;
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc.fb[0].addr >> 32;
	ib_cpu[len + 3] = enc.fb[0].addr;
	len += sizeof(vce_feedback) / 4;
	memcpy((ib_cpu + len), vce_destroy, sizeof(vce_destroy));
	len += sizeof(vce_destroy) / 4;

	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);

	free_resource(&enc.fb[0]);
}