vm_tests.c revision 6532f28e
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "CUnit/Basic.h"

#include "amdgpu_test.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"

static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;

static void amdgpu_vmid_reserve_test(void);
static void amdgpu_vm_unaligned_map(void);
static void amdgpu_vm_mapping_test(void);

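/*
 * Check once whether the VM suite can run on this device at all; on SI
 * the tests are known to hang the CP, so the whole suite is disabled.
 */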
CU_BOOL suite_vm_tests_enable(void)
{
	CU_BOOL enable = CU_TRUE;

	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle))
		return CU_FALSE;

	if (device_handle->info.family_id == AMDGPU_FAMILY_SI) {
		printf("\n\nCurrently hangs the CP on this ASIC, VM suite disabled\n");
		enable = CU_FALSE;
	}

	if (amdgpu_device_deinitialize(device_handle))
		return CU_FALSE;

	return enable;
}

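/* Open the device once for the whole suite; the tests share device_handle. */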
int suite_vm_tests_init(void)
{
	int r;

	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle);

	if (r) {
		if ((r == -EACCES) && (errno == EACCES))
			printf("\n\nError: %s. "
				"Hint: Try to run this test program as root.",
				strerror(errno));
		return CUE_SINIT_FAILED;
	}

	return CUE_SUCCESS;
}

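/* Counterpart to suite_vm_tests_init(): drop the shared device handle. */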
int suite_vm_tests_clean(void)
{
	int r = amdgpu_device_deinitialize(device_handle);

	if (r == 0)
		return CUE_SUCCESS;
	else
		return CUE_SCLEAN_FAILED;
}

CU_TestInfo vm_tests[] = {
	{ "reserve vmid test",  amdgpu_vmid_reserve_test },
	{ "unaligned map",  amdgpu_vm_unaligned_map },
	{ "vm mapping test",  amdgpu_vm_mapping_test },
	CU_TEST_INFO_NULL,
};

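/*
 * Reserve a dedicated VMID for this process, submit a trivial IB on the
 * GFX ring while the reservation is held, wait for it to retire, then
 * release the VMID again.
 */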
static void amdgpu_vmid_reserve_test(void)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct amdgpu_cs_request ibs_request;
	struct amdgpu_cs_ib_info ib_info;
	struct amdgpu_cs_fence fence_status;
	uint32_t expired, flags;
	int i, r;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle;
	static uint32_t *ptr;

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

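	/* Hold a reserved VMID for this process for the duration of the
	 * submission; the flags argument is unused here. */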
	flags = 0;
	r = amdgpu_vm_reserve_vmid(device_handle, flags);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_result_handle, &ib_result_cpu,
				    &ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
			       &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	ptr = ib_result_cpu;

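	/* Fill the IB with NOPs (0xffff1000 is the NOP packet value used by
	 * the other amdgpu tests) so the submission does no real work. */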
	for (i = 0; i < 16; ++i)
		ptr[i] = 0xffff1000;

	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
	ib_info.ib_mc_address = ib_result_mc_address;
	ib_info.size = 16;

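	/* Submit a single IB on GFX ring 0, with the BO list as the
	 * resources referenced by the submission. */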
	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
	ibs_request.ip_type = AMDGPU_HW_IP_GFX;
	ibs_request.ring = 0;
	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.resources = bo_list;
	ibs_request.fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
	CU_ASSERT_EQUAL(r, 0);

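	/* Wait for the submission to retire before tearing anything down. */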
	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
	fence_status.context = context_handle;
	fence_status.ip_type = AMDGPU_HW_IP_GFX;
	fence_status.ip_instance = 0;
	fence_status.ring = 0;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE, 0, &expired);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	flags = 0;
	r = amdgpu_vm_unreserve_vmid(device_handle, flags);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}

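/*
 * Allocate a 4 GiB VRAM BO, then map almost all of it at a VA that is
 * offset by one 4 KiB page from a 1 GiB-aligned range, so the mapping is
 * not huge-page aligned and exercises the kernel's split paths.
 */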
static void amdgpu_vm_unaligned_map(void)
{
	const uint64_t map_size = (4ULL << 30) - (2 << 12);
	struct amdgpu_bo_alloc_request request = {};
	amdgpu_bo_handle buf_handle;
	amdgpu_va_handle handle;
	uint64_t vmc_addr;
	int r;

	request.alloc_size = 4ULL << 30;
	request.phys_alignment = 4096;
	request.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM;
	request.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;

	r = amdgpu_bo_alloc(device_handle, &request, &buf_handle);
	/* Don't let the test fail if the device doesn't have enough VRAM */
	if (r)
		return;

	r = amdgpu_va_range_alloc(device_handle, amdgpu_gpu_va_range_general,
				  4ULL << 30, 1ULL << 30, 0, &vmc_addr,
				  &handle, 0);
	CU_ASSERT_EQUAL(r, 0);
	if (r)
		goto error_va_alloc;

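	/* Nudge the VA one page past the aligned start: the mapping below is
	 * page-aligned, but not aligned to any larger page size. */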
	vmc_addr += 1 << 12;

	r = amdgpu_bo_va_op(buf_handle, 0, map_size, vmc_addr, 0,
			    AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);
	if (r)
		goto error_va_range;

	amdgpu_bo_va_op(buf_handle, 0, map_size, vmc_addr, 0,
			AMDGPU_VA_OP_UNMAP);

error_va_range:
	amdgpu_va_range_free(handle);
error_va_alloc:
	amdgpu_bo_free(buf_handle);
}

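/*
 * Map a small GTT BO at the extreme ends of every VA range the kernel
 * advertises: the start and end of the low range, and of the high range
 * on ASICs that have one.
 */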
static void amdgpu_vm_mapping_test(void)
{
	struct amdgpu_bo_alloc_request req = {0};
	struct drm_amdgpu_info_device dev_info;
	const uint64_t size = 4096;
	amdgpu_bo_handle buf;
	uint64_t addr;
	int r;

	req.alloc_size = size;
	req.phys_alignment = 0;
	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
	req.flags = 0;

	r = amdgpu_bo_alloc(device_handle, &req, &buf);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_query_info(device_handle, AMDGPU_INFO_DEV_INFO,
			      sizeof(dev_info), &dev_info);
	CU_ASSERT_EQUAL(r, 0);

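	/* First and last mappable page of the main VA range. */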
	addr = dev_info.virtual_address_offset;
	r = amdgpu_bo_va_op(buf, 0, size, addr, 0, AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);

	addr = dev_info.virtual_address_max - size;
	r = amdgpu_bo_va_op(buf, 0, size, addr, 0, AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);

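	/* Newer ASICs report a second VA range above the hole in the
	 * virtual address space; exercise its ends too. */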
	if (dev_info.high_va_offset) {
		addr = dev_info.high_va_offset;
		r = amdgpu_bo_va_op(buf, 0, size, addr, 0, AMDGPU_VA_OP_MAP);
		CU_ASSERT_EQUAL(r, 0);

		addr = dev_info.high_va_max - size;
		r = amdgpu_bo_va_op(buf, 0, size, addr, 0, AMDGPU_VA_OP_MAP);
		CU_ASSERT_EQUAL(r, 0);
	}

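	/* Any mappings still attached to the BO are torn down by the kernel
	 * when the BO is freed, so no explicit unmaps are needed here. */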
	amdgpu_bo_free(buf);
}
256