amdgpu_vamgr.c revision 00a23bda
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

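/*
 * This file implements the userspace manager for amdgpu GPU virtual
 * address (VA) ranges.  Free space is tracked as a list of holes; an
 * allocation carves an aligned block out of a hole, and a free merges
 * the block back, coalescing with adjacent holes where possible.
 */
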
int amdgpu_va_range_query(amdgpu_device_handle dev,
			  enum amdgpu_gpu_va_range type,
			  uint64_t *start, uint64_t *end)
{
	if (type != amdgpu_gpu_va_range_general)
		return -EINVAL;

	*start = dev->dev_info.virtual_address_offset;
	*end = dev->dev_info.virtual_address_max;
	return 0;
}

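/*
 * Example (illustrative sketch, not part of the library): query the
 * general VA range before carving allocations out of it.  "dev" is
 * assumed to be a device handle from amdgpu_device_initialize().
 *
 *	uint64_t va_start, va_end;
 *
 *	if (amdgpu_va_range_query(dev, amdgpu_gpu_va_range_general,
 *				  &va_start, &va_end) == 0)
 *		printf("GPU VA range: 0x%" PRIx64 " - 0x%" PRIx64 "\n",
 *		       va_start, va_end);
 */
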
drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
				   uint64_t max, uint64_t alignment)
{
	struct amdgpu_bo_va_hole *n;

	mgr->va_max = max;
	mgr->va_alignment = alignment;

	list_inithead(&mgr->va_holes);
	pthread_mutex_init(&mgr->bo_va_mutex, NULL);
	pthread_mutex_lock(&mgr->bo_va_mutex);
	n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
	n->size = mgr->va_max - start;
	n->offset = start;
	list_add(&n->list, &mgr->va_holes);
	pthread_mutex_unlock(&mgr->bo_va_mutex);
}

drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
{
	struct amdgpu_bo_va_hole *hole, *tmp;
	LIST_FOR_EACH_ENTRY_SAFE(hole, tmp, &mgr->va_holes, list) {
		list_del(&hole->list);
		free(hole);
	}
	pthread_mutex_destroy(&mgr->bo_va_mutex);
}

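/*
 * The hole list is kept sorted by decreasing offset, so walking it in
 * reverse visits holes from the bottom of the address space upwards.
 * The search below is therefore a first-fit allocator that prefers the
 * lowest suitable address, keeping any alignment "waste" in front of
 * the returned block as a new hole.
 */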
static drm_private uint64_t
amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
		     uint64_t alignment, uint64_t base_required)
{
	struct amdgpu_bo_va_hole *hole, *n;
	uint64_t offset = 0, waste = 0;

	alignment = MAX2(alignment, mgr->va_alignment);
	size = ALIGN(size, mgr->va_alignment);

	if (base_required % alignment)
		return AMDGPU_INVALID_VA_ADDRESS;

	pthread_mutex_lock(&mgr->bo_va_mutex);
	LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
		if (base_required) {
			/* Skip holes that cannot contain the requested range. */
			if (hole->offset > base_required ||
			    (hole->offset + hole->size) < (base_required + size))
				continue;
			waste = base_required - hole->offset;
			offset = base_required;
		} else {
			offset = hole->offset;
			waste = offset % alignment;
			waste = waste ? alignment - waste : 0;
			offset += waste;
			if (offset >= (hole->offset + hole->size)) {
				continue;
			}
		}
		/* Exact fit with no alignment waste: consume the hole. */
		if (!waste && hole->size == size) {
			offset = hole->offset;
			list_del(&hole->list);
			free(hole);
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}
		/* The hole is larger than needed: keep the waste in front
		 * as a new hole and shrink this one past the allocation. */
		if ((hole->size - waste) > size) {
			if (waste) {
				n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
				n->size = waste;
				n->offset = hole->offset;
				list_add(&n->list, &hole->list);
			}
			hole->size -= (size + waste);
			hole->offset += size + waste;
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}
		/* The remainder after the waste fits exactly: the hole
		 * shrinks to just the waste. */
		if ((hole->size - waste) == size) {
			hole->size = waste;
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}
	}

	pthread_mutex_unlock(&mgr->bo_va_mutex);
	return AMDGPU_INVALID_VA_ADDRESS;
}

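/*
 * Return a block to the hole list.  The freed range is merged with the
 * adjacent hole above and/or below it when possible, so the list never
 * contains two holes that touch.
 */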
static drm_private void
amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
{
	struct amdgpu_bo_va_hole *hole, *next;

	if (va == AMDGPU_INVALID_VA_ADDRESS)
		return;

	size = ALIGN(size, mgr->va_alignment);

	pthread_mutex_lock(&mgr->bo_va_mutex);
	/* Start from the list head as a sentinel, then find the last hole
	 * whose offset is still >= va (the list is sorted by decreasing
	 * offset). */
	hole = container_of(&mgr->va_holes, hole, list);
	LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
		if (next->offset < va)
			break;
		hole = next;
	}

	if (&hole->list != &mgr->va_holes) {
		/* Grow the upper hole if it's adjacent */
		if (hole->offset == (va + size)) {
			hole->offset = va;
			hole->size += size;
			/* Merge the lower hole if it's adjacent too */
			if (next != hole &&
			    &next->list != &mgr->va_holes &&
			    (next->offset + next->size) == va) {
				next->size += hole->size;
				list_del(&hole->list);
				free(hole);
			}
			goto out;
		}
	}

	/* Grow the lower hole if it's adjacent */
	if (next != hole && &next->list != &mgr->va_holes &&
	    (next->offset + next->size) == va) {
		next->size += size;
		goto out;
	}

	/* FIXME: on allocation failure we just leak this piece of the
	 * virtual address space; maybe print a warning. */
	next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
	if (next) {
		next->size = size;
		next->offset = va;
		list_add(&next->list, &hole->list);
	}

out:
	pthread_mutex_unlock(&mgr->bo_va_mutex);
}

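/*
 * Allocate a block of GPU virtual address space.  The flags select one
 * of four managers (low or high address space, each with a 32-bit
 * window); if a 64-bit allocation fails, the 32-bit window of the same
 * half is tried as a fallback.
 */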
int amdgpu_va_range_alloc(amdgpu_device_handle dev,
			  enum amdgpu_gpu_va_range va_range_type,
			  uint64_t size,
			  uint64_t va_base_alignment,
			  uint64_t va_base_required,
			  uint64_t *va_base_allocated,
			  amdgpu_va_handle *va_range_handle,
			  uint64_t flags)
{
	struct amdgpu_bo_va_mgr *vamgr;

	/* Clear the flag when the high VA manager is not initialized */
	if (flags & AMDGPU_VA_RANGE_HIGH && !dev->vamgr_high_32.va_max)
		flags &= ~AMDGPU_VA_RANGE_HIGH;

	if (flags & AMDGPU_VA_RANGE_HIGH) {
		if (flags & AMDGPU_VA_RANGE_32_BIT)
			vamgr = &dev->vamgr_high_32;
		else
			vamgr = &dev->vamgr_high;
	} else {
		if (flags & AMDGPU_VA_RANGE_32_BIT)
			vamgr = &dev->vamgr_32;
		else
			vamgr = &dev->vamgr;
	}

	va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
	size = ALIGN(size, vamgr->va_alignment);

	*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
					va_base_alignment, va_base_required);

	if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
	    (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
		/* Fall back to the 32-bit address range */
		if (flags & AMDGPU_VA_RANGE_HIGH)
			vamgr = &dev->vamgr_high_32;
		else
			vamgr = &dev->vamgr_32;
		*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
					va_base_alignment, va_base_required);
	}

	if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
		struct amdgpu_va *va;

		va = calloc(1, sizeof(struct amdgpu_va));
		if (!va) {
			amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
			return -ENOMEM;
		}
		va->dev = dev;
		va->address = *va_base_allocated;
		va->size = size;
		va->range = va_range_type;
		va->vamgr = vamgr;
		*va_range_handle = va;
	} else {
		return -EINVAL;
	}

	return 0;
}

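/*
 * Example (illustrative sketch, not part of the library): a typical
 * round trip pairing amdgpu_va_range_alloc() with amdgpu_va_range_free().
 * The 64KB size and alignment are assumptions for the example.
 *
 *	uint64_t va;
 *	amdgpu_va_handle handle;
 *
 *	if (!amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
 *				   0x10000, 0x10000, 0, &va, &handle, 0)) {
 *		... map a BO at va with amdgpu_bo_va_op() ...
 *		amdgpu_va_range_free(handle);
 *	}
 */
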
int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
{
	if (!va_range_handle || !va_range_handle->address)
		return 0;

	amdgpu_vamgr_free_va(va_range_handle->vamgr,
			va_range_handle->address,
			va_range_handle->size);
	free(va_range_handle);
	return 0;
}