amdgpu_device.c revision d8807b2f
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24/**
25 * \file amdgpu_device.c
26 *
27 *  Implementation of functions for AMD GPU device
28 *
29 */
30
31#ifdef HAVE_CONFIG_H
32#include "config.h"
33#endif
34
35#include <sys/stat.h>
36#include <errno.h>
37#include <string.h>
38#include <stdio.h>
39#include <stdlib.h>
40#include <unistd.h>
41
42#include "xf86drm.h"
43#include "amdgpu_drm.h"
44#include "amdgpu_internal.h"
45#include "util_hash_table.h"
46#include "util_math.h"
47
48#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
49#define UINT_TO_PTR(x) ((void *)((intptr_t)(x)))
50
51static pthread_mutex_t fd_mutex = PTHREAD_MUTEX_INITIALIZER;
52static struct util_hash_table *fd_tab;
53
static unsigned handle_hash(void *key)
{
	/* GEM handles are small integers smuggled through void*; the
	 * handle value itself serves as the hash. */
	return (unsigned)(intptr_t)key;
}
58
static int handle_compare(void *key1, void *key2)
{
	/* Hash-table comparator contract: 0 means "equal". */
	unsigned a = (unsigned)(intptr_t)key1;
	unsigned b = (unsigned)(intptr_t)key2;

	return a != b;
}
63
static int fd_compare(void *key1, void *key2);

static unsigned fd_hash(void *key)
{
	/* Hash an fd by the primary-node path of the device it refers
	 * to, so two fds opened on the same device land in the same
	 * bucket (see fd_compare). */
	char *name = drmGetPrimaryDeviceNameFromFd((int)(intptr_t)key);
	unsigned sum = 0;
	char *p;

	/* Lookup failure degrades to bucket 0; fd_compare then decides. */
	if (!name)
		return 0;

	for (p = name; *p != '\0'; p++)
		sum += *p;

	free(name);
	return sum;
}
81
static int fd_compare(void *key1, void *key2)
{
	/* Two fds are "equal" when they name the same primary device
	 * node. Returns 0 on equality, per the hash-table contract. */
	char *name1 = drmGetPrimaryDeviceNameFromFd((int)(intptr_t)key1);
	char *name2 = drmGetPrimaryDeviceNameFromFd((int)(intptr_t)key2);
	int diff = 0;

	if (name1 && name2)
		diff = strcmp(name1, name2);
	/* else: a failed lookup is treated as a match (diff stays 0),
	 * mirroring fd_hash's fallback bucket. */

	free(name1);
	free(name2);

	return diff;
}
102
/**
* Query whether an fd is DRM-authenticated.
*
* \param   fd   - \c [in]  File descriptor for AMD GPU device
* \param   auth - \c [out] Whether the fd is authenticated:
*                          for a render node fd, auth is set to 0;
*                          for a legacy (primary node) fd, the
*                          authentication state is queried from the kernel
*
* \return   0 on success\n
*          >0 - AMD specific error code\n
*          <0 - Negative POSIX Error code
*/
115static int amdgpu_get_auth(int fd, int *auth)
116{
117	int r = 0;
118	drm_client_t client = {};
119
120	if (drmGetNodeTypeFromFd(fd) == DRM_NODE_RENDER)
121		*auth = 0;
122	else {
123		client.idx = 0;
124		r = drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client);
125		if (!r)
126			*auth = client.auth;
127	}
128	return r;
129}
130
131static void amdgpu_device_free_internal(amdgpu_device_handle dev)
132{
133	const struct amdgpu_asic_id *id;
134	amdgpu_vamgr_deinit(&dev->vamgr_32);
135	amdgpu_vamgr_deinit(&dev->vamgr);
136	util_hash_table_destroy(dev->bo_flink_names);
137	util_hash_table_destroy(dev->bo_handles);
138	pthread_mutex_destroy(&dev->bo_table_mutex);
139	util_hash_table_remove(fd_tab, UINT_TO_PTR(dev->fd));
140	close(dev->fd);
141	if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
142		close(dev->flink_fd);
143	if (dev->asic_ids) {
144		for (id = dev->asic_ids; id->did; id++)
145			free(id->marketing_name);
146
147		free(dev->asic_ids);
148	}
149	free(dev);
150}
151
152/**
153 * Assignment between two amdgpu_device pointers with reference counting.
154 *
155 * Usage:
156 *    struct amdgpu_device *dst = ... , *src = ...;
157 *
158 *    dst = src;
159 *    // No reference counting. Only use this when you need to move
160 *    // a reference from one pointer to another.
161 *
162 *    amdgpu_device_reference(&dst, src);
163 *    // Reference counters are updated. dst is decremented and src is
164 *    // incremented. dst is freed if its reference counter is 0.
165 */
static void amdgpu_device_reference(struct amdgpu_device **dst,
			     struct amdgpu_device *src)
{
	/* update_references() bumps src's refcount and drops *dst's; it
	 * returns true when *dst's count reached zero, in which case the
	 * old device must be freed before *dst is redirected to src. */
	if (update_references(&(*dst)->refcount, &src->refcount))
		amdgpu_device_free_internal(*dst);
	*dst = src;
}
173
174int amdgpu_device_initialize(int fd,
175			     uint32_t *major_version,
176			     uint32_t *minor_version,
177			     amdgpu_device_handle *device_handle)
178{
179	struct amdgpu_device *dev;
180	drmVersionPtr version;
181	int r;
182	int flag_auth = 0;
183	int flag_authexist=0;
184	uint32_t accel_working = 0;
185	uint64_t start, max;
186
187	*device_handle = NULL;
188
189	pthread_mutex_lock(&fd_mutex);
190	if (!fd_tab)
191		fd_tab = util_hash_table_create(fd_hash, fd_compare);
192	r = amdgpu_get_auth(fd, &flag_auth);
193	if (r) {
194		pthread_mutex_unlock(&fd_mutex);
195		return r;
196	}
197	dev = util_hash_table_get(fd_tab, UINT_TO_PTR(fd));
198	if (dev) {
199		r = amdgpu_get_auth(dev->fd, &flag_authexist);
200		if (r) {
201			pthread_mutex_unlock(&fd_mutex);
202			return r;
203		}
204		if ((flag_auth) && (!flag_authexist)) {
205			dev->flink_fd = dup(fd);
206		}
207		*major_version = dev->major_version;
208		*minor_version = dev->minor_version;
209		amdgpu_device_reference(device_handle, dev);
210		pthread_mutex_unlock(&fd_mutex);
211		return 0;
212	}
213
214	dev = calloc(1, sizeof(struct amdgpu_device));
215	if (!dev) {
216		pthread_mutex_unlock(&fd_mutex);
217		return -ENOMEM;
218	}
219
220	dev->fd = -1;
221	dev->flink_fd = -1;
222
223	atomic_set(&dev->refcount, 1);
224
225	version = drmGetVersion(fd);
226	if (version->version_major != 3) {
227		fprintf(stderr, "%s: DRM version is %d.%d.%d but this driver is "
228			"only compatible with 3.x.x.\n",
229			__func__,
230			version->version_major,
231			version->version_minor,
232			version->version_patchlevel);
233		drmFreeVersion(version);
234		r = -EBADF;
235		goto cleanup;
236	}
237
238	dev->fd = dup(fd);
239	dev->flink_fd = dev->fd;
240	dev->major_version = version->version_major;
241	dev->minor_version = version->version_minor;
242	drmFreeVersion(version);
243
244	dev->bo_flink_names = util_hash_table_create(handle_hash,
245						     handle_compare);
246	dev->bo_handles = util_hash_table_create(handle_hash, handle_compare);
247	pthread_mutex_init(&dev->bo_table_mutex, NULL);
248
249	/* Check if acceleration is working. */
250	r = amdgpu_query_info(dev, AMDGPU_INFO_ACCEL_WORKING, 4, &accel_working);
251	if (r)
252		goto cleanup;
253	if (!accel_working) {
254		r = -EBADF;
255		goto cleanup;
256	}
257
258	r = amdgpu_query_gpu_info_init(dev);
259	if (r)
260		goto cleanup;
261
262	amdgpu_vamgr_init(&dev->vamgr, dev->dev_info.virtual_address_offset,
263			  dev->dev_info.virtual_address_max,
264			  dev->dev_info.virtual_address_alignment);
265
266	max = MIN2(dev->dev_info.virtual_address_max, 0xffffffff);
267	start = amdgpu_vamgr_find_va(&dev->vamgr,
268				     max - dev->dev_info.virtual_address_offset,
269				     dev->dev_info.virtual_address_alignment, 0);
270	if (start > 0xffffffff)
271		goto free_va; /* shouldn't get here */
272
273	amdgpu_vamgr_init(&dev->vamgr_32, start, max,
274			  dev->dev_info.virtual_address_alignment);
275
276	r = amdgpu_parse_asic_ids(&dev->asic_ids);
277	if (r) {
278		fprintf(stderr, "%s: Cannot parse ASIC IDs, 0x%x.",
279			__func__, r);
280	}
281
282	*major_version = dev->major_version;
283	*minor_version = dev->minor_version;
284	*device_handle = dev;
285	util_hash_table_set(fd_tab, UINT_TO_PTR(dev->fd), dev);
286	pthread_mutex_unlock(&fd_mutex);
287
288	return 0;
289
290free_va:
291	r = -ENOMEM;
292	amdgpu_vamgr_free_va(&dev->vamgr, start,
293			     max - dev->dev_info.virtual_address_offset);
294	amdgpu_vamgr_deinit(&dev->vamgr);
295
296cleanup:
297	if (dev->fd >= 0)
298		close(dev->fd);
299	free(dev);
300	pthread_mutex_unlock(&fd_mutex);
301	return r;
302}
303
int amdgpu_device_deinitialize(amdgpu_device_handle dev)
{
	/* Drop one reference; amdgpu_device_reference() frees the
	 * context when the count reaches zero. Always returns 0. */
	amdgpu_device_reference(&dev, NULL);
	return 0;
}
309
310const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
311{
312	const struct amdgpu_asic_id *id;
313
314	if (!dev->asic_ids)
315		return NULL;
316
317	for (id = dev->asic_ids; id->did; id++) {
318		if ((id->did == dev->info.asic_id) &&
319		    (id->rid == dev->info.pci_rev_id))
320			return id->marketing_name;
321	}
322
323	return NULL;
324}
325