/* nouveau.c, revision e6188e58 */
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <stdbool.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"

#ifdef DEBUG
drm_private uint32_t nouveau_debug = 0;

static void
debug_init(char *args)
{
	if (args) {
		int n = strtol(args, NULL, 0);
		if (n >= 0)
			nouveau_debug = n;
	}
}
#endif
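
/* Usage note (illustrative, not part of the original file): because
 * debug_init() parses NOUVEAU_LIBDRM_DEBUG with strtol(..., 0), both decimal
 * and hex values work, e.g.:
 *
 *	NOUVEAU_LIBDRM_DEBUG=0xff glxgears
 *
 * In DEBUG builds this sets the nouveau_debug level consulted elsewhere in
 * the library.
 */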

/* This is the old libdrm's version of nouveau_device_wrap(). The symbol is
 * kept here to prevent AIGLX from crashing if the DDX is linked against the
 * new libdrm but the DRI driver against the old one.
 */
int
nouveau_device_open_existing(struct nouveau_device **pdev, int close, int fd,
			     drm_context_t ctx)
{
	return -EACCES;
}

int
nouveau_device_wrap(int fd, int close, struct nouveau_device **pdev)
{
	struct nouveau_device_priv *nvdev = calloc(1, sizeof(*nvdev));
	struct nouveau_device *dev = &nvdev->base;
	uint64_t chipset, vram, gart, bousage;
	drmVersionPtr ver;
	int ret;
	char *tmp;

#ifdef DEBUG
	debug_init(getenv("NOUVEAU_LIBDRM_DEBUG"));
#endif

	if (!nvdev)
		return -ENOMEM;
	ret = pthread_mutex_init(&nvdev->lock, NULL);
	if (ret) {
		free(nvdev);
		return ret;
	}

	nvdev->base.fd = fd;

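	/* Pack the kernel's major.minor.patchlevel triple into a single
	 * word as (major << 24) | (minor << 8) | patchlevel, so e.g.
	 * version 1.6.0 becomes 0x01000600.
	 */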
	ver = drmGetVersion(fd);
	if (ver) dev->drm_version = (ver->version_major << 24) |
				    (ver->version_minor << 8) |
				     ver->version_patchlevel;
	drmFreeVersion(ver);

	if ( dev->drm_version != 0x00000010 &&
	    (dev->drm_version <  0x01000000 ||
	     dev->drm_version >= 0x02000000)) {
		nouveau_device_del(&dev);
		return -EINVAL;
	}

	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &chipset);
	if (ret == 0)
		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_FB_SIZE, &vram);
	if (ret == 0)
		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_AGP_SIZE, &gart);
	if (ret) {
		nouveau_device_del(&dev);
		return ret;
	}

	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_HAS_BO_USAGE, &bousage);
	if (ret == 0)
		nvdev->have_bo_usage = (bousage != 0);

	nvdev->close = close;

	tmp = getenv("NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT");
	if (tmp)
		nvdev->vram_limit_percent = atoi(tmp);
	else
		nvdev->vram_limit_percent = 80;
	tmp = getenv("NOUVEAU_LIBDRM_GART_LIMIT_PERCENT");
	if (tmp)
		nvdev->gart_limit_percent = atoi(tmp);
	else
		nvdev->gart_limit_percent = 80;
	DRMINITLISTHEAD(&nvdev->bo_list);
	nvdev->base.object.oclass = NOUVEAU_DEVICE_CLASS;
	nvdev->base.lib_version = 0x01000000;
	nvdev->base.chipset = chipset;
	nvdev->base.vram_size = vram;
	nvdev->base.gart_size = gart;
	nvdev->base.vram_limit =
		(nvdev->base.vram_size * nvdev->vram_limit_percent) / 100;
	nvdev->base.gart_limit =
		(nvdev->base.gart_size * nvdev->gart_limit_percent) / 100;

	*pdev = &nvdev->base;
	return 0;
}

int
nouveau_device_open(const char *busid, struct nouveau_device **pdev)
{
	int ret = -ENODEV, fd = drmOpen("nouveau", busid);
	if (fd >= 0) {
		ret = nouveau_device_wrap(fd, 1, pdev);
		if (ret)
			drmClose(fd);
	}
	return ret;
}
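
/* Usage sketch (illustrative, not part of the original file): opening a
 * nouveau device and releasing it again. A NULL busid is passed through to
 * drmOpen(), which should then match the first nouveau device it finds.
 *
 *	struct nouveau_device *dev;
 *	int ret = nouveau_device_open(NULL, &dev);
 *	if (ret == 0) {
 *		... use dev->chipset, dev->vram_size, etc. ...
 *		nouveau_device_del(&dev);
 *	}
 */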

void
nouveau_device_del(struct nouveau_device **pdev)
{
	struct nouveau_device_priv *nvdev = nouveau_device(*pdev);
	if (nvdev) {
		if (nvdev->close)
			drmClose(nvdev->base.fd);
		free(nvdev->client);
		pthread_mutex_destroy(&nvdev->lock);
		free(nvdev);
		*pdev = NULL;
	}
}

int
nouveau_getparam(struct nouveau_device *dev, uint64_t param, uint64_t *value)
{
	struct drm_nouveau_getparam r = { param, 0 };
	int fd = dev->fd, ret =
		drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &r, sizeof(r));
	*value = r.value;
	return ret;
}
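
/* Example (illustrative, not part of the original file): querying a single
 * parameter with nouveau_getparam(), as nouveau_device_wrap() does above.
 *
 *	uint64_t chipset;
 *	if (nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &chipset) == 0)
 *		printf("chipset: nv%02x\n", (unsigned)chipset);
 */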

int
nouveau_setparam(struct nouveau_device *dev, uint64_t param, uint64_t value)
{
	struct drm_nouveau_setparam r = { param, value };
	return drmCommandWrite(dev->fd, DRM_NOUVEAU_SETPARAM, &r, sizeof(r));
}

int
nouveau_client_new(struct nouveau_device *dev, struct nouveau_client **pclient)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_client_priv *pcli;
	int id = 0, i, ret = -ENOMEM;
	uint32_t *clients;

	pthread_mutex_lock(&nvdev->lock);

	/* A set bit in client[] marks an id in use (nouveau_client_del()
	 * clears it again), so search each word for its first clear bit
	 * to find a free id.
	 */
	for (i = 0; i < nvdev->nr_client; i++) {
		id = ffs(~nvdev->client[i]) - 1;
		if (id >= 0)
			goto out;
	}

	clients = realloc(nvdev->client, sizeof(uint32_t) * (i + 1));
	if (!clients)
		goto unlock;
	nvdev->client = clients;
	nvdev->client[i] = 0;
	nvdev->nr_client++;

out:
	pcli = calloc(1, sizeof(*pcli));
	if (pcli) {
		nvdev->client[i] |= (1 << id);
		pcli->base.device = dev;
		pcli->base.id = (i * 32) + id;
		ret = 0;
	}

	*pclient = &pcli->base;

unlock:
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

void
nouveau_client_del(struct nouveau_client **pclient)
{
	struct nouveau_client_priv *pcli = nouveau_client(*pclient);
	struct nouveau_device_priv *nvdev;
	if (pcli) {
		int id = pcli->base.id;
		nvdev = nouveau_device(pcli->base.device);
		pthread_mutex_lock(&nvdev->lock);
		nvdev->client[id / 32] &= ~(1 << (id % 32));
		pthread_mutex_unlock(&nvdev->lock);
		free(pcli->kref);
		free(pcli);
	}
}
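
/* Usage sketch (illustrative, not part of the original file): clients are
 * small per-caller handles that the pushbuf code uses to track buffer
 * references. A typical lifecycle is:
 *
 *	struct nouveau_client *client;
 *	if (nouveau_client_new(dev, &client) == 0) {
 *		... create pushbufs/bufctxs against client ...
 *		nouveau_client_del(&client);
 *	}
 */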

int
nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
		   uint32_t oclass, void *data, uint32_t length,
		   struct nouveau_object **pobj)
{
	struct nouveau_device *dev;
	struct nouveau_object *obj;
	int ret = -EINVAL;

	if (length == 0)
		length = sizeof(struct nouveau_object *);
	obj = malloc(sizeof(*obj) + length);
	if (!obj)
		return -ENOMEM;
	obj->parent = parent;
	obj->handle = handle;
	obj->oclass = oclass;
	obj->length = length;
	obj->data = obj + 1;
	if (data)
		memcpy(obj->data, data, length);
	*(struct nouveau_object **)obj->data = obj;

	dev = nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
	switch (parent->oclass) {
	case NOUVEAU_DEVICE_CLASS:
		switch (obj->oclass) {
		case NOUVEAU_FIFO_CHANNEL_CLASS:
			if (dev->chipset < 0xc0)
				ret = abi16_chan_nv04(obj);
			else
			if (dev->chipset < 0xe0)
				ret = abi16_chan_nvc0(obj);
			else
				ret = abi16_chan_nve0(obj);
			break;
		default:
			break;
		}
		break;
	case NOUVEAU_FIFO_CHANNEL_CLASS:
		switch (obj->oclass) {
		case NOUVEAU_NOTIFIER_CLASS:
			ret = abi16_ntfy(obj);
			break;
		default:
			ret = abi16_engobj(obj);
			break;
		}
		break;
	default:
		break;
	}

	if (ret) {
		free(obj);
		return ret;
	}

	*pobj = obj;
	return 0;
}
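
/* Usage sketch (illustrative, not part of the original file): creating a
 * FIFO channel on the device. The argument struct and the ctxdma handles
 * shown here are assumptions about a pre-Fermi caller; Fermi and Kepler
 * chipsets take their own class-specific structs from nouveau.h.
 *
 *	struct nv04_fifo fifo = { .vram = 0xbeef0201, .gart = 0xbeef0202 };
 *	struct nouveau_object *chan;
 *	ret = nouveau_object_new(&dev->object, 0, NOUVEAU_FIFO_CHANNEL_CLASS,
 *				 &fifo, sizeof(fifo), &chan);
 */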

void
nouveau_object_del(struct nouveau_object **pobj)
{
	struct nouveau_object *obj = *pobj;
	struct nouveau_device *dev;
	if (obj) {
		dev = nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
		if (obj->oclass == NOUVEAU_FIFO_CHANNEL_CLASS) {
			struct drm_nouveau_channel_free req;
			req.channel = obj->handle;
			drmCommandWrite(dev->fd, DRM_NOUVEAU_CHANNEL_FREE,
					&req, sizeof(req));
		} else {
			struct drm_nouveau_gpuobj_free req;
			req.channel = obj->parent->handle;
			req.handle  = obj->handle;
			drmCommandWrite(dev->fd, DRM_NOUVEAU_GPUOBJ_FREE,
					&req, sizeof(req));
		}
	}
	free(obj);
	*pobj = NULL;
}

void *
nouveau_object_find(struct nouveau_object *obj, uint32_t pclass)
{
	while (obj && obj->oclass != pclass) {
		obj = obj->parent;
		if (pclass == NOUVEAU_PARENT_CLASS)
			break;
	}
	return obj;
}
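
/* Example (illustrative, not part of the original file): walking up the
 * parent chain, as done throughout this file, to recover the device from an
 * arbitrary object such as a channel.
 *
 *	struct nouveau_device *dev =
 *		nouveau_object_find(chan, NOUVEAU_DEVICE_CLASS);
 */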

static void
nouveau_bo_del(struct nouveau_bo *bo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_gem_close req = { bo->handle };

	if (nvbo->head.next) {
		pthread_mutex_lock(&nvdev->lock);
		if (atomic_read(&nvbo->refcnt) == 0) {
			DRMLISTDEL(&nvbo->head);
			/*
			 * This bo has to be closed with the lock held because
			 * gem handles are not refcounted. If a shared bo is
			 * closed and re-opened in another thread a race
			 * against DRM_IOCTL_GEM_OPEN or drmPrimeFDToHandle
			 * might cause the bo to be closed accidentally while
			 * re-importing.
			 */
			drmIoctl(bo->device->fd, DRM_IOCTL_GEM_CLOSE, &req);
		}
		pthread_mutex_unlock(&nvdev->lock);
	} else {
		drmIoctl(bo->device->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}
	if (bo->map)
		drm_munmap(bo->map, bo->size);
	free(nvbo);
}

int
nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
	       uint64_t size, union nouveau_bo_config *config,
	       struct nouveau_bo **pbo)
{
	struct nouveau_bo_priv *nvbo = calloc(1, sizeof(*nvbo));
	struct nouveau_bo *bo = &nvbo->base;
	int ret;

	if (!nvbo)
		return -ENOMEM;
	atomic_set(&nvbo->refcnt, 1);
	bo->device = dev;
	bo->flags = flags;
	bo->size = size;

	ret = abi16_bo_init(bo, align, config);
	if (ret) {
		free(nvbo);
		return ret;
	}

	*pbo = bo;
	return 0;
}
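
/* Usage sketch (illustrative, not part of the original file): allocating a
 * mappable 64 KiB buffer in VRAM and dropping the reference again. The
 * placement flags come from nouveau.h; a zeroed config is assumed to request
 * plain linear layout.
 *
 *	union nouveau_bo_config config = { };
 *	struct nouveau_bo *bo = NULL;
 *	ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM | NOUVEAU_BO_MAP, 0,
 *			     65536, &config, &bo);
 *	if (ret == 0)
 *		nouveau_bo_ref(NULL, &bo);
 */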

static int
nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
		       struct nouveau_bo **pbo, int name)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct drm_nouveau_gem_info req = { .handle = handle };
	struct nouveau_bo_priv *nvbo;
	int ret;

	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->base.handle == handle) {
			if (atomic_inc_return(&nvbo->refcnt) == 1) {
				/*
				 * Uh oh, this bo is dead and someone else
				 * will free it, but because refcnt is
				 * now non-zero fortunately they won't
				 * call the ioctl to close the bo.
				 *
				 * Remove this bo from the list so other
				 * calls to nouveau_bo_wrap_locked will
				 * see our replacement nvbo.
				 */
				DRMLISTDEL(&nvbo->head);
				if (!name)
					name = nvbo->name;
				break;
			}

			*pbo = &nvbo->base;
			return 0;
		}
	}

	ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_INFO,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvbo = calloc(1, sizeof(*nvbo));
	if (nvbo) {
		atomic_set(&nvbo->refcnt, 1);
		nvbo->base.device = dev;
		abi16_bo_info(&nvbo->base, &req);
		nvbo->name = name;
		DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		*pbo = &nvbo->base;
		return 0;
	}

	return -ENOMEM;
}

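/* Publish a bo on the device-wide bo_list so nouveau_bo_wrap_locked() and
 * name lookups can find it. The unlocked head.next test is an optimisation;
 * it is re-checked under the lock so two racing callers cannot add the same
 * bo twice.
 */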
static void
nouveau_bo_make_global(struct nouveau_bo_priv *nvbo)
{
	if (!nvbo->head.next) {
		struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
		pthread_mutex_lock(&nvdev->lock);
		if (!nvbo->head.next)
			DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		pthread_mutex_unlock(&nvdev->lock);
	}
}

int
nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
		struct nouveau_bo **pbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	int ret;
	pthread_mutex_lock(&nvdev->lock);
	ret = nouveau_bo_wrap_locked(dev, handle, pbo, 0);
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

int
nouveau_bo_name_ref(struct nouveau_device *dev, uint32_t name,
		    struct nouveau_bo **pbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_bo_priv *nvbo;
	struct drm_gem_open req = { .name = name };
	int ret;

	pthread_mutex_lock(&nvdev->lock);
	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->name == name) {
			ret = nouveau_bo_wrap_locked(dev, nvbo->base.handle,
						     pbo, name);
			pthread_mutex_unlock(&nvdev->lock);
			return ret;
		}
	}

	ret = drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req);
	if (ret == 0) {
		ret = nouveau_bo_wrap_locked(dev, req.handle, pbo, name);
	}

	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

int
nouveau_bo_name_get(struct nouveau_bo *bo, uint32_t *name)
{
	struct drm_gem_flink req = { .handle = bo->handle };
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	*name = nvbo->name;
	if (!*name) {
		int ret = drmIoctl(bo->device->fd, DRM_IOCTL_GEM_FLINK, &req);

		if (ret) {
			*name = 0;
			return ret;
		}
		nvbo->name = *name = req.name;

		nouveau_bo_make_global(nvbo);
	}
	return 0;
}
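
/* Usage sketch (illustrative, not part of the original file): sharing a bo
 * between processes via a GEM flink name. Process A exports, process B
 * imports; flink names are globally guessable, so the prime fd path below
 * is generally preferred where available.
 *
 *	uint32_t name;
 *	if (nouveau_bo_name_get(bo, &name) == 0)
 *		... pass name to the other process ...
 *
 *	struct nouveau_bo *shared = NULL;
 *	ret = nouveau_bo_name_ref(dev, name, &shared);
 */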

void
nouveau_bo_ref(struct nouveau_bo *bo, struct nouveau_bo **pref)
{
	struct nouveau_bo *ref = *pref;
	if (bo) {
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}
	if (ref) {
		if (atomic_dec_and_test(&nouveau_bo(ref)->refcnt))
			nouveau_bo_del(ref);
	}
	*pref = bo;
}

int
nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
			    struct nouveau_bo **bo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	int ret;
	unsigned int handle;

	nouveau_bo_ref(NULL, bo);

	pthread_mutex_lock(&nvdev->lock);
	ret = drmPrimeFDToHandle(dev->fd, prime_fd, &handle);
	if (ret == 0) {
		ret = nouveau_bo_wrap_locked(dev, handle, bo, 0);
	}
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

int
nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret;

	ret = drmPrimeHandleToFD(bo->device->fd, nvbo->base.handle,
				 DRM_CLOEXEC, prime_fd);
	if (ret)
		return ret;

	nouveau_bo_make_global(nvbo);
	return 0;
}
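
/* Usage sketch (illustrative, not part of the original file): dma-buf
 * (prime) sharing. Unlike a flink name, the exported file descriptor can be
 * passed over a unix socket and carries normal fd access control.
 *
 *	int fd;
 *	if (nouveau_bo_set_prime(bo, &fd) == 0) {
 *		... send fd to the importing process ...
 *		close(fd);
 *	}
 *
 *	struct nouveau_bo *imported = NULL;
 *	ret = nouveau_bo_prime_handle_ref(dev, fd, &imported);
 */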

int
nouveau_bo_wait(struct nouveau_bo *bo, uint32_t access,
		struct nouveau_client *client)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_cpu_prep req;
	struct nouveau_pushbuf *push;
	int ret = 0;

	if (!(access & NOUVEAU_BO_RDWR))
		return 0;

	push = cli_push_get(client, bo);
	if (push && push->channel)
		nouveau_pushbuf_kick(push, push->channel);

	if (!nvbo->head.next && !(nvbo->access & NOUVEAU_BO_WR) &&
				!(access & NOUVEAU_BO_WR))
		return 0;

	req.handle = bo->handle;
	req.flags = 0;
	if (access & NOUVEAU_BO_WR)
		req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;
	if (access & NOUVEAU_BO_NOBLOCK)
		req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT;

	ret = drmCommandWrite(bo->device->fd, DRM_NOUVEAU_GEM_CPU_PREP,
			      &req, sizeof(req));
	if (ret == 0)
		nvbo->access = 0;
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *bo, uint32_t access,
	       struct nouveau_client *client)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	if (bo->map == NULL) {
		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
			       MAP_SHARED, bo->device->fd, nvbo->map_handle);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
			return -errno;
		}
	}
	return nouveau_bo_wait(bo, access, client);
}
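
/* Usage sketch (illustrative, not part of the original file): mapping a bo
 * for CPU writes. nouveau_bo_map() mmaps on first use and then calls
 * nouveau_bo_wait(), which kicks the client's pending pushbuf and issues
 * CPU_PREP so the GPU is done with the buffer before the CPU touches it.
 *
 *	if (nouveau_bo_map(bo, NOUVEAU_BO_WR, client) == 0)
 *		memset(bo->map, 0, bo->size);
 */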
623