/* nouveau.c, revision 857b0bc6 */
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>	/* ffs() */
#include <stdbool.h>
#include <assert.h>
#include <errno.h>
#include <sys/mman.h>
#include <fcntl.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"

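/*
 * Library-wide debug level, controlled at runtime through the
 * NOUVEAU_LIBDRM_DEBUG environment variable.  The value is parsed with
 * strtol() base 0, so decimal, 0-prefixed octal and 0x-prefixed hex are
 * all accepted.
 */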
#ifdef DEBUG
uint32_t nouveau_debug = 0;

static void
debug_init(char *args)
{
	if (args) {
		int n = strtol(args, NULL, 0);
		if (n >= 0)
			nouveau_debug = n;
	}
}
#endif

/* This is the old libdrm's version of nouveau_device_wrap().  The symbol
 * is kept here to prevent AIGLX from crashing when the DDX is linked
 * against the new libdrm but the DRI driver against the old.
 */
int
nouveau_device_open_existing(struct nouveau_device **pdev, int close, int fd,
			     drm_context_t ctx)
{
	return -EACCES;
}

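/*
 * Wrap an already-open DRM fd in a nouveau_device.  The kernel driver
 * version is packed as (major << 24) | (minor << 8) | patchlevel; only
 * the old 0.0.16 ABI and the 1.x.x series are accepted.  VRAM and GART
 * usage limits default to 80% of the reported sizes and can be overridden
 * with the NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT and
 * NOUVEAU_LIBDRM_GART_LIMIT_PERCENT environment variables.  With close
 * set, nouveau_device_del() will also close the fd.
 */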
int
nouveau_device_wrap(int fd, int close, struct nouveau_device **pdev)
{
	struct nouveau_device_priv *nvdev = calloc(1, sizeof(*nvdev));
	struct nouveau_device *dev;
	uint64_t chipset, vram, gart, bousage;
	drmVersionPtr ver;
	int ret;
	char *tmp;

#ifdef DEBUG
	debug_init(getenv("NOUVEAU_LIBDRM_DEBUG"));
#endif

	if (!nvdev)
		return -ENOMEM;
	dev = &nvdev->base;
	ret = pthread_mutex_init(&nvdev->lock, NULL);
	if (ret) {
		free(nvdev);
		return ret;
	}

	nvdev->base.fd = fd;

	ver = drmGetVersion(fd);
	if (ver) dev->drm_version = (ver->version_major << 24) |
				    (ver->version_minor << 8) |
				     ver->version_patchlevel;
	drmFreeVersion(ver);

	if (dev->drm_version != 0x00000010 &&
	    (dev->drm_version <  0x01000000 ||
	     dev->drm_version >= 0x02000000)) {
		nouveau_device_del(&dev);
		return -EINVAL;
	}
	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &chipset);
	if (ret == 0)
		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_FB_SIZE, &vram);
	if (ret == 0)
		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_AGP_SIZE, &gart);
	if (ret) {
		nouveau_device_del(&dev);
		return ret;
	}

	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_HAS_BO_USAGE, &bousage);
	if (ret == 0)
		nvdev->have_bo_usage = (bousage != 0);

	nvdev->close = close;

	tmp = getenv("NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT");
	if (tmp)
		nvdev->vram_limit_percent = atoi(tmp);
	else
		nvdev->vram_limit_percent = 80;
	tmp = getenv("NOUVEAU_LIBDRM_GART_LIMIT_PERCENT");
	if (tmp)
		nvdev->gart_limit_percent = atoi(tmp);
	else
		nvdev->gart_limit_percent = 80;
	DRMINITLISTHEAD(&nvdev->bo_list);
	nvdev->base.object.oclass = NOUVEAU_DEVICE_CLASS;
	nvdev->base.lib_version = 0x01000000;
	nvdev->base.chipset = chipset;
	nvdev->base.vram_size = vram;
	nvdev->base.gart_size = gart;
	nvdev->base.vram_limit =
		(nvdev->base.vram_size * nvdev->vram_limit_percent) / 100;
	nvdev->base.gart_limit =
		(nvdev->base.gart_size * nvdev->gart_limit_percent) / 100;

	*pdev = &nvdev->base;
	return 0;
}

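/*
 * Open the nouveau device matching busid (or any nouveau device when
 * busid is NULL) and wrap it.  A minimal usage sketch, error handling
 * elided:
 *
 *	struct nouveau_device *dev = NULL;
 *	if (nouveau_device_open(NULL, &dev) == 0) {
 *		... talk to the GPU ...
 *		nouveau_device_del(&dev);
 *	}
 */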
int
nouveau_device_open(const char *busid, struct nouveau_device **pdev)
{
	int ret = -ENODEV, fd = drmOpen("nouveau", busid);
	if (fd >= 0) {
		ret = nouveau_device_wrap(fd, 1, pdev);
		if (ret)
			drmClose(fd);
	}
	return ret;
}

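/*
 * Destroy a wrapped device: close the fd if the device owns it, free the
 * client id bitmap and the device itself, and clear *pdev.
 */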
void
nouveau_device_del(struct nouveau_device **pdev)
{
	struct nouveau_device_priv *nvdev = nouveau_device(*pdev);
	if (nvdev) {
		if (nvdev->close)
			drmClose(nvdev->base.fd);
		free(nvdev->client);
		pthread_mutex_destroy(&nvdev->lock);
		free(nvdev);
		*pdev = NULL;
	}
}

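/*
 * Thin wrappers around the NOUVEAU_GETPARAM/SETPARAM ioctls.  As a sketch
 * (nouveau_device_wrap() already caches this value in dev->chipset):
 *
 *	uint64_t chipset;
 *	if (nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &chipset) == 0)
 *		printf("NV%02x\n", (unsigned)chipset);
 */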
int
nouveau_getparam(struct nouveau_device *dev, uint64_t param, uint64_t *value)
{
	struct drm_nouveau_getparam r = { param, 0 };
	int fd = dev->fd, ret =
		drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &r, sizeof(r));
	*value = r.value;
	return ret;
}

int
nouveau_setparam(struct nouveau_device *dev, uint64_t param, uint64_t value)
{
	struct drm_nouveau_setparam r = { param, value };
	return drmCommandWrite(dev->fd, DRM_NOUVEAU_SETPARAM, &r, sizeof(r));
}

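/*
 * Allocate the lowest free client id.  nvdev->client is a bitmap with one
 * bit per id (a set bit meaning "in use") that grows one 32-bit word at a
 * time, so word i, bit b corresponds to id (i * 32) + b.
 */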
int
nouveau_client_new(struct nouveau_device *dev, struct nouveau_client **pclient)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_client_priv *pcli;
	int id = 0, i, ret = -ENOMEM;
	uint32_t *clients;

	pthread_mutex_lock(&nvdev->lock);

	for (i = 0; i < nvdev->nr_client; i++) {
		/* ffs() on the complement finds the lowest *clear* bit,
		 * i.e. the lowest free id in this word
		 */
		id = ffs(~nvdev->client[i]) - 1;
		if (id >= 0)
			goto out;
	}

	clients = realloc(nvdev->client, sizeof(uint32_t) * (i + 1));
	if (!clients)
		goto unlock;
	nvdev->client = clients;
	nvdev->client[i] = 0;
	nvdev->nr_client++;
	id = 0; /* use bit 0 of the freshly added word */

out:
	pcli = calloc(1, sizeof(*pcli));
	if (pcli) {
		nvdev->client[i] |= (1 << id);
		pcli->base.device = dev;
		pcli->base.id = (i * 32) + id;
		*pclient = &pcli->base;
		ret = 0;
	}

unlock:
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

void
nouveau_client_del(struct nouveau_client **pclient)
{
	struct nouveau_client_priv *pcli = nouveau_client(*pclient);
	struct nouveau_device_priv *nvdev;
	if (pcli) {
		int id = pcli->base.id;
		nvdev = nouveau_device(pcli->base.device);
		pthread_mutex_lock(&nvdev->lock);
		nvdev->client[id / 32] &= ~(1 << (id % 32));
		pthread_mutex_unlock(&nvdev->lock);
		free(pcli->kref);
		free(pcli);
	}
}

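/*
 * Create a kernel object as a child of parent.  Channel objects
 * (NOUVEAU_FIFO_CHANNEL_CLASS below the device) are created through the
 * chipset-specific abi16 paths (nv04/nvc0/nve0); children of a channel
 * become notifiers or engine objects.  The caller's data is copied into
 * storage allocated behind the object, and the abi16 structs reserve
 * their first pointer-sized word for the back-pointer written here.
 */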
int
nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
		   uint32_t oclass, void *data, uint32_t length,
		   struct nouveau_object **pobj)
{
	struct nouveau_device *dev;
	struct nouveau_object *obj;
	int ret = -EINVAL;

	if (length == 0)
		length = sizeof(struct nouveau_object *);
	obj = malloc(sizeof(*obj) + length);
	if (!obj)
		return -ENOMEM;
	obj->parent = parent;
	obj->handle = handle;
	obj->oclass = oclass;
	obj->length = length;
	obj->data = obj + 1;
	if (data)
		memcpy(obj->data, data, length);
	*(struct nouveau_object **)obj->data = obj;

	dev = nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
	switch (parent->oclass) {
	case NOUVEAU_DEVICE_CLASS:
		switch (obj->oclass) {
		case NOUVEAU_FIFO_CHANNEL_CLASS:
			if (dev->chipset < 0xc0)
				ret = abi16_chan_nv04(obj);
			else if (dev->chipset < 0xe0)
				ret = abi16_chan_nvc0(obj);
			else
				ret = abi16_chan_nve0(obj);
			break;
		default:
			break;
		}
		break;
	case NOUVEAU_FIFO_CHANNEL_CLASS:
		switch (obj->oclass) {
		case NOUVEAU_NOTIFIER_CLASS:
			ret = abi16_ntfy(obj);
			break;
		default:
			ret = abi16_engobj(obj);
			break;
		}
		break;
	default:
		break;
	}

	if (ret) {
		free(obj);
		return ret;
	}

	*pobj = obj;
	return 0;
}

void
nouveau_object_del(struct nouveau_object **pobj)
{
	struct nouveau_object *obj = *pobj;
	struct nouveau_device *dev;
	if (obj) {
		dev = nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
		if (obj->oclass == NOUVEAU_FIFO_CHANNEL_CLASS) {
			struct drm_nouveau_channel_free req;
			req.channel = obj->handle;
			drmCommandWrite(dev->fd, DRM_NOUVEAU_CHANNEL_FREE,
					&req, sizeof(req));
		} else {
			struct drm_nouveau_gpuobj_free req;
			req.channel = obj->parent->handle;
			req.handle  = obj->handle;
			drmCommandWrite(dev->fd, DRM_NOUVEAU_GPUOBJ_FREE,
					&req, sizeof(req));
		}
	}
	free(obj);
	*pobj = NULL;
}

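/*
 * Walk the parent chain until an object of class pclass is found; returns
 * NULL if the chain ends first.  NOUVEAU_PARENT_CLASS is special-cased to
 * return the immediate parent unconditionally.
 */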
void *
nouveau_object_find(struct nouveau_object *obj, uint32_t pclass)
{
	while (obj && obj->oclass != pclass) {
		obj = obj->parent;
		if (pclass == NOUVEAU_PARENT_CLASS)
			break;
	}
	return obj;
}

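/*
 * Final destruction of a bo, called when its last reference is dropped.
 * Named (shared) bos are unlinked and closed with the device lock held;
 * the comments below explain the import races this avoids.
 */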
static void
nouveau_bo_del(struct nouveau_bo *bo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_gem_close req = { bo->handle };

	pthread_mutex_lock(&nvdev->lock);
	if (nvbo->name) {
		if (atomic_read(&nvbo->refcnt)) {
			/*
			 * bo has been revived by a race with
			 * nouveau_bo_prime_handle_ref, or nouveau_bo_name_ref.
			 *
			 * In theory there's still a race possible with
			 * nouveau_bo_wrap, but when using this function
			 * the lifetime of the handle is probably already
			 * handled in another way. If there are races
			 * you're probably using nouveau_bo_wrap wrong.
			 */
			pthread_mutex_unlock(&nvdev->lock);
			return;
		}
		DRMLISTDEL(&nvbo->head);
		/*
		 * This bo has to be closed with the lock held because gem
		 * handles are not refcounted. If a shared bo is closed and
		 * re-opened in another thread a race against
		 * DRM_IOCTL_GEM_OPEN or drmPrimeFDToHandle might cause the
		 * bo to be closed accidentally while re-importing.
		 */
		drmIoctl(bo->device->fd, DRM_IOCTL_GEM_CLOSE, &req);
		pthread_mutex_unlock(&nvdev->lock);
	} else {
		DRMLISTDEL(&nvbo->head);
		pthread_mutex_unlock(&nvdev->lock);
		drmIoctl(bo->device->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}
	if (bo->map)
		munmap(bo->map, bo->size);
	free(nvbo);
}

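/*
 * Allocate a new buffer object and track it on the device's bo list.
 * A minimal sketch of allocating, mapping and clearing 64KiB of VRAM
 * (error handling elided; client comes from nouveau_client_new()):
 *
 *	struct nouveau_bo *bo = NULL;
 *	if (nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 0, 65536, NULL, &bo) == 0) {
 *		nouveau_bo_map(bo, NOUVEAU_BO_WR, client);
 *		memset(bo->map, 0, bo->size);
 *		nouveau_bo_ref(NULL, &bo);
 *	}
 */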
int
nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
	       uint64_t size, union nouveau_bo_config *config,
	       struct nouveau_bo **pbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_bo_priv *nvbo = calloc(1, sizeof(*nvbo));
	struct nouveau_bo *bo;
	int ret;

	if (!nvbo)
		return -ENOMEM;
	bo = &nvbo->base;
	atomic_set(&nvbo->refcnt, 1);
	bo->device = dev;
	bo->flags = flags;
	bo->size = size;

	ret = abi16_bo_init(bo, align, config);
	if (ret) {
		free(nvbo);
		return ret;
	}

	pthread_mutex_lock(&nvdev->lock);
	DRMLISTADD(&nvbo->head, &nvdev->bo_list);
	pthread_mutex_unlock(&nvdev->lock);

	*pbo = bo;
	return 0;
}

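/*
 * Wrap a GEM handle in a nouveau_bo, reusing (and re-referencing) an
 * existing wrapper when the handle is already on the bo list.  Must be
 * called with nvdev->lock held; nouveau_bo_wrap() below is the public,
 * locking entry point.
 */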
static int
nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
		       struct nouveau_bo **pbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct drm_nouveau_gem_info req = { .handle = handle };
	struct nouveau_bo_priv *nvbo;
	int ret;

	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->base.handle == handle) {
			*pbo = NULL;
			nouveau_bo_ref(&nvbo->base, pbo);
			return 0;
		}
	}

	ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_INFO,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvbo = calloc(1, sizeof(*nvbo));
	if (nvbo) {
		atomic_set(&nvbo->refcnt, 1);
		nvbo->base.device = dev;
		abi16_bo_info(&nvbo->base, &req);
		DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		*pbo = &nvbo->base;
		return 0;
	}

	return -ENOMEM;
}

int
nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
		struct nouveau_bo **pbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	int ret;
	pthread_mutex_lock(&nvdev->lock);
	ret = nouveau_bo_wrap_locked(dev, handle, pbo);
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

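/*
 * Import a bo by its global flink name.  A bo already known under that
 * name is referenced directly; otherwise the name is opened through
 * DRM_IOCTL_GEM_OPEN and the resulting handle is wrapped.
 */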
int
nouveau_bo_name_ref(struct nouveau_device *dev, uint32_t name,
		    struct nouveau_bo **pbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_bo_priv *nvbo;
	struct drm_gem_open req = { .name = name };
	int ret;

	pthread_mutex_lock(&nvdev->lock);
	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->name == name) {
			*pbo = NULL;
			nouveau_bo_ref(&nvbo->base, pbo);
			pthread_mutex_unlock(&nvdev->lock);
			return 0;
		}
	}

	ret = drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req);
	if (ret == 0) {
		ret = nouveau_bo_wrap_locked(dev, req.handle, pbo);
		if (ret == 0)
			nouveau_bo(*pbo)->name = name;
	}

	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

int
nouveau_bo_name_get(struct nouveau_bo *bo, uint32_t *name)
{
	struct drm_gem_flink req = { .handle = bo->handle };
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	*name = nvbo->name;
	if (!*name || *name == ~0U) {
		int ret = drmIoctl(bo->device->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			*name = 0;
			return ret;
		}
		nvbo->name = *name = req.name;
	}
	return 0;
}

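/*
 * Point *pref at bo, adjusting reference counts: bo (if non-NULL) gains a
 * reference, and the bo previously in *pref (if any) loses one, being
 * destroyed when its count reaches zero.  nouveau_bo_ref(NULL, &bo) is
 * the idiomatic way to drop a reference.
 */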
void
nouveau_bo_ref(struct nouveau_bo *bo, struct nouveau_bo **pref)
{
	struct nouveau_bo *ref = *pref;
	if (bo) {
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}
	if (ref) {
		if (atomic_dec_and_test(&nouveau_bo(ref)->refcnt))
			nouveau_bo_del(ref);
	}
	*pref = bo;
}

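/*
 * Import a dma-buf fd.  Handle lookup and wrapping happen under the
 * device lock so a concurrent nouveau_bo_del() of the same handle cannot
 * close it mid-import; name is set to the placeholder ~0 so later
 * destruction also takes the locked-close path.
 */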
int
nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
			    struct nouveau_bo **bo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	int ret;
	unsigned int handle;

	nouveau_bo_ref(NULL, bo);

	pthread_mutex_lock(&nvdev->lock);
	ret = drmPrimeFDToHandle(dev->fd, prime_fd, &handle);
	if (ret == 0) {
		ret = nouveau_bo_wrap_locked(dev, handle, bo);
		if (!ret) {
			struct nouveau_bo_priv *nvbo = nouveau_bo(*bo);
			if (!nvbo->name) {
				/*
				 * XXX: Force locked DRM_IOCTL_GEM_CLOSE
				 * to rule out race conditions
				 */
				nvbo->name = ~0;
			}
		}
	}
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

int
nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret;

	ret = drmPrimeHandleToFD(bo->device->fd, nvbo->base.handle,
				 DRM_CLOEXEC, prime_fd);
	if (ret)
		return ret;
	if (!nvbo->name)
		nvbo->name = ~0;
	return 0;
}

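/*
 * Wait for the GPU to finish with the bo, first kicking any pushbuf the
 * calling client still has queued against it.  The kernel round trip is
 * skipped for unshared bos when neither the tracked GPU access nor the
 * requested access involves a write.  With NOUVEAU_BO_NOBLOCK set, the
 * kernel fails instead of blocking when the bo is still busy.
 */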
int
nouveau_bo_wait(struct nouveau_bo *bo, uint32_t access,
		struct nouveau_client *client)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_cpu_prep req;
	struct nouveau_pushbuf *push;
	int ret = 0;

	if (!(access & NOUVEAU_BO_RDWR))
		return 0;

	push = cli_push_get(client, bo);
	if (push && push->channel)
		nouveau_pushbuf_kick(push, push->channel);

	if (!nvbo->name && !(nvbo->access & NOUVEAU_BO_WR) &&
			   !(      access & NOUVEAU_BO_WR))
		return 0;

	req.handle = bo->handle;
	req.flags = 0;
	if (access & NOUVEAU_BO_WR)
		req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;
	if (access & NOUVEAU_BO_NOBLOCK)
		req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT;

	ret = drmCommandWrite(bo->device->fd, DRM_NOUVEAU_GEM_CPU_PREP,
			      &req, sizeof(req));
	if (ret == 0)
		nvbo->access = 0;
	return ret;
}

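/*
 * Map the bo into CPU address space.  The mapping is created once, cached
 * in bo->map and only torn down in nouveau_bo_del(); the call then waits
 * for the GPU according to access.  A sketch, with "out" illustrative:
 *
 *	if (nouveau_bo_map(bo, NOUVEAU_BO_RD, client) == 0)
 *		memcpy(out, bo->map, bo->size);
 */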
int
nouveau_bo_map(struct nouveau_bo *bo, uint32_t access,
	       struct nouveau_client *client)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	if (bo->map == NULL) {
		bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
			       MAP_SHARED, bo->device->fd, nvbo->map_handle);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
			return -errno;
		}
	}
	return nouveau_bo_wait(bo, access, client);
}