1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <stdio.h>
26#include <stdlib.h>
27#include <stdint.h>
28#include <string.h>
29#include <strings.h>
30#include <stdbool.h>
31#include <assert.h>
32#include <errno.h>
33#include <fcntl.h>
34
35#include <xf86drm.h>
36#include <xf86atomic.h>
37#include "libdrm_macros.h"
38#include "libdrm_lists.h"
39#include "nouveau_drm.h"
40
41#include "nouveau.h"
42#include "private.h"
43
44#include "nvif/class.h"
45#include "nvif/cl0080.h"
46#include "nvif/ioctl.h"
47#include "nvif/unpack.h"
48
/* Stream debug messages are written to; defaults to stderr, may be
 * redirected via NOUVEAU_LIBDRM_OUT (see debug_init()).
 */
drm_private FILE *nouveau_out = NULL;
/* Debug verbosity, parsed from NOUVEAU_LIBDRM_DEBUG (see debug_init()). */
drm_private uint32_t nouveau_debug = 0;
51
52static void
53debug_init(void)
54{
55	static bool once = false;
56	char *debug, *out;
57
58	if (once)
59		return;
60	once = true;
61
62	debug = getenv("NOUVEAU_LIBDRM_DEBUG");
63	if (debug) {
64		int n = strtol(debug, NULL, 0);
65		if (n >= 0)
66			nouveau_debug = n;
67
68	}
69
70	nouveau_out = stderr;
71	out = getenv("NOUVEAU_LIBDRM_OUT");
72	if (out) {
73		FILE *fout = fopen(out, "w");
74		if (fout)
75			nouveau_out = fout;
76	}
77}
78
79static int
80nouveau_object_ioctl(struct nouveau_object *obj, void *data, uint32_t size)
81{
82	struct nouveau_drm *drm = nouveau_drm(obj);
83	union {
84		struct nvif_ioctl_v0 v0;
85	} *args = data;
86	uint32_t argc = size;
87	int ret = -ENOSYS;
88
89	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
90		if (!obj->length) {
91			if (obj != &drm->client)
92				args->v0.object = (unsigned long)(void *)obj;
93			else
94				args->v0.object = 0;
95			args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
96			args->v0.route = 0x00;
97		} else {
98			args->v0.route = 0xff;
99			args->v0.token = obj->handle;
100		}
101	} else
102		return ret;
103
104	return drmCommandWriteRead(drm->fd, DRM_NOUVEAU_NVIF, args, argc);
105}
106
107drm_public int
108nouveau_object_mthd(struct nouveau_object *obj,
109		    uint32_t mthd, void *data, uint32_t size)
110{
111	struct nouveau_drm *drm = nouveau_drm(obj);
112	struct {
113		struct nvif_ioctl_v0 ioctl;
114		struct nvif_ioctl_mthd_v0 mthd;
115	} *args;
116	uint32_t argc = sizeof(*args) + size;
117	uint8_t stack[128];
118	int ret;
119
120	if (!drm->nvif)
121		return -ENOSYS;
122
123	if (argc > sizeof(stack)) {
124		if (!(args = malloc(argc)))
125			return -ENOMEM;
126	} else {
127		args = (void *)stack;
128	}
129	args->ioctl.version = 0;
130	args->ioctl.type = NVIF_IOCTL_V0_MTHD;
131	args->mthd.version = 0;
132	args->mthd.method = mthd;
133
134	memcpy(args->mthd.data, data, size);
135	ret = nouveau_object_ioctl(obj, args, argc);
136	memcpy(data, args->mthd.data, size);
137	if (args != (void *)stack)
138		free(args);
139	return ret;
140}
141
142drm_public void
143nouveau_object_sclass_put(struct nouveau_sclass **psclass)
144{
145	free(*psclass);
146	*psclass = NULL;
147}
148
/* Query the list of classes supported under the given object.  On success
 * returns the number of classes and stores an array in *psclass, to be
 * released with nouveau_object_sclass_put(); returns negative errno on
 * failure.
 */
drm_public int
nouveau_object_sclass_get(struct nouveau_object *obj,
			  struct nouveau_sclass **psclass)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_sclass_v0 sclass;
	} *args = NULL;
	struct nouveau_sclass *sclass;
	int ret, cnt = 0, i;
	uint32_t size;

	/* Kernels without NVIF support use the ABI16 fallback instead. */
	if (!drm->nvif)
		return abi16_sclass(obj, psclass);

	/* The required array length isn't known up front: query with the
	 * current guess and retry with the count the kernel reports, until
	 * the whole list fits (count <= cnt).
	 */
	while (1) {
		size = sizeof(*args) + cnt * sizeof(args->sclass.oclass[0]);
		if (!(args = malloc(size)))
			return -ENOMEM;
		args->ioctl.version = 0;
		args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
		args->sclass.version = 0;
		args->sclass.count = cnt;

		ret = nouveau_object_ioctl(obj, args, size);
		if (ret == 0 && args->sclass.count <= cnt)
			break;
		cnt = args->sclass.count;
		free(args);
		if (ret != 0)
			return ret;
	}

	/* Convert the kernel's list into the public representation. */
	if ((sclass = calloc(args->sclass.count, sizeof(*sclass)))) {
		for (i = 0; i < args->sclass.count; i++) {
			sclass[i].oclass = args->sclass.oclass[i].oclass;
			sclass[i].minver = args->sclass.oclass[i].minver;
			sclass[i].maxver = args->sclass.oclass[i].maxver;
		}
		*psclass = sclass;
		ret = args->sclass.count;
	} else {
		ret = -ENOMEM;
	}

	free(args);
	return ret;
}
198
199drm_public int
200nouveau_object_mclass(struct nouveau_object *obj,
201		      const struct nouveau_mclass *mclass)
202{
203	struct nouveau_sclass *sclass;
204	int ret = -ENODEV;
205	int cnt, i, j;
206
207	cnt = nouveau_object_sclass_get(obj, &sclass);
208	if (cnt < 0)
209		return cnt;
210
211	for (i = 0; ret < 0 && mclass[i].oclass; i++) {
212		for (j = 0; j < cnt; j++) {
213			if (mclass[i].oclass  == sclass[j].oclass &&
214			    mclass[i].version >= sclass[j].minver &&
215			    mclass[i].version <= sclass[j].maxver) {
216				ret = i;
217				break;
218			}
219		}
220	}
221
222	nouveau_object_sclass_put(&sclass);
223	return ret;
224}
225
226static void
227nouveau_object_fini(struct nouveau_object *obj)
228{
229	struct {
230		struct nvif_ioctl_v0 ioctl;
231		struct nvif_ioctl_del del;
232	} args = {
233		.ioctl.type = NVIF_IOCTL_V0_DEL,
234	};
235
236	if (obj->data) {
237		abi16_delete(obj);
238		free(obj->data);
239		obj->data = NULL;
240		return;
241	}
242
243	nouveau_object_ioctl(obj, &args, sizeof(args));
244}
245
/* Common object constructor: initialise *obj under parent and create it
 * either via the NVIF NEW ioctl or via the ABI16 emulation hook, depending
 * on class and kernel support.  Returns 0 on success, negative errno on
 * failure (in which case any partial state is torn down again).
 */
static int
nouveau_object_init(struct nouveau_object *parent, uint32_t handle,
		    int32_t oclass, void *data, uint32_t size,
		    struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(parent);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_new_v0 new;
	} *args;
	uint32_t argc = sizeof(*args) + size;
	int (*func)(struct nouveau_object *);
	int ret = -ENOSYS;

	obj->parent = parent;
	obj->handle = handle;
	obj->oclass = oclass;
	obj->length = 0;
	obj->data = NULL;

	/* NVIF path: taken when abi16_object() declines the class and the
	 * kernel speaks NVIF (see abi16.c for the hook semantics).
	 */
	if (!abi16_object(obj, &func) && drm->nvif) {
		if (!(args = malloc(argc)))
			return -ENOMEM;
		args->ioctl.version = 0;
		args->ioctl.type = NVIF_IOCTL_V0_NEW;
		args->new.version = 0;
		args->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
		/* The object is identified to the kernel by its userspace
		 * pointer; nouveau_object_ioctl() routes by the same value.
		 */
		args->new.token = (unsigned long)(void *)obj;
		args->new.object = (unsigned long)(void *)obj;
		args->new.handle = handle;
		args->new.oclass = oclass;
		memcpy(args->new.data, data, size);
		ret = nouveau_object_ioctl(parent, args, argc);
		/* Copy back any class data the kernel filled in. */
		memcpy(data, args->new.data, size);
		free(args);
	} else
	if (func) {
		/* ABI16 path: keep a private copy of the class data, with
		 * the object pointer stashed in its first word.
		 */
		obj->length = size ? size : sizeof(struct nouveau_object *);
		if (!(obj->data = malloc(obj->length)))
			return -ENOMEM;
		if (data)
			memcpy(obj->data, data, obj->length);
		*(struct nouveau_object **)obj->data = obj;

		ret = func(obj);
	}

	if (ret) {
		nouveau_object_fini(obj);
		return ret;
	}

	return 0;
}
300
301drm_public int
302nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
303		   uint32_t oclass, void *data, uint32_t length,
304		   struct nouveau_object **pobj)
305{
306	struct nouveau_object *obj;
307	int ret;
308
309	if (!(obj = malloc(sizeof(*obj))))
310		return -ENOMEM;
311
312	ret = nouveau_object_init(parent, handle, oclass, data, length, obj);
313	if (ret) {
314		free(obj);
315		return ret;
316	}
317
318	*pobj = obj;
319	return 0;
320}
321
322drm_public void
323nouveau_object_del(struct nouveau_object **pobj)
324{
325	struct nouveau_object *obj = *pobj;
326	if (obj) {
327		nouveau_object_fini(obj);
328		free(obj);
329		*pobj = NULL;
330	}
331}
332
333drm_public void
334nouveau_drm_del(struct nouveau_drm **pdrm)
335{
336	free(*pdrm);
337	*pdrm = NULL;
338}
339
340drm_public int
341nouveau_drm_new(int fd, struct nouveau_drm **pdrm)
342{
343	struct nouveau_drm *drm;
344	drmVersionPtr ver;
345
346	debug_init();
347
348	if (!(drm = calloc(1, sizeof(*drm))))
349		return -ENOMEM;
350	drm->fd = fd;
351
352	if (!(ver = drmGetVersion(fd))) {
353		nouveau_drm_del(&drm);
354		return -EINVAL;
355	}
356	*pdrm = drm;
357
358	drm->version = (ver->version_major << 24) |
359		       (ver->version_minor << 8) |
360		        ver->version_patchlevel;
361	drm->nvif = (drm->version >= 0x01000301);
362	drmFreeVersion(ver);
363	return 0;
364}
365
366/* this is the old libdrm's version of nouveau_device_wrap(), the symbol
367 * is kept here to prevent AIGLX from crashing if the DDX is linked against
368 * the new libdrm, but the DRI driver against the old
369 */
drm_public int
nouveau_device_open_existing(struct nouveau_device **pdev, int close, int fd,
			     drm_context_t ctx)
{
	/* Deliberately unimplemented; the symbol exists purely for ABI
	 * compatibility (see the comment above) and always fails.
	 */
	return -EACCES;
}
376
377drm_public int
378nouveau_device_new(struct nouveau_object *parent, int32_t oclass,
379		   void *data, uint32_t size, struct nouveau_device **pdev)
380{
381	struct nv_device_info_v0 info = {};
382	union {
383		struct nv_device_v0 v0;
384	} *args = data;
385	uint32_t argc = size;
386	struct nouveau_drm *drm = nouveau_drm(parent);
387	struct nouveau_device_priv *nvdev;
388	struct nouveau_device *dev;
389	uint64_t v;
390	char *tmp;
391	int ret = -ENOSYS;
392
393	if (oclass != NV_DEVICE ||
394	    nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))
395		return ret;
396
397	if (!(nvdev = calloc(1, sizeof(*nvdev))))
398		return -ENOMEM;
399	dev = *pdev = &nvdev->base;
400	dev->fd = -1;
401
402	if (drm->nvif) {
403		ret = nouveau_object_init(parent, 0, oclass, args, argc,
404					  &dev->object);
405		if (ret)
406			goto done;
407
408		info.version = 0;
409
410		ret = nouveau_object_mthd(&dev->object, NV_DEVICE_V0_INFO,
411					  &info, sizeof(info));
412		if (ret)
413			goto done;
414
415		nvdev->base.chipset = info.chipset;
416		nvdev->have_bo_usage = true;
417	} else
418	if (args->v0.device == ~0ULL) {
419		nvdev->base.object.parent = &drm->client;
420		nvdev->base.object.handle = ~0ULL;
421		nvdev->base.object.oclass = NOUVEAU_DEVICE_CLASS;
422		nvdev->base.object.length = ~0;
423
424		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &v);
425		if (ret)
426			goto done;
427		nvdev->base.chipset = v;
428
429		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_HAS_BO_USAGE, &v);
430		if (ret == 0)
431			nvdev->have_bo_usage = (v != 0);
432	} else
433		return -ENOSYS;
434
435	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_FB_SIZE, &v);
436	if (ret)
437		goto done;
438	nvdev->base.vram_size = v;
439
440	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_AGP_SIZE, &v);
441	if (ret)
442		goto done;
443	nvdev->base.gart_size = v;
444
445	tmp = getenv("NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT");
446	if (tmp)
447		nvdev->vram_limit_percent = atoi(tmp);
448	else
449		nvdev->vram_limit_percent = 80;
450
451	nvdev->base.vram_limit =
452		(nvdev->base.vram_size * nvdev->vram_limit_percent) / 100;
453
454	tmp = getenv("NOUVEAU_LIBDRM_GART_LIMIT_PERCENT");
455	if (tmp)
456		nvdev->gart_limit_percent = atoi(tmp);
457	else
458		nvdev->gart_limit_percent = 80;
459
460	nvdev->base.gart_limit =
461		(nvdev->base.gart_size * nvdev->gart_limit_percent) / 100;
462
463	ret = pthread_mutex_init(&nvdev->lock, NULL);
464	DRMINITLISTHEAD(&nvdev->bo_list);
465done:
466	if (ret)
467		nouveau_device_del(pdev);
468	return ret;
469}
470
471drm_public int
472nouveau_device_wrap(int fd, int close, struct nouveau_device **pdev)
473{
474	struct nouveau_drm *drm;
475	struct nouveau_device_priv *nvdev;
476	int ret;
477
478	ret = nouveau_drm_new(fd, &drm);
479	if (ret)
480		return ret;
481	drm->nvif = false;
482
483	ret = nouveau_device_new(&drm->client, NV_DEVICE,
484				 &(struct nv_device_v0) {
485					.device = ~0ULL,
486				 }, sizeof(struct nv_device_v0), pdev);
487	if (ret) {
488		nouveau_drm_del(&drm);
489		return ret;
490	}
491
492	nvdev = nouveau_device(*pdev);
493	nvdev->base.fd = drm->fd;
494	nvdev->base.drm_version = drm->version;
495	nvdev->close = close;
496	return 0;
497}
498
499drm_public int
500nouveau_device_open(const char *busid, struct nouveau_device **pdev)
501{
502	int ret = -ENODEV, fd = drmOpen("nouveau", busid);
503	if (fd >= 0) {
504		ret = nouveau_device_wrap(fd, 1, pdev);
505		if (ret)
506			drmClose(fd);
507	}
508	return ret;
509}
510
511drm_public void
512nouveau_device_del(struct nouveau_device **pdev)
513{
514	struct nouveau_device_priv *nvdev = nouveau_device(*pdev);
515	if (nvdev) {
516		free(nvdev->client);
517		pthread_mutex_destroy(&nvdev->lock);
518		if (nvdev->base.fd >= 0) {
519			struct nouveau_drm *drm =
520				nouveau_drm(&nvdev->base.object);
521			nouveau_drm_del(&drm);
522			if (nvdev->close)
523				drmClose(nvdev->base.fd);
524		}
525		free(nvdev);
526		*pdev = NULL;
527	}
528}
529
530drm_public int
531nouveau_getparam(struct nouveau_device *dev, uint64_t param, uint64_t *value)
532{
533	struct nouveau_drm *drm = nouveau_drm(&dev->object);
534	struct drm_nouveau_getparam r = { .param = param };
535	int fd = drm->fd, ret =
536		drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &r, sizeof(r));
537	*value = r.value;
538	return ret;
539}
540
541drm_public int
542nouveau_setparam(struct nouveau_device *dev, uint64_t param, uint64_t value)
543{
544	struct nouveau_drm *drm = nouveau_drm(&dev->object);
545	struct drm_nouveau_setparam r = { .param = param, .value = value };
546	return drmCommandWrite(drm->fd, DRM_NOUVEAU_SETPARAM, &r, sizeof(r));
547}
548
549drm_public int
550nouveau_client_new(struct nouveau_device *dev, struct nouveau_client **pclient)
551{
552	struct nouveau_device_priv *nvdev = nouveau_device(dev);
553	struct nouveau_client_priv *pcli;
554	int id = 0, i, ret = -ENOMEM;
555	uint32_t *clients;
556
557	pthread_mutex_lock(&nvdev->lock);
558
559	for (i = 0; i < nvdev->nr_client; i++) {
560		id = ffs(nvdev->client[i]) - 1;
561		if (id >= 0)
562			goto out;
563	}
564
565	clients = realloc(nvdev->client, sizeof(uint32_t) * (i + 1));
566	if (!clients)
567		goto unlock;
568	nvdev->client = clients;
569	nvdev->client[i] = 0;
570	nvdev->nr_client++;
571
572out:
573	pcli = calloc(1, sizeof(*pcli));
574	if (pcli) {
575		nvdev->client[i] |= (1 << id);
576		pcli->base.device = dev;
577		pcli->base.id = (i * 32) + id;
578		ret = 0;
579	}
580
581	*pclient = &pcli->base;
582
583unlock:
584	pthread_mutex_unlock(&nvdev->lock);
585	return ret;
586}
587
588drm_public void
589nouveau_client_del(struct nouveau_client **pclient)
590{
591	struct nouveau_client_priv *pcli = nouveau_client(*pclient);
592	struct nouveau_device_priv *nvdev;
593	if (pcli) {
594		int id = pcli->base.id;
595		nvdev = nouveau_device(pcli->base.device);
596		pthread_mutex_lock(&nvdev->lock);
597		nvdev->client[id / 32] &= ~(1 << (id % 32));
598		pthread_mutex_unlock(&nvdev->lock);
599		free(pcli->kref);
600		free(pcli);
601	}
602}
603
/* Final teardown of a bo once its refcount hit zero: close the GEM
 * handle, drop any CPU mapping and free the wrapper.  Bos on the device
 * list (head.next set) need the device lock, see below.
 */
static void
nouveau_bo_del(struct nouveau_bo *bo)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	if (nvbo->head.next) {
		pthread_mutex_lock(&nvdev->lock);
		/* Re-check under the lock: nouveau_bo_wrap_locked() may
		 * have resurrected this bo by bumping refcnt from zero, in
		 * which case the handle now belongs to the new owner.
		 */
		if (atomic_read(&nvbo->refcnt) == 0) {
			DRMLISTDEL(&nvbo->head);
			/*
			 * This bo has to be closed with the lock held because
			 * gem handles are not refcounted. If a shared bo is
			 * closed and re-opened in another thread a race
			 * against DRM_IOCTL_GEM_OPEN or drmPrimeFDToHandle
			 * might cause the bo to be closed accidentally while
			 * re-importing.
			 */
			drmCloseBufferHandle(drm->fd, bo->handle);
		}
		pthread_mutex_unlock(&nvdev->lock);
	} else {
		/* Never shared: no-one else can race on the handle. */
		drmCloseBufferHandle(drm->fd, bo->handle);
	}
	if (bo->map)
		drm_munmap(bo->map, bo->size);
	free(nvbo);
}
633
634drm_public int
635nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
636	       uint64_t size, union nouveau_bo_config *config,
637	       struct nouveau_bo **pbo)
638{
639	struct nouveau_bo_priv *nvbo = calloc(1, sizeof(*nvbo));
640	struct nouveau_bo *bo = &nvbo->base;
641	int ret;
642
643	if (!nvbo)
644		return -ENOMEM;
645	atomic_set(&nvbo->refcnt, 1);
646	bo->device = dev;
647	bo->flags = flags;
648	bo->size = size;
649
650	ret = abi16_bo_init(bo, align, config);
651	if (ret) {
652		free(nvbo);
653		return ret;
654	}
655
656	*pbo = bo;
657	return 0;
658}
659
/* Wrap a GEM handle in a nouveau_bo, reusing an existing wrapper from the
 * device list when possible.  Must be called with nvdev->lock held.  name
 * is the flink name to record (0 if unknown).  Returns 0 or negative
 * errno.
 */
static int
nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
		       struct nouveau_bo **pbo, int name)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct drm_nouveau_gem_info req = { .handle = handle };
	struct nouveau_bo_priv *nvbo;
	int ret;

	/* Reuse an existing wrapper for this handle if one is alive. */
	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->base.handle == handle) {
			if (atomic_inc_return(&nvbo->refcnt) == 1) {
				/*
				 * Uh oh, this bo is dead and someone else
				 * will free it, but because refcnt is
				 * now non-zero fortunately they won't
				 * call the ioctl to close the bo.
				 *
				 * Remove this bo from the list so other
				 * calls to nouveau_bo_wrap_locked will
				 * see our replacement nvbo.
				 */
				DRMLISTDEL(&nvbo->head);
				/* Preserve the dying bo's flink name. */
				if (!name)
					name = nvbo->name;
				break;
			}

			*pbo = &nvbo->base;
			return 0;
		}
	}

	/* No (usable) wrapper found: query the kernel and build one. */
	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_INFO,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvbo = calloc(1, sizeof(*nvbo));
	if (nvbo) {
		atomic_set(&nvbo->refcnt, 1);
		nvbo->base.device = dev;
		abi16_bo_info(&nvbo->base, &req);
		nvbo->name = name;
		DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		*pbo = &nvbo->base;
		return 0;
	}

	return -ENOMEM;
}
712
/* Publish a bo on the device-wide list so nouveau_bo_wrap_locked() can
 * find it by handle/name.  The unlocked pre-check is an optimisation; the
 * state is re-tested under the device lock to avoid double insertion.
 */
static void
nouveau_nvbo_make_global(struct nouveau_bo_priv *nvbo)
{
	if (!nvbo->head.next) {
		struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
		pthread_mutex_lock(&nvdev->lock);
		if (!nvbo->head.next)
			DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		pthread_mutex_unlock(&nvdev->lock);
	}
}
724
725drm_public void
726nouveau_bo_make_global(struct nouveau_bo *bo)
727{
728    struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
729
730    nouveau_nvbo_make_global(nvbo);
731}
732
733drm_public int
734nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
735		struct nouveau_bo **pbo)
736{
737	struct nouveau_device_priv *nvdev = nouveau_device(dev);
738	int ret;
739	pthread_mutex_lock(&nvdev->lock);
740	ret = nouveau_bo_wrap_locked(dev, handle, pbo, 0);
741	pthread_mutex_unlock(&nvdev->lock);
742	return ret;
743}
744
745drm_public int
746nouveau_bo_name_ref(struct nouveau_device *dev, uint32_t name,
747		    struct nouveau_bo **pbo)
748{
749	struct nouveau_drm *drm = nouveau_drm(&dev->object);
750	struct nouveau_device_priv *nvdev = nouveau_device(dev);
751	struct nouveau_bo_priv *nvbo;
752	struct drm_gem_open req = { .name = name };
753	int ret;
754
755	pthread_mutex_lock(&nvdev->lock);
756	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
757		if (nvbo->name == name) {
758			ret = nouveau_bo_wrap_locked(dev, nvbo->base.handle,
759						     pbo, name);
760			pthread_mutex_unlock(&nvdev->lock);
761			return ret;
762		}
763	}
764
765	ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_OPEN, &req);
766	if (ret == 0) {
767		ret = nouveau_bo_wrap_locked(dev, req.handle, pbo, name);
768	}
769
770	pthread_mutex_unlock(&nvdev->lock);
771	return ret;
772}
773
774drm_public int
775nouveau_bo_name_get(struct nouveau_bo *bo, uint32_t *name)
776{
777	struct drm_gem_flink req = { .handle = bo->handle };
778	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
779	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
780
781	*name = nvbo->name;
782	if (!*name) {
783		int ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_FLINK, &req);
784
785		if (ret) {
786			*name = 0;
787			return ret;
788		}
789		nvbo->name = *name = req.name;
790
791		nouveau_nvbo_make_global(nvbo);
792	}
793	return 0;
794}
795
796drm_public void
797nouveau_bo_ref(struct nouveau_bo *bo, struct nouveau_bo **pref)
798{
799	struct nouveau_bo *ref = *pref;
800	if (bo) {
801		atomic_inc(&nouveau_bo(bo)->refcnt);
802	}
803	if (ref) {
804		if (atomic_dec_and_test(&nouveau_bo(ref)->refcnt))
805			nouveau_bo_del(ref);
806	}
807	*pref = bo;
808}
809
810drm_public int
811nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
812			    struct nouveau_bo **bo)
813{
814	struct nouveau_drm *drm = nouveau_drm(&dev->object);
815	struct nouveau_device_priv *nvdev = nouveau_device(dev);
816	int ret;
817	unsigned int handle;
818
819	nouveau_bo_ref(NULL, bo);
820
821	pthread_mutex_lock(&nvdev->lock);
822	ret = drmPrimeFDToHandle(drm->fd, prime_fd, &handle);
823	if (ret == 0) {
824		ret = nouveau_bo_wrap_locked(dev, handle, bo, 0);
825	}
826	pthread_mutex_unlock(&nvdev->lock);
827	return ret;
828}
829
830drm_public int
831nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
832{
833	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
834	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
835	int ret;
836
837	ret = drmPrimeHandleToFD(drm->fd, nvbo->base.handle, DRM_CLOEXEC, prime_fd);
838	if (ret)
839		return ret;
840
841	nouveau_nvbo_make_global(nvbo);
842	return 0;
843}
844
845drm_public int
846nouveau_bo_wait(struct nouveau_bo *bo, uint32_t access,
847		struct nouveau_client *client)
848{
849	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
850	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
851	struct drm_nouveau_gem_cpu_prep req;
852	struct nouveau_pushbuf *push;
853	int ret = 0;
854
855	if (!(access & NOUVEAU_BO_RDWR))
856		return 0;
857
858	push = cli_push_get(client, bo);
859	if (push && push->channel)
860		nouveau_pushbuf_kick(push, push->channel);
861
862	if (!nvbo->head.next && !(nvbo->access & NOUVEAU_BO_WR) &&
863				!(access & NOUVEAU_BO_WR))
864		return 0;
865
866	req.handle = bo->handle;
867	req.flags = 0;
868	if (access & NOUVEAU_BO_WR)
869		req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;
870	if (access & NOUVEAU_BO_NOBLOCK)
871		req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT;
872
873	ret = drmCommandWrite(drm->fd, DRM_NOUVEAU_GEM_CPU_PREP,
874			      &req, sizeof(req));
875	if (ret == 0)
876		nvbo->access = 0;
877	return ret;
878}
879
880drm_public int
881nouveau_bo_map(struct nouveau_bo *bo, uint32_t access,
882	       struct nouveau_client *client)
883{
884	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
885	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
886	if (bo->map == NULL) {
887		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
888			       MAP_SHARED, drm->fd, nvbo->map_handle);
889		if (bo->map == MAP_FAILED) {
890			bo->map = NULL;
891			return -errno;
892		}
893	}
894	return nouveau_bo_wait(bo, access, client);
895}
896