/* pushbuf.c, revision 4babd585 */
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"

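/* One batch of kernel submission state: the buffer, reloc and push tables
 * handed to DRM_NOUVEAU_GEM_PUSHBUF, plus running VRAM/GART usage totals
 * used to decide when a flush is required.  Deferred pushbufs (created
 * without an immediate channel) chain additional batches via ->next at
 * flush time, to be submitted later by nouveau_pushbuf_kick().
 */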
struct nouveau_pushbuf_krec {
	struct nouveau_pushbuf_krec *next;
	struct drm_nouveau_gem_pushbuf_bo buffer[NOUVEAU_GEM_MAX_BUFFERS];
	struct drm_nouveau_gem_pushbuf_reloc reloc[NOUVEAU_GEM_MAX_RELOCS];
	struct drm_nouveau_gem_pushbuf_push push[NOUVEAU_GEM_MAX_PUSH];
	int nr_buffer;
	int nr_reloc;
	int nr_push;
	uint64_t vram_used;
	uint64_t gart_used;
};

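/* Private state wrapped around the public nouveau_pushbuf.  Tracks the
 * list of kernel submission batches (list/krec), the BO currently being
 * filled with commands (bo/ptr/bgn) and the ring of pre-allocated pushbuf
 * BOs that nouveau_pushbuf_space() cycles through.
 */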
struct nouveau_pushbuf_priv {
	struct nouveau_pushbuf base;
	struct nouveau_pushbuf_krec *list;
	struct nouveau_pushbuf_krec *krec;
	struct nouveau_list bctx_list;
	struct nouveau_bo *bo;
	uint32_t type;
	uint32_t suffix0;
	uint32_t suffix1;
	uint32_t *ptr;
	uint32_t *bgn;
	int bo_next;
	int bo_nr;
	struct nouveau_bo *bos[];
};

static inline struct nouveau_pushbuf_priv *
nouveau_pushbuf(struct nouveau_pushbuf *push)
{
	return (struct nouveau_pushbuf_priv *)push;
}

static int pushbuf_validate(struct nouveau_pushbuf *, bool);
static int pushbuf_flush(struct nouveau_pushbuf *);

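/* Check whether accounting for "bo" stays within the VRAM/GART limits
 * advertised by the kernel, narrowing *domains and migrating already
 * referenced VRAM|GART buffers to VRAM where that frees up the needed
 * GART space.  Returns false if a flush is required first.
 */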
static bool
pushbuf_kref_fits(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		  uint32_t *domains)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_bo *kbo;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int i;

	/* VRAM is the only valid domain.  GART and VRAM|GART buffers
	 * are all accounted to GART, so if this doesn't fit in VRAM
	 * straight up, a flush is needed.
	 */
	if (*domains == NOUVEAU_GEM_DOMAIN_VRAM) {
		if (krec->vram_used + bo->size > dev->vram_limit)
			return false;
		krec->vram_used += bo->size;
		return true;
	}

	/* GART or VRAM|GART buffer.  Account both of these buffer types
	 * to GART only for the moment, which simplifies things.  If the
	 * buffer can fit already, we're done here.
	 */
	if (krec->gart_used + bo->size <= dev->gart_limit) {
		krec->gart_used += bo->size;
		return true;
	}

	/* Ran out of GART space.  If it's a VRAM|GART buffer and it'll
	 * fit into available VRAM, turn it into a VRAM buffer.
	 */
	if ((*domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    krec->vram_used + bo->size <= dev->vram_limit) {
		*domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->vram_used += bo->size;
		return true;
	}

	/* Still couldn't fit the buffer in anywhere, so as a last resort,
	 * scan the buffer list for VRAM|GART buffers and turn them into
	 * VRAM buffers until we have enough space in GART for this one.
	 */
	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			continue;

		kbo = (void *)(unsigned long)kref->user_priv;
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) ||
		    krec->vram_used + kbo->size > dev->vram_limit)
			continue;

		kref->valid_domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->gart_used -= kbo->size;
		krec->vram_used += kbo->size;
		if (krec->gart_used + bo->size <= dev->gart_limit) {
			krec->gart_used += bo->size;
			return true;
		}
	}

	/* Couldn't resolve a placement, need to force a flush */
	return false;
}

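/* Look up or create the kernel buffer reference for "bo" on this pushbuf,
 * merging the requested domains and access flags into any existing entry.
 * Returns NULL when a flush is needed first, either because of a domain
 * conflict, a full buffer table, or failed space accounting.
 */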
static struct drm_nouveau_gem_pushbuf_bo *
pushbuf_kref(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t flags)
{
	struct nouveau_device *dev = push->client->device;
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_pushbuf *fpush;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t domains, domains_wr, domains_rd;

	domains = 0;
	if (flags & NOUVEAU_BO_VRAM)
		domains |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (flags & NOUVEAU_BO_GART)
		domains |= NOUVEAU_GEM_DOMAIN_GART;
	domains_wr = domains * !!(flags & NOUVEAU_BO_WR);
	domains_rd = domains * !!(flags & NOUVEAU_BO_RD);

	/* if buffer is referenced on another pushbuf that is owned by the
	 * same client, we need to flush the other pushbuf first to ensure
	 * the correct ordering of commands
	 */
	fpush = cli_push_get(push->client, bo);
	if (fpush && fpush != push)
		pushbuf_flush(fpush);

	kref = cli_kref_get(push->client, bo);
	if (kref) {
		/* possible conflict in memory types - flush and retry */
		if (!(kref->valid_domains & domains))
			return NULL;

		/* VRAM|GART buffer turning into a VRAM buffer.  Make sure
		 * it'll fit in VRAM and force a flush if not.
		 */
		if ((kref->valid_domains  & NOUVEAU_GEM_DOMAIN_GART) &&
		    (            domains == NOUVEAU_GEM_DOMAIN_VRAM)) {
			if (krec->vram_used + bo->size > dev->vram_limit)
				return NULL;
			krec->vram_used += bo->size;
			krec->gart_used -= bo->size;
		}

		kref->valid_domains &= domains;
		kref->write_domains |= domains_wr;
		kref->read_domains  |= domains_rd;
	} else {
		if (krec->nr_buffer == NOUVEAU_GEM_MAX_BUFFERS ||
		    !pushbuf_kref_fits(push, bo, &domains))
			return NULL;

		kref = &krec->buffer[krec->nr_buffer++];
		kref->user_priv = (unsigned long)bo;
		kref->handle = bo->handle;
		kref->valid_domains = domains;
		kref->write_domains = domains_wr;
		kref->read_domains = domains_rd;
		kref->presumed.valid = 1;
		kref->presumed.offset = bo->offset;
		if (bo->flags & NOUVEAU_BO_VRAM)
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		else
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;

		cli_kref_set(push->client, bo, kref, push);
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}

	return kref;
}

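/* Append a relocation entry for the current pushbuf position and return
 * the value to write there now, computed from the buffer's presumed
 * offset.  If the presumed offset turns out to be wrong at submission
 * time, the kernel rewrites the word using this entry.
 */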
static uint32_t
pushbuf_krel(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_bo *pkref;
	struct drm_nouveau_gem_pushbuf_bo *bkref;
	uint32_t reloc = data;

	pkref = cli_kref_get(push->client, nvpb->bo);
	bkref = cli_kref_get(push->client, bo);
	krel  = &krec->reloc[krec->nr_reloc++];

	assert(pkref);
	assert(bkref);
	krel->reloc_bo_index = pkref - krec->buffer;
	krel->reloc_bo_offset = (push->cur - nvpb->ptr) * 4;
	krel->bo_index = bkref - krec->buffer;
	krel->flags = 0;
	krel->data = data;
	krel->vor = vor;
	krel->tor = tor;

	if (flags & NOUVEAU_BO_LOW) {
		reloc = (bkref->presumed.offset + data);
		krel->flags |= NOUVEAU_GEM_RELOC_LOW;
	} else
	if (flags & NOUVEAU_BO_HIGH) {
		reloc = (bkref->presumed.offset + data) >> 32;
		krel->flags |= NOUVEAU_GEM_RELOC_HIGH;
	}
	if (flags & NOUVEAU_BO_OR) {
		if (bkref->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM)
			reloc |= vor;
		else
			reloc |= tor;
		krel->flags |= NOUVEAU_GEM_RELOC_OR;
	}

	return reloc;
}

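/* Debug dump of one submission batch: buffer/reloc tables and, for
 * mapped buffers, the raw command words of each push.
 */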
static void
pushbuf_dump(struct nouveau_pushbuf_krec *krec, int krec_id, int chid)
{
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bo *bo;
	uint32_t *bgn, *end;
	int i;

	err("ch%d: krec %d pushes %d bufs %d relocs %d\n", chid,
	    krec_id, krec->nr_push, krec->nr_buffer, krec->nr_reloc);

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		err("ch%d: buf %08x %08x %08x %08x %08x\n", chid, i,
		    kref->handle, kref->valid_domains,
		    kref->read_domains, kref->write_domains);
	}

	krel = krec->reloc;
	for (i = 0; i < krec->nr_reloc; i++, krel++) {
		err("ch%d: rel %08x %08x %08x %08x %08x %08x %08x\n",
		    chid, krel->reloc_bo_index, krel->reloc_bo_offset,
		    krel->bo_index, krel->flags, krel->data,
		    krel->vor, krel->tor);
	}

	kpsh = krec->push;
	for (i = 0; i < krec->nr_push; i++, kpsh++) {
		kref = krec->buffer + kpsh->bo_index;
		bo = (void *)(unsigned long)kref->user_priv;
		bgn = (uint32_t *)((char *)bo->map + kpsh->offset);
		end = bgn + ((kpsh->length & 0x7fffff) / 4);

		err("ch%d: psh %s%08x %010llx %010llx\n", chid,
		    bo->map ? "" : "(unmapped) ", kpsh->bo_index,
		    (unsigned long long)kpsh->offset,
		    (unsigned long long)(kpsh->offset + kpsh->length));
		if (!bo->map)
			continue;
		while (bgn < end)
			err("\t0x%08x\n", *bgn++);
	}
}

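/* Submit all queued batches to the kernel on "chan", then write back the
 * offsets/domains the kernel actually chose for each buffer so presumed
 * offsets stay accurate for future relocations.
 */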
static int
pushbuf_submit(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->list;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_gem_pushbuf_bo_presumed *info;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct drm_nouveau_gem_pushbuf req;
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_bo *bo;
	int krec_id = 0;
	int ret = 0, i;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	if (push->kick_notify)
		push->kick_notify(push);

	nouveau_pushbuf_data(push, NULL, 0, 0);

	while (krec && krec->nr_push) {
		req.channel = fifo->channel;
		req.nr_buffers = krec->nr_buffer;
		req.buffers = (uint64_t)(unsigned long)krec->buffer;
		req.nr_relocs = krec->nr_reloc;
		req.nr_push = krec->nr_push;
		req.relocs = (uint64_t)(unsigned long)krec->reloc;
		req.push = (uint64_t)(unsigned long)krec->push;
		req.suffix0 = nvpb->suffix0;
		req.suffix1 = nvpb->suffix1;
		req.vram_available = 0; /* for valgrind */
		if (dbg_on(1))
			req.vram_available |= NOUVEAU_GEM_PUSHBUF_SYNC;
		req.gart_available = 0;

		if (dbg_on(0))
			pushbuf_dump(krec, krec_id++, fifo->channel);

#ifndef SIMULATE
		ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
					  &req, sizeof(req));
		nvpb->suffix0 = req.suffix0;
		nvpb->suffix1 = req.suffix1;
		dev->vram_limit = (req.vram_available *
				nouveau_device(dev)->vram_limit_percent) / 100;
		dev->gart_limit = (req.gart_available *
				nouveau_device(dev)->gart_limit_percent) / 100;
#else
		if (dbg_on(31))
			ret = -EINVAL;
#endif

		if (ret) {
			err("kernel rejected pushbuf: %s\n", strerror(-ret));
			pushbuf_dump(krec, krec_id++, fifo->channel);
			break;
		}

		kref = krec->buffer;
		for (i = 0; i < krec->nr_buffer; i++, kref++) {
			bo = (void *)(unsigned long)kref->user_priv;

			info = &kref->presumed;
			if (!info->valid) {
				bo->flags &= ~NOUVEAU_BO_APER;
				if (info->domain == NOUVEAU_GEM_DOMAIN_VRAM)
					bo->flags |= NOUVEAU_BO_VRAM;
				else
					bo->flags |= NOUVEAU_BO_GART;
				bo->offset = info->offset;
			}

			if (kref->write_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_WR;
			if (kref->read_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_RD;
		}

		krec = krec->next;
	}

	return ret;
}

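/* Flush queued commands: submit immediately if the pushbuf owns a
 * channel, otherwise start a fresh batch record and leave submission to
 * a later nouveau_pushbuf_kick().  Buffer references are dropped either
 * way, so the next batch starts with empty tables.
 */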
static int
pushbuf_flush(struct nouveau_pushbuf *push)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx, *btmp;
	struct nouveau_bo *bo;
	int ret = 0, i;

	if (push->channel) {
		ret = pushbuf_submit(push, push->channel);
	} else {
		nouveau_pushbuf_data(push, NULL, 0, 0);
		krec->next = malloc(sizeof(*krec));
		nvpb->krec = krec->next;
	}

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		if (push->channel)
			nouveau_bo_ref(NULL, &bo);
	}

	krec = nvpb->krec;
	krec->vram_used = 0;
	krec->gart_used = 0;
	krec->nr_buffer = 0;
	krec->nr_reloc = 0;
	krec->nr_push = 0;

	DRMLISTFOREACHENTRYSAFE(bctx, btmp, &nvpb->bctx_list, head) {
		DRMLISTJOIN(&bctx->current, &bctx->pending);
		DRMINITLISTHEAD(&bctx->current);
		DRMLISTDELINIT(&bctx->head);
	}

	return ret;
}

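/* Undo buffer references added since the snapshot (sref/srel), used to
 * roll back before retrying a failed refn/validate after a flush.
 */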
static void
pushbuf_refn_fail(struct nouveau_pushbuf *push, int sref, int srel)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	kref = krec->buffer + sref;
	while (krec->nr_buffer-- > sref) {
		struct nouveau_bo *bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		nouveau_bo_ref(NULL, &bo);
		kref++;
	}
	krec->nr_buffer = sref;
	krec->nr_reloc = srel;
}

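/* Reference an array of BOs on the pushbuf, rolling back and retrying
 * once after a flush if they don't all fit.
 */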
static int
pushbuf_refn(struct nouveau_pushbuf *push, bool retry,
	     struct nouveau_pushbuf_refn *refs, int nr)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int sref = krec->nr_buffer;
	int ret = 0, i;

	for (i = 0; i < nr; i++) {
		kref = pushbuf_kref(push, refs[i].bo, refs[i].flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}
	}

	if (ret) {
		pushbuf_refn_fail(push, sref, krec->nr_reloc);
		if (retry) {
			pushbuf_flush(push);
			nouveau_pushbuf_space(push, 0, 0, 0);
			return pushbuf_refn(push, false, refs, nr);
		}
	}

	return ret;
}

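/* Reserve space and reference every buffer on the attached bufctx,
 * emitting any recorded reloc packets (bref->packet).  On -ENOSPC the
 * references are rolled back and, if "retry" is set, the pushbuf is
 * flushed and validation attempted once more.
 */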
static int
pushbuf_validate(struct nouveau_pushbuf *push, bool retry)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx = push->bufctx;
	struct nouveau_bufref *bref;
	int relocs = bctx ? bctx->relocs * 2 : 0;
	int sref, srel, ret;

	ret = nouveau_pushbuf_space(push, relocs, relocs, 0);
	if (ret || bctx == NULL)
		return ret;

	sref = krec->nr_buffer;
	srel = krec->nr_reloc;

	DRMLISTDEL(&bctx->head);
	DRMLISTADD(&bctx->head, &nvpb->bctx_list);

	DRMLISTFOREACHENTRY(bref, &bctx->pending, thead) {
		kref = pushbuf_kref(push, bref->bo, bref->flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}

		if (bref->packet) {
			pushbuf_krel(push, bref->bo, bref->packet, 0, 0, 0);
			*push->cur++ = 0;
			pushbuf_krel(push, bref->bo, bref->data, bref->flags,
					   bref->vor, bref->tor);
			*push->cur++ = 0;
		}
	}

	DRMLISTJOIN(&bctx->pending, &bctx->current);
	DRMINITLISTHEAD(&bctx->pending);

	if (ret) {
		pushbuf_refn_fail(push, sref, srel);
		if (retry) {
			pushbuf_flush(push);
			return pushbuf_validate(push, false);
		}
	}

	return ret;
}

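/* Create a pushbuf for submitting commands to "chan".  "nr" buffers of
 * "size" bytes are pre-allocated as command space, and "immediate"
 * selects whether flushes submit to the kernel directly or merely queue
 * batches until nouveau_pushbuf_kick() is called.
 *
 * Illustrative sketch of typical use (error handling omitted; the buffer
 * count, size and command words here are arbitrary examples, not
 * requirements):
 *
 *	struct nouveau_pushbuf *push;
 *	nouveau_pushbuf_new(client, chan, 4, 32 * 1024, true, &push);
 *	nouveau_pushbuf_space(push, 2, 0, 0);	// reserve 2 dwords
 *	*push->cur++ = 0x20010000;		// method header (example)
 *	*push->cur++ = 0x00000001;		// method data (example)
 *	nouveau_pushbuf_kick(push, chan);	// submit
 *	nouveau_pushbuf_del(&push);
 */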
drm_public int
nouveau_pushbuf_new(struct nouveau_client *client, struct nouveau_object *chan,
		    int nr, uint32_t size, bool immediate,
		    struct nouveau_pushbuf **ppush)
{
	struct nouveau_drm *drm = nouveau_drm(&client->device->object);
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_pushbuf_priv *nvpb;
	struct nouveau_pushbuf *push;
	struct drm_nouveau_gem_pushbuf req = {};
	int ret;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	/* no-op pushbuf call, to get the current "return to main" sequence
	 * that we need to append to the pushbuf on early chipsets
	 */
	req.channel = fifo->channel;
	req.nr_push = 0;
	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvpb = calloc(1, sizeof(*nvpb) + nr * sizeof(*nvpb->bos));
	if (!nvpb)
		return -ENOMEM;

#ifndef SIMULATE
	nvpb->suffix0 = req.suffix0;
	nvpb->suffix1 = req.suffix1;
#else
	nvpb->suffix0 = 0xffffffff;
	nvpb->suffix1 = 0xffffffff;
#endif
	nvpb->krec = calloc(1, sizeof(*nvpb->krec));
	nvpb->list = nvpb->krec;
	if (!nvpb->krec) {
		free(nvpb);
		return -ENOMEM;
	}

	push = &nvpb->base;
	push->client = client;
	push->channel = immediate ? chan : NULL;
	push->flags = NOUVEAU_BO_RD;
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_GART) {
		push->flags |= NOUVEAU_BO_GART;
		nvpb->type   = NOUVEAU_BO_GART;
	} else
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_VRAM) {
		push->flags |= NOUVEAU_BO_VRAM;
		nvpb->type   = NOUVEAU_BO_VRAM;
	}
	nvpb->type |= NOUVEAU_BO_MAP;

	for (nvpb->bo_nr = 0; nvpb->bo_nr < nr; nvpb->bo_nr++) {
		ret = nouveau_bo_new(client->device, nvpb->type, 0, size,
				     NULL, &nvpb->bos[nvpb->bo_nr]);
		if (ret) {
			nouveau_pushbuf_del(&push);
			return ret;
		}
	}

	DRMINITLISTHEAD(&nvpb->bctx_list);
	*ppush = push;
	return 0;
}

drm_public void
nouveau_pushbuf_del(struct nouveau_pushbuf **ppush)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(*ppush);
	if (nvpb) {
		struct drm_nouveau_gem_pushbuf_bo *kref;
		struct nouveau_pushbuf_krec *krec;
		while ((krec = nvpb->list)) {
			kref = krec->buffer;
			while (krec->nr_buffer--) {
				unsigned long priv = kref++->user_priv;
				struct nouveau_bo *bo = (void *)priv;
				cli_kref_set(nvpb->base.client, bo, NULL, NULL);
				nouveau_bo_ref(NULL, &bo);
			}
			nvpb->list = krec->next;
			free(krec);
		}
		while (nvpb->bo_nr--)
			nouveau_bo_ref(NULL, &nvpb->bos[nvpb->bo_nr]);
		nouveau_bo_ref(NULL, &nvpb->bo);
		free(nvpb);
	}
	*ppush = NULL;
}

drm_public struct nouveau_bufctx *
nouveau_pushbuf_bufctx(struct nouveau_pushbuf *push, struct nouveau_bufctx *ctx)
{
	struct nouveau_bufctx *prev = push->bufctx;
	push->bufctx = ctx;
	return prev;
}

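/* Ensure at least "dwords" of command space plus "relocs"/"pushes" table
 * entries are available, switching to the next pushbuf BO and/or
 * flushing as needed.  Callers reserve space here before writing
 * commands through push->cur.
 */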
drm_public int
nouveau_pushbuf_space(struct nouveau_pushbuf *push,
		      uint32_t dwords, uint32_t relocs, uint32_t pushes)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_client *client = push->client;
	struct nouveau_bo *bo = NULL;
	bool flushed = false;
	int ret = 0;

	/* switch to next buffer if insufficient space in the current one */
	if (push->cur + dwords >= push->end) {
		if (nvpb->bo_next < nvpb->bo_nr) {
			nouveau_bo_ref(nvpb->bos[nvpb->bo_next++], &bo);
			if (nvpb->bo_next == nvpb->bo_nr && push->channel)
				nvpb->bo_next = 0;
		} else {
			ret = nouveau_bo_new(client->device, nvpb->type, 0,
					     nvpb->bos[0]->size, NULL, &bo);
			if (ret)
				return ret;
		}
	}

	/* make sure there's always enough space to queue up the pending
	 * data in the pushbuf proper
	 */
	pushes++;

	/* need to flush if we've run out of space on an immediate pushbuf,
	 * if the new buffer won't fit, or if the kernel push/reloc limits
	 * have been hit
	 */
	if ((bo && (push->channel ||
		    !pushbuf_kref(push, bo, push->flags))) ||
	    krec->nr_reloc + relocs >= NOUVEAU_GEM_MAX_RELOCS ||
	    krec->nr_push + pushes >= NOUVEAU_GEM_MAX_PUSH) {
		if (nvpb->bo && krec->nr_buffer)
			pushbuf_flush(push);
		flushed = true;
	}

	/* if necessary, switch to new buffer */
	if (bo) {
		ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, push->client);
		if (ret)
			return ret;

		nouveau_pushbuf_data(push, NULL, 0, 0);
		nouveau_bo_ref(bo, &nvpb->bo);
		nouveau_bo_ref(NULL, &bo);

		nvpb->bgn = nvpb->bo->map;
		nvpb->ptr = nvpb->bgn;
		push->cur = nvpb->bgn;
		push->end = push->cur + (nvpb->bo->size / 4);
		push->end -= 2 + push->rsvd_kick; /* space for suffix */
	}

	pushbuf_kref(push, nvpb->bo, push->flags);
	return flushed ? pushbuf_validate(push, false) : 0;
}

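/* Queue "length" bytes at "offset" of "bo" for execution.  Called with
 * bo == NULL this closes off the commands accumulated in the internal
 * pushbuf BO (appending the chipset's suffix words) so they're recorded
 * as a push entry before a flush or buffer switch.
 */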
drm_public void
nouveau_pushbuf_data(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		     uint64_t offset, uint64_t length)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	if (bo != nvpb->bo && nvpb->bgn != push->cur) {
		if (nvpb->suffix0 || nvpb->suffix1) {
			*push->cur++ = nvpb->suffix0;
			*push->cur++ = nvpb->suffix1;
		}

		nouveau_pushbuf_data(push, nvpb->bo,
				     (nvpb->bgn - nvpb->ptr) * 4,
				     (push->cur - nvpb->bgn) * 4);
		nvpb->bgn = push->cur;
	}

	if (bo) {
		kref = cli_kref_get(push->client, bo);
		assert(kref);
		kpsh = &krec->push[krec->nr_push++];
		kpsh->bo_index = kref - krec->buffer;
		kpsh->offset   = offset;
		kpsh->length   = length;
	}
}

drm_public int
nouveau_pushbuf_refn(struct nouveau_pushbuf *push,
		     struct nouveau_pushbuf_refn *refs, int nr)
{
	return pushbuf_refn(push, true, refs, nr);
}

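/* Emit a relocated value at the current position and advance the write
 * pointer; the caller is expected to have reserved the dword and reloc
 * entry via nouveau_pushbuf_space() beforehand.
 */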
drm_public void
nouveau_pushbuf_reloc(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		      uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	*push->cur = pushbuf_krel(push, bo, data, flags, vor, tor);
	push->cur++;
}

drm_public int
nouveau_pushbuf_validate(struct nouveau_pushbuf *push)
{
	return pushbuf_validate(push, true);
}

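/* Report the NOUVEAU_BO_RD/WR access this pushbuf currently holds on
 * "bo", or 0 if the buffer isn't referenced here.
 */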
drm_public uint32_t
nouveau_pushbuf_refd(struct nouveau_pushbuf *push, struct nouveau_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t flags = 0;

	if (cli_push_get(push->client, bo) == push) {
		kref = cli_kref_get(push->client, bo);
		assert(kref);
		if (kref->read_domains)
			flags |= NOUVEAU_BO_RD;
		if (kref->write_domains)
			flags |= NOUVEAU_BO_WR;
	}

	return flags;
}

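/* Explicitly submit queued work on "chan".  Deferred pushbufs hand all
 * queued batches to pushbuf_submit() directly; immediate pushbufs
 * (created with a channel) flush and then revalidate the bufctx.
 */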
drm_public int
nouveau_pushbuf_kick(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	if (!push->channel)
		return pushbuf_submit(push, chan);
	pushbuf_flush(push);
	return pushbuf_validate(push, false);
}
783